/*	$NetBSD: gtidmac.c,v 1.12 2017/01/07 14:36:51 kiyohara Exp $	*/
2 /*
3 * Copyright (c) 2008, 2012 KIYOHARA Takashi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.12 2017/01/07 14:36:51 kiyohara Exp $");
30
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/kmem.h>
37
38 #include <uvm/uvm_param.h> /* For PAGE_SIZE */
39
40 #include <dev/dmover/dmovervar.h>
41
42 #include <dev/marvell/gtidmacreg.h>
43 #include <dev/marvell/gtidmacvar.h>
44 #include <dev/marvell/marvellreg.h>
45 #include <dev/marvell/marvellvar.h>
46
47 #include <prop/proplib.h>
48
49 #include "locators.h"
50
51 #ifdef GTIDMAC_DEBUG
52 #define DPRINTF(x) if (gtidmac_debug) printf x
53 int gtidmac_debug = 0;
54 #else
55 #define DPRINTF(x)
56 #endif
57
58 #define GTIDMAC_NDESC 64
59 #define GTIDMAC_MAXCHAN 8
60 #define MVXORE_NDESC 128
61 #define MVXORE_MAXCHAN 2
62
63 #define GTIDMAC_NSEGS ((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 #define MVXORE_NSEGS ((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
65
66
67 struct gtidmac_softc;
68
69 struct gtidmac_function {
70 int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 void (*chan_free)(void *, int);
72 int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 bus_size_t);
74 void (*dma_start)(void *, int,
75 void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 bus_dmamap_t *, int));
77 uint32_t (*dma_finish)(void *, int, int);
78 };
79
80 struct gtidmac_dma_desc {
81 int dd_index;
82 union {
83 struct gtidmac_desc *idmac_vaddr;
84 struct mvxore_desc *xore_vaddr;
85 } dd_vaddr;
86 #define dd_idmac_vaddr dd_vaddr.idmac_vaddr
87 #define dd_xore_vaddr dd_vaddr.xore_vaddr
88 paddr_t dd_paddr;
89 SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 };
91
92 struct gtidmac_softc {
93 device_t sc_dev;
94
95 bus_space_tag_t sc_iot;
96 bus_space_handle_t sc_ioh;
97
98 bus_dma_tag_t sc_dmat;
99 struct gtidmac_dma_desc *sc_dd_buffer;
100 bus_dma_segment_t sc_pattern_segment;
101 struct {
102 u_char pbuf[16]; /* 16byte/pattern */
103 } *sc_pbuf; /* x256 pattern */
104
105 int sc_gtidmac_nchan;
106 struct gtidmac_desc *sc_dbuf;
107 bus_dmamap_t sc_dmap;
108 SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 struct {
110 bus_dmamap_t chan_in; /* In dmamap */
111 bus_dmamap_t chan_out; /* Out dmamap */
		uint64_t chan_totalcnt;	/* total transferred bytes */
113 int chan_ddidx;
114 void *chan_running; /* opaque object data */
115 void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 bus_dmamap_t *, int);
117 } sc_cdesc[GTIDMAC_MAXCHAN];
118 struct gtidmac_intr_arg {
119 struct gtidmac_softc *ia_sc;
120 uint32_t ia_cause;
121 uint32_t ia_mask;
122 uint32_t ia_eaddr;
123 uint32_t ia_eselect;
124 } sc_intrarg[GTIDMAC_NINTRRUPT];
125
126 int sc_mvxore_nchan;
127 struct mvxore_desc *sc_dbuf_xore;
128 bus_dmamap_t sc_dmap_xore;
129 SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 struct {
131 bus_dmamap_t chan_in[MVXORE_NSRC]; /* In dmamap */
132 bus_dmamap_t chan_out; /* Out dmamap */
		uint64_t chan_totalcnt;	/* total transferred bytes */
134 int chan_ddidx;
135 void *chan_running; /* opaque object data */
136 void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 bus_dmamap_t *, int);
138 } sc_cdesc_xore[MVXORE_MAXCHAN];
139
140 struct dmover_backend sc_dmb;
141 struct dmover_backend sc_dmb_xore;
142 int sc_dmb_busy;
143 };
144 struct gtidmac_softc *gtidmac_softc = NULL;
145
146 static int gtidmac_match(device_t, struct cfdata *, void *);
147 static void gtidmac_attach(device_t, device_t, void *);
148
149 static int gtidmac_intr(void *);
150 static int mvxore_port0_intr(void *);
151 static int mvxore_port1_intr(void *);
152 static int mvxore_intr(struct gtidmac_softc *, int);
153
154 static void gtidmac_process(struct dmover_backend *);
155 static void gtidmac_dmover_run(struct dmover_backend *);
156 static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
157 int);
158 static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
159 dmover_buffer_type, dmover_buffer *, int);
160 static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
161
162 static uint32_t gtidmac_finish(void *, int, int);
163 static uint32_t mvxore_finish(void *, int, int);
164
165 static void gtidmac_wininit(struct gtidmac_softc *, enum marvell_tags *);
166 static void mvxore_wininit(struct gtidmac_softc *, enum marvell_tags *);
167
168 static int gtidmac_buffer_setup(struct gtidmac_softc *);
169 static int mvxore_buffer_setup(struct gtidmac_softc *);
170
171 #ifdef GTIDMAC_DEBUG
172 static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
173 static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
174 struct gtidmac_dma_desc *, uint32_t, int);
175 static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
176 static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
177 struct gtidmac_dma_desc *, uint32_t, int);
178 #endif
179
180
181 static struct gtidmac_function gtidmac_functions = {
182 .chan_alloc = gtidmac_chan_alloc,
183 .chan_free = gtidmac_chan_free,
184 .dma_setup = gtidmac_setup,
185 .dma_start = gtidmac_start,
186 .dma_finish = gtidmac_finish,
187 };
188
189 static struct gtidmac_function mvxore_functions = {
190 .chan_alloc = mvxore_chan_alloc,
191 .chan_free = mvxore_chan_free,
192 .dma_setup = mvxore_setup,
193 .dma_start = mvxore_start,
194 .dma_finish = mvxore_finish,
195 };
196
static const struct dmover_algdesc gtidmac_algdescs[] = {
	{
		.dad_name = DMOVER_FUNC_ZERO,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_FILL8,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 0
	},
	{
		.dad_name = DMOVER_FUNC_COPY,
		.dad_data = &gtidmac_functions,
		.dad_ninputs = 1
	},
};
214
215 static const struct dmover_algdesc mvxore_algdescs[] = {
216 #if 0
	/*
	 * These operations have too many restrictions on the XOR Engine;
	 * the IDMAC has to be used for them instead.
	 */
221 {
222 .dad_name = DMOVER_FUNC_ZERO,
223 .dad_data = &mvxore_functions,
224 .dad_ninputs = 0
225 },
226 {
227 .dad_name = DMOVER_FUNC_FILL8,
228 .dad_data = &mvxore_functions,
229 .dad_ninputs = 0
230 },
231 #endif
232 {
233 .dad_name = DMOVER_FUNC_COPY,
234 .dad_data = &mvxore_functions,
235 .dad_ninputs = 1
236 },
237 {
238 .dad_name = DMOVER_FUNC_ISCSI_CRC32C,
239 .dad_data = &mvxore_functions,
240 .dad_ninputs = 1
241 },
242 {
243 .dad_name = DMOVER_FUNC_XOR2,
244 .dad_data = &mvxore_functions,
245 .dad_ninputs = 2
246 },
247 {
248 .dad_name = DMOVER_FUNC_XOR3,
249 .dad_data = &mvxore_functions,
250 .dad_ninputs = 3
251 },
252 {
253 .dad_name = DMOVER_FUNC_XOR4,
254 .dad_data = &mvxore_functions,
255 .dad_ninputs = 4
256 },
257 {
258 .dad_name = DMOVER_FUNC_XOR5,
259 .dad_data = &mvxore_functions,
260 .dad_ninputs = 5
261 },
262 {
263 .dad_name = DMOVER_FUNC_XOR6,
264 .dad_data = &mvxore_functions,
265 .dad_ninputs = 6
266 },
267 {
268 .dad_name = DMOVER_FUNC_XOR7,
269 .dad_data = &mvxore_functions,
270 .dad_ninputs = 7
271 },
272 {
273 .dad_name = DMOVER_FUNC_XOR8,
274 .dad_data = &mvxore_functions,
275 .dad_ninputs = 8
276 },
277 };
278
279 static struct {
280 int model;
281 int idmac_nchan;
282 int idmac_irq;
283 int xore_nchan;
284 int xore_irq;
285 } channels[] = {
	/*
	 * Marvell System Controllers:
	 * irqs must be supplied in attach_args.
	 */
290 { MARVELL_DISCOVERY, 8, -1, 0, -1 },
291 { MARVELL_DISCOVERY_II, 8, -1, 0, -1 },
292 { MARVELL_DISCOVERY_III, 8, -1, 0, -1 },
293 #if 0
294 { MARVELL_DISCOVERY_LT, 4, -1, 2, -1 },
295 { MARVELL_DISCOVERY_V, 4, -1, 2, -1 },
296 { MARVELL_DISCOVERY_VI, 4, -1, 2, -1 }, ????
297 #endif
298
	/*
	 * Marvell System on Chips:
	 * No irqs are needed in attach_args; the controllers are always
	 * wired statically to interrupt pins.
	 */
304 { MARVELL_ORION_1_88F1181, 4, 24, 0, -1 },
305 { MARVELL_ORION_2_88F1281, 4, 24, 0, -1 },
306 { MARVELL_ORION_1_88F5082, 4, 24, 0, -1 },
307 { MARVELL_ORION_1_88F5180N, 4, 24, 0, -1 },
308 { MARVELL_ORION_1_88F5181, 4, 24, 0, -1 },
309 { MARVELL_ORION_1_88F5182, 4, 24, 2, 30 },
310 { MARVELL_ORION_2_88F5281, 4, 24, 0, -1 },
311 { MARVELL_ORION_1_88W8660, 4, 24, 0, -1 },
312 { MARVELL_KIRKWOOD_88F6180, 0, -1, 4, 5 },
313 { MARVELL_KIRKWOOD_88F6192, 0, -1, 4, 5 },
314 { MARVELL_KIRKWOOD_88F6281, 0, -1, 4, 5 },
315 { MARVELL_KIRKWOOD_88F6282, 0, -1, 4, 5 },
316 { MARVELL_ARMADAXP_MV78130, 4, 33, 2, 51 },
317 { MARVELL_ARMADAXP_MV78130, 0, -1, 2, 94 },
318 { MARVELL_ARMADAXP_MV78160, 4, 33, 2, 51 },
319 { MARVELL_ARMADAXP_MV78160, 0, -1, 2, 94 },
320 { MARVELL_ARMADAXP_MV78230, 4, 33, 2, 51 },
321 { MARVELL_ARMADAXP_MV78230, 0, -1, 2, 94 },
322 { MARVELL_ARMADAXP_MV78260, 4, 33, 2, 51 },
323 { MARVELL_ARMADAXP_MV78260, 0, -1, 2, 94 },
324 { MARVELL_ARMADAXP_MV78460, 4, 33, 2, 51 },
325 { MARVELL_ARMADAXP_MV78460, 0, -1, 2, 94 },
326 };
327
328 struct gtidmac_winacctbl *gtidmac_winacctbl;
329 struct gtidmac_winacctbl *mvxore_winacctbl;
330
331 CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
332 gtidmac_match, gtidmac_attach, NULL, NULL);
333 CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
334 gtidmac_match, gtidmac_attach, NULL, NULL);
335
336
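/*
 * gtidmac_match:
 *
 *	Match against the marvell attach args: the model must appear in
 *	the channels[] table above and the unit number must be in range.
 */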
337 /* ARGSUSED */
338 static int
339 gtidmac_match(device_t parent, struct cfdata *match, void *aux)
340 {
341 struct marvell_attach_args *mva = aux;
342 int unit, i;
343
344 if (strcmp(mva->mva_name, match->cf_name) != 0)
345 return 0;
346 if (mva->mva_offset == MVA_OFFSET_DEFAULT)
347 return 0;
348 unit = 0;
349 for (i = 0; i < __arraycount(channels); i++)
350 if (mva->mva_model == channels[i].model) {
351 if (mva->mva_unit == unit) {
352 mva->mva_size = GTIDMAC_SIZE;
353 return 1;
354 }
355 unit++;
356 }
357 return 0;
358 }
359
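/*
 * gtidmac_attach:
 *
 *	Map the registers, allocate descriptor and pattern buffers,
 *	establish interrupts and register the IDMAC and XOR Engine
 *	channels as dmover(9) backends.
 */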
360 /* ARGSUSED */
361 static void
362 gtidmac_attach(device_t parent, device_t self, void *aux)
363 {
364 struct gtidmac_softc *sc = device_private(self);
365 struct marvell_attach_args *mva = aux;
366 prop_dictionary_t dict = device_properties(self);
367 uint32_t idmac_irq, xore_irq, dmb_speed;
368 int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;
369
370 unit = 0;
371 for (i = 0; i < __arraycount(channels); i++)
372 if (mva->mva_model == channels[i].model) {
373 if (mva->mva_unit == unit)
374 break;
375 unit++;
376 }
377 idmac_nchan = channels[i].idmac_nchan;
378 idmac_irq = channels[i].idmac_irq;
379 if (idmac_nchan != 0) {
380 if (idmac_irq == -1)
381 idmac_irq = mva->mva_irq;
382 if (idmac_irq == -1)
383 /* Discovery */
384 if (!prop_dictionary_get_uint32(dict,
385 "idmac-irq", &idmac_irq)) {
386 aprint_error(": no idmac-irq property\n");
387 return;
388 }
389 }
390 xore_nchan = channels[i].xore_nchan;
391 xore_irq = channels[i].xore_irq;
392 if (xore_nchan != 0) {
393 if (xore_irq == -1)
394 xore_irq = mva->mva_irq;
395 if (xore_irq == -1)
396 /* Discovery LT/V/VI */
397 if (!prop_dictionary_get_uint32(dict,
398 "xore-irq", &xore_irq)) {
399 aprint_error(": no xore-irq property\n");
400 return;
401 }
402 }
403
404 aprint_naive("\n");
405 aprint_normal(": Marvell IDMA Controller%s\n",
406 xore_nchan ? "/XOR Engine" : "");
407 if (idmac_nchan > 0)
408 aprint_normal_dev(self,
409 "IDMA Controller %d channels, intr %d...%d\n",
410 idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
411 if (xore_nchan > 0)
412 aprint_normal_dev(self,
413 "XOR Engine %d channels, intr %d...%d\n",
414 xore_nchan, xore_irq, xore_irq + xore_nchan - 1);
415
416 sc->sc_dev = self;
417 sc->sc_iot = mva->mva_iot;
418
419 /* Map I/O registers */
420 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
421 mva->mva_size, &sc->sc_ioh)) {
422 aprint_error_dev(self, "can't map registers\n");
423 return;
424 }
425
426 /*
427 * Initialise DMA descriptors and associated metadata
428 */
429 sc->sc_dmat = mva->mva_dmat;
430 n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
431 sc->sc_dd_buffer =
432 kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
433 if (sc->sc_dd_buffer == NULL) {
434 aprint_error_dev(self, "can't allocate memory\n");
435 goto fail1;
436 }
437 /* pattern buffer */
438 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
439 &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
440 aprint_error_dev(self,
441 "bus_dmamem_alloc failed: pattern buffer\n");
442 goto fail2;
443 }
444 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
445 (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
446 aprint_error_dev(self,
447 "bus_dmamem_map failed: pattern buffer\n");
448 goto fail3;
449 }
450 for (i = 0; i < 0x100; i++)
451 for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
452 sc->sc_pbuf[i].pbuf[j] = i;
453
454 if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
455 aprint_error_dev(self, "no dmb_speed property\n");
		dmb_speed = 10;	/* Probably still faster than swdmover. */
457 }
458
459 /* IDMAC DMA descriptor buffer */
460 sc->sc_gtidmac_nchan = idmac_nchan;
461 if (sc->sc_gtidmac_nchan > 0) {
462 if (gtidmac_buffer_setup(sc) != 0)
463 goto fail4;
464
465 if (mva->mva_model != MARVELL_DISCOVERY)
466 gtidmac_wininit(sc, mva->mva_tags);
467
468 /* Setup interrupt */
469 for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
470 j = i * idmac_nchan / GTIDMAC_NINTRRUPT;
471
472 sc->sc_intrarg[i].ia_sc = sc;
473 sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
474 sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
475 sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
476 marvell_intr_establish(idmac_irq + i, IPL_BIO,
477 gtidmac_intr, &sc->sc_intrarg[i]);
478 }
479
480 /* Register us with dmover. */
481 sc->sc_dmb.dmb_name = device_xname(self);
482 sc->sc_dmb.dmb_speed = dmb_speed;
483 sc->sc_dmb.dmb_cookie = sc;
484 sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
485 sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
486 sc->sc_dmb.dmb_process = gtidmac_process;
487 dmover_backend_register(&sc->sc_dmb);
488 sc->sc_dmb_busy = 0;
489 }
490
491 /* XORE DMA descriptor buffer */
492 sc->sc_mvxore_nchan = xore_nchan;
493 if (sc->sc_mvxore_nchan > 0) {
494 if (mvxore_buffer_setup(sc) != 0)
495 goto fail5;
496
497 /* Setup interrupt */
498 for (i = 0; i < sc->sc_mvxore_nchan; i++)
499 marvell_intr_establish(xore_irq + i, IPL_BIO,
500 (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
501 sc);
502
503 mvxore_wininit(sc, mva->mva_tags);
504
505 /* Register us with dmover. */
506 sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
507 sc->sc_dmb_xore.dmb_speed = dmb_speed;
508 sc->sc_dmb_xore.dmb_cookie = sc;
509 sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
510 sc->sc_dmb_xore.dmb_nalgdescs =
511 __arraycount(mvxore_algdescs);
512 sc->sc_dmb_xore.dmb_process = gtidmac_process;
513 dmover_backend_register(&sc->sc_dmb_xore);
514 }
515
516 gtidmac_softc = sc;
517
518 return;
519
520 fail5:
521 for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
522 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
523 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
524 }
525 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
526 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan);
529 bus_dmamem_free(sc->sc_dmat,
530 sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
531 fail4:
532 bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
533 fail3:
534 bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
535 fail2:
536 kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
537 fail1:
538 bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
539 return;
540 }
541
542
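/*
 * gtidmac_intr:
 *
 *	IDMAC interrupt handler.  Reads and acknowledges the interrupt
 *	cause register, reports error causes and runs the completion
 *	callback of the affected channel.
 */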
543 static int
544 gtidmac_intr(void *arg)
545 {
546 struct gtidmac_intr_arg *ia = arg;
547 struct gtidmac_softc *sc = ia->ia_sc;
548 uint32_t cause;
549 int handled = 0, chan, error;
550
551 cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
552 DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
553 bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
554
555 chan = 0;
556 while (cause) {
557 error = 0;
558 if (cause & GTIDMAC_I_ADDRMISS) {
559 aprint_error_dev(sc->sc_dev, "Address Miss");
560 error = EINVAL;
561 }
562 if (cause & GTIDMAC_I_ACCPROT) {
563 aprint_error_dev(sc->sc_dev,
564 "Access Protect Violation");
565 error = EACCES;
566 }
567 if (cause & GTIDMAC_I_WRPROT) {
568 aprint_error_dev(sc->sc_dev, "Write Protect");
569 error = EACCES;
570 }
571 if (cause & GTIDMAC_I_OWN) {
572 aprint_error_dev(sc->sc_dev, "Ownership Violation");
573 error = EINVAL;
574 }
575
576 #define GTIDMAC_I_ERROR \
577 (GTIDMAC_I_ADDRMISS | \
578 GTIDMAC_I_ACCPROT | \
579 GTIDMAC_I_WRPROT | \
580 GTIDMAC_I_OWN)
581 if (cause & GTIDMAC_I_ERROR) {
582 uint32_t sel;
583 int select;
584
585 sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
586 ia->ia_eselect) & GTIDMAC_ESR_SEL;
587 select = sel - chan * GTIDMAC_I_BITS;
588 if (select >= 0 && select < GTIDMAC_I_BITS) {
589 uint32_t ear;
590
591 ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
592 ia->ia_eaddr);
593 aprint_error(": Error Address 0x%x\n", ear);
594 } else
595 aprint_error(": lost Error Address\n");
596 }
597
598 if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
599 sc->sc_cdesc[chan].chan_dma_done(
600 sc->sc_cdesc[chan].chan_running, chan,
601 &sc->sc_cdesc[chan].chan_in,
602 &sc->sc_cdesc[chan].chan_out, error);
603 handled++;
604 }
605
606 cause >>= GTIDMAC_I_BITS;
607 }
608 DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
609
610 return handled;
611 }
612
613 static int
614 mvxore_port0_intr(void *arg)
615 {
616 struct gtidmac_softc *sc = arg;
617
618 return mvxore_intr(sc, 0);
619 }
620
621 static int
622 mvxore_port1_intr(void *arg)
623 {
624 struct gtidmac_softc *sc = arg;
625
626 return mvxore_intr(sc, 1);
627 }
628
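/*
 * mvxore_intr:
 *
 *	XOR Engine interrupt handler, shared by both ports.  Acknowledges
 *	the cause register, reports error causes and runs the completion
 *	callback of the affected channel.
 */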
629 static int
630 mvxore_intr(struct gtidmac_softc *sc, int port)
631 {
632 uint32_t cause;
633 int handled = 0, chan, error;
634
635 cause =
636 bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
637 DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
638 printf("XORE port %d intr: cause=0x%x\n", port, cause);
639 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
640 MVXORE_XEICR(sc, port), ~cause);
641
642 chan = 0;
643 while (cause) {
644 error = 0;
645 if (cause & MVXORE_I_ADDRDECODE) {
646 aprint_error_dev(sc->sc_dev, "Failed address decoding");
647 error = EINVAL;
648 }
649 if (cause & MVXORE_I_ACCPROT) {
650 aprint_error_dev(sc->sc_dev,
651 "Access Protect Violation");
652 error = EACCES;
653 }
654 if (cause & MVXORE_I_WRPROT) {
655 aprint_error_dev(sc->sc_dev, "Write Protect");
656 error = EACCES;
657 }
658 if (cause & MVXORE_I_OWN) {
659 aprint_error_dev(sc->sc_dev, "Ownership Violation");
660 error = EINVAL;
661 }
662 if (cause & MVXORE_I_INTPARITY) {
663 aprint_error_dev(sc->sc_dev, "Parity Error");
664 error = EIO;
665 }
666 if (cause & MVXORE_I_XBAR) {
667 aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
668 error = EINVAL;
669 }
670
671 #define MVXORE_I_ERROR \
672 (MVXORE_I_ADDRDECODE | \
673 MVXORE_I_ACCPROT | \
674 MVXORE_I_WRPROT | \
675 MVXORE_I_OWN | \
676 MVXORE_I_INTPARITY | \
677 MVXORE_I_XBAR)
678 if (cause & MVXORE_I_ERROR) {
679 uint32_t type;
680 int event;
681
682 type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
683 MVXORE_XEECR(sc, port));
684 type &= MVXORE_XEECR_ERRORTYPE_MASK;
685 event = type - chan * MVXORE_I_BITS;
686 if (event >= 0 && event < MVXORE_I_BITS) {
687 uint32_t xeear;
688
689 xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
690 MVXORE_XEEAR(sc, port));
691 aprint_error(": Error Address 0x%x\n", xeear);
692 } else
693 aprint_error(": lost Error Address\n");
694 }
695
696 if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
697 sc->sc_cdesc_xore[chan].chan_dma_done(
698 sc->sc_cdesc_xore[chan].chan_running, chan,
699 sc->sc_cdesc_xore[chan].chan_in,
700 &sc->sc_cdesc_xore[chan].chan_out, error);
701 handled++;
702 }
703
704 cause >>= MVXORE_I_BITS;
705 }
706 printf("XORE port %d intr: %shandled\n", port, handled ? "" : "not ");
707 DPRINTF(("XORE port %d intr: %shandled\n",
708 port, handled ? "" : "not "));
709
710 return handled;
711 }
712
713
714 /*
715 * dmover(9) backend function.
716 */
717 static void
718 gtidmac_process(struct dmover_backend *dmb)
719 {
720 struct gtidmac_softc *sc = dmb->dmb_cookie;
721 int s;
722
723 /* If the backend is currently idle, go process the queue. */
724 s = splbio();
725 if (!sc->sc_dmb_busy)
726 gtidmac_dmover_run(dmb);
727 splx(s);
728 }
729
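/*
 * gtidmac_dmover_run:
 *
 *	Pull requests off the dmover(9) pending queue, load their buffers
 *	into DMA maps, build the descriptor chain and start the transfer.
 *	Called at splbio().
 */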
730 static void
731 gtidmac_dmover_run(struct dmover_backend *dmb)
732 {
733 struct gtidmac_softc *sc = dmb->dmb_cookie;
734 struct dmover_request *dreq;
735 const struct dmover_algdesc *algdesc;
736 struct gtidmac_function *df;
737 bus_dmamap_t *dmamap_in, *dmamap_out;
738 int chan, ninputs, error, i;
739
740 sc->sc_dmb_busy = 1;
741
742 for (;;) {
743 dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
744 if (dreq == NULL)
745 break;
746 algdesc = dreq->dreq_assignment->das_algdesc;
747 df = algdesc->dad_data;
748 chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
749 if (chan == -1)
750 return;
751
752 dmover_backend_remque(dmb, dreq);
753 dreq->dreq_flags |= DMOVER_REQ_RUNNING;
754
755 /* XXXUNLOCK */
756
757 error = 0;
758
759 /* Load in/out buffers of dmover to bus_dmamap. */
760 ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
761 if (ninputs == 0) {
762 int pno = 0;
763
764 if (algdesc->dad_name == DMOVER_FUNC_FILL8)
765 pno = dreq->dreq_immediate[0];
766
767 i = 0;
768 error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
769 &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
770 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
771 if (error == 0) {
772 bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
773 sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
774
				/*
				 * gtidmac_dmmap_unload() will be called
				 * if an error occurs later.
				 */
779 i = 1;
780 }
781 } else
782 for (i = 0; i < ninputs; i++) {
783 error = gtidmac_dmmap_load(sc,
784 *(dmamap_in + i), dreq->dreq_inbuf_type,
785 &dreq->dreq_inbuf[i], 0/*write*/);
786 if (error != 0)
787 break;
788 }
789 if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
790 if (error == 0)
791 error = gtidmac_dmmap_load(sc, *dmamap_out,
792 dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
793 1/*read*/);
794
795 if (error == 0) {
				/*
				 * The outbuf size is always taken to be the
				 * DMA transfer size of the dmover request.
				 */
800 error = (*df->dma_setup)(sc, chan, ninputs,
801 dmamap_in, dmamap_out,
802 (*dmamap_out)->dm_mapsize);
803 if (error != 0)
804 gtidmac_dmmap_unload(sc, *dmamap_out,
805 1);
806 }
807 } else
808 if (error == 0)
809 error = (*df->dma_setup)(sc, chan, ninputs,
810 dmamap_in, dmamap_out,
811 (*dmamap_in)->dm_mapsize);
812
813 /* XXXLOCK */
814
815 if (error != 0) {
816 for (; i-- > 0;)
817 gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
818 (*df->chan_free)(sc, chan);
819
820 dreq->dreq_flags |= DMOVER_REQ_ERROR;
821 dreq->dreq_error = error;
822 /* XXXUNLOCK */
823 dmover_done(dreq);
824 /* XXXLOCK */
825 continue;
826 }
827
828 (*df->dma_start)(sc, chan, gtidmac_dmover_done);
829 break;
830 }
831
832 /* All done */
833 sc->sc_dmb_busy = 0;
834 }
835
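/*
 * gtidmac_dmover_done:
 *
 *	DMA completion callback.  Unloads the DMA maps, stores the CRC32C
 *	result for DMOVER_FUNC_ISCSI_CRC32C requests, frees the channel and
 *	kicks the queue again if the backend is idle.
 */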
836 static void
837 gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
838 bus_dmamap_t *dmamap_out, int error)
839 {
840 struct gtidmac_softc *sc;
841 struct dmover_request *dreq = object;
842 struct dmover_backend *dmb;
843 struct gtidmac_function *df;
844 uint32_t result;
845 int ninputs, i;
846
847 KASSERT(dreq != NULL);
848
849 dmb = dreq->dreq_assignment->das_backend;
850 df = dreq->dreq_assignment->das_algdesc->dad_data;
851 ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
852 sc = dmb->dmb_cookie;
853
854 result = (*df->dma_finish)(sc, chan, error);
855 for (i = 0; i < ninputs; i++)
856 gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
857 if (dreq->dreq_assignment->das_algdesc->dad_name ==
858 DMOVER_FUNC_ISCSI_CRC32C)
859 memcpy(dreq->dreq_immediate, &result, sizeof(result));
860 else
861 gtidmac_dmmap_unload(sc, *dmamap_out, 1);
862
863 (*df->chan_free)(sc, chan);
864
865 if (error) {
866 dreq->dreq_error = error;
867 dreq->dreq_flags |= DMOVER_REQ_ERROR;
868 }
869
870 dmover_done(dreq);
871
872 /*
873 * See if we can start some more dmover(9) requests.
874 *
875 * Note: We're already at splbio() here.
876 */
877 if (!sc->sc_dmb_busy)
878 gtidmac_dmover_run(dmb);
879 }
880
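/*
 * gtidmac_dmmap_load:
 *
 *	Load a dmover(9) linear or uio buffer into the given DMA map and
 *	sync it for the upcoming transfer.
 */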
881 static __inline int
882 gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
883 dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
884 int read)
885 {
886 int error, flags;
887
	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);
890
891 switch (dmbuf_type) {
892 case DMOVER_BUF_LINEAR:
893 error = bus_dmamap_load(sc->sc_dmat, dmamap,
894 dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
895 NULL, flags);
896 break;
897
898 case DMOVER_BUF_UIO:
899 if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
900 (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
901 return (EINVAL);
902
903 error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
904 dmbuf->dmbuf_uio, flags);
905 break;
906
907 default:
908 error = EINVAL;
909 }
910
911 if (error == 0)
912 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
913 read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
914
915 return error;
916 }
917
918 static __inline void
919 gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
920 {
921
922 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
923 read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
924
925 bus_dmamap_unload(sc->sc_dmat, dmamap);
926 }
927
928
929 /*
930 * IDMAC functions
931 */
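/*
 * gtidmac_chan_alloc:
 *
 *	Allocate an idle IDMAC channel and return pointers to its in/out
 *	DMA maps; returns -1 if all channels are busy.  Callers follow the
 *	chan_alloc -> dma_setup -> dma_start -> dma_finish/chan_free
 *	sequence used by gtidmac_dmover_run().
 */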
932 int
933 gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
934 bus_dmamap_t **dmamap_out, void *object)
935 {
936 struct gtidmac_softc *sc = tag;
937 int chan;
938
939 /* maybe need lock */
940
941 for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
942 if (sc->sc_cdesc[chan].chan_running == NULL)
943 break;
944 if (chan >= sc->sc_gtidmac_nchan)
945 return -1;
946
947
948 sc->sc_cdesc[chan].chan_running = object;
949
950 /* unlock */
951
952 *dmamap_in = &sc->sc_cdesc[chan].chan_in;
953 *dmamap_out = &sc->sc_cdesc[chan].chan_out;
954
955 return chan;
956 }
957
958 void
959 gtidmac_chan_free(void *tag, int chan)
960 {
961 struct gtidmac_softc *sc = tag;
962
963 /* maybe need lock */
964
965 sc->sc_cdesc[chan].chan_running = NULL;
966
967 /* unlock */
968 }
969
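/*
 * gtidmac_setup:
 *
 *	Build the chained descriptor list for one transfer and program the
 *	channel control and next-descriptor registers.  A source or
 *	destination map smaller than the transfer size is used in hold
 *	(fixed-address) mode, e.g. for zero/fill8 patterns.
 */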
970 /* ARGSUSED */
971 int
972 gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
973 bus_dmamap_t *dmamap_out, bus_size_t size)
974 {
975 struct gtidmac_softc *sc = tag;
976 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
977 struct gtidmac_desc *desc;
978 uint32_t ccl, bcnt, ires, ores;
979 int n = 0, iidx, oidx;
980
981 KASSERT(ninputs == 0 || ninputs == 1);
982
983 ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
984 #ifdef DIAGNOSTIC
985 if (ccl & GTIDMAC_CCLR_CHANACT)
986 panic("gtidmac_setup: chan%d already active", chan);
987 #endif
988
	/* Always chained mode with at most (16M - 1) bytes per descriptor */
990 ccl = (GTIDMAC_CCLR_DESCMODE_16M |
991 #ifdef GTIDMAC_DEBUG
992 GTIDMAC_CCLR_CDEN |
993 #endif
994 GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */ |
995 GTIDMAC_CCLR_INTMODE_NULL /* Intr Mode: Next Desc NULL */ |
	    GTIDMAC_CCLR_CHAINMODE_C	/* Chain Mode: Chained */);
997 if (size != (*dmamap_in)->dm_mapsize) {
998 ccl |= GTIDMAC_CCLR_SRCHOLD;
999 if ((*dmamap_in)->dm_mapsize == 8)
1000 ccl |= GTIDMAC_CCLR_SBL_8B;
1001 else if ((*dmamap_in)->dm_mapsize == 16)
1002 ccl |= GTIDMAC_CCLR_SBL_16B;
1003 else if ((*dmamap_in)->dm_mapsize == 32)
1004 ccl |= GTIDMAC_CCLR_SBL_32B;
1005 else if ((*dmamap_in)->dm_mapsize == 64)
1006 ccl |= GTIDMAC_CCLR_SBL_64B;
1007 else if ((*dmamap_in)->dm_mapsize == 128)
1008 ccl |= GTIDMAC_CCLR_SBL_128B;
1009 else
1010 panic("gtidmac_setup: chan%d source:"
1011 " unsupport hold size", chan);
1012 } else
1013 ccl |= GTIDMAC_CCLR_SBL_128B;
1014 if (size != (*dmamap_out)->dm_mapsize) {
1015 ccl |= GTIDMAC_CCLR_DESTHOLD;
1016 if ((*dmamap_out)->dm_mapsize == 8)
1017 ccl |= GTIDMAC_CCLR_DBL_8B;
1018 else if ((*dmamap_out)->dm_mapsize == 16)
1019 ccl |= GTIDMAC_CCLR_DBL_16B;
1020 else if ((*dmamap_out)->dm_mapsize == 32)
1021 ccl |= GTIDMAC_CCLR_DBL_32B;
1022 else if ((*dmamap_out)->dm_mapsize == 64)
1023 ccl |= GTIDMAC_CCLR_DBL_64B;
1024 else if ((*dmamap_out)->dm_mapsize == 128)
1025 ccl |= GTIDMAC_CCLR_DBL_128B;
1026 else
1027 panic("gtidmac_setup: chan%d destination:"
1028 " unsupport hold size", chan);
1029 } else
1030 ccl |= GTIDMAC_CCLR_DBL_128B;
1031
1032 fstdd = SLIST_FIRST(&sc->sc_dlist);
1033 if (fstdd == NULL) {
1034 aprint_error_dev(sc->sc_dev, "no descriptor\n");
1035 return ENOMEM;
1036 }
1037 SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1038 sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1039
1040 dd = fstdd;
1041 ires = ores = 0;
1042 iidx = oidx = 0;
1043 while (1 /*CONSTCOND*/) {
1044 if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1045 if (ccl & GTIDMAC_CCLR_DESTHOLD)
1046 bcnt = size; /* src/dst hold */
1047 else
1048 bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1049 } else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1050 bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1051 else
1052 bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1053 (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1054
1055 desc = dd->dd_idmac_vaddr;
1056 desc->bc.mode16m.bcnt =
1057 bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1058 desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1059 desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1060
1061 n += bcnt;
1062 if (n >= size)
1063 break;
1064 if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1065 ires += bcnt;
1066 if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1067 ires = 0;
1068 iidx++;
1069 KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1070 }
1071 }
1072 if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1073 ores += bcnt;
1074 if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1075 ores = 0;
1076 oidx++;
1077 KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1078 }
1079 }
1080
1081 nxtdd = SLIST_FIRST(&sc->sc_dlist);
1082 if (nxtdd == NULL) {
1083 aprint_error_dev(sc->sc_dev, "no descriptor\n");
1084 return ENOMEM;
1085 }
1086 SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1087
1088 desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1089 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1090 dd->dd_index * sizeof(*desc), sizeof(*desc),
1091 #ifdef GTIDMAC_DEBUG
1092 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1093 #else
1094 BUS_DMASYNC_PREWRITE);
1095 #endif
1096
1097 SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1098 dd = nxtdd;
1099 }
1100 desc->nextdp = (uint32_t)NULL;
1101 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1102 #ifdef GTIDMAC_DEBUG
1103 sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1104 #else
1105 sizeof(*desc), BUS_DMASYNC_PREWRITE);
1106 #endif
1107
1108 /* Set paddr of descriptor to Channel Next Descriptor Pointer */
1109 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1110 fstdd->dd_paddr);
1111
1112 #if BYTE_ORDER == LITTLE_ENDIAN
1113 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1114 GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1115 #else
1116 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1117 GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1118 #endif
1119 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1120
1121 #ifdef GTIDMAC_DEBUG
1122 gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1123 #endif
1124
1125 sc->sc_cdesc[chan].chan_totalcnt += size;
1126
1127 return 0;
1128 }
1129
1130 void
1131 gtidmac_start(void *tag, int chan,
1132 void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1133 int))
1134 {
1135 struct gtidmac_softc *sc = tag;
1136 uint32_t ccl;
1137
1138 DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1139
1140 #ifdef GTIDMAC_DEBUG
1141 gtidmac_dump_idmacreg(sc, chan);
1142 #endif
1143
1144 sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1145
1146 ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1147 /* Start and 'Fetch Next Descriptor' */
1148 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1149 ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1150 }
1151
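/*
 * gtidmac_finish:
 *
 *	Tear down the descriptor chain after a transfer and return the
 *	descriptors to the free list.
 */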
1152 static uint32_t
1153 gtidmac_finish(void *tag, int chan, int error)
1154 {
1155 struct gtidmac_softc *sc = tag;
1156 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1157 struct gtidmac_desc *desc;
1158
1159 fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1160
1161 #ifdef GTIDMAC_DEBUG
1162 if (error || gtidmac_debug > 1) {
1163 uint32_t ccl;
1164
1165 gtidmac_dump_idmacreg(sc, chan);
1166 ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1167 GTIDMAC_CCLR(chan));
1168 gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1169 }
1170 #endif
1171
1172 dd = fstdd;
1173 do {
1174 desc = dd->dd_idmac_vaddr;
1175
1176 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1177 dd->dd_index * sizeof(*desc), sizeof(*desc),
1178 #ifdef GTIDMAC_DEBUG
1179 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1180 #else
1181 BUS_DMASYNC_POSTWRITE);
1182 #endif
1183
1184 nxtdd = SLIST_NEXT(dd, dd_next);
1185 SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1186 dd = nxtdd;
1187 } while (desc->nextdp);
1188
1189 return 0;
1190 }
1191
1192 /*
1193 * XORE functions
1194 */
1195 int
1196 mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1197 bus_dmamap_t **dmamap_out, void *object)
1198 {
1199 struct gtidmac_softc *sc = tag;
1200 int chan;
1201
1202 /* maybe need lock */
1203
1204 for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1205 if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1206 break;
1207 if (chan >= sc->sc_mvxore_nchan)
1208 return -1;
1209
1210
1211 sc->sc_cdesc_xore[chan].chan_running = object;
1212
1213 /* unlock */
1214
1215 *dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1216 *dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1217
1218 return chan;
1219 }
1220
1221 void
1222 mvxore_chan_free(void *tag, int chan)
1223 {
1224 struct gtidmac_softc *sc = tag;
1225
1226 /* maybe need lock */
1227
1228 sc->sc_cdesc_xore[chan].chan_running = NULL;
1229
1230 /* unlock */
1231 }
1232
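/*
 * mvxore_setup:
 *
 *	Program one XOR Engine channel.  ECC/MemInit modes are register
 *	driven; DMA, CRC32 and XOR modes build a chained descriptor list
 *	with up to MVXORE_NSRC source addresses per descriptor.
 */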
1233 /* ARGSUSED */
1234 int
1235 mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1236 bus_dmamap_t *dmamap_out, bus_size_t size)
1237 {
1238 struct gtidmac_softc *sc = tag;
1239 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1240 struct mvxore_desc *desc;
1241 uint32_t xexc, bcnt, cmd, lastcmd;
1242 int n = 0, i;
1243 uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1244 int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1245
1246 #ifdef DIAGNOSTIC
1247 uint32_t xexact;
1248
1249 xexact =
1250 bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1251 if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1252 MVXORE_XEXACTR_XESTATUS_ACT)
1253 panic("mvxore_setup: chan%d already active."
1254 " mvxore not support hot insertion", chan);
1255 #endif
1256
1257 xexc =
1258 (MVXORE_XEXCR_REGACCPROTECT |
1259 MVXORE_XEXCR_DBL_128B |
1260 MVXORE_XEXCR_SBL_128B);
1261 cmd = lastcmd = 0;
1262 if (ninputs > 1) {
1263 xexc |= MVXORE_XEXCR_OM_XOR;
1264 lastcmd = cmd = (1 << ninputs) - 1;
1265 } else if (ninputs == 1) {
1266 if ((*dmamap_out)->dm_nsegs == 0) {
1267 xexc |= MVXORE_XEXCR_OM_CRC32;
1268 lastcmd = MVXORE_DESC_CMD_CRCLAST;
1269 } else
1270 xexc |= MVXORE_XEXCR_OM_DMA;
1271 } else if (ninputs == 0) {
1272 if ((*dmamap_out)->dm_nsegs != 1) {
1273 aprint_error_dev(sc->sc_dev,
1274 "XORE not supports %d DMA segments\n",
1275 (*dmamap_out)->dm_nsegs);
1276 return EINVAL;
1277 }
1278
1279 if ((*dmamap_in)->dm_mapsize == 0) {
1280 xexc |= MVXORE_XEXCR_OM_ECC;
1281
1282 /* XXXXX: Maybe need to set Timer Mode registers? */
1283
1284 #if 0
1285 } else if ((*dmamap_in)->dm_mapsize == 8 ||
1286 (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1287 uint64_t pattern;
1288
1289 /* XXXX: Get pattern data */
1290
1291 KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1292 (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1293 ~PAGE_MASK) == sc->sc_pbuf);
1294 pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1295
1296 /* XXXXX: XORE has a IVR. We should get this first. */
1297 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1298 pattern);
1299 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1300 pattern >> 32);
1301
1302 xexc |= MVXORE_XEXCR_OM_MEMINIT;
1303 #endif
1304 } else {
1305 aprint_error_dev(sc->sc_dev,
1306 "XORE not supports DMA mapsize %zd\n",
1307 (*dmamap_in)->dm_mapsize);
1308 return EINVAL;
1309 }
1310 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1311 MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
1312 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1313 MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);
1314
1315 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1316 MVXORE_XEXCR(sc, chan), xexc);
1317 sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1318
1319 return 0;
1320 }
1321
1322 /* Make descriptor for DMA/CRC32/XOR */
1323
1324 fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1325 if (fstdd == NULL) {
1326 aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1327 return ENOMEM;
1328 }
1329 SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1330 sc->sc_cdesc_xore[chan].chan_ddidx =
1331 fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1332
1333 dd = fstdd;
1334 while (1 /*CONSTCOND*/) {
1335 desc = dd->dd_xore_vaddr;
1336 desc->stat = MVXORE_DESC_STAT_OWN;
1337 desc->cmd = cmd;
1338 if ((*dmamap_out)->dm_nsegs != 0) {
1339 desc->dstaddr =
1340 (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1341 bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1342 } else {
1343 desc->dstaddr = 0;
1344 bcnt = MVXORE_MAXXFER; /* XXXXX */
1345 }
1346 for (i = 0; i < ninputs; i++) {
1347 desc->srcaddr[i] =
1348 (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1349 bcnt = min(bcnt,
1350 (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1351 }
1352 desc->bcnt = bcnt;
1353
1354 n += bcnt;
1355 if (n >= size)
1356 break;
1357 ores += bcnt;
1358 if ((*dmamap_out)->dm_nsegs != 0 &&
1359 ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1360 ores = 0;
1361 oidx++;
1362 KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1363 }
1364 for (i = 0; i < ninputs; i++) {
1365 ires[i] += bcnt;
1366 if (ires[i] >=
1367 (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1368 ires[i] = 0;
1369 iidx[i]++;
1370 KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1371 }
1372 }
1373
1374 nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1375 if (nxtdd == NULL) {
1376 aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1377 return ENOMEM;
1378 }
1379 SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1380
1381 desc->nextda = (uint32_t)nxtdd->dd_paddr;
1382 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1383 dd->dd_index * sizeof(*desc), sizeof(*desc),
1384 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1385
1386 SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1387 dd = nxtdd;
1388 }
1389 desc->cmd = lastcmd;
1390 desc->nextda = (uint32_t)NULL;
1391 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1392 dd->dd_index * sizeof(*desc), sizeof(*desc),
1393 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1394
1395 /* Set paddr of descriptor to Channel Next Descriptor Pointer */
1396 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
1397 fstdd->dd_paddr);
1398
1399 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);
1400
1401 #ifdef GTIDMAC_DEBUG
1402 gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1403 #endif
1404
1405 sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1406
1407 return 0;
1408 }
1409
1410 void
1411 mvxore_start(void *tag, int chan,
1412 void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1413 int))
1414 {
1415 struct gtidmac_softc *sc = tag;
1416 uint32_t xexact;
1417
1418 DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1419
1420 #ifdef GTIDMAC_DEBUG
1421 gtidmac_dump_xorereg(sc, chan);
1422 #endif
1423
1424 sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1425
1426 xexact =
1427 bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1428 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
1429 xexact | MVXORE_XEXACTR_XESTART);
1430 }
1431
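/*
 * mvxore_finish:
 *
 *	Return descriptors to the free list after a transfer; for CRC32
 *	operations the result from the last descriptor is returned.
 */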
1432 static uint32_t
1433 mvxore_finish(void *tag, int chan, int error)
1434 {
1435 struct gtidmac_softc *sc = tag;
1436 struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1437 struct mvxore_desc *desc;
1438 uint32_t xexc;
1439
1440 #ifdef GTIDMAC_DEBUG
1441 if (error || gtidmac_debug > 1)
1442 gtidmac_dump_xorereg(sc, chan);
1443 #endif
1444
1445 xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1446 if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1447 (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1448 return 0;
1449
1450 fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1451
1452 #ifdef GTIDMAC_DEBUG
1453 if (error || gtidmac_debug > 1)
1454 gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1455 #endif
1456
1457 dd = fstdd;
1458 do {
1459 desc = dd->dd_xore_vaddr;
1460
1461 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1462 dd->dd_index * sizeof(*desc), sizeof(*desc),
1463 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1464
1465 nxtdd = SLIST_NEXT(dd, dd_next);
1466 SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1467 dd = nxtdd;
1468 } while (desc->nextda);
1469
1470 if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1471 return desc->result;
1472 return 0;
1473 }
1474
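/*
 * gtidmac_wininit:
 *
 *	Program the IDMAC address decoding windows from the parent's
 *	window parameters and apply the optional access-protection table.
 */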
1475 static void
1476 gtidmac_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1477 {
1478 device_t pdev = device_parent(sc->sc_dev);
1479 uint64_t base;
1480 uint32_t size, cxap, en, winacc;
1481 int window, target, attr, rv, i, j;
1482
1483 en = 0xff;
1484 cxap = 0;
1485 for (window = 0, i = 0;
1486 tags[i] != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW; i++) {
1487 rv = marvell_winparams_by_tag(pdev, tags[i],
1488 &target, &attr, &base, &size);
1489 if (rv != 0 || size == 0)
1490 continue;
1491
1492 if (base > 0xffffffffULL) {
1493 if (window >= GTIDMAC_NREMAP) {
1494 aprint_error_dev(sc->sc_dev,
1495 "can't remap window %d\n", window);
1496 continue;
1497 }
1498 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1499 GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1500 }
1501 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1502 GTIDMAC_BARX_TARGET(target) |
1503 GTIDMAC_BARX_ATTR(attr) |
1504 GTIDMAC_BARX_BASE(base));
1505 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1506 GTIDMAC_SRX_SIZE(size));
1507 en &= ~GTIDMAC_BAER_EN(window);
1508
1509 winacc = GTIDMAC_CXAPR_WINACC_FA;
1510 if (gtidmac_winacctbl != NULL)
1511 for (j = 0;
1512 gtidmac_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1513 j++) {
1514 if (gtidmac_winacctbl[j].tag != tags[i])
1515 continue;
1516
1517 switch (gtidmac_winacctbl[j].winacc) {
1518 case GTIDMAC_WINACC_NOACCESSALLOWED:
1519 winacc = GTIDMAC_CXAPR_WINACC_NOAA;
1520 break;
1521 case GTIDMAC_WINACC_READONLY:
1522 winacc = GTIDMAC_CXAPR_WINACC_RO;
1523 break;
1524 case GTIDMAC_WINACC_FULLACCESS:
1525 default: /* XXXX: default is full access */
1526 break;
1527 }
1528 break;
1529 }
1530 cxap |= GTIDMAC_CXAPR_WINACC(window, winacc);
1531
1532 window++;
1533 }
1534 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1535
1536 for (i = 0; i < GTIDMAC_NACCPROT; i++)
1537 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1538 cxap);
1539 }
1540
1541 static void
1542 mvxore_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1543 {
1544 device_t pdev = device_parent(sc->sc_dev);
1545 uint64_t base;
1546 uint32_t target, attr, size, xexwc, winacc;
1547 int window, rv, i, j, p;
1548
1549 xexwc = 0;
1550 for (window = 0, i = 0;
1551 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW; i++) {
1552 rv = marvell_winparams_by_tag(pdev, tags[i],
1553 &target, &attr, &base, &size);
1554 if (rv != 0 || size == 0)
1555 continue;
1556
1557 if (base > 0xffffffffULL) {
1558 if (window >= MVXORE_NREMAP) {
1559 aprint_error_dev(sc->sc_dev,
1560 "can't remap window %d\n", window);
1561 continue;
1562 }
1563 for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1564 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1565 MVXORE_XEHARRX(sc, p, window),
1566 (base >> 32) & 0xffffffff);
1567 }
1568
1569 for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1570 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1571 MVXORE_XEBARX(sc, p, window),
1572 MVXORE_XEBARX_TARGET(target) |
1573 MVXORE_XEBARX_ATTR(attr) |
1574 MVXORE_XEBARX_BASE(base));
1575 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1576 MVXORE_XESMRX(sc, p, window),
1577 MVXORE_XESMRX_SIZE(size));
1578 }
1579
1580 winacc = MVXORE_XEXWCR_WINACC_FA;
1581 if (mvxore_winacctbl != NULL)
1582 for (j = 0;
1583 mvxore_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1584 j++) {
				if (mvxore_winacctbl[j].tag != tags[i])
					continue;

				switch (mvxore_winacctbl[j].winacc) {
1589 case GTIDMAC_WINACC_NOACCESSALLOWED:
1590 winacc = MVXORE_XEXWCR_WINACC_NOAA;
1591 break;
1592 case GTIDMAC_WINACC_READONLY:
1593 winacc = MVXORE_XEXWCR_WINACC_RO;
1594 break;
1595 case GTIDMAC_WINACC_FULLACCESS:
1596 default: /* XXXX: default is full access */
1597 break;
1598 }
1599 break;
1600 }
1601 xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1602 MVXORE_XEXWCR_WINACC(window, winacc));
1603 window++;
1604 }
1605
1606 for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1607 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1608 xexwc);
1609
1610 /* XXXXX: reset... */
1611 bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
1612 0);
1613 }
1614 }
1615
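/*
 * gtidmac_buffer_setup:
 *
 *	Allocate and map the IDMAC descriptor ring, create the per-channel
 *	in/out DMA maps and program the interrupt mask registers.
 */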
1616 static int
1617 gtidmac_buffer_setup(struct gtidmac_softc *sc)
1618 {
1619 bus_dma_segment_t segs;
1620 struct gtidmac_dma_desc *dd;
1621 uint32_t mask;
1622 int nchan, nsegs, i;
1623
1624 nchan = sc->sc_gtidmac_nchan;
1625
1626 if (bus_dmamem_alloc(sc->sc_dmat,
1627 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1628 PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1629 aprint_error_dev(sc->sc_dev,
1630 "bus_dmamem_alloc failed: descriptor buffer\n");
1631 goto fail0;
1632 }
1633 if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1634 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1635 (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
1636 aprint_error_dev(sc->sc_dev,
1637 "bus_dmamem_map failed: descriptor buffer\n");
1638 goto fail1;
1639 }
1640 if (bus_dmamap_create(sc->sc_dmat,
1641 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
1642 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
1643 BUS_DMA_NOWAIT, &sc->sc_dmap)) {
1644 aprint_error_dev(sc->sc_dev,
1645 "bus_dmamap_create failed: descriptor buffer\n");
1646 goto fail2;
1647 }
1648 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
1649 sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1650 NULL, BUS_DMA_NOWAIT)) {
1651 aprint_error_dev(sc->sc_dev,
1652 "bus_dmamap_load failed: descriptor buffer\n");
1653 goto fail3;
1654 }
1655 SLIST_INIT(&sc->sc_dlist);
1656 for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
1657 dd = &sc->sc_dd_buffer[i];
1658 dd->dd_index = i;
1659 dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
1660 dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
1661 (sizeof(struct gtidmac_desc) * i);
1662 SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1663 }
1664
1665 /* Initialize IDMAC DMA channels */
1666 mask = 0;
1667 for (i = 0; i < nchan; i++) {
1668 if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
1669 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1670 GTIDMAC_IMR(i - 1), mask);
1671 mask = 0;
1672 }
1673
1674 if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1675 GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1676 &sc->sc_cdesc[i].chan_in)) {
1677 aprint_error_dev(sc->sc_dev,
1678 "bus_dmamap_create failed: chan%d in\n", i);
1679 goto fail4;
1680 }
1681 if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1682 GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1683 &sc->sc_cdesc[i].chan_out)) {
1684 aprint_error_dev(sc->sc_dev,
1685 "bus_dmamap_create failed: chan%d out\n", i);
1686 bus_dmamap_destroy(sc->sc_dmat,
1687 sc->sc_cdesc[i].chan_in);
1688 goto fail4;
1689 }
1690 sc->sc_cdesc[i].chan_totalcnt = 0;
1691 sc->sc_cdesc[i].chan_running = NULL;
1692
1693 /* Ignore bits overflow. The mask is 32bit. */
1694 mask |= GTIDMAC_I(i,
1695 GTIDMAC_I_COMP |
1696 GTIDMAC_I_ADDRMISS |
1697 GTIDMAC_I_ACCPROT |
1698 GTIDMAC_I_WRPROT |
1699 GTIDMAC_I_OWN);
1700
1701 /* 8bits/channel * 4channels => 32bit */
1702 if ((i & 0x3) == 0x3) {
1703 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1704 GTIDMAC_IMR(i), mask);
1705 mask = 0;
1706 }
1707 }
1708
1709 return 0;
1710
1711 fail4:
1712 for (; i-- > 0;) {
1713 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
1714 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
1715 }
1716 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1717 fail3:
1718 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
1719 fail2:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan);
1722 fail1:
1723 bus_dmamem_free(sc->sc_dmat, &segs, 1);
1724 fail0:
1725 return -1;
1726 }
1727
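/*
 * mvxore_buffer_setup:
 *
 *	Same as gtidmac_buffer_setup(), but for the XOR Engine channels.
 */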
1728 static int
1729 mvxore_buffer_setup(struct gtidmac_softc *sc)
1730 {
1731 bus_dma_segment_t segs;
1732 struct gtidmac_dma_desc *dd;
1733 uint32_t mask;
1734 int nchan, nsegs, i, j;
1735
1736 nchan = sc->sc_mvxore_nchan;
1737
1738 if (bus_dmamem_alloc(sc->sc_dmat,
1739 sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1740 PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1741 aprint_error_dev(sc->sc_dev,
1742 "bus_dmamem_alloc failed: xore descriptor buffer\n");
1743 goto fail0;
1744 }
1745 if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1746 sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1747 (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
1748 aprint_error_dev(sc->sc_dev,
1749 "bus_dmamem_map failed: xore descriptor buffer\n");
1750 goto fail1;
1751 }
1752 if (bus_dmamap_create(sc->sc_dmat,
1753 sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
1754 sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
1755 BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
1756 aprint_error_dev(sc->sc_dev,
1757 "bus_dmamap_create failed: xore descriptor buffer\n");
1758 goto fail2;
1759 }
1760 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
1761 sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1762 NULL, BUS_DMA_NOWAIT)) {
1763 aprint_error_dev(sc->sc_dev,
1764 "bus_dmamap_load failed: xore descriptor buffer\n");
1765 goto fail3;
1766 }
1767 SLIST_INIT(&sc->sc_dlist_xore);
1768 for (i = 0; i < MVXORE_NDESC * nchan; i++) {
1769 dd =
1770 &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
1771 dd->dd_index = i;
1772 dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
1773 dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
1774 (sizeof(struct mvxore_desc) * i);
1775 SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1776 }
1777
1778 /* Initialize XORE DMA channels */
1779 mask = 0;
1780 for (i = 0; i < nchan; i++) {
1781 for (j = 0; j < MVXORE_NSRC; j++) {
1782 if (bus_dmamap_create(sc->sc_dmat,
1783 MVXORE_MAXXFER, MVXORE_NSEGS,
1784 MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
1785 &sc->sc_cdesc_xore[i].chan_in[j])) {
1786 aprint_error_dev(sc->sc_dev,
1787 "bus_dmamap_create failed:"
1788 " xore chan%d in[%d]\n", i, j);
1789 goto fail4;
1790 }
1791 }
1792 if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
1793 MVXORE_NSEGS, MVXORE_MAXXFER, 0,
1794 BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
1795 aprint_error_dev(sc->sc_dev,
1796 "bus_dmamap_create failed: chan%d out\n", i);
1797 goto fail5;
1798 }
1799 sc->sc_cdesc_xore[i].chan_totalcnt = 0;
1800 sc->sc_cdesc_xore[i].chan_running = NULL;
1801
1802 mask |= MVXORE_I(i,
1803 MVXORE_I_EOC |
1804 MVXORE_I_ADDRDECODE |
1805 MVXORE_I_ACCPROT |
1806 MVXORE_I_WRPROT |
1807 MVXORE_I_OWN |
1808 MVXORE_I_INTPARITY |
1809 MVXORE_I_XBAR);
1810
1811 /* 16bits/channel * 2channels => 32bit */
1812 if (i & 0x1) {
1813 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1814 MVXORE_XEIMR(sc, i >> 1), mask);
1815 mask = 0;
1816 }
1817 }
1818
1819 return 0;
1820
1821 for (; i-- > 0;) {
1822 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
1823
1824 fail5:
1825 j = MVXORE_NSRC;
1826 fail4:
1827 for (; j-- > 0;)
1828 bus_dmamap_destroy(sc->sc_dmat,
1829 sc->sc_cdesc_xore[i].chan_in[j]);
1830 }
1831 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
1832 fail3:
1833 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
1834 fail2:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan);
1837 fail1:
1838 bus_dmamem_free(sc->sc_dmat, &segs, 1);
1839 fail0:
1840 return -1;
1841 }
1842
1843 #ifdef GTIDMAC_DEBUG
1844 static void
1845 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1846 {
1847 uint32_t val;
1848 char buf[256];
1849
1850 printf("IDMAC Registers\n");
1851
1852 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1853 snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1854 printf(" Byte Count : %s\n", buf);
1855 printf(" ByteCnt : 0x%06x\n",
1856 val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1857 printf(" Source Address : 0x%08x\n",
1858 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1859 printf(" Destination Address : 0x%08x\n",
1860 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1861 printf(" Next Descriptor Pointer : 0x%08x\n",
1862 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1863 printf(" Current Descriptor Pointer : 0x%08x\n",
1864 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1865
1866 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1867 snprintb(buf, sizeof(buf),
1868 "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1869 "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1870 val);
1871 printf(" Channel Control (Low) : %s\n", buf);
1872 printf(" SrcBurstLimit : %s Bytes\n",
1873 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1874 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1875 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1876 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1877 (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1878 "unknwon");
1879 printf(" DstBurstLimit : %s Bytes\n",
1880 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1881 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1882 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1883 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1884 (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1885 "unknwon");
1886 printf(" ChainMode : %sChained\n",
1887 val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1888 printf(" TransferMode : %s\n",
1889 val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1890 printf(" DescMode : %s\n",
1891 val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1892 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1893 snprintb(buf, sizeof(buf),
1894 "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1895 printf(" Channel Control (High) : %s\n", buf);
1896 }
1897
1898 static void
1899 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1900 uint32_t mode, int post)
1901 {
1902 struct gtidmac_desc *desc;
1903 int i;
1904 char buf[256];
1905
1906 printf("IDMAC Descriptor\n");
1907
1908 i = 0;
1909 while (1 /*CONSTCOND*/) {
1910 if (post)
1911 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1912 dd->dd_index * sizeof(*desc), sizeof(*desc),
1913 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1914
1915 desc = dd->dd_idmac_vaddr;
1916
1917 printf("%d (0x%lx)\n", i, dd->dd_paddr);
1918 if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1919 snprintb(buf, sizeof(buf),
1920 "\177\020b\037Own\0b\036BCLeft\0",
1921 desc->bc.mode16m.bcnt);
1922 printf(" Byte Count : %s\n", buf);
1923 printf(" ByteCount : 0x%06x\n",
1924 desc->bc.mode16m.bcnt &
1925 GTIDMAC_CIDMABCR_BYTECNT_MASK);
1926 } else {
1927 printf(" Byte Count : 0x%04x\n",
1928 desc->bc.mode64k.bcnt);
1929 printf(" Remind Byte Count : 0x%04x\n",
1930 desc->bc.mode64k.rbc);
1931 }
1932 printf(" Source Address : 0x%08x\n", desc->srcaddr);
1933 printf(" Destination Address : 0x%08x\n", desc->dstaddr);
1934 printf(" Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1935
1936 if (desc->nextdp == (uint32_t)NULL)
1937 break;
1938
1939 if (!post)
1940 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1941 dd->dd_index * sizeof(*desc), sizeof(*desc),
1942 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1943
1944 i++;
1945 dd = SLIST_NEXT(dd, dd_next);
1946 }
1947 if (!post)
1948 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1949 dd->dd_index * sizeof(*desc), sizeof(*desc),
1950 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1951 }
1952
1953 static void
1954 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1955 {
1956 uint32_t val, opmode;
1957 char buf[64];
1958
1959 printf("XORE Registers\n");
1960
1961 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1962 snprintb(buf, sizeof(buf),
1963 "\177\020"
1964 "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1965 val);
1966 printf(" Configuration : 0x%s\n", buf);
1967 opmode = val & MVXORE_XEXCR_OM_MASK;
1968 printf(" OperationMode : %s operation\n",
1969 opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1970 opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1971 opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1972 opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1973 opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1974 "unknown");
1975 printf(" SrcBurstLimit : %s Bytes\n",
1976 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1977 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1978 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1979 "unknwon");
1980 printf(" DstBurstLimit : %s Bytes\n",
1981 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1982 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1983 (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1984 "unknwon");
1985 val =
1986 bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1987 printf(" Activation : 0x%08x\n", val);
1988 val &= MVXORE_XEXACTR_XESTATUS_MASK;
1989 printf(" XEstatus : %s\n",
1990 val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
1991 val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
1992 val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
1993
1994 if (opmode == MVXORE_XEXCR_OM_XOR ||
1995 opmode == MVXORE_XEXCR_OM_CRC32 ||
1996 opmode == MVXORE_XEXCR_OM_DMA) {
1997 printf(" NextDescPtr : 0x%08x\n",
1998 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1999 MVXORE_XEXNDPR(sc, chan)));
2000 printf(" CurrentDescPtr : 0x%08x\n",
2001 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2002 MVXORE_XEXCDPR(chan)));
2003 }
2004 printf(" ByteCnt : 0x%08x\n",
2005 bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
2006
2007 if (opmode == MVXORE_XEXCR_OM_ECC ||
2008 opmode == MVXORE_XEXCR_OM_MEMINIT) {
2009 printf(" DstPtr : 0x%08x\n",
2010 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2011 MVXORE_XEXDPR(sc, chan)));
2012 printf(" BlockSize : 0x%08x\n",
2013 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2014 MVXORE_XEXBSR(sc, chan)));
2015
2016 if (opmode == MVXORE_XEXCR_OM_ECC) {
2017 val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2018 MVXORE_XETMCR);
2019 if (val & MVXORE_XETMCR_TIMEREN) {
2020 val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
2021 val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
2022 printf(" SectionSizeCtrl : 0x%08x\n", 2 ^ val);
2023 printf(" TimerInitVal : 0x%08x\n",
2024 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2025 MVXORE_XETMIVR));
2026 printf(" TimerCrntVal : 0x%08x\n",
2027 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2028 MVXORE_XETMCVR));
2029 }
2030 } else /* MVXORE_XEXCR_OM_MEMINIT */
2031 printf(" InitVal : 0x%08x%08x\n",
2032 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2033 MVXORE_XEIVRH),
2034 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2035 MVXORE_XEIVRL));
2036 }
2037 }
2038
2039 static void
2040 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
2041 uint32_t mode, int post)
2042 {
2043 struct mvxore_desc *desc;
2044 int i, j;
2045 char buf[256];
2046
2047 printf("XORE Descriptor\n");
2048
2049 mode &= MVXORE_XEXCR_OM_MASK;
2050
2051 i = 0;
2052 while (1 /*CONSTCOND*/) {
2053 if (post)
2054 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2055 dd->dd_index * sizeof(*desc), sizeof(*desc),
2056 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2057
2058 desc = dd->dd_xore_vaddr;
2059
2060 printf("%d (0x%lx)\n", i, dd->dd_paddr);
2061
2062 snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
2063 desc->stat);
2064 printf(" Status : 0x%s\n", buf);
2065 if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
2066 printf(" CRC-32 Result : 0x%08x\n",
2067 desc->result);
2068 snprintb(buf, sizeof(buf),
2069 "\177\020b\037EODIntEn\0b\036CRCLast\0"
2070 "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
2071 "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
2072 desc->cmd);
2073 printf(" Command : 0x%s\n", buf);
2074 printf(" Next Descriptor Address : 0x%08x\n", desc->nextda);
2075 printf(" Byte Count : 0x%06x\n", desc->bcnt);
2076 printf(" Destination Address : 0x%08x\n", desc->dstaddr);
2077 if (mode == MVXORE_XEXCR_OM_XOR) {
2078 for (j = 0; j < MVXORE_NSRC; j++)
2079 if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
2080 printf(" Source Address#%d :"
2081 " 0x%08x\n", j, desc->srcaddr[j]);
2082 } else
2083 printf(" Source Address : 0x%08x\n",
2084 desc->srcaddr[0]);
2085
2086 if (desc->nextda == (uint32_t)NULL)
2087 break;
2088
2089 if (!post)
2090 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2091 dd->dd_index * sizeof(*desc), sizeof(*desc),
2092 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2093
2094 i++;
2095 dd = SLIST_NEXT(dd, dd_next);
2096 }
2097 if (!post)
2098 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2099 dd->dd_index * sizeof(*desc), sizeof(*desc),
2100 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2101 }
2102 #endif
2103