gtidmac.c revision 1.13
1 1.13 kiyohara /*	$NetBSD: gtidmac.c,v 1.13 2017/01/07 14:44:26 kiyohara Exp $	*/
2 1.1 kiyohara /*
3 1.13 kiyohara * Copyright (c) 2008, 2012, 2016 KIYOHARA Takashi
4 1.1 kiyohara * All rights reserved.
5 1.1 kiyohara *
6 1.1 kiyohara * Redistribution and use in source and binary forms, with or without
7 1.1 kiyohara * modification, are permitted provided that the following conditions
8 1.1 kiyohara * are met:
9 1.1 kiyohara * 1. Redistributions of source code must retain the above copyright
10 1.1 kiyohara * notice, this list of conditions and the following disclaimer.
11 1.1 kiyohara * 2. Redistributions in binary form must reproduce the above copyright
12 1.1 kiyohara * notice, this list of conditions and the following disclaimer in the
13 1.1 kiyohara * documentation and/or other materials provided with the distribution.
14 1.1 kiyohara *
15 1.1 kiyohara * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 1.1 kiyohara * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 1.1 kiyohara * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 1.1 kiyohara * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 1.1 kiyohara * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 1.1 kiyohara * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 1.1 kiyohara * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 1.1 kiyohara * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 1.1 kiyohara * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 1.1 kiyohara * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 1.1 kiyohara * POSSIBILITY OF SUCH DAMAGE.
26 1.1 kiyohara */
27 1.1 kiyohara
28 1.1 kiyohara #include <sys/cdefs.h>
29 1.13 kiyohara __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.13 2017/01/07 14:44:26 kiyohara Exp $");
30 1.1 kiyohara
31 1.1 kiyohara #include <sys/param.h>
32 1.1 kiyohara #include <sys/bus.h>
33 1.1 kiyohara #include <sys/device.h>
34 1.1 kiyohara #include <sys/errno.h>
35 1.1 kiyohara #include <sys/endian.h>
36 1.2 kiyohara #include <sys/kmem.h>
37 1.1 kiyohara
38 1.1 kiyohara #include <uvm/uvm_param.h> /* For PAGE_SIZE */
39 1.1 kiyohara
40 1.1 kiyohara #include <dev/dmover/dmovervar.h>
41 1.1 kiyohara
42 1.1 kiyohara #include <dev/marvell/gtidmacreg.h>
43 1.1 kiyohara #include <dev/marvell/gtidmacvar.h>
44 1.1 kiyohara #include <dev/marvell/marvellreg.h>
45 1.1 kiyohara #include <dev/marvell/marvellvar.h>
46 1.1 kiyohara
47 1.1 kiyohara #include <prop/proplib.h>
48 1.1 kiyohara
49 1.1 kiyohara #include "locators.h"
50 1.1 kiyohara
51 1.1 kiyohara #ifdef GTIDMAC_DEBUG
52 1.1 kiyohara #define DPRINTF(x) if (gtidmac_debug) printf x
53 1.1 kiyohara int gtidmac_debug = 0;
54 1.1 kiyohara #else
55 1.1 kiyohara #define DPRINTF(x)
56 1.1 kiyohara #endif
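/*
 * Note: setting gtidmac_debug to a nonzero value enables the DPRINTF traces;
 * gtidmac_finish() additionally dumps the channel registers and descriptor
 * chain when it is greater than 1 (or when a transfer ends in error).
 */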
57 1.1 kiyohara
58 1.1 kiyohara #define GTIDMAC_NDESC 64
59 1.1 kiyohara #define GTIDMAC_MAXCHAN 8
60 1.1 kiyohara #define MVXORE_NDESC 128
61 1.1 kiyohara #define MVXORE_MAXCHAN 2
62 1.1 kiyohara
63 1.1 kiyohara #define GTIDMAC_NSEGS ((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 1.1 kiyohara #define MVXORE_NSEGS ((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
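/*
 * These are ceiling divisions: the worst-case number of PAGE_SIZE-sized DMA
 * segments needed to cover one maximum transfer.  Purely as an illustration
 * (the real constants live in gtidmacreg.h), a 1 MB MAXXFER with a 4 KB
 * PAGE_SIZE would give 256 segments.
 */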
65 1.1 kiyohara
66 1.1 kiyohara
67 1.1 kiyohara struct gtidmac_softc;
68 1.1 kiyohara
69 1.1 kiyohara struct gtidmac_function {
70 1.1 kiyohara int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 1.1 kiyohara void (*chan_free)(void *, int);
72 1.1 kiyohara int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 1.1 kiyohara bus_size_t);
74 1.1 kiyohara void (*dma_start)(void *, int,
75 1.1 kiyohara void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 1.1 kiyohara bus_dmamap_t *, int));
77 1.1 kiyohara uint32_t (*dma_finish)(void *, int, int);
78 1.1 kiyohara };
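/*
 * Per-engine ops vector used by the dmover(9) glue below.  A rough sketch of
 * the call sequence (not verbatim code; see gtidmac_dmover_run() and
 * gtidmac_dmover_done() for the real thing):
 *
 *	chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
 *	(*df->dma_setup)(sc, chan, ninputs, dmamap_in, dmamap_out, size);
 *	(*df->dma_start)(sc, chan, gtidmac_dmover_done);
 *	  ... completion interrupt fires ...
 *	result = (*df->dma_finish)(sc, chan, error);
 *	(*df->chan_free)(sc, chan);
 */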
79 1.1 kiyohara
80 1.1 kiyohara struct gtidmac_dma_desc {
81 1.1 kiyohara int dd_index;
82 1.1 kiyohara union {
83 1.1 kiyohara struct gtidmac_desc *idmac_vaddr;
84 1.1 kiyohara struct mvxore_desc *xore_vaddr;
85 1.1 kiyohara } dd_vaddr;
86 1.1 kiyohara #define dd_idmac_vaddr dd_vaddr.idmac_vaddr
87 1.1 kiyohara #define dd_xore_vaddr dd_vaddr.xore_vaddr
88 1.1 kiyohara paddr_t dd_paddr;
89 1.1 kiyohara SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 1.1 kiyohara };
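/*
 * One entry per hardware descriptor.  dd_vaddr is the CPU view of the
 * descriptor inside sc_dbuf/sc_dbuf_xore, dd_paddr is the bus address the
 * engine follows through nextdp/nextda, and free entries sit on
 * sc_dlist/sc_dlist_xore until gtidmac_setup()/mvxore_setup() chain them.
 */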
91 1.1 kiyohara
92 1.1 kiyohara struct gtidmac_softc {
93 1.1 kiyohara device_t sc_dev;
94 1.1 kiyohara
95 1.1 kiyohara bus_space_tag_t sc_iot;
96 1.1 kiyohara bus_space_handle_t sc_ioh;
97 1.1 kiyohara
98 1.1 kiyohara bus_dma_tag_t sc_dmat;
99 1.1 kiyohara struct gtidmac_dma_desc *sc_dd_buffer;
100 1.1 kiyohara bus_dma_segment_t sc_pattern_segment;
101 1.1 kiyohara struct {
102 1.1 kiyohara 		u_char pbuf[16];	/* 16 bytes per pattern */
103 1.1 kiyohara 	} *sc_pbuf;			/* 256 patterns */
104 1.1 kiyohara
105 1.1 kiyohara int sc_gtidmac_nchan;
106 1.1 kiyohara struct gtidmac_desc *sc_dbuf;
107 1.1 kiyohara bus_dmamap_t sc_dmap;
108 1.1 kiyohara SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 1.1 kiyohara struct {
110 1.1 kiyohara bus_dmamap_t chan_in; /* In dmamap */
111 1.1 kiyohara bus_dmamap_t chan_out; /* Out dmamap */
112 1.1 kiyohara 		uint64_t chan_totalcnt;	/* total transferred bytes */
113 1.1 kiyohara int chan_ddidx;
114 1.1 kiyohara void *chan_running; /* opaque object data */
115 1.1 kiyohara void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 1.1 kiyohara bus_dmamap_t *, int);
117 1.1 kiyohara } sc_cdesc[GTIDMAC_MAXCHAN];
118 1.1 kiyohara struct gtidmac_intr_arg {
119 1.1 kiyohara struct gtidmac_softc *ia_sc;
120 1.1 kiyohara uint32_t ia_cause;
121 1.1 kiyohara uint32_t ia_mask;
122 1.1 kiyohara uint32_t ia_eaddr;
123 1.1 kiyohara uint32_t ia_eselect;
124 1.1 kiyohara } sc_intrarg[GTIDMAC_NINTRRUPT];
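	/*
	 * One entry per IDMAC interrupt line.  gtidmac_attach() points each
	 * entry at the cause/error-address/error-select registers of its
	 * channel group and hands it to gtidmac_intr() as the handler
	 * argument.
	 */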
125 1.1 kiyohara
126 1.1 kiyohara int sc_mvxore_nchan;
127 1.1 kiyohara struct mvxore_desc *sc_dbuf_xore;
128 1.1 kiyohara bus_dmamap_t sc_dmap_xore;
129 1.1 kiyohara SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 1.1 kiyohara struct {
131 1.1 kiyohara bus_dmamap_t chan_in[MVXORE_NSRC]; /* In dmamap */
132 1.1 kiyohara bus_dmamap_t chan_out; /* Out dmamap */
133 1.1 kiyohara 		uint64_t chan_totalcnt;	/* total transferred bytes */
134 1.1 kiyohara int chan_ddidx;
135 1.1 kiyohara void *chan_running; /* opaque object data */
136 1.1 kiyohara void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 1.1 kiyohara bus_dmamap_t *, int);
138 1.1 kiyohara } sc_cdesc_xore[MVXORE_MAXCHAN];
139 1.1 kiyohara
140 1.1 kiyohara struct dmover_backend sc_dmb;
141 1.1 kiyohara struct dmover_backend sc_dmb_xore;
142 1.1 kiyohara int sc_dmb_busy;
143 1.1 kiyohara };
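/*
 * A single softc covers both engines: the IDMA controller and the XOR engine
 * are reached through the same register window (sc_ioh) and the same device
 * instance on these chips.
 */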
144 1.1 kiyohara struct gtidmac_softc *gtidmac_softc = NULL;
145 1.1 kiyohara
146 1.1 kiyohara static int gtidmac_match(device_t, struct cfdata *, void *);
147 1.1 kiyohara static void gtidmac_attach(device_t, device_t, void *);
148 1.1 kiyohara
149 1.1 kiyohara static int gtidmac_intr(void *);
150 1.8 kiyohara static int mvxore_port0_intr(void *);
151 1.8 kiyohara static int mvxore_port1_intr(void *);
152 1.8 kiyohara static int mvxore_intr(struct gtidmac_softc *, int);
153 1.1 kiyohara
154 1.1 kiyohara static void gtidmac_process(struct dmover_backend *);
155 1.1 kiyohara static void gtidmac_dmover_run(struct dmover_backend *);
156 1.1 kiyohara static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
157 1.1 kiyohara int);
158 1.9 msaitoh static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
159 1.1 kiyohara dmover_buffer_type, dmover_buffer *, int);
160 1.9 msaitoh static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
161 1.1 kiyohara
162 1.1 kiyohara static uint32_t gtidmac_finish(void *, int, int);
163 1.1 kiyohara static uint32_t mvxore_finish(void *, int, int);
164 1.1 kiyohara
165 1.11 kiyohara static void gtidmac_wininit(struct gtidmac_softc *, enum marvell_tags *);
166 1.11 kiyohara static void mvxore_wininit(struct gtidmac_softc *, enum marvell_tags *);
167 1.1 kiyohara
168 1.8 kiyohara static int gtidmac_buffer_setup(struct gtidmac_softc *);
169 1.8 kiyohara static int mvxore_buffer_setup(struct gtidmac_softc *);
170 1.8 kiyohara
171 1.1 kiyohara #ifdef GTIDMAC_DEBUG
172 1.1 kiyohara static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
173 1.1 kiyohara static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
174 1.1 kiyohara struct gtidmac_dma_desc *, uint32_t, int);
175 1.1 kiyohara static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
176 1.1 kiyohara static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
177 1.1 kiyohara struct gtidmac_dma_desc *, uint32_t, int);
178 1.1 kiyohara #endif
179 1.1 kiyohara
180 1.1 kiyohara
181 1.1 kiyohara static struct gtidmac_function gtidmac_functions = {
182 1.1 kiyohara .chan_alloc = gtidmac_chan_alloc,
183 1.1 kiyohara .chan_free = gtidmac_chan_free,
184 1.1 kiyohara .dma_setup = gtidmac_setup,
185 1.1 kiyohara .dma_start = gtidmac_start,
186 1.1 kiyohara .dma_finish = gtidmac_finish,
187 1.1 kiyohara };
188 1.1 kiyohara
189 1.1 kiyohara static struct gtidmac_function mvxore_functions = {
190 1.1 kiyohara .chan_alloc = mvxore_chan_alloc,
191 1.1 kiyohara .chan_free = mvxore_chan_free,
192 1.1 kiyohara .dma_setup = mvxore_setup,
193 1.1 kiyohara .dma_start = mvxore_start,
194 1.1 kiyohara .dma_finish = mvxore_finish,
195 1.1 kiyohara };
196 1.1 kiyohara
197 1.1 kiyohara static const struct dmover_algdesc gtidmac_algdescs[] = {
198 1.1 kiyohara {
199 1.1 kiyohara .dad_name = DMOVER_FUNC_ZERO,
200 1.1 kiyohara 		.dad_data = &gtidmac_functions,
201 1.1 kiyohara .dad_ninputs = 0
202 1.1 kiyohara },
203 1.1 kiyohara {
204 1.1 kiyohara .dad_name = DMOVER_FUNC_FILL8,
205 1.1 kiyohara 		.dad_data = &gtidmac_functions,
206 1.1 kiyohara .dad_ninputs = 0
207 1.1 kiyohara },
208 1.1 kiyohara {
209 1.1 kiyohara .dad_name = DMOVER_FUNC_COPY,
210 1.1 kiyohara 		.dad_data = &gtidmac_functions,
211 1.1 kiyohara .dad_ninputs = 1
212 1.1 kiyohara },
213 1.1 kiyohara };
214 1.1 kiyohara
215 1.1 kiyohara static const struct dmover_algdesc mvxore_algdescs[] = {
216 1.1 kiyohara #if 0
217 1.1 kiyohara /*
218 1.1 kiyohara 	 * These operations have too many restrictions on the XOR engine;
219 1.1 kiyohara 	 * it is necessary to use the IDMAC for them instead.
220 1.1 kiyohara */
221 1.1 kiyohara {
222 1.1 kiyohara .dad_name = DMOVER_FUNC_ZERO,
223 1.1 kiyohara .dad_data = &mvxore_functions,
224 1.1 kiyohara .dad_ninputs = 0
225 1.1 kiyohara },
226 1.1 kiyohara {
227 1.1 kiyohara .dad_name = DMOVER_FUNC_FILL8,
228 1.1 kiyohara .dad_data = &mvxore_functions,
229 1.1 kiyohara .dad_ninputs = 0
230 1.1 kiyohara },
231 1.1 kiyohara #endif
232 1.1 kiyohara {
233 1.1 kiyohara .dad_name = DMOVER_FUNC_COPY,
234 1.1 kiyohara .dad_data = &mvxore_functions,
235 1.1 kiyohara .dad_ninputs = 1
236 1.1 kiyohara },
237 1.1 kiyohara {
238 1.1 kiyohara .dad_name = DMOVER_FUNC_ISCSI_CRC32C,
239 1.1 kiyohara .dad_data = &mvxore_functions,
240 1.1 kiyohara .dad_ninputs = 1
241 1.1 kiyohara },
242 1.1 kiyohara {
243 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR2,
244 1.1 kiyohara .dad_data = &mvxore_functions,
245 1.1 kiyohara .dad_ninputs = 2
246 1.1 kiyohara },
247 1.1 kiyohara {
248 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR3,
249 1.1 kiyohara .dad_data = &mvxore_functions,
250 1.1 kiyohara .dad_ninputs = 3
251 1.1 kiyohara },
252 1.1 kiyohara {
253 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR4,
254 1.1 kiyohara .dad_data = &mvxore_functions,
255 1.1 kiyohara .dad_ninputs = 4
256 1.1 kiyohara },
257 1.1 kiyohara {
258 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR5,
259 1.1 kiyohara .dad_data = &mvxore_functions,
260 1.1 kiyohara .dad_ninputs = 5
261 1.1 kiyohara },
262 1.1 kiyohara {
263 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR6,
264 1.1 kiyohara .dad_data = &mvxore_functions,
265 1.1 kiyohara .dad_ninputs = 6
266 1.1 kiyohara },
267 1.1 kiyohara {
268 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR7,
269 1.1 kiyohara .dad_data = &mvxore_functions,
270 1.1 kiyohara .dad_ninputs = 7
271 1.1 kiyohara },
272 1.1 kiyohara {
273 1.1 kiyohara .dad_name = DMOVER_FUNC_XOR8,
274 1.1 kiyohara .dad_data = &mvxore_functions,
275 1.1 kiyohara .dad_ninputs = 8
276 1.1 kiyohara },
277 1.1 kiyohara };
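/*
 * Both tables above are handed to dmover_backend_register() in
 * gtidmac_attach(), so dmover(9) can assign requests to either engine.  The
 * XOR engine additionally advertises iSCSI CRC32C and XOR2..XOR8; its
 * ZERO/FILL8 entries stay disabled (see the #if 0 above) in favour of the
 * IDMAC.
 */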
278 1.1 kiyohara
279 1.13 kiyohara static int orion_88f5182_xore_irqs[] = { 30, 31 };
280 1.13 kiyohara static int kirkwood_xore_irqs[] = { 5, 6, 7, 8 };
281 1.13 kiyohara static int dove_xore_irqs[] = { 39, 40, 42, 43 };
282 1.13 kiyohara static int armadaxp_xore_irqs0[] = { 51, 52 };
283 1.13 kiyohara static int armadaxp_xore_irqs1[] = { 94, 95 };
284 1.13 kiyohara
285 1.8 kiyohara static struct {
286 1.8 kiyohara int model;
287 1.8 kiyohara int idmac_nchan;
288 1.8 kiyohara int idmac_irq;
289 1.8 kiyohara int xore_nchan;
290 1.13 kiyohara int *xore_irqs;
291 1.8 kiyohara } channels[] = {
292 1.8 kiyohara /*
293 1.8 kiyohara * Marvell System Controllers:
294 1.8 kiyohara 	 * IRQs must be supplied in attach_args.
295 1.8 kiyohara */
296 1.13 kiyohara { MARVELL_DISCOVERY, 8, -1, 0, NULL },
297 1.13 kiyohara { MARVELL_DISCOVERY_II, 8, -1, 0, NULL },
298 1.13 kiyohara { MARVELL_DISCOVERY_III, 8, -1, 0, NULL },
299 1.8 kiyohara #if 0
300 1.13 kiyohara { MARVELL_DISCOVERY_LT, 4, -1, 2, NULL },
301 1.13 kiyohara { MARVELL_DISCOVERY_V, 4, -1, 2, NULL },
302 1.13 kiyohara { MARVELL_DISCOVERY_VI, 4, -1, 2, NULL }, ????
303 1.8 kiyohara #endif
304 1.8 kiyohara
305 1.8 kiyohara /*
306 1.8 kiyohara * Marvell System on Chips:
307 1.8 kiyohara 	 * No IRQs are needed in attach_args; the channels are always wired
308 1.8 kiyohara 	 * to interrupt pins statically.
309 1.8 kiyohara */
310 1.13 kiyohara { MARVELL_ORION_1_88F1181, 4, 24, 0, NULL },
311 1.13 kiyohara { MARVELL_ORION_2_88F1281, 4, 24, 0, NULL },
312 1.13 kiyohara { MARVELL_ORION_1_88F5082, 4, 24, 0, NULL },
313 1.13 kiyohara { MARVELL_ORION_1_88F5180N, 4, 24, 0, NULL },
314 1.13 kiyohara { MARVELL_ORION_1_88F5181, 4, 24, 0, NULL },
315 1.13 kiyohara { MARVELL_ORION_1_88F5182, 4, 24, 2, orion_88f5182_xore_irqs },
316 1.13 kiyohara { MARVELL_ORION_2_88F5281, 4, 24, 0, NULL },
317 1.13 kiyohara { MARVELL_ORION_1_88W8660, 4, 24, 0, NULL },
318 1.13 kiyohara { MARVELL_KIRKWOOD_88F6180, 0, -1, 4, kirkwood_xore_irqs },
319 1.13 kiyohara { MARVELL_KIRKWOOD_88F6192, 0, -1, 4, kirkwood_xore_irqs },
320 1.13 kiyohara { MARVELL_KIRKWOOD_88F6281, 0, -1, 4, kirkwood_xore_irqs },
321 1.13 kiyohara { MARVELL_KIRKWOOD_88F6282, 0, -1, 4, kirkwood_xore_irqs },
322 1.13 kiyohara { MARVELL_DOVE_88AP510, 0, -1, 4, dove_xore_irqs },
323 1.13 kiyohara { MARVELL_ARMADAXP_MV78130, 4, 33, 2, armadaxp_xore_irqs0 },
324 1.13 kiyohara { MARVELL_ARMADAXP_MV78130, 0, -1, 2, armadaxp_xore_irqs1 },
325 1.13 kiyohara { MARVELL_ARMADAXP_MV78160, 4, 33, 2, armadaxp_xore_irqs0 },
326 1.13 kiyohara { MARVELL_ARMADAXP_MV78160, 0, -1, 2, armadaxp_xore_irqs1 },
327 1.13 kiyohara { MARVELL_ARMADAXP_MV78230, 4, 33, 2, armadaxp_xore_irqs0 },
328 1.13 kiyohara { MARVELL_ARMADAXP_MV78230, 0, -1, 2, armadaxp_xore_irqs1 },
329 1.13 kiyohara { MARVELL_ARMADAXP_MV78260, 4, 33, 2, armadaxp_xore_irqs0 },
330 1.13 kiyohara { MARVELL_ARMADAXP_MV78260, 0, -1, 2, armadaxp_xore_irqs1 },
331 1.13 kiyohara { MARVELL_ARMADAXP_MV78460, 4, 33, 2, armadaxp_xore_irqs0 },
332 1.13 kiyohara { MARVELL_ARMADAXP_MV78460, 0, -1, 2, armadaxp_xore_irqs1 },
333 1.8 kiyohara };
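/*
 * gtidmac_match()/gtidmac_attach() scan this table for the (model, unit)
 * pair; successive entries with the same model describe successive units.
 * For example, each Armada XP chip appears twice: the first unit carries the
 * IDMAC plus one XOR engine, the second carries the other XOR engine only.
 */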
334 1.8 kiyohara
335 1.11 kiyohara struct gtidmac_winacctbl *gtidmac_winacctbl;
336 1.11 kiyohara struct gtidmac_winacctbl *mvxore_winacctbl;
337 1.11 kiyohara
338 1.1 kiyohara CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
339 1.1 kiyohara gtidmac_match, gtidmac_attach, NULL, NULL);
340 1.1 kiyohara CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
341 1.1 kiyohara gtidmac_match, gtidmac_attach, NULL, NULL);
342 1.1 kiyohara
343 1.1 kiyohara
344 1.1 kiyohara /* ARGSUSED */
345 1.1 kiyohara static int
346 1.1 kiyohara gtidmac_match(device_t parent, struct cfdata *match, void *aux)
347 1.1 kiyohara {
348 1.1 kiyohara struct marvell_attach_args *mva = aux;
349 1.10 kiyohara int unit, i;
350 1.1 kiyohara
351 1.1 kiyohara if (strcmp(mva->mva_name, match->cf_name) != 0)
352 1.1 kiyohara return 0;
353 1.8 kiyohara if (mva->mva_offset == MVA_OFFSET_DEFAULT)
354 1.1 kiyohara return 0;
355 1.10 kiyohara unit = 0;
356 1.8 kiyohara for (i = 0; i < __arraycount(channels); i++)
357 1.8 kiyohara if (mva->mva_model == channels[i].model) {
358 1.10 kiyohara if (mva->mva_unit == unit) {
359 1.10 kiyohara mva->mva_size = GTIDMAC_SIZE;
360 1.10 kiyohara return 1;
361 1.10 kiyohara }
362 1.10 kiyohara unit++;
363 1.8 kiyohara }
364 1.8 kiyohara return 0;
365 1.1 kiyohara }
366 1.1 kiyohara
367 1.1 kiyohara /* ARGSUSED */
368 1.1 kiyohara static void
369 1.1 kiyohara gtidmac_attach(device_t parent, device_t self, void *aux)
370 1.1 kiyohara {
371 1.1 kiyohara struct gtidmac_softc *sc = device_private(self);
372 1.1 kiyohara struct marvell_attach_args *mva = aux;
373 1.1 kiyohara prop_dictionary_t dict = device_properties(self);
374 1.13 kiyohara uint32_t idmac_irq, xore_irq, *xore_irqs, dmb_speed;
375 1.10 kiyohara int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;
376 1.1 kiyohara
377 1.10 kiyohara unit = 0;
378 1.8 kiyohara for (i = 0; i < __arraycount(channels); i++)
379 1.10 kiyohara if (mva->mva_model == channels[i].model) {
380 1.10 kiyohara if (mva->mva_unit == unit)
381 1.10 kiyohara break;
382 1.10 kiyohara unit++;
383 1.10 kiyohara }
384 1.8 kiyohara idmac_nchan = channels[i].idmac_nchan;
385 1.8 kiyohara idmac_irq = channels[i].idmac_irq;
386 1.8 kiyohara if (idmac_nchan != 0) {
387 1.8 kiyohara if (idmac_irq == -1)
388 1.8 kiyohara idmac_irq = mva->mva_irq;
389 1.8 kiyohara if (idmac_irq == -1)
390 1.8 kiyohara /* Discovery */
391 1.8 kiyohara if (!prop_dictionary_get_uint32(dict,
392 1.8 kiyohara "idmac-irq", &idmac_irq)) {
393 1.8 kiyohara aprint_error(": no idmac-irq property\n");
394 1.8 kiyohara return;
395 1.8 kiyohara }
396 1.8 kiyohara }
397 1.8 kiyohara xore_nchan = channels[i].xore_nchan;
398 1.13 kiyohara xore_irqs = channels[i].xore_irqs;
399 1.13 kiyohara xore_irq = MVA_IRQ_DEFAULT;
400 1.8 kiyohara if (xore_nchan != 0) {
401 1.13 kiyohara if (xore_irqs == NULL)
402 1.8 kiyohara xore_irq = mva->mva_irq;
403 1.13 kiyohara if (xore_irqs == NULL && xore_irq == MVA_IRQ_DEFAULT)
404 1.8 kiyohara /* Discovery LT/V/VI */
405 1.8 kiyohara if (!prop_dictionary_get_uint32(dict,
406 1.8 kiyohara "xore-irq", &xore_irq)) {
407 1.8 kiyohara aprint_error(": no xore-irq property\n");
408 1.8 kiyohara return;
409 1.8 kiyohara }
410 1.1 kiyohara }
411 1.1 kiyohara
412 1.1 kiyohara aprint_naive("\n");
413 1.1 kiyohara aprint_normal(": Marvell IDMA Controller%s\n",
414 1.1 kiyohara xore_nchan ? "/XOR Engine" : "");
415 1.8 kiyohara if (idmac_nchan > 0)
416 1.8 kiyohara aprint_normal_dev(self,
417 1.8 kiyohara "IDMA Controller %d channels, intr %d...%d\n",
418 1.8 kiyohara idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
419 1.13 kiyohara if (xore_nchan > 0) {
420 1.13 kiyohara aprint_normal_dev(self, "XOR Engine %d channels", xore_nchan);
421 1.13 kiyohara if (xore_irqs == NULL)
422 1.13 kiyohara aprint_normal(", intr %d...%d\n",
423 1.13 kiyohara xore_irq, xore_irq + xore_nchan - 1);
424 1.13 kiyohara else {
425 1.13 kiyohara aprint_normal(", intr %d", xore_irqs[0]);
426 1.13 kiyohara for (i = 1; i < xore_nchan; i++)
427 1.13 kiyohara aprint_normal(", %d", xore_irqs[i]);
428 1.13 kiyohara aprint_normal("\n");
429 1.13 kiyohara }
430 1.13 kiyohara }
431 1.1 kiyohara
432 1.1 kiyohara sc->sc_dev = self;
433 1.1 kiyohara sc->sc_iot = mva->mva_iot;
434 1.1 kiyohara
435 1.1 kiyohara /* Map I/O registers */
436 1.1 kiyohara if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
437 1.1 kiyohara mva->mva_size, &sc->sc_ioh)) {
438 1.1 kiyohara aprint_error_dev(self, "can't map registers\n");
439 1.1 kiyohara return;
440 1.1 kiyohara }
441 1.1 kiyohara
442 1.1 kiyohara /*
443 1.1 kiyohara * Initialise DMA descriptors and associated metadata
444 1.1 kiyohara */
445 1.1 kiyohara sc->sc_dmat = mva->mva_dmat;
446 1.1 kiyohara n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
447 1.1 kiyohara sc->sc_dd_buffer =
448 1.2 kiyohara kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
449 1.1 kiyohara if (sc->sc_dd_buffer == NULL) {
450 1.1 kiyohara aprint_error_dev(self, "can't allocate memory\n");
451 1.1 kiyohara goto fail1;
452 1.1 kiyohara }
453 1.1 kiyohara /* pattern buffer */
454 1.1 kiyohara if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
455 1.1 kiyohara &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
456 1.1 kiyohara aprint_error_dev(self,
457 1.1 kiyohara "bus_dmamem_alloc failed: pattern buffer\n");
458 1.1 kiyohara goto fail2;
459 1.1 kiyohara }
460 1.1 kiyohara if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
461 1.1 kiyohara (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
462 1.1 kiyohara aprint_error_dev(self,
463 1.1 kiyohara "bus_dmamem_map failed: pattern buffer\n");
464 1.1 kiyohara goto fail3;
465 1.1 kiyohara }
466 1.1 kiyohara for (i = 0; i < 0x100; i++)
467 1.1 kiyohara for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
468 1.1 kiyohara sc->sc_pbuf[i].pbuf[j] = i;
469 1.1 kiyohara
470 1.1 kiyohara if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
471 1.1 kiyohara aprint_error_dev(self, "no dmb_speed property\n");
472 1.8 kiyohara 		dmb_speed = 10;	/* Perhaps faster than swdmover. */
473 1.1 kiyohara }
474 1.1 kiyohara
475 1.8 kiyohara /* IDMAC DMA descriptor buffer */
476 1.8 kiyohara sc->sc_gtidmac_nchan = idmac_nchan;
477 1.8 kiyohara if (sc->sc_gtidmac_nchan > 0) {
478 1.8 kiyohara if (gtidmac_buffer_setup(sc) != 0)
479 1.8 kiyohara goto fail4;
480 1.8 kiyohara
481 1.8 kiyohara if (mva->mva_model != MARVELL_DISCOVERY)
482 1.11 kiyohara gtidmac_wininit(sc, mva->mva_tags);
483 1.8 kiyohara
484 1.8 kiyohara /* Setup interrupt */
485 1.8 kiyohara for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
486 1.8 kiyohara j = i * idmac_nchan / GTIDMAC_NINTRRUPT;
487 1.8 kiyohara
488 1.8 kiyohara sc->sc_intrarg[i].ia_sc = sc;
489 1.8 kiyohara sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
490 1.8 kiyohara sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
491 1.8 kiyohara sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
492 1.8 kiyohara marvell_intr_establish(idmac_irq + i, IPL_BIO,
493 1.8 kiyohara gtidmac_intr, &sc->sc_intrarg[i]);
494 1.1 kiyohara }
495 1.1 kiyohara
496 1.8 kiyohara /* Register us with dmover. */
497 1.8 kiyohara sc->sc_dmb.dmb_name = device_xname(self);
498 1.8 kiyohara sc->sc_dmb.dmb_speed = dmb_speed;
499 1.8 kiyohara sc->sc_dmb.dmb_cookie = sc;
500 1.8 kiyohara sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
501 1.8 kiyohara sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
502 1.8 kiyohara sc->sc_dmb.dmb_process = gtidmac_process;
503 1.8 kiyohara dmover_backend_register(&sc->sc_dmb);
504 1.8 kiyohara sc->sc_dmb_busy = 0;
505 1.8 kiyohara }
506 1.8 kiyohara
507 1.8 kiyohara /* XORE DMA descriptor buffer */
508 1.8 kiyohara sc->sc_mvxore_nchan = xore_nchan;
509 1.8 kiyohara if (sc->sc_mvxore_nchan > 0) {
510 1.8 kiyohara if (mvxore_buffer_setup(sc) != 0)
511 1.8 kiyohara goto fail5;
512 1.8 kiyohara
513 1.8 kiyohara /* Setup interrupt */
514 1.8 kiyohara for (i = 0; i < sc->sc_mvxore_nchan; i++)
515 1.13 kiyohara marvell_intr_establish(
516 1.13 kiyohara xore_irqs != NULL ? xore_irqs[i] : xore_irq + i,
517 1.13 kiyohara IPL_BIO,
518 1.8 kiyohara (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
519 1.8 kiyohara sc);
520 1.1 kiyohara
521 1.11 kiyohara mvxore_wininit(sc, mva->mva_tags);
522 1.1 kiyohara
523 1.1 kiyohara /* Register us with dmover. */
524 1.1 kiyohara sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
525 1.1 kiyohara sc->sc_dmb_xore.dmb_speed = dmb_speed;
526 1.1 kiyohara sc->sc_dmb_xore.dmb_cookie = sc;
527 1.1 kiyohara sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
528 1.13 kiyohara sc->sc_dmb_xore.dmb_nalgdescs = __arraycount(mvxore_algdescs);
529 1.1 kiyohara sc->sc_dmb_xore.dmb_process = gtidmac_process;
530 1.1 kiyohara dmover_backend_register(&sc->sc_dmb_xore);
531 1.1 kiyohara }
532 1.1 kiyohara
533 1.1 kiyohara gtidmac_softc = sc;
534 1.1 kiyohara
535 1.1 kiyohara return;
536 1.1 kiyohara
537 1.8 kiyohara fail5:
538 1.8 kiyohara for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
539 1.1 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
540 1.1 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
541 1.1 kiyohara }
542 1.1 kiyohara bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
543 1.1 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
544 1.1 kiyohara bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
545 1.1 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC);
546 1.8 kiyohara bus_dmamem_free(sc->sc_dmat,
547 1.8 kiyohara sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
548 1.1 kiyohara fail4:
549 1.1 kiyohara bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
550 1.1 kiyohara fail3:
551 1.1 kiyohara bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
552 1.1 kiyohara fail2:
553 1.2 kiyohara kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
554 1.1 kiyohara fail1:
555 1.1 kiyohara bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
556 1.1 kiyohara return;
557 1.1 kiyohara }
558 1.1 kiyohara
559 1.1 kiyohara
560 1.1 kiyohara static int
561 1.1 kiyohara gtidmac_intr(void *arg)
562 1.1 kiyohara {
563 1.1 kiyohara struct gtidmac_intr_arg *ia = arg;
564 1.1 kiyohara struct gtidmac_softc *sc = ia->ia_sc;
565 1.1 kiyohara uint32_t cause;
566 1.1 kiyohara int handled = 0, chan, error;
567 1.1 kiyohara
568 1.1 kiyohara cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
569 1.1 kiyohara DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
570 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
571 1.1 kiyohara
572 1.1 kiyohara chan = 0;
573 1.1 kiyohara while (cause) {
574 1.1 kiyohara error = 0;
575 1.1 kiyohara if (cause & GTIDMAC_I_ADDRMISS) {
576 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Address Miss");
577 1.1 kiyohara error = EINVAL;
578 1.1 kiyohara }
579 1.1 kiyohara if (cause & GTIDMAC_I_ACCPROT) {
580 1.1 kiyohara aprint_error_dev(sc->sc_dev,
581 1.1 kiyohara "Access Protect Violation");
582 1.1 kiyohara error = EACCES;
583 1.1 kiyohara }
584 1.1 kiyohara if (cause & GTIDMAC_I_WRPROT) {
585 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Write Protect");
586 1.1 kiyohara error = EACCES;
587 1.1 kiyohara }
588 1.1 kiyohara if (cause & GTIDMAC_I_OWN) {
589 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Ownership Violation");
590 1.1 kiyohara error = EINVAL;
591 1.1 kiyohara }
592 1.1 kiyohara
593 1.1 kiyohara #define GTIDMAC_I_ERROR \
594 1.1 kiyohara (GTIDMAC_I_ADDRMISS | \
595 1.1 kiyohara GTIDMAC_I_ACCPROT | \
596 1.1 kiyohara GTIDMAC_I_WRPROT | \
597 1.1 kiyohara GTIDMAC_I_OWN)
598 1.1 kiyohara if (cause & GTIDMAC_I_ERROR) {
599 1.1 kiyohara uint32_t sel;
600 1.1 kiyohara int select;
601 1.1 kiyohara
602 1.1 kiyohara sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
603 1.1 kiyohara ia->ia_eselect) & GTIDMAC_ESR_SEL;
604 1.1 kiyohara select = sel - chan * GTIDMAC_I_BITS;
605 1.1 kiyohara if (select >= 0 && select < GTIDMAC_I_BITS) {
606 1.1 kiyohara uint32_t ear;
607 1.1 kiyohara
608 1.1 kiyohara ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
609 1.1 kiyohara ia->ia_eaddr);
610 1.1 kiyohara aprint_error(": Error Address 0x%x\n", ear);
611 1.1 kiyohara } else
612 1.1 kiyohara aprint_error(": lost Error Address\n");
613 1.1 kiyohara }
614 1.1 kiyohara
615 1.1 kiyohara if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
616 1.1 kiyohara sc->sc_cdesc[chan].chan_dma_done(
617 1.1 kiyohara sc->sc_cdesc[chan].chan_running, chan,
618 1.1 kiyohara &sc->sc_cdesc[chan].chan_in,
619 1.1 kiyohara &sc->sc_cdesc[chan].chan_out, error);
620 1.1 kiyohara handled++;
621 1.1 kiyohara }
622 1.1 kiyohara
623 1.1 kiyohara cause >>= GTIDMAC_I_BITS;
624 1.1 kiyohara }
625 1.1 kiyohara DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
626 1.1 kiyohara
627 1.1 kiyohara return handled;
628 1.1 kiyohara }
629 1.1 kiyohara
630 1.1 kiyohara static int
631 1.8 kiyohara mvxore_port0_intr(void *arg)
632 1.1 kiyohara {
633 1.1 kiyohara struct gtidmac_softc *sc = arg;
634 1.8 kiyohara
635 1.8 kiyohara return mvxore_intr(sc, 0);
636 1.8 kiyohara }
637 1.8 kiyohara
638 1.8 kiyohara static int
639 1.8 kiyohara mvxore_port1_intr(void *arg)
640 1.8 kiyohara {
641 1.8 kiyohara struct gtidmac_softc *sc = arg;
642 1.8 kiyohara
643 1.8 kiyohara return mvxore_intr(sc, 1);
644 1.8 kiyohara }
645 1.8 kiyohara
646 1.8 kiyohara static int
647 1.8 kiyohara mvxore_intr(struct gtidmac_softc *sc, int port)
648 1.8 kiyohara {
649 1.1 kiyohara uint32_t cause;
650 1.1 kiyohara int handled = 0, chan, error;
651 1.1 kiyohara
652 1.8 kiyohara cause =
653 1.8 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
654 1.8 kiyohara DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
655 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
656 1.8 kiyohara MVXORE_XEICR(sc, port), ~cause);
657 1.1 kiyohara
658 1.1 kiyohara chan = 0;
659 1.1 kiyohara while (cause) {
660 1.1 kiyohara error = 0;
661 1.1 kiyohara if (cause & MVXORE_I_ADDRDECODE) {
662 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Failed address decoding");
663 1.1 kiyohara error = EINVAL;
664 1.1 kiyohara }
665 1.1 kiyohara if (cause & MVXORE_I_ACCPROT) {
666 1.1 kiyohara aprint_error_dev(sc->sc_dev,
667 1.1 kiyohara "Access Protect Violation");
668 1.1 kiyohara error = EACCES;
669 1.1 kiyohara }
670 1.1 kiyohara if (cause & MVXORE_I_WRPROT) {
671 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Write Protect");
672 1.1 kiyohara error = EACCES;
673 1.1 kiyohara }
674 1.1 kiyohara if (cause & MVXORE_I_OWN) {
675 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Ownership Violation");
676 1.1 kiyohara error = EINVAL;
677 1.1 kiyohara }
678 1.1 kiyohara if (cause & MVXORE_I_INTPARITY) {
679 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Parity Error");
680 1.1 kiyohara error = EIO;
681 1.1 kiyohara }
682 1.1 kiyohara if (cause & MVXORE_I_XBAR) {
683 1.1 kiyohara aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
684 1.1 kiyohara error = EINVAL;
685 1.1 kiyohara }
686 1.1 kiyohara
687 1.1 kiyohara #define MVXORE_I_ERROR \
688 1.1 kiyohara (MVXORE_I_ADDRDECODE | \
689 1.1 kiyohara MVXORE_I_ACCPROT | \
690 1.1 kiyohara MVXORE_I_WRPROT | \
691 1.1 kiyohara MVXORE_I_OWN | \
692 1.1 kiyohara MVXORE_I_INTPARITY | \
693 1.1 kiyohara MVXORE_I_XBAR)
694 1.1 kiyohara if (cause & MVXORE_I_ERROR) {
695 1.1 kiyohara uint32_t type;
696 1.1 kiyohara int event;
697 1.1 kiyohara
698 1.1 kiyohara type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
699 1.8 kiyohara MVXORE_XEECR(sc, port));
700 1.8 kiyohara type &= MVXORE_XEECR_ERRORTYPE_MASK;
701 1.1 kiyohara event = type - chan * MVXORE_I_BITS;
702 1.1 kiyohara if (event >= 0 && event < MVXORE_I_BITS) {
703 1.1 kiyohara uint32_t xeear;
704 1.1 kiyohara
705 1.1 kiyohara xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
706 1.8 kiyohara MVXORE_XEEAR(sc, port));
707 1.1 kiyohara aprint_error(": Error Address 0x%x\n", xeear);
708 1.1 kiyohara } else
709 1.1 kiyohara aprint_error(": lost Error Address\n");
710 1.1 kiyohara }
711 1.1 kiyohara
712 1.1 kiyohara if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
713 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_dma_done(
714 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_running, chan,
715 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_in,
716 1.1 kiyohara &sc->sc_cdesc_xore[chan].chan_out, error);
717 1.1 kiyohara handled++;
718 1.1 kiyohara }
719 1.1 kiyohara
720 1.1 kiyohara cause >>= MVXORE_I_BITS;
721 1.1 kiyohara }
722 1.8 kiyohara DPRINTF(("XORE port %d intr: %shandled\n",
723 1.8 kiyohara port, handled ? "" : "not "));
724 1.1 kiyohara
725 1.1 kiyohara return handled;
726 1.1 kiyohara }
727 1.1 kiyohara
728 1.1 kiyohara
729 1.1 kiyohara /*
730 1.1 kiyohara * dmover(9) backend function.
731 1.1 kiyohara */
732 1.1 kiyohara static void
733 1.1 kiyohara gtidmac_process(struct dmover_backend *dmb)
734 1.1 kiyohara {
735 1.1 kiyohara struct gtidmac_softc *sc = dmb->dmb_cookie;
736 1.1 kiyohara int s;
737 1.1 kiyohara
738 1.1 kiyohara /* If the backend is currently idle, go process the queue. */
739 1.1 kiyohara s = splbio();
740 1.1 kiyohara if (!sc->sc_dmb_busy)
741 1.1 kiyohara gtidmac_dmover_run(dmb);
742 1.1 kiyohara splx(s);
743 1.1 kiyohara }
744 1.1 kiyohara
745 1.1 kiyohara static void
746 1.1 kiyohara gtidmac_dmover_run(struct dmover_backend *dmb)
747 1.1 kiyohara {
748 1.1 kiyohara struct gtidmac_softc *sc = dmb->dmb_cookie;
749 1.1 kiyohara struct dmover_request *dreq;
750 1.1 kiyohara const struct dmover_algdesc *algdesc;
751 1.1 kiyohara struct gtidmac_function *df;
752 1.1 kiyohara bus_dmamap_t *dmamap_in, *dmamap_out;
753 1.1 kiyohara int chan, ninputs, error, i;
754 1.1 kiyohara
755 1.1 kiyohara sc->sc_dmb_busy = 1;
756 1.1 kiyohara
757 1.1 kiyohara for (;;) {
758 1.1 kiyohara dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
759 1.1 kiyohara if (dreq == NULL)
760 1.1 kiyohara break;
761 1.1 kiyohara algdesc = dreq->dreq_assignment->das_algdesc;
762 1.1 kiyohara df = algdesc->dad_data;
763 1.1 kiyohara chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
764 1.1 kiyohara if (chan == -1)
765 1.1 kiyohara return;
766 1.1 kiyohara
767 1.1 kiyohara dmover_backend_remque(dmb, dreq);
768 1.1 kiyohara dreq->dreq_flags |= DMOVER_REQ_RUNNING;
769 1.1 kiyohara
770 1.1 kiyohara /* XXXUNLOCK */
771 1.1 kiyohara
772 1.1 kiyohara error = 0;
773 1.1 kiyohara
774 1.1 kiyohara /* Load in/out buffers of dmover to bus_dmamap. */
775 1.1 kiyohara ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
776 1.1 kiyohara if (ninputs == 0) {
777 1.1 kiyohara int pno = 0;
778 1.1 kiyohara
779 1.1 kiyohara if (algdesc->dad_name == DMOVER_FUNC_FILL8)
780 1.1 kiyohara pno = dreq->dreq_immediate[0];
781 1.1 kiyohara
782 1.1 kiyohara i = 0;
783 1.1 kiyohara error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
784 1.1 kiyohara &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
785 1.1 kiyohara BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
786 1.1 kiyohara if (error == 0) {
787 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
788 1.13 kiyohara sizeof(sc->sc_pbuf[pno]),
789 1.13 kiyohara BUS_DMASYNC_PREWRITE);
790 1.1 kiyohara
791 1.1 kiyohara /*
792 1.1 kiyohara 				 * gtidmac_dmmap_unload() will be
793 1.1 kiyohara 				 * called below if an error occurs.
794 1.1 kiyohara */
795 1.1 kiyohara i = 1;
796 1.1 kiyohara }
797 1.1 kiyohara } else
798 1.1 kiyohara for (i = 0; i < ninputs; i++) {
799 1.1 kiyohara error = gtidmac_dmmap_load(sc,
800 1.1 kiyohara *(dmamap_in + i), dreq->dreq_inbuf_type,
801 1.1 kiyohara &dreq->dreq_inbuf[i], 0/*write*/);
802 1.1 kiyohara if (error != 0)
803 1.1 kiyohara break;
804 1.1 kiyohara }
805 1.1 kiyohara if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
806 1.1 kiyohara if (error == 0)
807 1.1 kiyohara error = gtidmac_dmmap_load(sc, *dmamap_out,
808 1.1 kiyohara dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
809 1.1 kiyohara 1/*read*/);
810 1.1 kiyohara
811 1.1 kiyohara if (error == 0) {
812 1.1 kiyohara /*
813 1.1 kiyohara 				 * The size of outbuf is always assumed to be
814 1.1 kiyohara 				 * the DMA transfer size of the dmover request.
815 1.1 kiyohara */
816 1.1 kiyohara error = (*df->dma_setup)(sc, chan, ninputs,
817 1.1 kiyohara dmamap_in, dmamap_out,
818 1.1 kiyohara (*dmamap_out)->dm_mapsize);
819 1.1 kiyohara if (error != 0)
820 1.1 kiyohara gtidmac_dmmap_unload(sc, *dmamap_out,
821 1.1 kiyohara 1);
822 1.1 kiyohara }
823 1.1 kiyohara } else
824 1.1 kiyohara if (error == 0)
825 1.1 kiyohara error = (*df->dma_setup)(sc, chan, ninputs,
826 1.1 kiyohara dmamap_in, dmamap_out,
827 1.1 kiyohara (*dmamap_in)->dm_mapsize);
828 1.1 kiyohara
829 1.1 kiyohara /* XXXLOCK */
830 1.1 kiyohara
831 1.1 kiyohara if (error != 0) {
832 1.1 kiyohara for (; i-- > 0;)
833 1.1 kiyohara gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
834 1.1 kiyohara (*df->chan_free)(sc, chan);
835 1.1 kiyohara
836 1.1 kiyohara dreq->dreq_flags |= DMOVER_REQ_ERROR;
837 1.1 kiyohara dreq->dreq_error = error;
838 1.1 kiyohara /* XXXUNLOCK */
839 1.1 kiyohara dmover_done(dreq);
840 1.1 kiyohara /* XXXLOCK */
841 1.1 kiyohara continue;
842 1.1 kiyohara }
843 1.1 kiyohara
844 1.1 kiyohara (*df->dma_start)(sc, chan, gtidmac_dmover_done);
845 1.1 kiyohara break;
846 1.1 kiyohara }
847 1.1 kiyohara
848 1.1 kiyohara /* All done */
849 1.1 kiyohara sc->sc_dmb_busy = 0;
850 1.1 kiyohara }
851 1.1 kiyohara
852 1.1 kiyohara static void
853 1.1 kiyohara gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
854 1.1 kiyohara bus_dmamap_t *dmamap_out, int error)
855 1.1 kiyohara {
856 1.1 kiyohara struct gtidmac_softc *sc;
857 1.1 kiyohara struct dmover_request *dreq = object;
858 1.1 kiyohara struct dmover_backend *dmb;
859 1.1 kiyohara struct gtidmac_function *df;
860 1.1 kiyohara uint32_t result;
861 1.1 kiyohara int ninputs, i;
862 1.1 kiyohara
863 1.1 kiyohara KASSERT(dreq != NULL);
864 1.1 kiyohara
865 1.1 kiyohara dmb = dreq->dreq_assignment->das_backend;
866 1.1 kiyohara df = dreq->dreq_assignment->das_algdesc->dad_data;
867 1.1 kiyohara ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
868 1.1 kiyohara sc = dmb->dmb_cookie;
869 1.1 kiyohara
870 1.1 kiyohara result = (*df->dma_finish)(sc, chan, error);
871 1.1 kiyohara for (i = 0; i < ninputs; i++)
872 1.1 kiyohara gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
873 1.1 kiyohara if (dreq->dreq_assignment->das_algdesc->dad_name ==
874 1.1 kiyohara DMOVER_FUNC_ISCSI_CRC32C)
875 1.1 kiyohara memcpy(dreq->dreq_immediate, &result, sizeof(result));
876 1.1 kiyohara else
877 1.1 kiyohara gtidmac_dmmap_unload(sc, *dmamap_out, 1);
878 1.1 kiyohara
879 1.1 kiyohara (*df->chan_free)(sc, chan);
880 1.1 kiyohara
881 1.1 kiyohara if (error) {
882 1.1 kiyohara dreq->dreq_error = error;
883 1.1 kiyohara dreq->dreq_flags |= DMOVER_REQ_ERROR;
884 1.1 kiyohara }
885 1.1 kiyohara
886 1.1 kiyohara dmover_done(dreq);
887 1.1 kiyohara
888 1.1 kiyohara /*
889 1.1 kiyohara * See if we can start some more dmover(9) requests.
890 1.1 kiyohara *
891 1.1 kiyohara * Note: We're already at splbio() here.
892 1.1 kiyohara */
893 1.1 kiyohara if (!sc->sc_dmb_busy)
894 1.1 kiyohara gtidmac_dmover_run(dmb);
895 1.1 kiyohara }
896 1.1 kiyohara
897 1.9 msaitoh static __inline int
898 1.1 kiyohara gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
899 1.1 kiyohara dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
900 1.1 kiyohara int read)
901 1.1 kiyohara {
902 1.1 kiyohara int error, flags;
903 1.1 kiyohara
904 1.1 kiyohara 	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
905 1.1 kiyohara 	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);
906 1.1 kiyohara
907 1.1 kiyohara switch (dmbuf_type) {
908 1.1 kiyohara case DMOVER_BUF_LINEAR:
909 1.1 kiyohara error = bus_dmamap_load(sc->sc_dmat, dmamap,
910 1.1 kiyohara dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
911 1.1 kiyohara NULL, flags);
912 1.1 kiyohara break;
913 1.1 kiyohara
914 1.1 kiyohara case DMOVER_BUF_UIO:
915 1.1 kiyohara if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
916 1.1 kiyohara (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
917 1.1 kiyohara return (EINVAL);
918 1.1 kiyohara
919 1.1 kiyohara error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
920 1.1 kiyohara dmbuf->dmbuf_uio, flags);
921 1.1 kiyohara break;
922 1.1 kiyohara
923 1.1 kiyohara default:
924 1.1 kiyohara error = EINVAL;
925 1.1 kiyohara }
926 1.1 kiyohara
927 1.1 kiyohara if (error == 0)
928 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
929 1.1 kiyohara read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
930 1.1 kiyohara
931 1.1 kiyohara return error;
932 1.1 kiyohara }
933 1.1 kiyohara
934 1.9 msaitoh static __inline void
935 1.1 kiyohara gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
936 1.1 kiyohara {
937 1.1 kiyohara
938 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
939 1.1 kiyohara read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
940 1.1 kiyohara
941 1.1 kiyohara bus_dmamap_unload(sc->sc_dmat, dmamap);
942 1.1 kiyohara }
943 1.1 kiyohara
944 1.1 kiyohara
945 1.1 kiyohara /*
946 1.1 kiyohara * IDMAC functions
947 1.1 kiyohara */
948 1.1 kiyohara int
949 1.1 kiyohara gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
950 1.1 kiyohara bus_dmamap_t **dmamap_out, void *object)
951 1.1 kiyohara {
952 1.1 kiyohara struct gtidmac_softc *sc = tag;
953 1.1 kiyohara int chan;
954 1.1 kiyohara
955 1.1 kiyohara /* maybe need lock */
956 1.1 kiyohara
957 1.1 kiyohara for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
958 1.1 kiyohara if (sc->sc_cdesc[chan].chan_running == NULL)
959 1.1 kiyohara break;
960 1.1 kiyohara if (chan >= sc->sc_gtidmac_nchan)
961 1.1 kiyohara return -1;
962 1.1 kiyohara
963 1.1 kiyohara
964 1.1 kiyohara sc->sc_cdesc[chan].chan_running = object;
965 1.1 kiyohara
966 1.1 kiyohara /* unlock */
967 1.1 kiyohara
968 1.1 kiyohara *dmamap_in = &sc->sc_cdesc[chan].chan_in;
969 1.1 kiyohara *dmamap_out = &sc->sc_cdesc[chan].chan_out;
970 1.1 kiyohara
971 1.1 kiyohara return chan;
972 1.1 kiyohara }
973 1.1 kiyohara
974 1.1 kiyohara void
975 1.1 kiyohara gtidmac_chan_free(void *tag, int chan)
976 1.1 kiyohara {
977 1.1 kiyohara struct gtidmac_softc *sc = tag;
978 1.1 kiyohara
979 1.1 kiyohara /* maybe need lock */
980 1.1 kiyohara
981 1.1 kiyohara sc->sc_cdesc[chan].chan_running = NULL;
982 1.1 kiyohara
983 1.1 kiyohara /* unlock */
984 1.1 kiyohara }
985 1.1 kiyohara
986 1.1 kiyohara /* ARGSUSED */
987 1.1 kiyohara int
988 1.1 kiyohara gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
989 1.1 kiyohara bus_dmamap_t *dmamap_out, bus_size_t size)
990 1.1 kiyohara {
991 1.1 kiyohara struct gtidmac_softc *sc = tag;
992 1.1 kiyohara struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
993 1.1 kiyohara struct gtidmac_desc *desc;
994 1.1 kiyohara uint32_t ccl, bcnt, ires, ores;
995 1.1 kiyohara int n = 0, iidx, oidx;
996 1.1 kiyohara
997 1.1 kiyohara KASSERT(ninputs == 0 || ninputs == 1);
998 1.1 kiyohara
999 1.1 kiyohara ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1000 1.1 kiyohara #ifdef DIAGNOSTIC
1001 1.1 kiyohara if (ccl & GTIDMAC_CCLR_CHANACT)
1002 1.1 kiyohara panic("gtidmac_setup: chan%d already active", chan);
1003 1.1 kiyohara #endif
1004 1.1 kiyohara
1005 1.1 kiyohara 	/* We always use chained mode, max (16M - 1) bytes/descriptor */
1006 1.1 kiyohara ccl = (GTIDMAC_CCLR_DESCMODE_16M |
1007 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1008 1.1 kiyohara GTIDMAC_CCLR_CDEN |
1009 1.1 kiyohara #endif
1010 1.1 kiyohara GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */ |
1011 1.1 kiyohara GTIDMAC_CCLR_INTMODE_NULL /* Intr Mode: Next Desc NULL */ |
1012 1.1 kiyohara 	    GTIDMAC_CCLR_CHAINMODE_C	/* Chain Mode: Chained */);
1013 1.1 kiyohara if (size != (*dmamap_in)->dm_mapsize) {
1014 1.1 kiyohara ccl |= GTIDMAC_CCLR_SRCHOLD;
1015 1.1 kiyohara if ((*dmamap_in)->dm_mapsize == 8)
1016 1.1 kiyohara ccl |= GTIDMAC_CCLR_SBL_8B;
1017 1.1 kiyohara else if ((*dmamap_in)->dm_mapsize == 16)
1018 1.1 kiyohara ccl |= GTIDMAC_CCLR_SBL_16B;
1019 1.1 kiyohara else if ((*dmamap_in)->dm_mapsize == 32)
1020 1.1 kiyohara ccl |= GTIDMAC_CCLR_SBL_32B;
1021 1.1 kiyohara else if ((*dmamap_in)->dm_mapsize == 64)
1022 1.1 kiyohara ccl |= GTIDMAC_CCLR_SBL_64B;
1023 1.1 kiyohara else if ((*dmamap_in)->dm_mapsize == 128)
1024 1.1 kiyohara ccl |= GTIDMAC_CCLR_SBL_128B;
1025 1.1 kiyohara else
1026 1.1 kiyohara panic("gtidmac_setup: chan%d source:"
1027 1.1 kiyohara 			    " unsupported hold size", chan);
1028 1.1 kiyohara } else
1029 1.1 kiyohara ccl |= GTIDMAC_CCLR_SBL_128B;
1030 1.1 kiyohara if (size != (*dmamap_out)->dm_mapsize) {
1031 1.1 kiyohara ccl |= GTIDMAC_CCLR_DESTHOLD;
1032 1.1 kiyohara if ((*dmamap_out)->dm_mapsize == 8)
1033 1.1 kiyohara ccl |= GTIDMAC_CCLR_DBL_8B;
1034 1.1 kiyohara else if ((*dmamap_out)->dm_mapsize == 16)
1035 1.1 kiyohara ccl |= GTIDMAC_CCLR_DBL_16B;
1036 1.1 kiyohara else if ((*dmamap_out)->dm_mapsize == 32)
1037 1.1 kiyohara ccl |= GTIDMAC_CCLR_DBL_32B;
1038 1.1 kiyohara else if ((*dmamap_out)->dm_mapsize == 64)
1039 1.1 kiyohara ccl |= GTIDMAC_CCLR_DBL_64B;
1040 1.1 kiyohara else if ((*dmamap_out)->dm_mapsize == 128)
1041 1.1 kiyohara ccl |= GTIDMAC_CCLR_DBL_128B;
1042 1.1 kiyohara else
1043 1.1 kiyohara panic("gtidmac_setup: chan%d destination:"
1044 1.1 kiyohara 			    " unsupported hold size", chan);
1045 1.1 kiyohara } else
1046 1.1 kiyohara ccl |= GTIDMAC_CCLR_DBL_128B;
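	/*
	 * When a map is smaller than the requested transfer, the
	 * corresponding HOLD bit keeps the engine re-reading (or re-writing)
	 * that small buffer; this is how the 16-byte pattern buffer
	 * implements ZERO/FILL8.  The held map's size is also used as the
	 * burst limit; otherwise the maximum 128-byte burst is selected.
	 */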
1047 1.1 kiyohara
1048 1.1 kiyohara fstdd = SLIST_FIRST(&sc->sc_dlist);
1049 1.1 kiyohara if (fstdd == NULL) {
1050 1.1 kiyohara aprint_error_dev(sc->sc_dev, "no descriptor\n");
1051 1.1 kiyohara return ENOMEM;
1052 1.1 kiyohara }
1053 1.1 kiyohara SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1054 1.1 kiyohara sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1055 1.1 kiyohara
1056 1.1 kiyohara dd = fstdd;
1057 1.1 kiyohara ires = ores = 0;
1058 1.1 kiyohara iidx = oidx = 0;
1059 1.1 kiyohara while (1 /*CONSTCOND*/) {
1060 1.1 kiyohara if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1061 1.1 kiyohara if (ccl & GTIDMAC_CCLR_DESTHOLD)
1062 1.1 kiyohara bcnt = size; /* src/dst hold */
1063 1.1 kiyohara else
1064 1.1 kiyohara bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1065 1.1 kiyohara } else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1066 1.1 kiyohara bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1067 1.1 kiyohara else
1068 1.1 kiyohara bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1069 1.1 kiyohara (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1070 1.1 kiyohara
1071 1.1 kiyohara desc = dd->dd_idmac_vaddr;
1072 1.1 kiyohara desc->bc.mode16m.bcnt =
1073 1.1 kiyohara bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1074 1.1 kiyohara desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1075 1.1 kiyohara desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1076 1.1 kiyohara
1077 1.1 kiyohara n += bcnt;
1078 1.1 kiyohara if (n >= size)
1079 1.1 kiyohara break;
1080 1.1 kiyohara if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1081 1.1 kiyohara ires += bcnt;
1082 1.1 kiyohara if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1083 1.1 kiyohara ires = 0;
1084 1.1 kiyohara iidx++;
1085 1.1 kiyohara KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1086 1.1 kiyohara }
1087 1.1 kiyohara }
1088 1.1 kiyohara if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1089 1.1 kiyohara ores += bcnt;
1090 1.1 kiyohara if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1091 1.1 kiyohara ores = 0;
1092 1.1 kiyohara oidx++;
1093 1.1 kiyohara KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1094 1.1 kiyohara }
1095 1.1 kiyohara }
1096 1.1 kiyohara
1097 1.1 kiyohara nxtdd = SLIST_FIRST(&sc->sc_dlist);
1098 1.1 kiyohara if (nxtdd == NULL) {
1099 1.1 kiyohara aprint_error_dev(sc->sc_dev, "no descriptor\n");
1100 1.1 kiyohara return ENOMEM;
1101 1.1 kiyohara }
1102 1.1 kiyohara SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1103 1.1 kiyohara
1104 1.1 kiyohara desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1105 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1106 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1107 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1108 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1109 1.1 kiyohara #else
1110 1.1 kiyohara BUS_DMASYNC_PREWRITE);
1111 1.1 kiyohara #endif
1112 1.1 kiyohara
1113 1.1 kiyohara SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1114 1.1 kiyohara dd = nxtdd;
1115 1.1 kiyohara }
1116 1.1 kiyohara desc->nextdp = (uint32_t)NULL;
1117 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1118 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1119 1.1 kiyohara sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1120 1.1 kiyohara #else
1121 1.1 kiyohara sizeof(*desc), BUS_DMASYNC_PREWRITE);
1122 1.1 kiyohara #endif
1123 1.1 kiyohara
1124 1.1 kiyohara /* Set paddr of descriptor to Channel Next Descriptor Pointer */
1125 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1126 1.3 kiyohara fstdd->dd_paddr);
1127 1.1 kiyohara
1128 1.1 kiyohara #if BYTE_ORDER == LITTLE_ENDIAN
1129 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1130 1.1 kiyohara GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1131 1.3 kiyohara #else
1132 1.3 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1133 1.3 kiyohara GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1134 1.1 kiyohara #endif
1135 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1136 1.1 kiyohara
1137 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1138 1.1 kiyohara gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1139 1.1 kiyohara #endif
1140 1.1 kiyohara
1141 1.1 kiyohara sc->sc_cdesc[chan].chan_totalcnt += size;
1142 1.1 kiyohara
1143 1.1 kiyohara return 0;
1144 1.1 kiyohara }
1145 1.1 kiyohara
1146 1.1 kiyohara void
1147 1.1 kiyohara gtidmac_start(void *tag, int chan,
1148 1.1 kiyohara void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1149 1.1 kiyohara int))
1150 1.1 kiyohara {
1151 1.1 kiyohara struct gtidmac_softc *sc = tag;
1152 1.1 kiyohara uint32_t ccl;
1153 1.1 kiyohara
1154 1.1 kiyohara DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1155 1.1 kiyohara
1156 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1157 1.1 kiyohara gtidmac_dump_idmacreg(sc, chan);
1158 1.1 kiyohara #endif
1159 1.1 kiyohara
1160 1.1 kiyohara sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1161 1.1 kiyohara
1162 1.1 kiyohara ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1163 1.1 kiyohara /* Start and 'Fetch Next Descriptor' */
1164 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1165 1.1 kiyohara ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1166 1.1 kiyohara }
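/*
 * gtidmac_start() only flips CHANEN and FETCHND in the control register; the
 * descriptor chain itself was already programmed into CNDPR by
 * gtidmac_setup(), so the engine fetches it and runs until the NULL next
 * pointer raises the completion interrupt (INTMODE_NULL above).
 */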
1167 1.1 kiyohara
1168 1.1 kiyohara static uint32_t
1169 1.1 kiyohara gtidmac_finish(void *tag, int chan, int error)
1170 1.1 kiyohara {
1171 1.1 kiyohara struct gtidmac_softc *sc = tag;
1172 1.1 kiyohara struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1173 1.1 kiyohara struct gtidmac_desc *desc;
1174 1.1 kiyohara
1175 1.1 kiyohara fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1176 1.1 kiyohara
1177 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1178 1.1 kiyohara if (error || gtidmac_debug > 1) {
1179 1.1 kiyohara uint32_t ccl;
1180 1.1 kiyohara
1181 1.1 kiyohara gtidmac_dump_idmacreg(sc, chan);
1182 1.1 kiyohara ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1183 1.1 kiyohara GTIDMAC_CCLR(chan));
1184 1.1 kiyohara gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1185 1.1 kiyohara }
1186 1.1 kiyohara #endif
1187 1.1 kiyohara
1188 1.1 kiyohara dd = fstdd;
1189 1.1 kiyohara do {
1190 1.1 kiyohara desc = dd->dd_idmac_vaddr;
1191 1.1 kiyohara
1192 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1193 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1194 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1195 1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1196 1.1 kiyohara #else
1197 1.1 kiyohara BUS_DMASYNC_POSTWRITE);
1198 1.1 kiyohara #endif
1199 1.1 kiyohara
1200 1.1 kiyohara nxtdd = SLIST_NEXT(dd, dd_next);
1201 1.1 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1202 1.1 kiyohara dd = nxtdd;
1203 1.1 kiyohara } while (desc->nextdp);
1204 1.1 kiyohara
1205 1.1 kiyohara return 0;
1206 1.1 kiyohara }
1207 1.1 kiyohara
1208 1.1 kiyohara /*
1209 1.1 kiyohara * XORE functions
1210 1.1 kiyohara */
1211 1.1 kiyohara int
1212 1.1 kiyohara mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1213 1.1 kiyohara bus_dmamap_t **dmamap_out, void *object)
1214 1.1 kiyohara {
1215 1.1 kiyohara struct gtidmac_softc *sc = tag;
1216 1.1 kiyohara int chan;
1217 1.1 kiyohara
1218 1.1 kiyohara /* maybe need lock */
1219 1.1 kiyohara
1220 1.1 kiyohara for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1221 1.1 kiyohara if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1222 1.1 kiyohara break;
1223 1.1 kiyohara if (chan >= sc->sc_mvxore_nchan)
1224 1.1 kiyohara return -1;
1225 1.1 kiyohara
1226 1.1 kiyohara
1227 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_running = object;
1228 1.1 kiyohara
1229 1.1 kiyohara /* unlock */
1230 1.1 kiyohara
1231 1.1 kiyohara *dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1232 1.1 kiyohara *dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1233 1.1 kiyohara
1234 1.1 kiyohara return chan;
1235 1.1 kiyohara }
1236 1.1 kiyohara
1237 1.1 kiyohara void
1238 1.1 kiyohara mvxore_chan_free(void *tag, int chan)
1239 1.1 kiyohara {
1240 1.1 kiyohara struct gtidmac_softc *sc = tag;
1241 1.1 kiyohara
1242 1.1 kiyohara /* maybe need lock */
1243 1.1 kiyohara
1244 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_running = NULL;
1245 1.1 kiyohara
1246 1.1 kiyohara /* unlock */
1247 1.1 kiyohara }
1248 1.1 kiyohara
1249 1.1 kiyohara /* ARGSUSED */
1250 1.1 kiyohara int
1251 1.1 kiyohara mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1252 1.1 kiyohara bus_dmamap_t *dmamap_out, bus_size_t size)
1253 1.1 kiyohara {
1254 1.1 kiyohara struct gtidmac_softc *sc = tag;
1255 1.1 kiyohara struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1256 1.1 kiyohara struct mvxore_desc *desc;
1257 1.1 kiyohara uint32_t xexc, bcnt, cmd, lastcmd;
1258 1.1 kiyohara int n = 0, i;
1259 1.1 kiyohara uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1260 1.1 kiyohara int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1261 1.1 kiyohara
1262 1.1 kiyohara #ifdef DIAGNOSTIC
1263 1.13 kiyohara uint32_t xexact =
1264 1.13 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1265 1.1 kiyohara
1266 1.1 kiyohara if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1267 1.1 kiyohara MVXORE_XEXACTR_XESTATUS_ACT)
1268 1.1 kiyohara panic("mvxore_setup: chan%d already active."
1269 1.1 kiyohara 		    " mvxore does not support hot insertion", chan);
1270 1.1 kiyohara #endif
1271 1.1 kiyohara
1272 1.1 kiyohara xexc =
1273 1.1 kiyohara (MVXORE_XEXCR_REGACCPROTECT |
1274 1.1 kiyohara MVXORE_XEXCR_DBL_128B |
1275 1.1 kiyohara MVXORE_XEXCR_SBL_128B);
1276 1.1 kiyohara cmd = lastcmd = 0;
1277 1.1 kiyohara if (ninputs > 1) {
1278 1.1 kiyohara xexc |= MVXORE_XEXCR_OM_XOR;
1279 1.1 kiyohara lastcmd = cmd = (1 << ninputs) - 1;
1280 1.1 kiyohara } else if (ninputs == 1) {
1281 1.1 kiyohara if ((*dmamap_out)->dm_nsegs == 0) {
1282 1.1 kiyohara xexc |= MVXORE_XEXCR_OM_CRC32;
1283 1.1 kiyohara lastcmd = MVXORE_DESC_CMD_CRCLAST;
1284 1.1 kiyohara } else
1285 1.1 kiyohara xexc |= MVXORE_XEXCR_OM_DMA;
1286 1.1 kiyohara } else if (ninputs == 0) {
1287 1.1 kiyohara if ((*dmamap_out)->dm_nsegs != 1) {
1288 1.1 kiyohara aprint_error_dev(sc->sc_dev,
1289 1.1 kiyohara 			    "XORE does not support %d DMA segments\n",
1290 1.1 kiyohara (*dmamap_out)->dm_nsegs);
1291 1.1 kiyohara return EINVAL;
1292 1.1 kiyohara }
1293 1.1 kiyohara
1294 1.1 kiyohara if ((*dmamap_in)->dm_mapsize == 0) {
1295 1.1 kiyohara xexc |= MVXORE_XEXCR_OM_ECC;
1296 1.1 kiyohara
1297 1.1 kiyohara /* XXXXX: Maybe need to set Timer Mode registers? */
1298 1.1 kiyohara
1299 1.1 kiyohara #if 0
1300 1.1 kiyohara } else if ((*dmamap_in)->dm_mapsize == 8 ||
1301 1.1 kiyohara (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1302 1.1 kiyohara uint64_t pattern;
1303 1.1 kiyohara
1304 1.1 kiyohara /* XXXX: Get pattern data */
1305 1.1 kiyohara
1306 1.1 kiyohara KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1307 1.1 kiyohara (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1308 1.1 kiyohara ~PAGE_MASK) == sc->sc_pbuf);
1309 1.1 kiyohara pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1310 1.1 kiyohara
1311 1.1 kiyohara /* XXXXX: XORE has a IVR. We should get this first. */
1312 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1313 1.1 kiyohara pattern);
1314 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1315 1.1 kiyohara pattern >> 32);
1316 1.1 kiyohara
1317 1.1 kiyohara xexc |= MVXORE_XEXCR_OM_MEMINIT;
1318 1.1 kiyohara #endif
1319 1.1 kiyohara } else {
1320 1.1 kiyohara aprint_error_dev(sc->sc_dev,
1321 1.1 kiyohara 			    "XORE does not support DMA mapsize %zd\n",
1322 1.1 kiyohara (*dmamap_in)->dm_mapsize);
1323 1.1 kiyohara return EINVAL;
1324 1.1 kiyohara }
1325 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1326 1.8 kiyohara MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
1327 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1328 1.8 kiyohara MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);
1329 1.1 kiyohara
1330 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1331 1.8 kiyohara MVXORE_XEXCR(sc, chan), xexc);
1332 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1333 1.1 kiyohara
1334 1.1 kiyohara return 0;
1335 1.1 kiyohara }
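	/*
	 * For the XOR operating mode, cmd is evidently a per-source enable
	 * bitmask, so (1 << ninputs) - 1 enables sources 0..ninputs-1 (for
	 * example XOR4 gives 0x0f).  For CRC32 only the final descriptor is
	 * tagged, with MVXORE_DESC_CMD_CRCLAST.
	 */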
1336 1.1 kiyohara
1337 1.1 kiyohara /* Make descriptor for DMA/CRC32/XOR */
1338 1.1 kiyohara
1339 1.1 kiyohara fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1340 1.1 kiyohara if (fstdd == NULL) {
1341 1.1 kiyohara aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1342 1.1 kiyohara return ENOMEM;
1343 1.1 kiyohara }
1344 1.1 kiyohara SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1345 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_ddidx =
1346 1.1 kiyohara fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1347 1.1 kiyohara
1348 1.1 kiyohara dd = fstdd;
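/*
 * Build the descriptor chain: each descriptor carries the largest byte
 * count that fits the current output segment and every current input
 * segment, then the per-map residues/indices advance and the next free
 * descriptor is linked in until 'size' bytes are covered.
 */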
1349 1.1 kiyohara while (1 /*CONSTCOND*/) {
1350 1.1 kiyohara desc = dd->dd_xore_vaddr;
1351 1.1 kiyohara desc->stat = MVXORE_DESC_STAT_OWN;
1352 1.1 kiyohara desc->cmd = cmd;
1353 1.1 kiyohara if ((*dmamap_out)->dm_nsegs != 0) {
1354 1.1 kiyohara desc->dstaddr =
1355 1.1 kiyohara (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1356 1.1 kiyohara bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1357 1.1 kiyohara } else {
1358 1.1 kiyohara desc->dstaddr = 0;
1359 1.1 kiyohara bcnt = MVXORE_MAXXFER; /* XXXXX */
1360 1.1 kiyohara }
1361 1.1 kiyohara for (i = 0; i < ninputs; i++) {
1362 1.1 kiyohara desc->srcaddr[i] =
1363 1.1 kiyohara (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1364 1.1 kiyohara bcnt = min(bcnt,
1365 1.1 kiyohara (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1366 1.1 kiyohara }
1367 1.1 kiyohara desc->bcnt = bcnt;
1368 1.1 kiyohara
1369 1.1 kiyohara n += bcnt;
1370 1.1 kiyohara if (n >= size)
1371 1.1 kiyohara break;
1372 1.1 kiyohara ores += bcnt;
1373 1.1 kiyohara if ((*dmamap_out)->dm_nsegs != 0 &&
1374 1.1 kiyohara ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1375 1.1 kiyohara ores = 0;
1376 1.1 kiyohara oidx++;
1377 1.1 kiyohara KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1378 1.1 kiyohara }
1379 1.1 kiyohara for (i = 0; i < ninputs; i++) {
1380 1.1 kiyohara ires[i] += bcnt;
1381 1.1 kiyohara if (ires[i] >=
1382 1.1 kiyohara (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1383 1.1 kiyohara ires[i] = 0;
1384 1.1 kiyohara iidx[i]++;
1385 1.1 kiyohara KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1386 1.1 kiyohara }
1387 1.1 kiyohara }
1388 1.1 kiyohara
1389 1.1 kiyohara nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1390 1.1 kiyohara if (nxtdd == NULL) {
1391 1.1 kiyohara aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1392 1.1 kiyohara return ENOMEM;
1393 1.1 kiyohara }
1394 1.1 kiyohara SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1395 1.1 kiyohara
1396 1.1 kiyohara desc->nextda = (uint32_t)nxtdd->dd_paddr;
1397 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1398 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1399 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1400 1.1 kiyohara
1401 1.1 kiyohara SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1402 1.1 kiyohara dd = nxtdd;
1403 1.1 kiyohara }
1404 1.1 kiyohara desc->cmd = lastcmd;
1405 1.1 kiyohara desc->nextda = (uint32_t)NULL;
1406 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1407 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1408 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1409 1.1 kiyohara
1410 1.1 kiyohara /* Set paddr of descriptor to Channel Next Descriptor Pointer */
1411 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
1412 1.1 kiyohara fstdd->dd_paddr);
1413 1.1 kiyohara
1414 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);
1415 1.1 kiyohara
1416 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1417 1.3 kiyohara gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1418 1.1 kiyohara #endif
1419 1.1 kiyohara
1420 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1421 1.1 kiyohara
1422 1.1 kiyohara return 0;
1423 1.1 kiyohara }
1424 1.1 kiyohara
1425 1.1 kiyohara void
1426 1.1 kiyohara mvxore_start(void *tag, int chan,
1427 1.1 kiyohara void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1428 1.1 kiyohara int))
1429 1.1 kiyohara {
1430 1.1 kiyohara struct gtidmac_softc *sc = tag;
1431 1.1 kiyohara uint32_t xexact;
1432 1.1 kiyohara
1433 1.1 kiyohara DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1434 1.1 kiyohara
1435 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1436 1.1 kiyohara gtidmac_dump_xorereg(sc, chan);
1437 1.1 kiyohara #endif
1438 1.1 kiyohara
1439 1.1 kiyohara sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1440 1.1 kiyohara
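/*
 * Setting XESTART in the activation register presumably makes the engine
 * fetch the descriptor chain programmed into XEXNDPR by mvxore_setup()
 * and run it to completion.
 */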
1441 1.8 kiyohara xexact =
1442 1.8 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1443 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
1444 1.1 kiyohara xexact | MVXORE_XEXACTR_XESTART);
1445 1.1 kiyohara }
1446 1.1 kiyohara
1447 1.1 kiyohara static uint32_t
1448 1.1 kiyohara mvxore_finish(void *tag, int chan, int error)
1449 1.1 kiyohara {
1450 1.1 kiyohara struct gtidmac_softc *sc = tag;
1451 1.1 kiyohara struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1452 1.1 kiyohara struct mvxore_desc *desc;
1453 1.1 kiyohara uint32_t xexc;
1454 1.1 kiyohara
1455 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1456 1.1 kiyohara if (error || gtidmac_debug > 1)
1457 1.1 kiyohara gtidmac_dump_xorereg(sc, chan);
1458 1.1 kiyohara #endif
1459 1.1 kiyohara
1460 1.8 kiyohara xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1461 1.1 kiyohara if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1462 1.1 kiyohara (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1463 1.1 kiyohara return 0;
1464 1.1 kiyohara
1465 1.1 kiyohara fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1466 1.1 kiyohara
1467 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1468 1.1 kiyohara if (error || gtidmac_debug > 1)
1469 1.1 kiyohara gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1470 1.1 kiyohara #endif
1471 1.1 kiyohara
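/*
 * Walk the completed chain: sync each descriptor for CPU access and put
 * it back on the free list.  For CRC-32 the last descriptor's result
 * field holds the computed checksum, which is returned below.
 */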
1472 1.1 kiyohara dd = fstdd;
1473 1.1 kiyohara do {
1474 1.1 kiyohara desc = dd->dd_xore_vaddr;
1475 1.1 kiyohara
1476 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1477 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1478 1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1479 1.1 kiyohara
1480 1.1 kiyohara nxtdd = SLIST_NEXT(dd, dd_next);
1481 1.1 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1482 1.1 kiyohara dd = nxtdd;
1483 1.1 kiyohara } while (desc->nextda);
1484 1.1 kiyohara
1485 1.1 kiyohara if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1486 1.1 kiyohara return desc->result;
1487 1.1 kiyohara return 0;
1488 1.1 kiyohara }
1489 1.1 kiyohara
1490 1.1 kiyohara static void
1491 1.11 kiyohara gtidmac_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1492 1.1 kiyohara {
1493 1.1 kiyohara device_t pdev = device_parent(sc->sc_dev);
1494 1.1 kiyohara uint64_t base;
1495 1.11 kiyohara uint32_t size, cxap, en, winacc;
1496 1.11 kiyohara int window, target, attr, rv, i, j;
1497 1.1 kiyohara
1498 1.1 kiyohara en = 0xff;
1499 1.1 kiyohara cxap = 0;
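/*
 * Program one IDMAC decoding window per Marvell tag: base/size (plus the
 * high remap register for bases above 4GB), clear the window's bit in
 * the 0xff BAER value and accumulate the per-window access-protection
 * bits that are written to the CXAPR registers below.
 */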
1500 1.1 kiyohara for (window = 0, i = 0;
1501 1.11 kiyohara tags[i] != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW; i++) {
1502 1.11 kiyohara rv = marvell_winparams_by_tag(pdev, tags[i],
1503 1.1 kiyohara &target, &attr, &base, &size);
1504 1.1 kiyohara if (rv != 0 || size == 0)
1505 1.1 kiyohara continue;
1506 1.1 kiyohara
1507 1.1 kiyohara if (base > 0xffffffffULL) {
1508 1.1 kiyohara if (window >= GTIDMAC_NREMAP) {
1509 1.1 kiyohara aprint_error_dev(sc->sc_dev,
1510 1.1 kiyohara "can't remap window %d\n", window);
1511 1.1 kiyohara continue;
1512 1.1 kiyohara }
1513 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1514 1.1 kiyohara GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1515 1.1 kiyohara }
1516 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1517 1.1 kiyohara GTIDMAC_BARX_TARGET(target) |
1518 1.1 kiyohara GTIDMAC_BARX_ATTR(attr) |
1519 1.1 kiyohara GTIDMAC_BARX_BASE(base));
1520 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1521 1.1 kiyohara GTIDMAC_SRX_SIZE(size));
1522 1.1 kiyohara en &= ~GTIDMAC_BAER_EN(window);
1523 1.11 kiyohara
1524 1.11 kiyohara winacc = GTIDMAC_CXAPR_WINACC_FA;
1525 1.11 kiyohara if (gtidmac_winacctbl != NULL)
1526 1.11 kiyohara for (j = 0;
1527 1.11 kiyohara gtidmac_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1528 1.11 kiyohara j++) {
1529 1.11 kiyohara if (gtidmac_winacctbl[j].tag != tags[i])
1530 1.11 kiyohara continue;
1531 1.11 kiyohara
1532 1.11 kiyohara switch (gtidmac_winacctbl[j].winacc) {
1533 1.11 kiyohara case GTIDMAC_WINACC_NOACCESSALLOWED:
1534 1.11 kiyohara winacc = GTIDMAC_CXAPR_WINACC_NOAA;
1535 1.11 kiyohara break;
1536 1.11 kiyohara case GTIDMAC_WINACC_READONLY:
1537 1.11 kiyohara winacc = GTIDMAC_CXAPR_WINACC_RO;
1538 1.11 kiyohara break;
1539 1.11 kiyohara case GTIDMAC_WINACC_FULLACCESS:
1540 1.11 kiyohara default: /* XXXX: default is full access */
1541 1.11 kiyohara break;
1542 1.11 kiyohara }
1543 1.11 kiyohara break;
1544 1.11 kiyohara }
1545 1.11 kiyohara cxap |= GTIDMAC_CXAPR_WINACC(window, winacc);
1546 1.11 kiyohara
1547 1.1 kiyohara window++;
1548 1.1 kiyohara }
1549 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1550 1.1 kiyohara
1551 1.1 kiyohara for (i = 0; i < GTIDMAC_NACCPROT; i++)
1552 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1553 1.1 kiyohara cxap);
1554 1.1 kiyohara }
1555 1.1 kiyohara
1556 1.1 kiyohara static void
1557 1.11 kiyohara mvxore_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1558 1.1 kiyohara {
1559 1.1 kiyohara device_t pdev = device_parent(sc->sc_dev);
1560 1.1 kiyohara uint64_t base;
1561 1.11 kiyohara uint32_t target, attr, size, xexwc, winacc;
1562 1.11 kiyohara int window, rv, i, j, p;
1563 1.1 kiyohara
1564 1.1 kiyohara xexwc = 0;
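/*
 * Same idea for the XOR engine: each window's base/size (and high remap)
 * is written once per engine pair, and the enable/access bits are
 * collected into xexwc for the per-channel XEXWCR writes at the end.
 */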
1565 1.1 kiyohara for (window = 0, i = 0;
1566 1.11 kiyohara tags[i] != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW; i++) {
1567 1.11 kiyohara rv = marvell_winparams_by_tag(pdev, tags[i],
1568 1.1 kiyohara &target, &attr, &base, &size);
1569 1.1 kiyohara if (rv != 0 || size == 0)
1570 1.1 kiyohara continue;
1571 1.1 kiyohara
1572 1.1 kiyohara if (base > 0xffffffffULL) {
1573 1.1 kiyohara if (window >= MVXORE_NREMAP) {
1574 1.1 kiyohara aprint_error_dev(sc->sc_dev,
1575 1.1 kiyohara "can't remap window %d\n", window);
1576 1.1 kiyohara continue;
1577 1.1 kiyohara }
1578 1.8 kiyohara for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1579 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1580 1.8 kiyohara MVXORE_XEHARRX(sc, p, window),
1581 1.8 kiyohara (base >> 32) & 0xffffffff);
1582 1.8 kiyohara }
1583 1.8 kiyohara
1584 1.8 kiyohara for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1585 1.1 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1586 1.8 kiyohara MVXORE_XEBARX(sc, p, window),
1587 1.8 kiyohara MVXORE_XEBARX_TARGET(target) |
1588 1.8 kiyohara MVXORE_XEBARX_ATTR(attr) |
1589 1.8 kiyohara MVXORE_XEBARX_BASE(base));
1590 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1591 1.8 kiyohara MVXORE_XESMRX(sc, p, window),
1592 1.8 kiyohara MVXORE_XESMRX_SIZE(size));
1593 1.1 kiyohara }
1594 1.11 kiyohara
1595 1.11 kiyohara winacc = MVXORE_XEXWCR_WINACC_FA;
1596 1.11 kiyohara if (mvxore_winacctbl != NULL)
1597 1.11 kiyohara for (j = 0;
1598 1.11 kiyohara mvxore_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1599 1.11 kiyohara j++) {
1600 1.11 kiyohara if (mvxore_winacctbl[j].tag != tags[i])
1601 1.11 kiyohara continue;
1602 1.11 kiyohara
1603 1.11 kiyohara switch (mvxore_winacctbl[j].winacc) {
1604 1.11 kiyohara case GTIDMAC_WINACC_NOACCESSALLOWED:
1605 1.11 kiyohara winacc = MVXORE_XEXWCR_WINACC_NOAA;
1606 1.11 kiyohara break;
1607 1.11 kiyohara case GTIDMAC_WINACC_READONLY:
1608 1.11 kiyohara winacc = MVXORE_XEXWCR_WINACC_RO;
1609 1.11 kiyohara break;
1610 1.11 kiyohara case GTIDMAC_WINACC_FULLACCESS:
1611 1.11 kiyohara default: /* XXXX: default is full access */
1612 1.11 kiyohara break;
1613 1.11 kiyohara }
1614 1.11 kiyohara break;
1615 1.11 kiyohara }
1616 1.1 kiyohara xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1617 1.11 kiyohara MVXORE_XEXWCR_WINACC(window, winacc));
1618 1.1 kiyohara window++;
1619 1.1 kiyohara }
1620 1.1 kiyohara
1621 1.8 kiyohara for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1622 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1623 1.8 kiyohara xexwc);
1624 1.8 kiyohara
1625 1.8 kiyohara /* XXXXX: reset... */
1626 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
1627 1.8 kiyohara 0);
1628 1.8 kiyohara }
1629 1.1 kiyohara }
1630 1.1 kiyohara
1631 1.8 kiyohara static int
1632 1.8 kiyohara gtidmac_buffer_setup(struct gtidmac_softc *sc)
1633 1.8 kiyohara {
1634 1.8 kiyohara bus_dma_segment_t segs;
1635 1.8 kiyohara struct gtidmac_dma_desc *dd;
1636 1.8 kiyohara uint32_t mask;
1637 1.8 kiyohara int nchan, nsegs, i;
1638 1.8 kiyohara
1639 1.8 kiyohara nchan = sc->sc_gtidmac_nchan;
1640 1.8 kiyohara
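/*
 * Carve the IDMAC descriptor pool out of a single DMA-safe allocation:
 * GTIDMAC_NDESC descriptors per channel, mapped into the kernel, loaded,
 * and threaded onto the sc_dlist free list with the bus address of each
 * descriptor precomputed.
 */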
1641 1.8 kiyohara if (bus_dmamem_alloc(sc->sc_dmat,
1642 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1643 1.8 kiyohara PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1644 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1645 1.8 kiyohara "bus_dmamem_alloc failed: descriptor buffer\n");
1646 1.8 kiyohara goto fail0;
1647 1.8 kiyohara }
1648 1.8 kiyohara if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1649 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1650 1.8 kiyohara (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
1651 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1652 1.8 kiyohara "bus_dmamem_map failed: descriptor buffer\n");
1653 1.8 kiyohara goto fail1;
1654 1.8 kiyohara }
1655 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat,
1656 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
1657 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
1658 1.8 kiyohara BUS_DMA_NOWAIT, &sc->sc_dmap)) {
1659 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1660 1.8 kiyohara "bus_dmamap_create failed: descriptor buffer\n");
1661 1.8 kiyohara goto fail2;
1662 1.8 kiyohara }
1663 1.8 kiyohara if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
1664 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1665 1.8 kiyohara NULL, BUS_DMA_NOWAIT)) {
1666 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1667 1.8 kiyohara "bus_dmamap_load failed: descriptor buffer\n");
1668 1.8 kiyohara goto fail3;
1669 1.8 kiyohara }
1670 1.8 kiyohara SLIST_INIT(&sc->sc_dlist);
1671 1.8 kiyohara for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
1672 1.8 kiyohara dd = &sc->sc_dd_buffer[i];
1673 1.8 kiyohara dd->dd_index = i;
1674 1.8 kiyohara dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
1675 1.8 kiyohara dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
1676 1.8 kiyohara (sizeof(struct gtidmac_desc) * i);
1677 1.8 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1678 1.8 kiyohara }
1679 1.8 kiyohara
1680 1.8 kiyohara /* Initialize IDMAC DMA channels */
1681 1.8 kiyohara mask = 0;
1682 1.8 kiyohara for (i = 0; i < nchan; i++) {
1683 1.8 kiyohara if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
1684 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1685 1.8 kiyohara GTIDMAC_IMR(i - 1), mask);
1686 1.8 kiyohara mask = 0;
1687 1.8 kiyohara }
1688 1.8 kiyohara
1689 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1690 1.8 kiyohara GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1691 1.8 kiyohara &sc->sc_cdesc[i].chan_in)) {
1692 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1693 1.8 kiyohara "bus_dmamap_create failed: chan%d in\n", i);
1694 1.8 kiyohara goto fail4;
1695 1.8 kiyohara }
1696 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1697 1.8 kiyohara GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1698 1.8 kiyohara &sc->sc_cdesc[i].chan_out)) {
1699 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1700 1.8 kiyohara "bus_dmamap_create failed: chan%d out\n", i);
1701 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat,
1702 1.8 kiyohara sc->sc_cdesc[i].chan_in);
1703 1.8 kiyohara goto fail4;
1704 1.8 kiyohara }
1705 1.8 kiyohara sc->sc_cdesc[i].chan_totalcnt = 0;
1706 1.8 kiyohara sc->sc_cdesc[i].chan_running = NULL;
1707 1.8 kiyohara
1708 1.8 kiyohara /* Ignore bit overflow; the mask register is 32 bits wide. */
1709 1.8 kiyohara mask |= GTIDMAC_I(i,
1710 1.8 kiyohara GTIDMAC_I_COMP |
1711 1.8 kiyohara GTIDMAC_I_ADDRMISS |
1712 1.8 kiyohara GTIDMAC_I_ACCPROT |
1713 1.8 kiyohara GTIDMAC_I_WRPROT |
1714 1.8 kiyohara GTIDMAC_I_OWN);
1715 1.8 kiyohara
1716 1.8 kiyohara /* 8bits/channel * 4channels => 32bit */
1717 1.8 kiyohara if ((i & 0x3) == 0x3) {
1718 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1719 1.8 kiyohara GTIDMAC_IMR(i), mask);
1720 1.8 kiyohara mask = 0;
1721 1.8 kiyohara }
1722 1.8 kiyohara }
1723 1.8 kiyohara
1724 1.8 kiyohara return 0;
1725 1.8 kiyohara
1726 1.8 kiyohara fail4:
1727 1.8 kiyohara for (; i-- > 0;) {
1728 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
1729 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
1730 1.8 kiyohara }
1731 1.8 kiyohara bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1732 1.8 kiyohara fail3:
1733 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
1734 1.8 kiyohara fail2:
1735 1.8 kiyohara bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
1736 1.8 kiyohara sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan);
1737 1.8 kiyohara fail1:
1738 1.8 kiyohara bus_dmamem_free(sc->sc_dmat, &segs, 1);
1739 1.8 kiyohara fail0:
1740 1.8 kiyohara return -1;
1741 1.8 kiyohara }
1742 1.8 kiyohara
1743 1.8 kiyohara static int
1744 1.8 kiyohara mvxore_buffer_setup(struct gtidmac_softc *sc)
1745 1.8 kiyohara {
1746 1.8 kiyohara bus_dma_segment_t segs;
1747 1.8 kiyohara struct gtidmac_dma_desc *dd;
1748 1.8 kiyohara uint32_t mask;
1749 1.8 kiyohara int nchan, nsegs, i, j;
1750 1.8 kiyohara
1751 1.8 kiyohara nchan = sc->sc_mvxore_nchan;
1752 1.8 kiyohara
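/*
 * Likewise for the XOR engine: one allocation of MVXORE_NDESC
 * descriptors per channel kept on sc_dlist_xore, plus per-channel
 * input/output dmamaps created below (one input map per XOR source).
 */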
1753 1.8 kiyohara if (bus_dmamem_alloc(sc->sc_dmat,
1754 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1755 1.8 kiyohara PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1756 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1757 1.8 kiyohara "bus_dmamem_alloc failed: xore descriptor buffer\n");
1758 1.8 kiyohara goto fail0;
1759 1.8 kiyohara }
1760 1.8 kiyohara if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1761 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1762 1.8 kiyohara (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
1763 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1764 1.8 kiyohara "bus_dmamem_map failed: xore descriptor buffer\n");
1765 1.8 kiyohara goto fail1;
1766 1.8 kiyohara }
1767 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat,
1768 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
1769 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
1770 1.8 kiyohara BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
1771 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1772 1.8 kiyohara "bus_dmamap_create failed: xore descriptor buffer\n");
1773 1.8 kiyohara goto fail2;
1774 1.8 kiyohara }
1775 1.8 kiyohara if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
1776 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1777 1.8 kiyohara NULL, BUS_DMA_NOWAIT)) {
1778 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1779 1.8 kiyohara "bus_dmamap_load failed: xore descriptor buffer\n");
1780 1.8 kiyohara goto fail3;
1781 1.8 kiyohara }
1782 1.8 kiyohara SLIST_INIT(&sc->sc_dlist_xore);
1783 1.8 kiyohara for (i = 0; i < MVXORE_NDESC * nchan; i++) {
1784 1.8 kiyohara dd =
1785 1.8 kiyohara &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
1786 1.8 kiyohara dd->dd_index = i;
1787 1.8 kiyohara dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
1788 1.8 kiyohara dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
1789 1.8 kiyohara (sizeof(struct mvxore_desc) * i);
1790 1.8 kiyohara SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1791 1.8 kiyohara }
1792 1.8 kiyohara
1793 1.8 kiyohara /* Initialize XORE DMA channels */
1794 1.8 kiyohara mask = 0;
1795 1.8 kiyohara for (i = 0; i < nchan; i++) {
1796 1.8 kiyohara for (j = 0; j < MVXORE_NSRC; j++) {
1797 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat,
1798 1.8 kiyohara MVXORE_MAXXFER, MVXORE_NSEGS,
1799 1.8 kiyohara MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
1800 1.8 kiyohara &sc->sc_cdesc_xore[i].chan_in[j])) {
1801 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1802 1.8 kiyohara "bus_dmamap_create failed:"
1803 1.8 kiyohara " xore chan%d in[%d]\n", i, j);
1804 1.8 kiyohara goto fail4;
1805 1.8 kiyohara }
1806 1.8 kiyohara }
1807 1.8 kiyohara if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
1808 1.8 kiyohara MVXORE_NSEGS, MVXORE_MAXXFER, 0,
1809 1.8 kiyohara BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
1810 1.8 kiyohara aprint_error_dev(sc->sc_dev,
1811 1.8 kiyohara "bus_dmamap_create failed: chan%d out\n", i);
1812 1.8 kiyohara goto fail5;
1813 1.8 kiyohara }
1814 1.8 kiyohara sc->sc_cdesc_xore[i].chan_totalcnt = 0;
1815 1.8 kiyohara sc->sc_cdesc_xore[i].chan_running = NULL;
1816 1.8 kiyohara
1817 1.8 kiyohara mask |= MVXORE_I(i,
1818 1.8 kiyohara MVXORE_I_EOC |
1819 1.8 kiyohara MVXORE_I_ADDRDECODE |
1820 1.8 kiyohara MVXORE_I_ACCPROT |
1821 1.8 kiyohara MVXORE_I_WRPROT |
1822 1.8 kiyohara MVXORE_I_OWN |
1823 1.8 kiyohara MVXORE_I_INTPARITY |
1824 1.8 kiyohara MVXORE_I_XBAR);
1825 1.8 kiyohara
1826 1.8 kiyohara /* 16bits/channel * 2channels => 32bit */
1827 1.8 kiyohara if (i & 0x1) {
1828 1.8 kiyohara bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1829 1.8 kiyohara MVXORE_XEIMR(sc, i >> 1), mask);
1830 1.8 kiyohara mask = 0;
1831 1.8 kiyohara }
1832 1.8 kiyohara }
1833 1.8 kiyohara
1834 1.8 kiyohara return 0;
1835 1.8 kiyohara
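/*
 * Error unwind: the fail4/fail5 labels sit inside this loop so that a
 * goto from the allocation loop above first destroys the partially
 * created maps of the failing channel, then the loop tears down every
 * channel that was already set up.
 */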
1836 1.8 kiyohara for (; i-- > 0;) {
1837 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
1838 1.8 kiyohara
1839 1.8 kiyohara fail5:
1840 1.8 kiyohara j = MVXORE_NSRC;
1841 1.8 kiyohara fail4:
1842 1.8 kiyohara for (; j-- > 0;)
1843 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat,
1844 1.8 kiyohara sc->sc_cdesc_xore[i].chan_in[j]);
1845 1.8 kiyohara }
1846 1.8 kiyohara bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
1847 1.8 kiyohara fail3:
1848 1.8 kiyohara bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
1849 1.8 kiyohara fail2:
1850 1.8 kiyohara bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
1851 1.8 kiyohara sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan);
1852 1.8 kiyohara fail1:
1853 1.8 kiyohara bus_dmamem_free(sc->sc_dmat, &segs, 1);
1854 1.8 kiyohara fail0:
1855 1.8 kiyohara return -1;
1856 1.8 kiyohara }
1857 1.1 kiyohara
1858 1.1 kiyohara #ifdef GTIDMAC_DEBUG
1859 1.1 kiyohara static void
1860 1.1 kiyohara gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1861 1.1 kiyohara {
1862 1.1 kiyohara uint32_t val;
1863 1.1 kiyohara char buf[256];
1864 1.1 kiyohara
1865 1.1 kiyohara printf("IDMAC Registers\n");
1866 1.1 kiyohara
1867 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1868 1.3 kiyohara snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1869 1.3 kiyohara printf(" Byte Count : %s\n", buf);
1870 1.1 kiyohara printf(" ByteCnt : 0x%06x\n",
1871 1.1 kiyohara val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1872 1.1 kiyohara printf(" Source Address : 0x%08x\n",
1873 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1874 1.1 kiyohara printf(" Destination Address : 0x%08x\n",
1875 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1876 1.1 kiyohara printf(" Next Descriptor Pointer : 0x%08x\n",
1877 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1878 1.1 kiyohara printf(" Current Descriptor Pointer : 0x%08x\n",
1879 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1880 1.1 kiyohara
1881 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1882 1.3 kiyohara snprintb(buf, sizeof(buf),
1883 1.1 kiyohara "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1884 1.1 kiyohara "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1885 1.3 kiyohara val);
1886 1.3 kiyohara printf(" Channel Control (Low) : %s\n", buf);
1887 1.1 kiyohara printf(" SrcBurstLimit : %s Bytes\n",
1888 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1889 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1890 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1891 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1892 1.1 kiyohara (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1893 1.1 kiyohara "unknown");
1894 1.1 kiyohara printf(" DstBurstLimit : %s Bytes\n",
1895 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1896 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1897 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1898 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1899 1.1 kiyohara (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1900 1.1 kiyohara "unknown");
1901 1.1 kiyohara printf(" ChainMode : %sChained\n",
1902 1.1 kiyohara val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1903 1.1 kiyohara printf(" TransferMode : %s\n",
1904 1.1 kiyohara val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1905 1.1 kiyohara printf(" DescMode : %s\n",
1906 1.1 kiyohara val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1907 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1908 1.3 kiyohara snprintb(buf, sizeof(buf),
1909 1.3 kiyohara "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1910 1.3 kiyohara printf(" Channel Control (High) : %s\n", buf);
1911 1.1 kiyohara }
1912 1.1 kiyohara
1913 1.1 kiyohara static void
1914 1.1 kiyohara gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1915 1.1 kiyohara uint32_t mode, int post)
1916 1.1 kiyohara {
1917 1.1 kiyohara struct gtidmac_desc *desc;
1918 1.1 kiyohara int i;
1919 1.1 kiyohara char buf[256];
1920 1.1 kiyohara
1921 1.1 kiyohara printf("IDMAC Descriptor\n");
1922 1.1 kiyohara
1923 1.1 kiyohara i = 0;
1924 1.1 kiyohara while (1 /*CONSTCOND*/) {
1925 1.1 kiyohara if (post)
1926 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1927 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1928 1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1929 1.1 kiyohara
1930 1.1 kiyohara desc = dd->dd_idmac_vaddr;
1931 1.1 kiyohara
1932 1.1 kiyohara printf("%d (0x%lx)\n", i, dd->dd_paddr);
1933 1.1 kiyohara if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1934 1.3 kiyohara snprintb(buf, sizeof(buf),
1935 1.1 kiyohara "\177\020b\037Own\0b\036BCLeft\0",
1936 1.3 kiyohara desc->bc.mode16m.bcnt);
1937 1.3 kiyohara printf(" Byte Count : %s\n", buf);
1938 1.1 kiyohara printf(" ByteCount : 0x%06x\n",
1939 1.1 kiyohara desc->bc.mode16m.bcnt &
1940 1.1 kiyohara GTIDMAC_CIDMABCR_BYTECNT_MASK);
1941 1.1 kiyohara } else {
1942 1.1 kiyohara printf(" Byte Count : 0x%04x\n",
1943 1.1 kiyohara desc->bc.mode64k.bcnt);
1944 1.1 kiyohara printf(" Remaining Byte Count : 0x%04x\n",
1945 1.1 kiyohara desc->bc.mode64k.rbc);
1946 1.1 kiyohara }
1947 1.1 kiyohara printf(" Source Address : 0x%08x\n", desc->srcaddr);
1948 1.1 kiyohara printf(" Destination Address : 0x%08x\n", desc->dstaddr);
1949 1.1 kiyohara printf(" Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1950 1.1 kiyohara
1951 1.1 kiyohara if (desc->nextdp == (uint32_t)NULL)
1952 1.1 kiyohara break;
1953 1.1 kiyohara
1954 1.1 kiyohara if (!post)
1955 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1956 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1957 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1958 1.1 kiyohara
1959 1.1 kiyohara i++;
1960 1.1 kiyohara dd = SLIST_NEXT(dd, dd_next);
1961 1.1 kiyohara }
1962 1.1 kiyohara if (!post)
1963 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1964 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
1965 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1966 1.1 kiyohara }
1967 1.1 kiyohara
1968 1.1 kiyohara static void
1969 1.1 kiyohara gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1970 1.1 kiyohara {
1971 1.1 kiyohara uint32_t val, opmode;
1972 1.1 kiyohara char buf[64];
1973 1.1 kiyohara
1974 1.1 kiyohara printf("XORE Registers\n");
1975 1.1 kiyohara
1976 1.8 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1977 1.3 kiyohara snprintb(buf, sizeof(buf),
1978 1.3 kiyohara "\177\020"
1979 1.1 kiyohara "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1980 1.3 kiyohara val);
1981 1.1 kiyohara printf(" Configuration : %s\n", buf);
1982 1.1 kiyohara opmode = val & MVXORE_XEXCR_OM_MASK;
1983 1.1 kiyohara printf(" OperationMode : %s operation\n",
1984 1.1 kiyohara opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1985 1.1 kiyohara opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1986 1.1 kiyohara opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1987 1.1 kiyohara opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1988 1.1 kiyohara opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1989 1.1 kiyohara "unknown");
1990 1.1 kiyohara printf(" SrcBurstLimit : %s Bytes\n",
1991 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1992 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1993 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1994 1.1 kiyohara "unknown");
1995 1.1 kiyohara printf(" DstBurstLimit : %s Bytes\n",
1996 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1997 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1998 1.1 kiyohara (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1999 1.1 kiyohara "unknown");
2000 1.8 kiyohara val =
2001 1.8 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
2002 1.1 kiyohara printf(" Activation : 0x%08x\n", val);
2003 1.1 kiyohara val &= MVXORE_XEXACTR_XESTATUS_MASK;
2004 1.1 kiyohara printf(" XEstatus : %s\n",
2005 1.1 kiyohara val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
2006 1.1 kiyohara val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
2007 1.1 kiyohara val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
2008 1.1 kiyohara
2009 1.1 kiyohara if (opmode == MVXORE_XEXCR_OM_XOR ||
2010 1.1 kiyohara opmode == MVXORE_XEXCR_OM_CRC32 ||
2011 1.1 kiyohara opmode == MVXORE_XEXCR_OM_DMA) {
2012 1.1 kiyohara printf(" NextDescPtr : 0x%08x\n",
2013 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2014 1.8 kiyohara MVXORE_XEXNDPR(sc, chan)));
2015 1.1 kiyohara printf(" CurrentDescPtr : 0x%08x\n",
2016 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2017 1.1 kiyohara MVXORE_XEXCDPR(chan)));
2018 1.1 kiyohara }
2019 1.1 kiyohara printf(" ByteCnt : 0x%08x\n",
2020 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
2021 1.1 kiyohara
2022 1.1 kiyohara if (opmode == MVXORE_XEXCR_OM_ECC ||
2023 1.1 kiyohara opmode == MVXORE_XEXCR_OM_MEMINIT) {
2024 1.1 kiyohara printf(" DstPtr : 0x%08x\n",
2025 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2026 1.8 kiyohara MVXORE_XEXDPR(sc, chan)));
2027 1.1 kiyohara printf(" BlockSize : 0x%08x\n",
2028 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2029 1.8 kiyohara MVXORE_XEXBSR(sc, chan)));
2030 1.1 kiyohara
2031 1.1 kiyohara if (opmode == MVXORE_XEXCR_OM_ECC) {
2032 1.1 kiyohara val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2033 1.1 kiyohara MVXORE_XETMCR);
2034 1.1 kiyohara if (val & MVXORE_XETMCR_TIMEREN) {
2035 1.1 kiyohara val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
2036 1.1 kiyohara val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
2037 1.1 kiyohara printf(" SectionSizeCtrl : 0x%08x\n", 1 << val);
2038 1.1 kiyohara printf(" TimerInitVal : 0x%08x\n",
2039 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2040 1.1 kiyohara MVXORE_XETMIVR));
2041 1.1 kiyohara printf(" TimerCrntVal : 0x%08x\n",
2042 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2043 1.1 kiyohara MVXORE_XETMCVR));
2044 1.1 kiyohara }
2045 1.1 kiyohara } else /* MVXORE_XEXCR_OM_MEMINIT */
2046 1.1 kiyohara printf(" InitVal : 0x%08x%08x\n",
2047 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2048 1.1 kiyohara MVXORE_XEIVRH),
2049 1.1 kiyohara bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2050 1.1 kiyohara MVXORE_XEIVRL));
2051 1.1 kiyohara }
2052 1.1 kiyohara }
2053 1.1 kiyohara
2054 1.1 kiyohara static void
2055 1.1 kiyohara gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
2056 1.1 kiyohara uint32_t mode, int post)
2057 1.1 kiyohara {
2058 1.3 kiyohara struct mvxore_desc *desc;
2059 1.1 kiyohara int i, j;
2060 1.1 kiyohara char buf[256];
2061 1.1 kiyohara
2062 1.1 kiyohara printf("XORE Descriptor\n");
2063 1.1 kiyohara
2064 1.1 kiyohara mode &= MVXORE_XEXCR_OM_MASK;
2065 1.1 kiyohara
2066 1.1 kiyohara i = 0;
2067 1.1 kiyohara while (1 /*CONSTCOND*/) {
2068 1.1 kiyohara if (post)
2069 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2070 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
2071 1.1 kiyohara BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2072 1.1 kiyohara
2073 1.1 kiyohara desc = dd->dd_xore_vaddr;
2074 1.1 kiyohara
2075 1.1 kiyohara printf("%d (0x%lx)\n", i, dd->dd_paddr);
2076 1.1 kiyohara
2077 1.3 kiyohara snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
2078 1.3 kiyohara desc->stat);
2079 1.1 kiyohara printf(" Status : %s\n", buf);
2080 1.1 kiyohara if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
2081 1.1 kiyohara printf(" CRC-32 Result : 0x%08x\n",
2082 1.1 kiyohara desc->result);
2083 1.3 kiyohara snprintb(buf, sizeof(buf),
2084 1.1 kiyohara "\177\020b\037EODIntEn\0b\036CRCLast\0"
2085 1.1 kiyohara "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
2086 1.1 kiyohara "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
2087 1.3 kiyohara desc->cmd);
2088 1.1 kiyohara printf(" Command : %s\n", buf);
2089 1.1 kiyohara printf(" Next Descriptor Address : 0x%08x\n", desc->nextda);
2090 1.1 kiyohara printf(" Byte Count : 0x%06x\n", desc->bcnt);
2091 1.1 kiyohara printf(" Destination Address : 0x%08x\n", desc->dstaddr);
2092 1.1 kiyohara if (mode == MVXORE_XEXCR_OM_XOR) {
2093 1.1 kiyohara for (j = 0; j < MVXORE_NSRC; j++)
2094 1.1 kiyohara if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
2095 1.1 kiyohara printf(" Source Address#%d :"
2096 1.1 kiyohara " 0x%08x\n", j, desc->srcaddr[j]);
2097 1.1 kiyohara } else
2098 1.1 kiyohara printf(" Source Address : 0x%08x\n",
2099 1.1 kiyohara desc->srcaddr[0]);
2100 1.1 kiyohara
2101 1.1 kiyohara if (desc->nextda == (uint32_t)NULL)
2102 1.1 kiyohara break;
2103 1.1 kiyohara
2104 1.1 kiyohara if (!post)
2105 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2106 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
2107 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2108 1.1 kiyohara
2109 1.1 kiyohara i++;
2110 1.1 kiyohara dd = SLIST_NEXT(dd, dd_next);
2111 1.1 kiyohara }
2112 1.1 kiyohara if (!post)
2113 1.1 kiyohara bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2114 1.1 kiyohara dd->dd_index * sizeof(*desc), sizeof(*desc),
2115 1.1 kiyohara BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2116 1.1 kiyohara }
2117 1.1 kiyohara #endif
2118