/*	$NetBSD: nextdma.c,v 1.22 2000/08/09 02:26:26 tv Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _NEXT68K_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));
void next_dma_finish_xfer __P((struct nextdma_config *));

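/*
 * Set up the bus_dma tag for this channel (currently embedded in the
 * nextdma_config structure itself), reset the channel hardware, and
 * hook the DMA interrupt handler into the autovectored interrupt chain.
 */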
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
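
/*
 * Usage sketch (hypothetical caller, not taken from a real driver): the
 * field names below are the ones used elsewhere in this file, but the
 * exact callback prototypes are presumably declared in nextdmavar.h and
 * are only assumed here.
 *
 *	nd->nd_bst = bst;			(bus space tag for the channel)
 *	nd->nd_bsh = bsh;			(bus space handle for the channel)
 *	nd->nd_intr = NEXT_I_ENETX_DMA;		(which DMA interrupt we own)
 *	nd->nd_continue_cb = mydrv_continue;	(returns the next bus_dmamap_t, or NULL)
 *	nd->nd_completed_cb = mydrv_completed;	(called as each map finishes)
 *	nd->nd_shutdown_cb = mydrv_shutdown;	(called when the channel goes idle)
 *	nd->nd_cb_arg = sc;
 *	nextdma_config(nd);
 *	...
 *	nextdma_start(nd, DMACSR_SETREAD);	(or DMACSR_SETWRITE)
 */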

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			  DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25Mhz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}

void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/

/* Call the continue callback to try to fill
 * in the dma continue buffer.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
	    ((nd->_nd_map_cont) &&
	     (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
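		/* 0x1234beef is apparently a poison value: it marks this
		 * segment's ds_xfer_len as not yet filled in until
		 * next_dma_finish_xfer() records the real transfer count.
		 */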
		nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0x1234beef;
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				    nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}

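/* Load the continue (start/stop) registers and their saved copies from
 * the pending continue map segment, or poison them with 0xdeadbeef when
 * there is no continue map.
 */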
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
			   nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;	/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}

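/* Load the current (next/limit) registers and their saved copies from
 * the current map segment, or poison them with 0xdeadbeef when there is
 * no current map.  On the Ethernet transmit channel the next address is
 * written via DD_NEXT_INITBUF rather than DD_NEXT.
 */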
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
			    nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000;	/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}

/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
		       nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
		       nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
		       nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
		       nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
		       nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
		       nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
		       nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
		       nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr = 0x%s\n", sbuf);

	printf("NDMAP: dd->dd_saved_next = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08lx\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
			 sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
	       NEXT_I_IPL(nd->nd_intr), sbuf);
}

/****************************************************************/
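/* Record how much of the current segment the hardware actually
 * transferred (ds_xfer_len), call the completed callback if this was the
 * last segment of the current map, and then retire the current map.
 */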
void
next_dma_finish_xfer(nd)
	struct nextdma_config *nd;
{
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;

	onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
	olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	} else {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	}

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
	}

#ifdef DIAGNOSTIC
	if ((slimit < onext) || (slimit > olimit)) {
		next_dma_print(nd);
		panic("DMA: Unexpected registers in finish_xfer\n");
	}
#endif

	nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}
	nd->_nd_map = 0;
	nd->_nd_idx = 0;
}

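/* Autovectored interrupt handler for the DMA channel: acknowledge the
 * completed segment, rotate in the next map, reload the continue
 * registers, and either restart the channel or shut it down when no
 * more maps are pending.  Returns nonzero if the interrupt was ours.
 */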
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us. For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		if ((!(state & DMACSR_COMPLETE)) || (state & DMACSR_SUPDATE)) {
			char sbuf[256];

			next_dma_print(nd);

			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			panic("DMA Unexpected dma state in interrupt (0x%s)", sbuf);
		}
#endif

		next_dma_finish_xfer(nd);

		/* Check to see if we are expecting dma to shut down */
		if ((nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL)) {

#ifdef DIAGNOSTIC
			if (state & DMACSR_ENABLE) {
				char sbuf[256];

				next_dma_print(nd);

				bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
				panic("DMA: unexpected DMA state at shutdown (0x%s)\n", sbuf);
			}
#endif
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					  DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		{
			u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */

			if (state & DMACSR_READ) {
				dmadir = DMACSR_SETREAD;
			} else {
				dmadir = DMACSR_SETWRITE;
			}

			/* we used to SETENABLE here only conditionally,
			 * but we got burned because DMA sometimes would
			 * shut down between when we checked and when we
			 * acted upon it.  CL19991211
			 */
			if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						  DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETENABLE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						  DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE | DMACSR_SETENABLE);
			}

		}

	}

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();	/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

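/* Kick off DMA on an idle channel.  dmadir must be DMACSR_SETREAD or
 * DMACSR_SETWRITE.  The current and continue maps are preloaded via the
 * continue callback before the channel is enabled.
 */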
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"),
		       nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			  DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				  DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				  DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}