/* $NetBSD: nextdma.c,v 1.13 1999/03/02 12:11:25 dbj Exp $ */
2 /*
3 * Copyright (c) 1998 Darrin B. Jewell
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Darrin B. Jewell
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45
46 #include <m68k/cacheops.h>
47
48 #include <next68k/next68k/isr.h>
49
50 #define _GENERIC_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55
#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
/* Wrapped in do { } while (0) so DPRINTF() is a single statement:
 * the previous bare "if (nextdma_debug) printf x;" form could
 * capture a following "else" (dangling-else hazard) and broke when
 * used as the body of an unbraced if/else.
 */
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x) do { } while (0)
#endif
66
/* @@@ for debugging */
/* Records of the ethernet rx/tx channel configs, set in nextdma_config()
 * and cross-checked by the DIAGNOSTIC code in nextdma_intr() to catch
 * an interrupt dispatched with the wrong cookie.
 */
struct nextdma_config *debugernd;
struct nextdma_config *debugexnd;

/* Forward declarations (K&R-era __P() prototype macros). */
int nextdma_intr __P((void *));
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void next_dma_print __P((struct nextdma_config *));
81
/*
 * One-time configuration of a DMA channel: build the channel's
 * bus_dma tag, initialize the hardware, and hook up the autovectored
 * interrupt handler at the channel's IPL.
 */
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_get_tag = NULL; /* lose */
		/* generic direct-mapped implementations for everything
		 * except sync, which needs m68k cache maintenance */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = next_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* @@@ for debugging */
	/* remember the ethernet channels so nextdma_intr() can
	 * sanity-check its argument under DIAGNOSTIC */
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		debugernd = nd;
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		debugexnd = nd;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
126
/*
 * (Re)initialize a DMA channel: forget any current/continue maps,
 * reset the channel through its CSR, and seed the hardware buffer
 * registers.  Called from nextdma_config() and nextdma_reset().
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* clear the CSR, then pulse init/reset */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
	    DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	/* with both maps NULL these seed the buffer registers
	 * with the 0xdeadbeef poison values */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * yesterday, another NeXT (milo) didn't have this problem
	 * Darrin B. Jewell <jewell (at) mit.edu> Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		/* read twice; presumably to let the CSR settle after
		 * reset -- TODO confirm against hardware docs */
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
		    DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}
166
167
/*
 * Reset a DMA channel: reinitialize it from scratch with DMA
 * interrupts blocked so the handler cannot observe half-torn state.
 */
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int ospl;

	ospl = spldma();	/* @@@ should this be splimp()? */

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	/* dump the channel before tearing it down */
	if (nextdma_debug) next_dma_print(nd);
#endif

	nextdma_init(nd);

	splx(ospl);
}
184
185 /****************************************************************/
186
187 /* If the next had multiple busses, this should probably
188 * go elsewhere, but it is here anyway */
189 void
190 next_dmamap_sync(t, map, offset, len, ops)
191 bus_dma_tag_t t;
192 bus_dmamap_t map;
193 bus_addr_t offset;
194 bus_size_t len;
195 int ops;
196 {
197 /* flush/purge the cache.
198 * assumes pointers are aligned
199 * @@@ should probably be fixed to use offset and len args.
200 * should also optimize this to work on pages for larger regions?
201 */
202 if ((ops & BUS_DMASYNC_PREWRITE) ||
203 (ops & BUS_DMASYNC_PREREAD)) {
204 int i;
205 for(i=0;i<map->dm_nsegs;i++) {
206 bus_addr_t p = map->dm_segs[i].ds_addr;
207 bus_addr_t e = p+map->dm_segs[i].ds_len;
208 while(p<e) {
209 DCFL(p); /* flush */
210 p += 16; /* cache line length */
211 }
212 }
213 }
214
215 if ((ops & BUS_DMASYNC_POSTREAD) ||
216 (ops & BUS_DMASYNC_POSTWRITE)) {
217 int i;
218 for(i=0;i<map->dm_nsegs;i++) {
219 bus_addr_t p = map->dm_segs[i].ds_addr;
220 bus_addr_t e = p+map->dm_segs[i].ds_len;
221 while(p<e) {
222 DCPL(p); /* purge */
223 p += 16; /* cache line length */
224 }
225 }
226 }
227 }
228
229 /****************************************************************/
230
231
232 /* Call the completed and continue callbacks to try to fill
233 * in the dma continue buffers.
234 */
235 void
236 next_dma_rotate(nd)
237 struct nextdma_config *nd;
238 {
239
240 DPRINTF(("DMA next_dma_rotate()\n"));
241
242 /* If we've reached the end of the current map, then inform
243 * that we've completed that map.
244 */
245 if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
246 if (nd->nd_completed_cb)
247 (*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
248 }
249
250 /* Rotate the continue map into the current map */
251 nd->_nd_map = nd->_nd_map_cont;
252 nd->_nd_idx = nd->_nd_idx_cont;
253
254 if ((!nd->_nd_map_cont) ||
255 ((nd->_nd_map_cont) &&
256 (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
257 if (nd->nd_continue_cb) {
258 nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
259 } else {
260 nd->_nd_map_cont = 0;
261 }
262 nd->_nd_idx_cont = 0;
263 }
264
265 #ifdef DIAGNOSTIC
266 if (nd->_nd_map_cont) {
267 if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
268 next_dma_print(nd);
269 panic("DMA request unaligned at start\n");
270 }
271 if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
272 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
273 next_dma_print(nd);
274 panic("DMA request unaligned at end\n");
275 }
276 }
277 #endif
278
279 }
280
281 void
282 next_dma_setup_cont_regs(nd)
283 struct nextdma_config *nd;
284 {
285 DPRINTF(("DMA next_dma_setup_regs()\n"));
286
287 if (nd->_nd_map_cont) {
288
289 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
290 /* Ethernet transmit needs secret magic */
291
292 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
293 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
294 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
295 ((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
296 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
297 + 0x0) | 0x80000000);
298 } else {
299 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
300 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
301 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
302 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
303 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
304 }
305
306 } else {
307
308 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0xdeadbeef);
309 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0xdeadbeef);
310 }
311
312 #if 1 /* 0xfeedbeef in these registers leads to instability. it will
313 * panic after a short while with 0xfeedbeef in the DD_START and DD_STOP
314 * registers. I suspect that an unexpected hardware restart
315 * is cycling the bogus values into the active registers. Until
316 * that is understood, we seed these with the same as DD_START and DD_STOP
317 */
318 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
319 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
320 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
321 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));
322 #else
323 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, 0xfeedbeef);
324 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, 0xfeedbeef);
325 #endif
326
327 }
328
329 void
330 next_dma_setup_curr_regs(nd)
331 struct nextdma_config *nd;
332 {
333 DPRINTF(("DMA next_dma_setup_curr_regs()\n"));
334
335 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
336 /* Ethernet transmit needs secret magic */
337
338 if (nd->_nd_map) {
339
340 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
341 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
342 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
343 ((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
344 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
345 + 0x0) | 0x80000000);
346 } else {
347 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,0xdeadbeef);
348 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
349
350 }
351
352 #if 1 /* See comment in next_dma_setup_cont_regs() above */
353 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
354 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
355 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
356 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
357 #else
358 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
359 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
360 #endif
361
362 } else {
363
364 if (nd->_nd_map) {
365
366 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
367 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
368 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
369 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
370 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
371 } else {
372 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0xdeadbeef);
373 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
374
375 }
376
377 #if 1 /* See comment in next_dma_setup_cont_regs() above */
378 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
379 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
380 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
381 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
382 #else
383 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
384 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
385 #endif
386
387 }
388
389 }
390
391
/* This routine is used for debugging */

/*
 * Dump the channel's software state (current/continue maps and
 * indices) and a consistent snapshot of the DMA registers to the
 * console.  Used by the DIAGNOSTIC panics and when nextdma_debug
 * is set.
 */
void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	/* NDMAP is Next DMA Print (really!) */

	printf("NDMAP: nd->_nd_dmadir = 0x%08x\n",nd->_nd_dmadir);

	/* software view: current map/segment */
	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
		    nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
		    nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		    nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
		    nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	/* software view: continue map/segment */
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
		    nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
		    nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		    nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
		    nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	/* hardware view: the register snapshot taken above */
	printf("NDMAP: dd->dd_csr = 0x%b\n", dd_csr, DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
}
466
467 /****************************************************************/
468
469 int
470 nextdma_intr(arg)
471 void *arg;
472 {
473 struct nextdma_config *nd = arg;
474
475 /* @@@ This is bogus, we can't be certain of arg's type
476 * unless the interrupt is for us
477 */
478
479 if (!INTR_OCCURRED(nd->nd_intr)) return 0;
480 /* Handle dma interrupts */
481
482 #ifdef DIAGNOSTIC
483 if (nd->nd_intr == NEXT_I_ENETR_DMA) {
484 if (debugernd != nd) {
485 panic("DMA incorrect handling of rx nd->nd_intr");
486 }
487 }
488 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
489 if (debugexnd != nd) {
490 panic("DMA incorrect handling of tx nd->nd_intr");
491 }
492 }
493 #endif
494
495 DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
496 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
497
498 #ifdef DIAGNOSTIC
499 if (!nd->_nd_map) {
500 next_dma_print(nd);
501 panic("DMA missing current map in interrupt!\n");
502 }
503 #endif
504
505 {
506 int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
507
508 #ifdef DIAGNOSTIC
509 if (!(state & DMACSR_COMPLETE)) {
510 next_dma_print(nd);
511 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
512 panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
513 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
514 }
515 #endif
516
517 #if 0 /* This bit gets set sometimes & I don't know why. */
518 #ifdef DIAGNOSTIC
519 if (state & DMACSR_BUSEXC) {
520 next_dma_print(nd);
521 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
522 panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
523 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
524 }
525 #endif
526 #endif
527
528 /* Check to see if we are expecting dma to shut down */
529 if (!nd->_nd_map_cont) {
530
531 #ifdef DIAGNOSTIC
532 #if 1 /* Sometimes the DMA registers have totally bogus values when read.
533 * Until that's understood, we skip this check
534 */
535
536 /* Verify that the registers are laid out as expected */
537 {
538 bus_addr_t next;
539 bus_addr_t limit;
540 bus_addr_t expected_limit;
541 expected_limit =
542 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
543 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
544
545 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
546 next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
547 limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) & ~0x80000000;
548 } else {
549 next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
550 limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
551 }
552
553 if ((next != limit) || (limit != expected_limit)) {
554 next_dma_print(nd);
555 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
556 panic("unexpected DMA limit at shutdown 0x%08x, 0x%08x, 0x%08x",
557 next,limit,expected_limit);
558 }
559 }
560 #endif
561 #endif
562
563 #if 1
564 #ifdef DIAGNOSTIC
565 if (state & (DMACSR_SUPDATE|DMACSR_ENABLE)) {
566 next_dma_print(nd);
567 panic("DMA: unexpected bits set in DMA state at shutdown (0x%b)\n",
568 state,DMACSR_BITS);
569 }
570 #endif
571 #endif
572
573 if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
574 if (nd->nd_completed_cb)
575 (*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
576 }
577 nd->_nd_map = 0;
578 nd->_nd_idx = 0;
579
580 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
581 DMACSR_CLRCOMPLETE | DMACSR_RESET);
582
583 DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
584 if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
585
586 return(1);
587 }
588
589 #if 0
590 #ifdef DIAGNOSTIC
591 if (!(state & DMACSR_SUPDATE)) {
592 next_dma_print(nd);
593 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
594 panic("SUPDATE not set with continuing DMA");
595 }
596 #endif
597 #endif
598
599 /* Check that the buffer we are interrupted for is the one we expect.
600 * Shorten the buffer if the dma completed with a short buffer
601 */
602 {
603 bus_addr_t next;
604 bus_addr_t limit;
605 bus_addr_t expected_next;
606 bus_addr_t expected_limit;
607
608 expected_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
609 expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
610
611 #if 0 /* for some unknown reason, somtimes DD_SAVED_NEXT has value from
612 * nd->_nd_map and sometimes it has value from nd->_nd_map_cont.
613 * Somtimes, it has a completely different unknown value.
614 * Until that's understood, we won't sanity check the expected_next value.
615 */
616 next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
617 #else
618 next = expected_next;
619 #endif
620 limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
621
622 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
623 limit &= ~0x80000000;
624 }
625
626 if ((limit-next < 0) ||
627 (limit-next >= expected_limit-expected_next)) {
628 #ifdef DIAGNOSTIC
629 #if 0 /* Sometimes, (under load I think) even DD_SAVED_LIMIT has
630 * a bogus value. Until that's understood, we don't panic
631 * here.
632 */
633 next_dma_print(nd);
634 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
635 panic("Unexpected saved registers values.");
636 #endif
637 #endif
638 } else {
639 /* Set the length of the segment to match actual length.
640 * @@@ is it okay to resize dma segments here?
641 * i should probably ask jason about this.
642 */
643 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit-next;
644 expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
645 }
646
647 #if 0 /* these checks are turned off until the above mentioned weirdness is fixed. */
648 #ifdef DIAGNOSTIC
649 if (next != expected_next) {
650 next_dma_print(nd);
651 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
652 panic("unexpected DMA next buffer in interrupt (found 0x%08x, expected 0x%08x)",
653 next,expected_next);
654 }
655 if (limit != expected_limit) {
656 next_dma_print(nd);
657 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
658 panic("unexpected DMA limit buffer in interrupt (found 0x%08x, expected 0x%08x)",
659 limit,expected_limit);
660 }
661 #endif
662 #endif
663 }
664
665 next_dma_rotate(nd);
666 next_dma_setup_cont_regs(nd);
667
668 if (!(state & DMACSR_ENABLE)) {
669
670 DPRINTF(("Unexpected DMA shutdown, restarting\n"));
671
672 if (nd->_nd_map_cont) {
673 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
674 DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
675 } else {
676 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
677 DMACSR_SETENABLE | nd->_nd_dmadir);
678 }
679
680 } else {
681
682 if (nd->_nd_map_cont) {
683 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
684 DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
685 } else {
686 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
687 DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
688 }
689 }
690
691 }
692
693 DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
694 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
695
696 return(1);
697 }
698
699 /*
700 * Check to see if dma has finished for a channel */
701 int
702 nextdma_finished(nd)
703 struct nextdma_config *nd;
704 {
705 int r;
706 int s;
707 s = spldma(); /* @@@ should this be splimp()? */
708 r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
709 splx(s);
710 return(r);
711 }
712
/*
 * Begin a new DMA transfer in the given direction (DMACSR_READ or
 * DMACSR_WRITE).  The caller's nd_continue_cb supplies the maps:
 * next_dma_rotate() is called twice so both the current and the
 * continue slot are preloaded before the channel is enabled.  Must
 * only be called when nextdma_finished() reports the channel idle.
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir; /* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
		    NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_READ) && (dmadir != DMACSR_WRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_READ or DMACSR_WRITE\n");
	}
#endif

	nd->_nd_dmadir = dmadir;

	/* preload both the current and the continue maps */
	/* first rotate pulls the first map into the continue slot... */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* ...second rotate moves it into the current slot and pulls
	 * a possible second map into the continue slot */
	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
	    (nd->_nd_dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map->dm_nsegs,
	    NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* reset the channel before loading the buffer registers */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
	    DMACSR_INITBUF | DMACSR_RESET | nd->_nd_dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* enable; request a segment-update interrupt only when a
	 * continue map is queued */
	if (nd->_nd_map_cont) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
		    DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
		    DMACSR_SETENABLE | nd->_nd_dmadir);
	}

}
783