/*	$NetBSD: nextdma.c,v 1.15 1999/03/14 10:31:05 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _GENERIC_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

/* @@@ for debugging */
struct nextdma_config *debugernd;
struct nextdma_config *debugexnd;

void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

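/*
 * Rough usage sketch (not from the original author; inferred from the
 * interfaces below): a driver fills in a struct nextdma_config with its
 * bus space tag/handle and interrupt (nd_bst, nd_bsh, nd_intr), its
 * nd_continue_cb/nd_completed_cb/nd_shutdown_cb callbacks and nd_cb_arg,
 * then calls nextdma_config() once at attach time.  To move data it loads
 * and syncs a dmamap and calls
 *
 *	nextdma_start(nd, DMACSR_READ);		(or DMACSR_WRITE)
 *
 * The continue callback hands back the next loaded dmamap (or NULL when
 * there is no more work); the completed and shutdown callbacks are invoked
 * from nextdma_intr() as buffers finish.
 */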
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_get_tag = NULL;		/* lose */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = next_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* @@@ for debugging */
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		debugernd = nd;
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		debugexnd = nd;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * yesterday, another NeXT (milo) didn't have this problem
	 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();		/* @@@ should this be splimp()? */

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/

/* If the NeXT had multiple busses, this should probably
 * go elsewhere, but it is here anyway */
void
next_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/* flush/purge the cache.
	 * assumes pointers are aligned
	 * @@@ should probably be fixed to use offset and len args.
	 * should also optimize this to work on pages for larger regions?
	 */
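	/*
	 * The PRE ops run before the device touches memory: flushing
	 * (DCFL) pushes any dirty data cache lines covering the buffer
	 * out to RAM.  The POST ops run after the transfer: purging
	 * (DCPL) invalidates the cached copies so the CPU will re-read
	 * what the device deposited.  Both walk each segment one 16-byte
	 * cache line at a time.
	 */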

	if ((ops & BUS_DMASYNC_PREWRITE) ||
			(ops & BUS_DMASYNC_PREREAD)) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
#ifdef DIAGNOSTIC
			if ((p % 16) || (e % 16)) {
				panic("unaligned address in next_dmamap_sync while flushing.\n"
						"address=0x%08x, end=0x%08x, ops=0x%x",
						p, e, ops);
			}
#endif
			while (p < e) {
				DCFL(p);		/* flush */
				p += 16;		/* cache line length */
			}
		}
	}

	if ((ops & BUS_DMASYNC_POSTREAD) ||
			(ops & BUS_DMASYNC_POSTWRITE)) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
#ifdef DIAGNOSTIC
			/* We don't check the end address for alignment since if the
			 * dma operation stops short, the end address may be modified.
			 */
			if (p % 16) {
				panic("unaligned address in next_dmamap_sync while purging.\n"
						"address=0x%08x, end=0x%08x, ops=0x%x",
						p, e, ops);
			}
#endif
			while (p < e) {
				DCPL(p);		/* purge */
				p += 16;		/* cache line length */
			}
		}
	}
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
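/*
 * The channel keeps two dmamap segments queued at once: the one the
 * hardware is working on now (_nd_map/_nd_idx) and the one lined up
 * behind it (_nd_map_cont/_nd_idx_cont).  next_dma_rotate() advances the
 * queued segment into the current slot and asks the continue callback
 * for more work once the continue map runs out of segments.
 */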
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx + 1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
			(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}

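/*
 * Program the "continue" (chaining) registers, DD_START/DD_STOP, from the
 * queued segment.  The Ethernet transmit channel wants the high bit set on
 * the stop address, and with NEXTDMA_SCSI_HACK the SCSI write path pads the
 * stop address by 0x20; otherwise the plain segment bounds are used.  With
 * no continue map pending, the registers are parked at the recognizable
 * poison value 0xdeadbeef.
 */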
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			/* Ethernet transmit needs secret magic */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
					+ 0x0) | 0x80000000);

		}
#ifdef NEXTDMA_SCSI_HACK
		else if ((nd->nd_intr == NEXT_I_SCSI_DMA) && (nd->_nd_dmadir == DMACSR_WRITE)) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
					+ 0x20));
		}
#endif
		else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		}

	} else {

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0xdeadbeef);
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0xdeadbeef);
	}

#if 1	/* 0xfeedbeef in these registers leads to instability.  It will
	 * panic after a short while with 0xfeedbeef in the DD_START and DD_STOP
	 * registers.  I suspect that an unexpected hardware restart
	 * is cycling the bogus values into the active registers.  Until
	 * that is understood, we seed these with the same values as DD_START
	 * and DD_STOP.
	 */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));
#else
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, 0xfeedbeef);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, 0xfeedbeef);
#endif

}

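/*
 * Program the "current" buffer registers, DD_NEXT_INITBUF/DD_LIMIT, from
 * the current map, applying the same per-channel address quirks as the
 * continue registers above.
 */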
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			/* Ethernet transmit needs secret magic */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					+ 0x0) | 0x80000000);

		}
#ifdef NEXTDMA_SCSI_HACK
		else if ((nd->nd_intr == NEXT_I_SCSI_DMA) && (nd->_nd_dmadir == DMACSR_WRITE)) {
			/* SCSI needs secret magic */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					+ 0x20));

		}
#endif
		else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		}

	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0xdeadbeef);
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
	}

#if 1	/* See comment in next_dma_setup_cont_regs() above */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
#else
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
#endif

}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	/* NDMAP is Next DMA Print (really!) */

	printf("NDMAP: nd->_nd_dmadir = 0x%08x\n", nd->_nd_dmadir);

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr,   DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
}

/****************************************************************/

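/*
 * Interrupt handler.  On each COMPLETE interrupt the segment that just
 * finished may be trimmed to the length the hardware actually transferred,
 * the current/continue maps are rotated, the continue registers are
 * reloaded, and the channel is told to keep going.  When there is no
 * continue map left, the channel is reset and the shutdown callback fires.
 */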
int
nextdma_intr(arg)
	void *arg;
{
	struct nextdma_config *nd = arg;

	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us
	 */

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef DIAGNOSTIC
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		if (debugernd != nd) {
			panic("DMA incorrect handling of rx nd->nd_intr");
		}
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		if (debugexnd != nd) {
			panic("DMA incorrect handling of tx nd->nd_intr");
		}
	}
#endif

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			next_dma_print(nd);
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
		}
#endif

#if 0	/* This bit gets set sometimes & I don't know why. */
#ifdef DIAGNOSTIC
		if (state & DMACSR_BUSEXC) {
			next_dma_print(nd);
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("DMA ipl (%ld) intr(0x%b), DMACSR_BUSEXC set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
		}
#endif
#endif

		/* Check to see if we are expecting dma to shut down */
		if (!nd->_nd_map_cont) {

#ifdef DIAGNOSTIC
#if 1	/* Sometimes the DMA registers have totally bogus values when read.
	 * Until that's understood, this check may need to be skipped.
	 */

			/* Verify that the registers are laid out as expected */
			{
				bus_addr_t next;
				bus_addr_t limit;
				bus_addr_t expected_limit;
				expected_limit =
						nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
						nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

				if (nd->nd_intr == NEXT_I_ENETX_DMA) {
					next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
					limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) & ~0x80000000;
				}
#ifdef NEXTDMA_SCSI_HACK
				else if ((nd->nd_intr == NEXT_I_SCSI_DMA) && (nd->_nd_dmadir == DMACSR_WRITE)) {
					next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) - 0x20;
					limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) - 0x20;
				}
#endif
				else {
					next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
					limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
				}

				if ((next != limit) || (limit != expected_limit)) {
					next_dma_print(nd);
					printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
					panic("unexpected DMA limit at shutdown 0x%08x, 0x%08x, 0x%08x",
							next, limit, expected_limit);
				}
			}
#endif
#endif

#if 1
#ifdef DIAGNOSTIC
			if (state & (DMACSR_SUPDATE|DMACSR_ENABLE)) {
				next_dma_print(nd);
				panic("DMA: unexpected bits set in DMA state at shutdown (0x%b)\n",
						state, DMACSR_BITS);
			}
#endif
#endif


			if ((nd->_nd_idx + 1) == nd->_nd_map->dm_nsegs) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

#if 0
#ifdef DIAGNOSTIC
		if (!(state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("SUPDATE not set with continuing DMA");
		}
#endif
#endif

		/* Check that the buffer we are interrupted for is the one we expect.
		 * Shorten the buffer if the dma completed with a short buffer
		 */
		{
			bus_addr_t next;
			bus_addr_t limit;
			bus_addr_t expected_next;
			bus_addr_t expected_limit;

			expected_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

#if 0	/* For some unknown reason, sometimes DD_SAVED_NEXT has a value from
	 * nd->_nd_map and sometimes it has a value from nd->_nd_map_cont.
	 * Sometimes, it has a completely different unknown value.
	 * Until that's understood, we won't sanity check the expected_next value.
	 */
			next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
#else
			next = expected_next;
#endif
			limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				limit &= ~0x80000000;
			}
#ifdef NEXTDMA_SCSI_HACK
			else if ((nd->nd_intr == NEXT_I_SCSI_DMA) && (nd->_nd_dmadir == DMACSR_WRITE)) {
				limit -= 0x20;
			}
#endif

			if ((limit < next) ||
					(limit - next >= expected_limit - expected_next)) {
#ifdef DIAGNOSTIC
#if 0	/* Sometimes, (under load I think) even DD_SAVED_LIMIT has
	 * a bogus value.  Until that's understood, we don't panic
	 * here.
	 */
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("Unexpected saved register values.");
#endif
#endif
			} else {
				/* Set the length of the segment to match actual length.
				 * @@@ is it okay to resize dma segments here?
				 * I should probably ask Jason about this.
				 */
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit - next;
				expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
			}

#if 0	/* These checks are turned off until the above mentioned weirdness is fixed. */
#ifdef DIAGNOSTIC
			if (next != expected_next) {
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("unexpected DMA next buffer in interrupt (found 0x%08x, expected 0x%08x)",
						next, expected_next);
			}
			if (limit != expected_limit) {
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("unexpected DMA limit buffer in interrupt (found 0x%08x, expected 0x%08x)",
						limit, expected_limit);
			}
#endif
#endif
		}

		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		if (!(state & DMACSR_ENABLE)) {

			DPRINTF(("Unexpected DMA shutdown, restarting\n"));

			if (nd->_nd_map_cont) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETENABLE | nd->_nd_dmadir);
			}

		} else {

			if (nd->_nd_map_cont) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
			}
		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();		/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;				/* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_READ) && (dmadir != DMACSR_WRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_READ or DMACSR_WRITE\n");
	}
#endif

	nd->_nd_dmadir = dmadir;

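	/*
	 * Two rotations are needed below: the first pulls the caller's first
	 * dmamap in via the continue callback, and the second moves it into
	 * the current slot while lining up the following segment (or next
	 * map, if any) as the continue buffer, so both register sets start
	 * out loaded.
	 */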
	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(nd->_nd_dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | nd->_nd_dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	if (nd->_nd_map_cont) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | nd->_nd_dmadir);
	}

}