nextdma.c revision 1.14 1 /* $NetBSD: nextdma.c,v 1.14 1999/03/04 14:18:26 dbj Exp $ */
2 /*
3 * Copyright (c) 1998 Darrin B. Jewell
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Darrin B. Jewell
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45
46 #include <m68k/cacheops.h>
47
48 #include <next68k/next68k/isr.h>
49
50 #define _GENERIC_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55
56 #if 1
57 #define ND_DEBUG
58 #endif
59
60 #if defined(ND_DEBUG)
61 int nextdma_debug = 0;
62 #define DPRINTF(x) if (nextdma_debug) printf x;
63 #else
64 #define DPRINTF(x)
65 #endif
66
67 /* @@@ for debugging */
68 struct nextdma_config *debugernd;
69 struct nextdma_config *debugexnd;
70
71 void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
72 bus_size_t, int));
73 int next_dma_continue __P((struct nextdma_config *));
74 void next_dma_rotate __P((struct nextdma_config *));
75
76 void next_dma_setup_cont_regs __P((struct nextdma_config *));
77 void next_dma_setup_curr_regs __P((struct nextdma_config *));
78
79 void next_dma_print __P((struct nextdma_config *));
80
81 void
82 nextdma_config(nd)
83 struct nextdma_config *nd;
84 {
85 /* Initialize the dma_tag. As a hack, we currently
86 * put the dma tag in the structure itself. It shouldn't be there.
87 */
88
89 {
90 bus_dma_tag_t t;
91 t = &nd->_nd_dmat;
92 t->_cookie = nd;
93 t->_get_tag = NULL; /* lose */
94 t->_dmamap_create = _bus_dmamap_create;
95 t->_dmamap_destroy = _bus_dmamap_destroy;
96 t->_dmamap_load = _bus_dmamap_load_direct;
97 t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
98 t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
99 t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
100 t->_dmamap_unload = _bus_dmamap_unload;
101 t->_dmamap_sync = next_dmamap_sync;
102
103 t->_dmamem_alloc = _bus_dmamem_alloc;
104 t->_dmamem_free = _bus_dmamem_free;
105 t->_dmamem_map = _bus_dmamem_map;
106 t->_dmamem_unmap = _bus_dmamem_unmap;
107 t->_dmamem_mmap = _bus_dmamem_mmap;
108
109 nd->nd_dmat = t;
110 }
111
112 /* @@@ for debugging */
113 if (nd->nd_intr == NEXT_I_ENETR_DMA) {
114 debugernd = nd;
115 }
116 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
117 debugexnd = nd;
118 }
119
120 nextdma_init(nd);
121
122 isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
123 INTR_ENABLE(nd->nd_intr);
124 }
125
/*
 * Reset a DMA channel to its idle state: forget any in-progress maps,
 * reset the channel CSR, and park poison/idle values in the buffer
 * registers via the setup helpers.  Called at config time and from
 * nextdma_reset().
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* Clear the CSR first, then issue the reset/initbuf command. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	/* With both maps NULL these seed the registers with 0xdeadbeef. */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * yesterday, another NeXT (milo) didn't have this problem
	 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		/* Any of the above bits still set means the reset failed. */
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}
165
166
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	/* Block DMA interrupts while the channel state is torn down. */
	int s = spldma();	/* @@@ should this be splimp()? */

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug)
		next_dma_print(nd);
#endif

	/* A reset is simply a re-init of the channel. */
	nextdma_init(nd);

	splx(s);
}
183
184 /****************************************************************/
185
186 /* If the next had multiple busses, this should probably
187 * go elsewhere, but it is here anyway */
188 void
189 next_dmamap_sync(t, map, offset, len, ops)
190 bus_dma_tag_t t;
191 bus_dmamap_t map;
192 bus_addr_t offset;
193 bus_size_t len;
194 int ops;
195 {
196 /* flush/purge the cache.
197 * assumes pointers are aligned
198 * @@@ should probably be fixed to use offset and len args.
199 * should also optimize this to work on pages for larger regions?
200 */
201 if ((ops & BUS_DMASYNC_PREWRITE) ||
202 (ops & BUS_DMASYNC_PREREAD)) {
203 int i;
204 for(i=0;i<map->dm_nsegs;i++) {
205 bus_addr_t p = map->dm_segs[i].ds_addr;
206 bus_addr_t e = p+map->dm_segs[i].ds_len;
207 while(p<e) {
208 DCFL(p); /* flush */
209 p += 16; /* cache line length */
210 }
211 }
212 }
213
214 if ((ops & BUS_DMASYNC_POSTREAD) ||
215 (ops & BUS_DMASYNC_POSTWRITE)) {
216 int i;
217 for(i=0;i<map->dm_nsegs;i++) {
218 bus_addr_t p = map->dm_segs[i].ds_addr;
219 bus_addr_t e = p+map->dm_segs[i].ds_len;
220 while(p<e) {
221 DCPL(p); /* purge */
222 p += 16; /* cache line length */
223 }
224 }
225 }
226 }
227
228 /****************************************************************/
229
230
231 /* Call the completed and continue callbacks to try to fill
232 * in the dma continue buffers.
233 */
234 void
235 next_dma_rotate(nd)
236 struct nextdma_config *nd;
237 {
238
239 DPRINTF(("DMA next_dma_rotate()\n"));
240
241 /* If we've reached the end of the current map, then inform
242 * that we've completed that map.
243 */
244 if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
245 if (nd->nd_completed_cb)
246 (*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
247 }
248
249 /* Rotate the continue map into the current map */
250 nd->_nd_map = nd->_nd_map_cont;
251 nd->_nd_idx = nd->_nd_idx_cont;
252
253 if ((!nd->_nd_map_cont) ||
254 ((nd->_nd_map_cont) &&
255 (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
256 if (nd->nd_continue_cb) {
257 nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
258 } else {
259 nd->_nd_map_cont = 0;
260 }
261 nd->_nd_idx_cont = 0;
262 }
263
264 #ifdef DIAGNOSTIC
265 if (nd->_nd_map_cont) {
266 if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
267 next_dma_print(nd);
268 panic("DMA request unaligned at start\n");
269 }
270 if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
271 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
272 next_dma_print(nd);
273 panic("DMA request unaligned at end\n");
274 }
275 }
276 #endif
277
278 }
279
280 void
281 next_dma_setup_cont_regs(nd)
282 struct nextdma_config *nd;
283 {
284 DPRINTF(("DMA next_dma_setup_regs()\n"));
285
286 if (nd->_nd_map_cont) {
287
288 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
289 /* Ethernet transmit needs secret magic */
290
291 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
292 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
293 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
294 ((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
295 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
296 + 0x0) | 0x80000000);
297 } else {
298 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
299 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
300 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
301 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
302 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
303 }
304
305 } else {
306
307 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0xdeadbeef);
308 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0xdeadbeef);
309 }
310
311 #if 1 /* 0xfeedbeef in these registers leads to instability. it will
312 * panic after a short while with 0xfeedbeef in the DD_START and DD_STOP
313 * registers. I suspect that an unexpected hardware restart
314 * is cycling the bogus values into the active registers. Until
315 * that is understood, we seed these with the same as DD_START and DD_STOP
316 */
317 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
318 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
319 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
320 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));
321 #else
322 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, 0xfeedbeef);
323 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, 0xfeedbeef);
324 #endif
325
326 }
327
328 void
329 next_dma_setup_curr_regs(nd)
330 struct nextdma_config *nd;
331 {
332 DPRINTF(("DMA next_dma_setup_curr_regs()\n"));
333
334 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
335 /* Ethernet transmit needs secret magic */
336
337 if (nd->_nd_map) {
338
339 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
340 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
341 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
342 ((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
343 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
344 + 0x0) | 0x80000000);
345 } else {
346 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,0xdeadbeef);
347 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
348
349 }
350
351 #if 1 /* See comment in next_dma_setup_cont_regs() above */
352 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
353 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
354 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
355 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
356 #else
357 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
358 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
359 #endif
360
361 } else {
362
363 if (nd->_nd_map) {
364
365 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
366 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
367 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
368 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
369 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
370 } else {
371 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0xdeadbeef);
372 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
373
374 }
375
376 #if 1 /* See comment in next_dma_setup_cont_regs() above */
377 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
378 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
379 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
380 bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
381 #else
382 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
383 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
384 #endif
385
386 }
387
388 }
389
390
/* This routine is used for debugging: dump the channel's software
 * bookkeeping (current and continue maps/segments) and a snapshot of
 * every DMA hardware register for this channel.
 */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	/* NDMAP is Next DMA Print (really!) */

	printf("NDMAP: nd->_nd_dmadir = 0x%08x\n",nd->_nd_dmadir);

	/* Software view: current map/segment, if any. */
	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	/* Software view: continue map/segment, if any. */
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	/* Hardware view: the register snapshot taken above. */
	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr,   DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
}
465
466 /****************************************************************/
467
468 int
469 nextdma_intr(arg)
470 void *arg;
471 {
472 struct nextdma_config *nd = arg;
473
474 /* @@@ This is bogus, we can't be certain of arg's type
475 * unless the interrupt is for us
476 */
477
478 if (!INTR_OCCURRED(nd->nd_intr)) return 0;
479 /* Handle dma interrupts */
480
481 #ifdef DIAGNOSTIC
482 if (nd->nd_intr == NEXT_I_ENETR_DMA) {
483 if (debugernd != nd) {
484 panic("DMA incorrect handling of rx nd->nd_intr");
485 }
486 }
487 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
488 if (debugexnd != nd) {
489 panic("DMA incorrect handling of tx nd->nd_intr");
490 }
491 }
492 #endif
493
494 DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
495 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
496
497 #ifdef DIAGNOSTIC
498 if (!nd->_nd_map) {
499 next_dma_print(nd);
500 panic("DMA missing current map in interrupt!\n");
501 }
502 #endif
503
504 {
505 int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
506
507 #ifdef DIAGNOSTIC
508 if (!(state & DMACSR_COMPLETE)) {
509 next_dma_print(nd);
510 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
511 panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
512 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
513 }
514 #endif
515
516 #if 0 /* This bit gets set sometimes & I don't know why. */
517 #ifdef DIAGNOSTIC
518 if (state & DMACSR_BUSEXC) {
519 next_dma_print(nd);
520 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
521 panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
522 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
523 }
524 #endif
525 #endif
526
527 /* Check to see if we are expecting dma to shut down */
528 if (!nd->_nd_map_cont) {
529
530 #ifdef DIAGNOSTIC
531 #if 1 /* Sometimes the DMA registers have totally bogus values when read.
532 * Until that's understood, we skip this check
533 */
534
535 /* Verify that the registers are laid out as expected */
536 {
537 bus_addr_t next;
538 bus_addr_t limit;
539 bus_addr_t expected_limit;
540 expected_limit =
541 nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
542 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
543
544 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
545 next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
546 limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) & ~0x80000000;
547 } else {
548 next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
549 limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
550 }
551
552 if ((next != limit) || (limit != expected_limit)) {
553 next_dma_print(nd);
554 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
555 panic("unexpected DMA limit at shutdown 0x%08x, 0x%08x, 0x%08x",
556 next,limit,expected_limit);
557 }
558 }
559 #endif
560 #endif
561
562 #if 1
563 #ifdef DIAGNOSTIC
564 if (state & (DMACSR_SUPDATE|DMACSR_ENABLE)) {
565 next_dma_print(nd);
566 panic("DMA: unexpected bits set in DMA state at shutdown (0x%b)\n",
567 state,DMACSR_BITS);
568 }
569 #endif
570 #endif
571
572 if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
573 if (nd->nd_completed_cb)
574 (*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
575 }
576 nd->_nd_map = 0;
577 nd->_nd_idx = 0;
578
579 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
580 DMACSR_CLRCOMPLETE | DMACSR_RESET);
581
582 DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
583 if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
584
585 return(1);
586 }
587
588 #if 0
589 #ifdef DIAGNOSTIC
590 if (!(state & DMACSR_SUPDATE)) {
591 next_dma_print(nd);
592 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
593 panic("SUPDATE not set with continuing DMA");
594 }
595 #endif
596 #endif
597
598 /* Check that the buffer we are interrupted for is the one we expect.
599 * Shorten the buffer if the dma completed with a short buffer
600 */
601 {
602 bus_addr_t next;
603 bus_addr_t limit;
604 bus_addr_t expected_next;
605 bus_addr_t expected_limit;
606
607 expected_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
608 expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
609
610 #if 0 /* for some unknown reason, somtimes DD_SAVED_NEXT has value from
611 * nd->_nd_map and sometimes it has value from nd->_nd_map_cont.
612 * Somtimes, it has a completely different unknown value.
613 * Until that's understood, we won't sanity check the expected_next value.
614 */
615 next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
616 #else
617 next = expected_next;
618 #endif
619 limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
620
621 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
622 limit &= ~0x80000000;
623 }
624
625 if ((limit-next < 0) ||
626 (limit-next >= expected_limit-expected_next)) {
627 #ifdef DIAGNOSTIC
628 #if 0 /* Sometimes, (under load I think) even DD_SAVED_LIMIT has
629 * a bogus value. Until that's understood, we don't panic
630 * here.
631 */
632 next_dma_print(nd);
633 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
634 panic("Unexpected saved registers values.");
635 #endif
636 #endif
637 } else {
638 /* Set the length of the segment to match actual length.
639 * @@@ is it okay to resize dma segments here?
640 * i should probably ask jason about this.
641 */
642 nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit-next;
643 expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
644 }
645
646 #if 0 /* these checks are turned off until the above mentioned weirdness is fixed. */
647 #ifdef DIAGNOSTIC
648 if (next != expected_next) {
649 next_dma_print(nd);
650 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
651 panic("unexpected DMA next buffer in interrupt (found 0x%08x, expected 0x%08x)",
652 next,expected_next);
653 }
654 if (limit != expected_limit) {
655 next_dma_print(nd);
656 printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
657 panic("unexpected DMA limit buffer in interrupt (found 0x%08x, expected 0x%08x)",
658 limit,expected_limit);
659 }
660 #endif
661 #endif
662 }
663
664 next_dma_rotate(nd);
665 next_dma_setup_cont_regs(nd);
666
667 if (!(state & DMACSR_ENABLE)) {
668
669 DPRINTF(("Unexpected DMA shutdown, restarting\n"));
670
671 if (nd->_nd_map_cont) {
672 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
673 DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
674 } else {
675 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
676 DMACSR_SETENABLE | nd->_nd_dmadir);
677 }
678
679 } else {
680
681 if (nd->_nd_map_cont) {
682 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
683 DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
684 } else {
685 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
686 DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
687 }
688 }
689
690 }
691
692 DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
693 NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
694
695 return(1);
696 }
697
698 /*
699 * Check to see if dma has finished for a channel */
700 int
701 nextdma_finished(nd)
702 struct nextdma_config *nd;
703 {
704 int r;
705 int s;
706 s = spldma(); /* @@@ should this be splimp()? */
707 r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
708 splx(s);
709 return(r);
710 }
711
/*
 * Kick off a new DMA transfer on an idle channel.  The channel must
 * have no current or continue map (see nextdma_finished()); the first
 * two buffers are preloaded via the continue callback, the channel is
 * reset, the buffer registers are programmed, and the transfer is
 * enabled in the requested direction.
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;	/* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_READ) && (dmadir != DMACSR_WRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_READ or DMACSR_WRITE\n");
	}
#endif

	nd->_nd_dmadir = dmadir;

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	/* The first rotate must have fetched a map from the owner. */
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate: promote the first map to current, fetch the
	 * continue buffer.
	 */
	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(nd->_nd_dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* Clear the CSR, then reset the channel with the direction set. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | nd->_nd_dmadir);

	/* Program the active and the chaining buffer registers. */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* Enable the channel, asking for a chaining update only when a
	 * continue buffer is queued.
	 */
	if (nd->_nd_map_cont) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | nd->_nd_dmadir);
	}

}
782