nextdma.c revision 1.30 1 /* $NetBSD: nextdma.c,v 1.30 2002/07/11 16:03:12 christos Exp $ */
2 /*
3 * Copyright (c) 1998 Darrin B. Jewell
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Darrin B. Jewell
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45
46 #include <m68k/cacheops.h>
47
48 #include <next68k/next68k/isr.h>
49
50 #define _NEXT68K_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55
56 #if 1
57 #define ND_DEBUG
58 #endif
59
60 /* #define panic __asm __volatile("trap #15"); printf */
61
62 #define NEXTDMA_DEBUG nd->nd_continue_cb == esp_dmacb_continue && nextdma_debug
63 #if defined(ND_DEBUG)
64 int nextdma_debug = 0;
65 bus_dmamap_t esp_dmacb_continue __P((void *));
66 #define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
67 #else
68 #define DPRINTF(x)
69 #endif
70 #define PRINTF printf
71 extern char *esplogp, *esplog;
72 #define ESPLOGIF (10 && nd->nd_intr == NEXT_I_SCSI_DMA && esplogp < (esplog + 8192))
73
74 #if defined(ND_DEBUG)
75 int nextdma_debug_enetr_idx = 0;
76 unsigned int nextdma_debug_enetr_state[100] = { 0 };
77 int nextdma_debug_scsi_idx = 0;
78 unsigned int nextdma_debug_scsi_state[100] = { 0 };
79
80 void nextdma_debug_initstate(struct nextdma_config *nd);
81 void nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state);
82 void nextdma_debug_scsi_dumpstate(void);
83 void nextdma_debug_enetr_dumpstate(void);
84
85 void
86 nextdma_debug_initstate(struct nextdma_config *nd)
87 {
88 switch(nd->nd_intr) {
89 case NEXT_I_ENETR_DMA:
90 memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
91 break;
92 case NEXT_I_SCSI_DMA:
93 memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
94 break;
95 }
96 }
97
98 void
99 nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state)
100 {
101 switch(nd->nd_intr) {
102 case NEXT_I_ENETR_DMA:
103 nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
104 nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
105 break;
106 case NEXT_I_SCSI_DMA:
107 nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
108 nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
109 break;
110 }
111 }
112
113 void
114 nextdma_debug_enetr_dumpstate(void)
115 {
116 int i;
117 int s;
118 s = spldma();
119 i = nextdma_debug_enetr_idx;
120 do {
121 char sbuf[256];
122 if (nextdma_debug_enetr_state[i]) {
123 bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
124 printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
125 }
126 i++;
127 i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
128 } while (i != nextdma_debug_enetr_idx);
129 splx(s);
130 }
131
132 void
133 nextdma_debug_scsi_dumpstate(void)
134 {
135 int i;
136 int s;
137 s = spldma();
138 i = nextdma_debug_scsi_idx;
139 do {
140 char sbuf[256];
141 if (nextdma_debug_scsi_state[i]) {
142 bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
143 printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
144 }
145 i++;
146 i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
147 } while (i != nextdma_debug_scsi_idx);
148 splx(s);
149 }
150 #endif
151
152
153 void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
154 bus_size_t, int));
155 int next_dma_continue __P((struct nextdma_config *));
156 void next_dma_rotate __P((struct nextdma_config *));
157
158 void next_dma_setup_cont_regs __P((struct nextdma_config *));
159 void next_dma_setup_curr_regs __P((struct nextdma_config *));
160
/*
 * One-time configuration of a NeXT DMA channel: build the channel's
 * bus_dma tag, hook nextdma_intr onto the autovectored interrupt chain
 * at the channel's IPL, unmask the interrupt, and reset the channel to
 * an idle state via nextdma_init().
 */
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		/* Map management uses the machine-independent direct-mapped
		 * implementations from the next68k bus_dma back end. */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* Priority 10 on the autovector chain; the handler itself checks
	 * INTR_OCCURRED to reject interrupts that are not for this channel. */
	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);

	nextdma_init(nd);

}
197
/*
 * Reset a DMA channel to an idle state: forget any current/continue
 * maps, pulse RESET|INITBUF through the CSR, and poison the address
 * registers.  Under DIAGNOSTIC, read the CSR back and panic if the
 * channel did not actually go quiescent.
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	/* Drop any in-flight map bookkeeping; callers are responsible for
	 * their dmamaps. */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* Clear the CSR first, then issue the reset + buffer init. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	/* With both maps NULL these write the 0xdeadbeef poison values. */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
	/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
	 * milo (a 25Mhz 68040 mono cube) didn't have this problem
	 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
	 */
		/* BUSEXC deliberately excluded from the check; see above. */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}
247
248
/*
 * Abort whatever transfer the channel is doing: pulse CLRCOMPLETE|RESET
 * through the CSR and, if a current or continue map was outstanding,
 * run the completed/shutdown callbacks so the owning driver can reclaim
 * its dmamaps.  Runs at spldma to keep nextdma_intr out.
 */
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) next_dma_print(nd);
#endif

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((nd->_nd_map) || (nd->_nd_map_cont)) {
		/* @@@ clean up dma maps */
		/* panic("DMA abort not implemented\n"); */
		if (nd->_nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			/* Hand the unfinished continue map back to its owner. */
			if (nd->nd_completed_cb)
				(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);

			nd->_nd_map_cont = 0;
			nd->_nd_idx_cont = 0;
		}
		/* Tell the owner the channel has shut down before clearing
		 * the current map. */
		if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
		nd->_nd_map = 0;
		nd->_nd_idx = 0;
	}

	/* nextdma_init(nd); */
	splx(s);
}
282
283 /****************************************************************/
284
285
286 /* Call the completed and continue callbacks to try to fill
287 * in the dma continue buffers.
288 */
289 void
290 next_dma_rotate(nd)
291 struct nextdma_config *nd;
292 {
293
294 if (ESPLOGIF) *esplogp++ = 'r';
295 DPRINTF(("DMA next_dma_rotate()\n"));
296
297 /* Rotate the continue map into the current map */
298 nd->_nd_map = nd->_nd_map_cont;
299 nd->_nd_idx = nd->_nd_idx_cont;
300
301 if ((!nd->_nd_map_cont) ||
302 ((nd->_nd_map_cont) &&
303 (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
304 if (nd->nd_continue_cb) {
305 nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
306 if (nd->_nd_map_cont) {
307 nd->_nd_map_cont->dm_xfer_len = 0;
308 }
309 } else {
310 nd->_nd_map_cont = 0;
311 }
312 nd->_nd_idx_cont = 0;
313 }
314
315 #if defined(DIAGNOSTIC) && 0
316 if (nd->_nd_map_cont) {
317 if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
318 next_dma_print(nd);
319 panic("DMA request unaligned at start\n");
320 }
321 if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
322 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
323 next_dma_print(nd);
324 panic("DMA request unaligned at end\n");
325 }
326 }
327 #endif
328
329 }
330
331 void
332 next_dma_setup_cont_regs(nd)
333 struct nextdma_config *nd;
334 {
335 bus_addr_t dd_start;
336 bus_addr_t dd_stop;
337 bus_addr_t dd_saved_start;
338 bus_addr_t dd_saved_stop;
339
340 if (ESPLOGIF) *esplogp++ = 'c';
341 DPRINTF(("DMA next_dma_setup_regs()\n"));
342
343 if (nd->_nd_map_cont) {
344 dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
345 dd_stop = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
346 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
347
348 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
349 dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
350 dd_stop += 15;
351 }
352 } else {
353 dd_start = 0xdeadbeef;
354 dd_stop = 0xdeadbeef;
355 }
356
357 dd_saved_start = dd_start;
358 dd_saved_stop = dd_stop;
359
360 if (nd->_nd_map_cont && ESPLOGIF) {
361 sprintf (esplogp, "%ld", nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
362 esplogp += strlen (esplogp);
363 }
364
365 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
366 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
367 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
368 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);
369
370 #ifdef DIAGNOSTIC
371 if ( (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start)
372 || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop)
373 || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start)
374 || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)
375 ) {
376 next_dma_print(nd);
377 panic("DMA failure writing to continue regs");
378 }
379 #endif
380 }
381
/*
 * Load the channel's current register set (NEXT/LIMIT plus their saved
 * shadows) from the current-map segment, or poison all four with
 * 0xdeadbeef when no current map is loaded.  Ethernet transmit writes
 * its next address through DD_NEXT_INITBUF rather than DD_NEXT, and
 * needs its limit tagged with the magic high bit and padded by 15.
 */
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	if (ESPLOGIF) *esplogp++ = 'C';
	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	/* Shadow copies mirror the live values, magic bits included. */
	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->_nd_map && ESPLOGIF) {
		sprintf (esplogp, "%ld", nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		esplogp += strlen (esplogp);
	}

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* NOTE(review): both DD_NEXT_INITBUF and DD_NEXT are compared
	 * against dd_next even though only one was written above —
	 * presumably the two registers alias in hardware; verify against
	 * the NeXT DMA documentation before touching this. */
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)
		) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}
438
439
440 /* This routine is used for debugging */
441
/*
 * Debugging aid: snapshot all ten DMA channel registers and dump them,
 * the interrupt status/mask registers, and the full current and
 * continue dmamaps to the console.  Safe to call from panic paths.
 */
void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	/* System-wide interrupt status and mask, decoded symbolically. */
	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
	    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
	    NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	/* Current map: summary, active segment, then every segment. */
	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %ld\n",
			nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
			nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_xfer_len = %ld\n",
			nd->_nd_map->dm_xfer_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %ld\n",
			nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		{
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<nd->_nd_map->dm_nsegs;i++) {
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
					i,nd->_nd_map->dm_segs[i].ds_addr);
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_len = %ld\n",
					i,nd->_nd_map->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	/* Continue map: same layout; skip the full dump if it is the same
	 * map object as the current one. */
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %ld\n",
			nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
			nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_xfer_len = %ld\n",
			nd->_nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
			nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %ld\n",
			nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		if (nd->_nd_map_cont != nd->_nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<nd->_nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
					i,nd->_nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_len = %ld\n",
					i,nd->_nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	/* Raw register snapshot taken at the top of this function. */
	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
	    sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
		NEXT_I_IPL(nd->nd_intr), sbuf);
}
549
550 /****************************************************************/
551
/*
 * Autovectored interrupt handler for a DMA channel.  Returns 0 if the
 * interrupt was not raised by this channel, 1 once serviced.
 *
 * On each COMPLETE interrupt: account the number of bytes actually
 * transferred into the current map (derived from the hardware limit
 * registers), run the owner's completed callback when the last segment
 * of a map finishes, then either kick off the next segment (ENABLE
 * still set) or shut the channel down and run the shutdown callback.
 */
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#if 01
	/* SCSI DMA interrupts are delegated wholesale to the esp driver. */
	if (nd->nd_intr == NEXT_I_SCSI_DMA) {
		int esp_dma_int __P((void *));
		return esp_dma_int (nd->nd_cb_arg);
	}
#endif

	if (ESPLOGIF) *esplogp++ = 'D';
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		unsigned int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if defined(ND_DEBUG)
		nextdma_debug_savestate(nd,state);
#endif

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			char sbuf[256];
			next_dma_print(nd);
			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: state 0x%s\n",sbuf);
			panic("DMA complete not set in interrupt\n");
		}
#endif

		/* Finish up the segment that just completed: figure out how
		 * far the hardware actually got (slimit) and account it. */
		{
			bus_addr_t onext;	/* segment start we programmed */
			bus_addr_t olimit;	/* segment end we programmed */
			bus_addr_t slimit;	/* where the hardware stopped */

			DPRINTF(("DMA: finishing xfer\n"));

			onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

			/* Which of the several limit registers holds the true
			 * stopping point depends on the CSR state; encode the
			 * relevant bits into a small case number. */
			{
				int result = 0;
				if (state & DMACSR_ENABLE) {
					/* enable bit was set */
					result |= 0x01;
				}
				if (state & DMACSR_SUPDATE) {
					/* supdate bit was set */
					result |= 0x02;
				}
				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					/* Expecting a shutdown, didn't SETSUPDATE last turn */
					result |= 0x04;
				}
				if (state & DMACSR_BUSEXC) {
					/* bus exception bit was set */
					result |= 0x08;
				}
				switch (result) {
				case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
				case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					} else {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					}
					break;
				case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
				case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
						bus_addr_t snext;
						snext = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
						/* If the saved next has moved past our
						 * segment start, the whole segment went. */
						if (snext != onext) {
							slimit = olimit;
						} else {
							slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
						}
					} else {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					}
					break;
				case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
				case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
					break;
				case 0x04: /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
				case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					break;
				default:
#ifdef DIAGNOSTIC
					{
						char sbuf[256];
						printf("DMA: please send this output to port-next68k-maintainer (at) netbsd.org:\n");
						bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
						printf("DMA: state 0x%s\n",sbuf);
						next_dma_print(nd);
						panic("DMA: condition 0x%02x not yet documented to occur\n",result);
					}
#endif
					slimit = olimit;
					break;
				}
			}

			/* Strip the ethernet-transmit magic applied in
			 * next_dma_setup_curr_regs() before doing arithmetic. */
			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				slimit &= ~0x80000000;
				slimit -= 15;
			}

#ifdef DIAGNOSTIC
			/* Stopping point must lie within the programmed segment. */
			if ((slimit < onext) || (slimit > olimit)) {
				char sbuf[256];
				bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
				printf("DMA: state 0x%s\n",sbuf);
				next_dma_print(nd);
				panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer\n",slimit);
			}
#endif

#ifdef DIAGNOSTIC
			/* A mid-map segment should always complete in full when
			 * the channel is still enabled. */
			if ((state & DMACSR_ENABLE) && ((nd->_nd_idx+1) != nd->_nd_map->dm_nsegs)) {
				if (slimit != olimit) {
					char sbuf[256];
					bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
					printf("DMA: state 0x%s\n",sbuf);
					next_dma_print(nd);
					panic("DMA: short limit register (0x%08lx) w/o finishing map.\n",slimit);
				}
			}
#endif

#if (defined(ND_DEBUG))
			if (NEXTDMA_DEBUG > 2) next_dma_print(nd);
#endif

			/* Account the bytes actually moved for this segment. */
			nd->_nd_map->dm_xfer_len += slimit-onext;

			/* If we've reached the end of the current map, then inform
			 * that we've completed that map.
			 */
			if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			} else {
				KASSERT(nd->_nd_map == nd->_nd_map_cont);
				KASSERT(nd->_nd_idx+1 == nd->_nd_idx_cont);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;
		}

		if (NEXTDMA_DEBUG) {
			char sbuf[256];
			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
		}
		if (state & DMACSR_ENABLE) {
			/* Channel still running: queue up the next segment and
			 * acknowledge the interrupt, preserving the transfer
			 * direction the hardware reports. */

			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			{
				u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					/* Last segment: don't SETSUPDATE, so the
					 * channel shuts down after this one. */
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir);
					if (ESPLOGIF) *esplogp++ = 'g';
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
					if (ESPLOGIF) *esplogp++ = 'G';
				}
			}

		} else {

			DPRINTF(("DMA: a shutdown occurred\n"));
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			/* Cleanup more incomplete transfers */
#if 1
			/* cleanup continue map */
			if (nd->_nd_map_cont) {
				DPRINTF(("DMA: shutting down with non null continue map\n"));
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);

				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}
#else
			/* Do an automatic dma restart */
			if (nd->_nd_map_cont) {
				u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

				next_dma_rotate(nd);

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_INITBUF | DMACSR_RESET | dmadir);

				next_dma_setup_curr_regs(nd);
				next_dma_setup_cont_regs(nd);

				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETENABLE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
				}
				return 1;
			}
#endif
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
		}
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	return(1);
}
827
828 /*
829 * Check to see if dma has finished for a channel */
830 int
831 nextdma_finished(nd)
832 struct nextdma_config *nd;
833 {
834 int r;
835 int s;
836 s = spldma(); /* @@@ should this be splimp()? */
837 r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
838 splx(s);
839 return(r);
840 }
841
/*
 * Start a new DMA transfer on an idle channel in the given direction
 * (dmadir is DMACSR_SETREAD or DMACSR_SETWRITE).  Preloads both the
 * current and continue maps from the owner's continue callback, resets
 * and programs the channel registers, then enables the channel —
 * with SETSUPDATE only when a continue map is queued behind the
 * current one.  Panics (DIAGNOSTIC) if a transfer is already active.
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

	if (ESPLOGIF) *esplogp++ = 'n';
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nd);
#endif

	/* preload both the current and the continue maps */
	/* First rotate pulls a map into the continue slot... */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* ...second rotate promotes it to current and tries to queue
	 * another behind it. */
	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

	/* Quiesce and reinitialize the channel before programming it. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) next_dma_print(nd);
#endif

	/* SETSUPDATE only when a continue map is queued, so the hardware
	 * knows whether to chain to the continue registers or shut down. */
	if (nd->_nd_map_cont == NULL) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
932