nextdma.c revision 1.20.2.1 1 /* $NetBSD: nextdma.c,v 1.20.2.1 2000/11/20 20:18:13 bouyer Exp $ */
2 /*
3 * Copyright (c) 1998 Darrin B. Jewell
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Darrin B. Jewell
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45
46 #include <m68k/cacheops.h>
47
48 #include <next68k/next68k/isr.h>
49
50 #define _NEXT68K_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55
56 #if 1
57 #define ND_DEBUG
58 #endif
59
60 #if defined(ND_DEBUG)
61 int nextdma_debug = 0;
62 #define DPRINTF(x) if (nextdma_debug) printf x;
63 #else
64 #define DPRINTF(x)
65 #endif
66
67 void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
68 bus_size_t, int));
69 int next_dma_continue __P((struct nextdma_config *));
70 void next_dma_rotate __P((struct nextdma_config *));
71
72 void next_dma_setup_cont_regs __P((struct nextdma_config *));
73 void next_dma_setup_curr_regs __P((struct nextdma_config *));
74 void next_dma_finish_xfer __P((struct nextdma_config *));
75
76 void
77 nextdma_config(nd)
78 struct nextdma_config *nd;
79 {
80 /* Initialize the dma_tag. As a hack, we currently
81 * put the dma tag in the structure itself. It shouldn't be there.
82 */
83
84 {
85 bus_dma_tag_t t;
86 t = &nd->_nd_dmat;
87 t->_cookie = nd;
88 t->_dmamap_create = _bus_dmamap_create;
89 t->_dmamap_destroy = _bus_dmamap_destroy;
90 t->_dmamap_load = _bus_dmamap_load_direct;
91 t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
92 t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
93 t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
94 t->_dmamap_unload = _bus_dmamap_unload;
95 t->_dmamap_sync = _bus_dmamap_sync;
96
97 t->_dmamem_alloc = _bus_dmamem_alloc;
98 t->_dmamem_free = _bus_dmamem_free;
99 t->_dmamem_map = _bus_dmamem_map;
100 t->_dmamem_unmap = _bus_dmamem_unmap;
101 t->_dmamem_mmap = _bus_dmamem_mmap;
102
103 nd->nd_dmat = t;
104 }
105
106 nextdma_init(nd);
107
108 isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
109 INTR_ENABLE(nd->nd_intr);
110 }
111
/*
 * Reset a DMA channel: drop any current/continue map state, pulse
 * RESET|INITBUF through the CSR, and reload the channel registers.
 * Leaves the channel idle and ready for nextdma_start().
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	/* Forget any maps left over from a previous transfer. */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* Clear the CSR first, then pulse reset and buffer-init. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			  DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
	/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
	 * milo (a 25Mhz 68040 mono cube) didn't have this problem
	 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
	 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any of these bits still set means the reset didn't take. */
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}
161
162
/*
 * Hard-reset a DMA channel at spldma, dumping debug state first when
 * tracing is on.  Any in-flight maps are simply abandoned.
 */
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug)
		next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}
181
182 /****************************************************************/
183
184
185 /* Call the completed and continue callbacks to try to fill
186 * in the dma continue buffers.
187 */
188 void
189 next_dma_rotate(nd)
190 struct nextdma_config *nd;
191 {
192
193 DPRINTF(("DMA next_dma_rotate()\n"));
194
195 /* Rotate the continue map into the current map */
196 nd->_nd_map = nd->_nd_map_cont;
197 nd->_nd_idx = nd->_nd_idx_cont;
198
199 if ((!nd->_nd_map_cont) ||
200 ((nd->_nd_map_cont) &&
201 (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
202 if (nd->nd_continue_cb) {
203 nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
204 } else {
205 nd->_nd_map_cont = 0;
206 }
207 nd->_nd_idx_cont = 0;
208 }
209
210 #ifdef DIAGNOSTIC
211 if (nd->_nd_map) {
212 nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0x1234beef;
213 }
214 #endif
215
216 #ifdef DIAGNOSTIC
217 if (nd->_nd_map_cont) {
218 if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
219 next_dma_print(nd);
220 panic("DMA request unaligned at start\n");
221 }
222 if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
223 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
224 next_dma_print(nd);
225 panic("DMA request unaligned at end\n");
226 }
227 }
228 #endif
229
230 }
231
/*
 * Load the channel's "continue" register pair (start/stop, plus the
 * saved copies) from the current segment of the continue map.  With
 * no continue map pending, the registers are parked on a poison
 * value instead.
 */
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop  = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
			    nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		/* No pending map: recognizable garbage, never started. */
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	/* Read the registers back to catch a wedged or misdecoded device. */
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}
274
/*
 * Load the channel's "current" register set (next/limit, plus the
 * saved copies) from the current segment of the current map.  With
 * no current map, the registers are parked on a poison value.
 */
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
			    nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	/* NOTE(review): the ethernet-transmit channel takes its next
	 * pointer through DD_NEXT_INITBUF rather than DD_NEXT; the
	 * diagnostic below reads both back and expects them equal, so
	 * the hardware presumably mirrors the two — confirm against
	 * NeXT hardware documentation.
	 */
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* Read back to catch a wedged or misdecoded device. */
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}
322
323
324 /* This routine is used for debugging */
325
326 void
327 next_dma_print(nd)
328 struct nextdma_config *nd;
329 {
330 u_long dd_csr;
331 u_long dd_next;
332 u_long dd_next_initbuf;
333 u_long dd_limit;
334 u_long dd_start;
335 u_long dd_stop;
336 u_long dd_saved_next;
337 u_long dd_saved_limit;
338 u_long dd_saved_start;
339 u_long dd_saved_stop;
340 char sbuf[256];
341
342 /* Read all of the registers before we print anything out,
343 * in case something changes
344 */
345 dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
346 dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
347 dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
348 dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
349 dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
350 dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
351 dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
352 dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
353 dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
354 dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);
355
356 bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
357 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
358 printf("NDMAP: *intrstat = 0x%s\n", sbuf);
359
360 bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
361 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
362 printf("NDMAP: *intrmask = 0x%s\n", sbuf);
363
364 /* NDMAP is Next DMA Print (really!) */
365
366 if (nd->_nd_map) {
367 printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
368 nd->_nd_map->dm_mapsize);
369 printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
370 nd->_nd_map->dm_nsegs);
371 printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
372 nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
373 printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
374 nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
375 printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
376 nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
377 } else {
378 printf("NDMAP: nd->_nd_map = NULL\n");
379 }
380 if (nd->_nd_map_cont) {
381 printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
382 nd->_nd_map_cont->dm_mapsize);
383 printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
384 nd->_nd_map_cont->dm_nsegs);
385 printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
386 nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
387 printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
388 nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
389 printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
390 nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
391 } else {
392 printf("NDMAP: nd->_nd_map_cont = NULL\n");
393 }
394
395 bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
396 printf("NDMAP: dd->dd_csr = 0x%s\n", sbuf);
397
398 printf("NDMAP: dd->dd_saved_next = 0x%08x\n", dd_saved_next);
399 printf("NDMAP: dd->dd_saved_limit = 0x%08x\n", dd_saved_limit);
400 printf("NDMAP: dd->dd_saved_start = 0x%08x\n", dd_saved_start);
401 printf("NDMAP: dd->dd_saved_stop = 0x%08x\n", dd_saved_stop);
402 printf("NDMAP: dd->dd_next = 0x%08x\n", dd_next);
403 printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
404 printf("NDMAP: dd->dd_limit = 0x%08x\n", dd_limit);
405 printf("NDMAP: dd->dd_start = 0x%08x\n", dd_start);
406 printf("NDMAP: dd->dd_stop = 0x%08x\n", dd_stop);
407
408 bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
409 sbuf, sizeof(sbuf));
410 printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
411 NEXT_I_IPL(nd->nd_intr), sbuf);
412 }
413
414 /****************************************************************/
/*
 * A segment transfer has completed.  Work out how many bytes the
 * hardware actually moved, record that in the segment's ds_xfer_len,
 * and fire the completed callback once the whole map is done.  On
 * exit the current map/index are cleared; the caller rotates in the
 * continue map.
 */
void
next_dma_finish_xfer(nd)
	struct nextdma_config *nd;
{
	bus_addr_t onext;	/* bus address where this segment began */
	bus_addr_t olimit;	/* bus address just past this segment */
	bus_addr_t slimit;	/* where the hardware actually stopped */

	onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
	olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

	/* On the final segment with nothing queued behind it the chip
	 * stopped on the live limit register; otherwise the completed
	 * transfer's end is in the saved limit.
	 */
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	} else {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	}

	/* Strip the magic bit that ethernet-transmit limits carry
	 * (see next_dma_setup_curr_regs()).
	 */
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
	}

#ifdef DIAGNOSTIC
	/* The stop point must fall within the segment we programmed. */
	if ((slimit < onext) || (slimit > olimit)) {
		next_dma_print(nd);
		panic("DMA: Unexpected registers in finish_xfer\n");
	}
#endif

	nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 * (_nd_map is known non-NULL here — it was dereferenced above.)
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}
	nd->_nd_map = 0;
	nd->_nd_idx = 0;
}
455
456
/*
 * Autovectored interrupt handler for a DMA channel.  Finishes the
 * just-completed segment, then either shuts the channel down (no more
 * maps queued) or rotates the continue map in and re-arms the chip.
 * Returns 0 if the interrupt was not ours, 1 if handled.
 */
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	/* An interrupt with no current map means we lost track of state. */
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		/* We only expect a COMPLETE interrupt with SUPDATE clear. */
		if ((!(state & DMACSR_COMPLETE)) || (state & DMACSR_SUPDATE)) {
			char sbuf[256];

			next_dma_print(nd);

			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			panic("DMA Unexpected dma state in interrupt (0x%s)", sbuf);
		}
#endif

		/* Account for the completed segment; clears _nd_map/_nd_idx. */
		next_dma_finish_xfer(nd);

		/* Check to see if we are expecting dma to shut down */
		if ((nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL)) {

#ifdef DIAGNOSTIC
			/* The chip should not still be enabled with no work left. */
			if (state & DMACSR_ENABLE) {
				char sbuf[256];

				next_dma_print(nd);

				bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
				panic("DMA: unexpected DMA state at shutdown (0x%s)\n", sbuf);
			}
#endif
			/* Ack the interrupt and put the channel back in reset. */
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					  DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

		/* More work queued: promote the continue map and reload
		 * the continue registers for the segment after it.
		 */
		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		{
			u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

			/* Preserve the direction the transfer was started with. */
			if (state & DMACSR_READ) {
				dmadir = DMACSR_SETREAD;
			} else {
				dmadir = DMACSR_SETWRITE;
			}

			/* we used to SETENABLE here only
			 * conditionally, but we got burned
			 * because DMA sometimes would shut
			 * down between when we checked and
			 * when we acted upon it.  CL19991211
			 */
			if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
				/* Last segment: no chaining, just re-enable. */
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						  DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETENABLE);
			} else {
				/* More segments follow: also arm the chain update. */
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						  DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE | DMACSR_SETENABLE);
			}

		}

	}

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	return(1);
}
569
570 /*
571 * Check to see if dma has finished for a channel */
572 int
573 nextdma_finished(nd)
574 struct nextdma_config *nd;
575 {
576 int r;
577 int s;
578 s = spldma(); /* @@@ should this be splimp()? */
579 r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
580 splx(s);
581 return(r);
582 }
583
/*
 * Kick off a DMA transfer on an idle channel.  dmadir selects the
 * direction (DMACSR_SETREAD or DMACSR_SETWRITE).  The first map is
 * pulled from the client's continue callback via next_dma_rotate();
 * the channel must be finished (see nextdma_finished()) before this
 * is called.
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
		    NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	/* The first rotate must have produced at least one map to run. */
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate: continue map becomes current, next one queued. */
	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		    (dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

	/* Clear the CSR, then pulse reset/buffer-init with the direction. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			  DMACSR_INITBUF | DMACSR_RESET | dmadir);

	/* Program the segment addresses before enabling the channel. */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* Only arm chained (SUPDATE) mode when more segments follow. */
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				  DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				  DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
669