/* $NetBSD: nextdma.c,v 1.20 1999/08/29 05:56:26 dbj Exp $ */
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45
46 #include <m68k/cacheops.h>
47
48 #include <next68k/next68k/isr.h>
49
50 #define _NEXT68K_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55
#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
/* Wrap the conditional in do/while(0) so that `DPRINTF((...));` is a
 * single statement.  The old form `if (...) printf x;` (with a trailing
 * semicolon in the macro) broke when used as the body of an if/else:
 * the stray `;` made a following `else` a syntax error, and without the
 * trailing `;` the `else` would silently bind to the macro's own `if`.
 */
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x) do { } while (0)
#endif
66
67 void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
68 bus_size_t, int));
69 int next_dma_continue __P((struct nextdma_config *));
70 void next_dma_rotate __P((struct nextdma_config *));
71
72 void next_dma_setup_cont_regs __P((struct nextdma_config *));
73 void next_dma_setup_curr_regs __P((struct nextdma_config *));
74 void next_dma_finish_xfer __P((struct nextdma_config *));
75
/*
 * One-time setup of a DMA channel: build this channel's bus_dma tag,
 * reset the hardware via nextdma_init(), then register and enable the
 * autovectored interrupt handler at the channel's interrupt level.
 */
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		/* All map loads go through the "direct" (non-bouncing)
		 * variants of the next68k bus_dma back end.
		 */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	nextdma_init(nd);

	/* NOTE(review): the final argument (10) is presumably the handler
	 * priority within the autovector chain — confirm against
	 * next68k/next68k/isr.h.
	 */
	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
111
/*
 * (Re)initialize a DMA channel: forget any in-progress software map
 * state, reset the hardware, and load sentinel values into both
 * register sets.  Also called from nextdma_reset().
 */
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* Drop the current and continue map bookkeeping. */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	/* Clear the CSR, then pulse reset and buffer-init. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
	    DMACSR_RESET | DMACSR_INITBUF);

	/* With both maps NULL these load 0xdeadbeef sentinels into the
	 * address registers.
	 */
	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25Mhz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell (at) mit.edu> Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
		    DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any of these bits still set means the reset didn't take. */
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}
153
154
/*
 * Reset a DMA channel back to its freshly-initialized state.
 * Runs at spldma() so the interrupt handler cannot intervene.
 */
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug)
		next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}
173
174 /****************************************************************/
175
176
177 /* Call the completed and continue callbacks to try to fill
178 * in the dma continue buffers.
179 */
180 void
181 next_dma_rotate(nd)
182 struct nextdma_config *nd;
183 {
184
185 DPRINTF(("DMA next_dma_rotate()\n"));
186
187 /* Rotate the continue map into the current map */
188 nd->_nd_map = nd->_nd_map_cont;
189 nd->_nd_idx = nd->_nd_idx_cont;
190
191 if ((!nd->_nd_map_cont) ||
192 ((nd->_nd_map_cont) &&
193 (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
194 if (nd->nd_continue_cb) {
195 nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
196 } else {
197 nd->_nd_map_cont = 0;
198 }
199 nd->_nd_idx_cont = 0;
200 }
201
202 #ifdef DIAGNOSTIC
203 if (nd->_nd_map) {
204 nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0x1234beef;
205 }
206 #endif
207
208 #ifdef DIAGNOSTIC
209 if (nd->_nd_map_cont) {
210 if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
211 next_dma_print(nd);
212 panic("DMA request unaligned at start\n");
213 }
214 if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
215 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
216 next_dma_print(nd);
217 panic("DMA request unaligned at end\n");
218 }
219 }
220 #endif
221
222 }
223
224 void
225 next_dma_setup_cont_regs(nd)
226 struct nextdma_config *nd;
227 {
228 bus_addr_t dd_start;
229 bus_addr_t dd_stop;
230 bus_addr_t dd_saved_start;
231 bus_addr_t dd_saved_stop;
232
233 DPRINTF(("DMA next_dma_setup_regs()\n"));
234
235 if (nd->_nd_map_cont) {
236 dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
237 dd_stop = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
238 nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
239
240 if (nd->nd_intr == NEXT_I_ENETX_DMA) {
241 dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
242 }
243 } else {
244 dd_start = 0xdeadbeef;
245 dd_stop = 0xdeadbeef;
246 }
247
248 dd_saved_start = dd_start;
249 dd_saved_stop = dd_stop;
250
251 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
252 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
253 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
254 bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);
255
256 #ifdef DIAGNOSTIC
257 if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
258 (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
259 (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
260 (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
261 next_dma_print(nd);
262 panic("DMA failure writing to continue regs");
263 }
264 #endif
265 }
266
/*
 * Program the "current" register set (NEXT/LIMIT plus their saved
 * shadows) from the current map's segment, or with 0xdeadbeef
 * sentinels when there is no current map.
 */
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
		    nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		/* Recognizable sentinel for "no current map loaded". */
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	/* The ethernet-transmit channel takes its start address through
	 * DD_NEXT_INITBUF instead of DD_NEXT.
	 */
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* NOTE(review): this reads back both DD_NEXT_INITBUF and DD_NEXT
	 * and expects both to equal dd_next, although only one of them was
	 * written above — presumably the hardware aliases the two; confirm
	 * on real hardware before relying on it.
	 */
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}
314
315
/* This routine is used for debugging: it dumps the interrupt status/mask,
 * the software map state, and every DMA register of the channel.
 */
void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	printf("NDMAP: *intrstat = 0x%b\n",
	    (*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),NEXT_INTR_BITS);
	printf("NDMAP: *intrmask = 0x%b\n",
	    (*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),NEXT_INTR_BITS);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
		    nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
		    nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		    nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
		    nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
		    nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
		    nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
		    nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		    nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
		    nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
		    nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr = 0x%b\n", dd_csr, DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
}
397
398 /****************************************************************/
/*
 * Account for the segment the hardware just finished: compute how many
 * bytes actually moved (recorded in ds_xfer_len), invoke the completed
 * callback when the whole current map is done, and retire the current
 * map/index.
 */
void
next_dma_finish_xfer(nd)
	struct nextdma_config *nd;
{
	bus_addr_t onext;	/* segment start, as programmed */
	bus_addr_t olimit;	/* segment end, as programmed */
	bus_addr_t slimit;	/* address where the hardware stopped */

	onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
	olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

	/* On the final segment with no continue map pending, read the live
	 * limit register; otherwise the saved limit holds the stop address.
	 * (Presumably the chip only latches SAVED_LIMIT when chaining —
	 * the DIAGNOSTIC below bounds-checks the value either way.)
	 */
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	} else {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	}

	/* Strip the ethernet-transmit magic bit that was OR'ed into the
	 * limit when the registers were programmed.
	 */
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
	}

#ifdef DIAGNOSTIC
	/* The stop address must fall within the segment we programmed. */
	if ((slimit < onext) || (slimit > olimit)) {
		next_dma_print(nd);
		panic("DMA: Unexpected registers in finish_xfer\n");
	}
#endif

	/* Bytes actually transferred for this segment. */
	nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}
	nd->_nd_map = 0;
	nd->_nd_idx = 0;
}
439
440
/*
 * Autovectored interrupt handler for a DMA channel.  Returns 0 when the
 * interrupt was not for this channel, 1 once it has been handled.
 * Retires the just-finished segment, then either shuts the channel down
 * (no more maps) or rotates to the next segment and restarts/continues
 * the hardware.
 */
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us. For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	/* An interrupt with no current map means bookkeeping went wrong. */
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		/* We expect a completed transfer and no pending supdate. */
		if ((!(state & DMACSR_COMPLETE)) || (state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			panic("DMA Unexpected dma state in interrupt (0x%b)",state,DMACSR_BITS);
		}
#endif

		/* Record the finished segment and retire the current map. */
		next_dma_finish_xfer(nd);

		/* Check to see if we are expecting dma to shut down */
		if ((nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL)) {

#ifdef DIAGNOSTIC
			/* The engine should already have stopped itself. */
			if (state & DMACSR_ENABLE) {
				next_dma_print(nd);
				panic("DMA: unexpected DMA state at shutdown (0x%b)\n",
				    state,DMACSR_BITS);
			}
#endif
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			    DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

		/* More work queued: advance the maps and reprogram the
		 * continue register set for the next segment.
		 */
		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		{
			u_long dmadir; /* DMACSR_SETREAD or DMACSR_SETWRITE */

			/* Preserve the direction the channel was started with. */
			if (state & DMACSR_READ) {
				dmadir = DMACSR_SETREAD;
			} else {
				dmadir = DMACSR_SETWRITE;
			}

			if (state & DMACSR_ENABLE) {

				/* Engine still running: only request a
				 * supdate when a further segment follows.
				 */
				if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					    DMACSR_CLRCOMPLETE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					    DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
				}

			} else {

				/* Engine stopped although more work remains:
				 * re-enable it.
				 */
#if (defined(ND_DEBUG))
				if (nextdma_debug) next_dma_print(nd);
#endif
#if 0 && defined(DIAGNOSTIC)
				printf("DMA: Unexpected shutdown, restarting intr(0x%b)\n",
				    NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
#endif

				if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					    DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETENABLE);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					    DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE | DMACSR_SETENABLE);
				}
			}
		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	return(1);
}
545
546 /*
547 * Check to see if dma has finished for a channel */
548 int
549 nextdma_finished(nd)
550 struct nextdma_config *nd;
551 {
552 int r;
553 int s;
554 s = spldma(); /* @@@ should this be splimp()? */
555 r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
556 splx(s);
557 return(r);
558 }
559
/*
 * Start DMA on an idle channel in the given direction (DMACSR_SETREAD
 * or DMACSR_SETWRITE).  Preloads both the current and continue maps via
 * the continue callback, resets the channel, programs both register
 * sets, and enables the engine.  Panics (DIAGNOSTIC) if a transfer is
 * still outstanding or no map is available.
 */
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir; /* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
		    NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
	    NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate makes the first map current and fetches a second
	 * map (if any) into the continue slot.
	 */
	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
	    (dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs,
	    NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* Reset the channel before programming the register sets. */
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
	    DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* Only request a supdate interrupt when a further segment will
	 * follow the current one.
	 */
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
		    DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
		    DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
627