/*	$NetBSD: nextdma.c,v 1.21.4.1 2001/06/16 20:30:47 he Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _NEXT68K_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) if (nextdma_debug) printf x;
#else
#define DPRINTF(x)
#endif
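
/* Set nextdma_debug to a non-zero value (e.g. from a kernel debugger)
 * to enable the DPRINTF traces above.
 */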

void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));
void next_dma_finish_xfer __P((struct nextdma_config *));

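/*
 * nextdma_config() is the setup entry point: it fills in the bus_dma_tag
 * embedded in the nextdma_config structure, resets the channel via
 * nextdma_init(), and hooks nextdma_intr() onto the channel's autovectored
 * interrupt.  The caller is expected to have filled in nd_bst, nd_bsh,
 * nd_intr and the callback fields beforehand.
 *
 * Rough usage sketch (the mydev_* callbacks and "sc" are hypothetical,
 * not defined in this file):
 *
 *	nd->nd_continue_cb  = mydev_dma_continue;
 *	nd->nd_completed_cb = mydev_dma_completed;
 *	nd->nd_shutdown_cb  = mydev_dma_shutdown;
 *	nd->nd_cb_arg       = sc;
 *	nextdma_config(nd);
 *	...
 *	nextdma_start(nd, DMACSR_SETREAD);	(or DMACSR_SETWRITE)
 */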
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
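
/*
 * Reset the DMA channel and clear the driver's notion of the current
 * and continue maps.  Called from nextdma_config() and nextdma_reset().
 */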
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
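/*
 * _nd_map/_nd_idx track the segment the hardware is currently working on;
 * _nd_map_cont/_nd_idx_cont track the segment queued up in the continue
 * (DD_START/DD_STOP) registers.  When the continue map runs out of
 * segments, nd_continue_cb is asked for a fresh map; a NULL return means
 * the driver has nothing more to transfer.
 */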
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
	    ((nd->_nd_map_cont) &&
	     (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0x1234beef;
	}
#endif

#if defined(DIAGNOSTIC) && 0
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}
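
/*
 * Load the continue (DD_START/DD_STOP) registers from the continue map,
 * or poison them with 0xdeadbeef when there is no continue map.
 */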
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;	/* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}
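
/*
 * Load the current-transfer (DD_NEXT/DD_LIMIT) registers from the current
 * map, or poison them with 0xdeadbeef when there is no current map.  The
 * ethernet transmit channel takes its next pointer through DD_NEXT_INITBUF
 * rather than DD_NEXT.
 */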
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000;	/* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
	    (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	printf("NDMAP: *intrstat = 0x%b\n",
			(*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)), NEXT_INTR_BITS);
	printf("NDMAP: *intrmask = 0x%b\n",
			(*(volatile u_long *)IIOV(NEXT_P_INTRMASK)), NEXT_INTR_BITS);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr = 0x%b\n", dd_csr, DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
}

/****************************************************************/
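/*
 * A segment has completed (or the channel has stopped): figure out how much
 * of the current segment the hardware actually transferred by reading back
 * the (saved) limit register, record it in ds_xfer_len, call the completed
 * callback if this was the map's last segment, and forget the current map.
 */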
void
next_dma_finish_xfer(nd)
	struct nextdma_config *nd;
{
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;

	onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
	olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	} else {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	}

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	if ((slimit < onext) || (slimit > olimit)) {
		next_dma_print(nd);
		panic("DMA: Unexpected registers in finish_xfer\n");
	}
#endif

	nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit - onext;

	/* If we've reached the end of the current map, inform the
	 * owner (via the completed callback) that the map is done.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}
	nd->_nd_map = 0;
	nd->_nd_idx = 0;
}

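/*
 * Autovectored interrupt handler.  Acknowledges a completed segment, then
 * either shuts the channel down (when both the current and continue maps
 * are exhausted) or rotates the continue map in, reloads the continue
 * registers and re-enables the channel.
 */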
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us. For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		if ((!(state & DMACSR_COMPLETE)) || (state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			panic("DMA Unexpected dma state in interrupt (0x%b)", state, DMACSR_BITS);
		}
#endif

		next_dma_finish_xfer(nd);

		/* Check to see if we are expecting dma to shut down */
		if ((nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL)) {

#ifdef DIAGNOSTIC
			if (state & DMACSR_ENABLE) {
				next_dma_print(nd);
				panic("DMA: unexpected DMA state at shutdown (0x%b)\n",
						state, DMACSR_BITS);
			}
#endif
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		{
			u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */

			if (state & DMACSR_READ) {
				dmadir = DMACSR_SETREAD;
			} else {
				dmadir = DMACSR_SETWRITE;
			}

			/* We used to SETENABLE here only conditionally, but we
			 * got burned because DMA would sometimes shut down
			 * between when we checked and when we acted upon it.
			 * CL19991211
			 */
			if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETENABLE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE | DMACSR_SETENABLE);
			}

		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();	/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}
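
/*
 * Kick off a new transfer in the given direction (DMACSR_SETREAD or
 * DMACSR_SETWRITE).  The previous transfer must already have finished
 * (see nextdma_finished()); both the current and continue maps are
 * preloaded by calling next_dma_rotate() twice before the hardware is
 * programmed and enabled.
 */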
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}