/*	$NetBSD: nextdma.c,v 1.4 1998/07/21 06:17:35 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <next68k/next68k/isr.h>

#define _GENERIC_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 0
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/* @@@ for debugging */
struct nextdma_config *debugernd;
struct nextdma_config *debugexnd;

int nextdma_intr __P((void *));
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void next_dma_print __P((struct nextdma_config *));

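/*
 * Configure a DMA channel: initialize the bus_dma tag (currently
 * embedded in the config structure), reset the channel via
 * nextdma_init(), and hook up and enable its interrupt.
 */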
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag.  As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_get_tag = NULL;		/* lose */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = next_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* @@@ for debugging */
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		debugernd = nd;
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		debugexnd = nd;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}

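/*
 * Reset a DMA channel: clear the software map state and program the
 * hardware CSR and buffer registers back to their idle values.
 */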
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * Yesterday, another NeXT (milo) didn't have this problem.
	 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}


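/*
 * Reinitialize a channel, blocking DMA interrupts while doing so.
 */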
void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	nextdma_init(nd);
	splx(s);
}

/****************************************************************/

/* If the NeXT had multiple busses, this should probably
 * go elsewhere, but it is here anyway. */
void
next_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/* Flush/purge the cache.
	 * Assumes pointers are aligned.
	 * @@@ should probably be fixed to use offset and len args.
	 * should also optimize this to work on pages for larger regions?
	 */
	if (ops & BUS_DMASYNC_PREWRITE) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			while (p < e) {
				DCFL(p);		/* flush */
				p += 16;		/* cache line length */
			}
		}
	}

	if (ops & BUS_DMASYNC_POSTREAD) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			while (p < e) {
				DCPL(p);		/* purge */
				p += 16;		/* cache line length */
			}
		}
	}
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
			(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}
}

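/*
 * Load the "continue" (next buffer) start/stop registers from the
 * current segment of the continue map, or clear them if there is none.
 */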
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			/* Ethernet transmit needs secret magic */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
							nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
							+ 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		}

	} else {

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0);
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0);
	}

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));
}

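/*
 * Load the "current" buffer registers (next/limit, or next_initbuf/limit
 * for ethernet transmit) from the current segment of the current map,
 * or clear them if there is none.
 */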
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		/* Ethernet transmit needs secret magic */

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
							nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
							+ 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);
		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	} else {

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);
		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
	}
}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr = 0x%b\n", dd_csr, DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
}

/****************************************************************/

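/*
 * Autovectored interrupt handler for a DMA channel.  Checks that the
 * interrupt is ours, fixes up the completed segment's length from the
 * saved limit register, and either restarts, continues, or shuts down
 * the channel.
 */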
int
nextdma_intr(arg)
	void *arg;
{
	struct nextdma_config *nd = arg;

	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.
	 */

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef DIAGNOSTIC
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		if (debugernd != nd) {
			panic("DMA incorrect handling of rx nd->nd_intr");
		}
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		if (debugexnd != nd) {
			panic("DMA incorrect handling of tx nd->nd_intr");
		}
	}
#endif

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state & DMACSR_BUSEXC) {
#if 0	/* This bit seems to get set periodically and I don't know why */
			next_dma_print(nd);
			panic("Bus exception in DMA ipl (%ld) intr(0x%b)\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
#endif
		}

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			next_dma_print(nd);
#if 0	/* Every once in a while this bit doesn't seem to get set,
	 * and I don't know why.  Let's try treating it as a spurious
	 * interrupt, i.e. report it and ignore the interrupt.
	 */
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
#else
			printf("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
			return(1);
#endif
		}
#endif

		/* Set the length of the segment to match actual length.
		 * @@@ is it okay to resize dma segments here?
		 * I should probably ask Jason about this.
		 */
		if (nd->_nd_map) {

			bus_addr_t next;
			bus_addr_t limit;

#if 0
			if (state & DMACSR_ENABLE) {
				next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
			} else {
				next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			}
#else
			next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
#endif
			limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				limit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if (next != nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr) {
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);

				panic("DMA ipl (%ld) intr(0x%b), unexpected completed address\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
			}
#endif

			/* @@@ I observed a case where DMACSR_ENABLE wasn't set and
			 * DD_SAVED_LIMIT didn't contain the expected limit value.  This
			 * should be tested, fixed, and removed. */

			/* bus_addr_t is unsigned, so check limit < next directly
			 * rather than testing (limit-next < 0). */
			if (((limit - next) > nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					|| (limit < next)) {
#if 0
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("DMA packlen: next = 0x%08x limit = 0x%08x\n", next, limit);
#else
				DPRINTF(("DMA packlen: next = 0x%08x limit = 0x%08x", next, limit));
#endif

			} else {
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit - next;
			}
		}


		if ((state & DMACSR_ENABLE) == 0) {

			/* Non-chaining interrupts shut down immediately */
			if (!nd->nd_chaining_flag) {
				nd->_nd_map = nd->_nd_map_cont;
				nd->_nd_idx = nd->_nd_idx_cont;
				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}

			/* Call the completed callback for the last packet */
			if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;

			if (nd->_nd_map_cont) {
				DPRINTF(("DMA ipl (%ld) intr(0x%b), restarting\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_SETENABLE);

			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | DMACSR_RESET);
				DPRINTF(("DMA: enable not set w/o continue map, shutting down dma\n"));
				if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
			}

		} else {
			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			if (nd->_nd_map_cont) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE);
			}
		}
	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

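/*
 * Kick off DMA in the given direction (DMACSR_READ or DMACSR_WRITE).
 * The channel must be idle; the first buffer is pulled in via the
 * continue callback through next_dma_rotate().
 */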
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;			/* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
	/* Index the continue map with the continue index. */
	if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
		panic("unaligned begin dma at start\n");
	}
	if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
			nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
		panic("unaligned end dma at start\n");
	}
#endif

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map_cont->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_cont_regs(nd);

	/* When starting DMA, we must put the continue map
	 * into the current register.  We reset the nd->_nd_map
	 * pointer here to avoid duplicated completed callbacks
	 * for the first buffer.
	 */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;
	next_dma_setup_curr_regs(nd);
	nd->_nd_map = 0;
	nd->_nd_idx = 0;


#if (defined(ND_DEBUG))
	next_dma_print(nd);
#endif

	if (nd->nd_chaining_flag) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE);
	}
}