/*	$NetBSD: nextdma.c,v 1.23 2001/04/02 05:29:43 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _NEXT68K_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) if (nextdma_debug) printf x;
#else
#define DPRINTF(x)
#endif

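/*
 * The channel is driven from a pair of DMA maps: _nd_map/_nd_idx name
 * the segment currently being transferred, and _nd_map_cont/_nd_idx_cont
 * name the segment queued to follow it.  next_dma_rotate() advances the
 * pair, using nd_continue_cb to fetch more work; nd_completed_cb reports
 * each finished map and nd_shutdown_cb reports the channel shutting down.
 */
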
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
                       bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

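	/* Reset the channel (DMACSR_INITBUF apparently reinitializes its
	 * internal buffers) and then program the register sets from scratch.
	 */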
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/


/* Rotate the continue map into the current slot and call the
 * continue callback to try to refill the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
					(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
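		/* Poison the length so a transfer that never finishes is
		 * obvious in next_dma_print() output.
		 */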
		nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 666666666;
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}

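/* Program the continue register pair (start/stop) from the queued map
 * segment, or with 0xdeadbeef sentinels when nothing is queued.
 */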
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop  = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;		/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}

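/* Program the current register pair (next/limit) from the active map
 * segment, or with 0xdeadbeef sentinels when the channel is idle.
 */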
void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %ld\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %ld\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %ld\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %ld\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %ld\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
			 sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
}

/****************************************************************/

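/* Autovectored DMA interrupt handler.  On DMACSR_COMPLETE it records
 * how much of the current segment actually transferred, hands finished
 * maps to nd_completed_cb, and then either chains the next segment or,
 * if the channel has disabled itself, cleans up and calls nd_shutdown_cb.
 */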
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

		if (state & DMACSR_COMPLETE) {
			bus_addr_t onext;
			bus_addr_t olimit;
			bus_addr_t slimit;

			DPRINTF(("DMA: finishing xfer\n"));

			onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

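			/* Work out how far the hardware got.  The register that
			 * holds the final address apparently depends on whether the
			 * channel is still enabled and whether a supdate was pending,
			 * hence the four cases below.
			 */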
			if (state & DMACSR_ENABLE) {
				if (state & DMACSR_SUPDATE) {
#ifdef DIAGNOSTIC
					printf("DMA: please send this output to port-next68k-maintainer@netbsd.org:\n");
					next_dma_print(nd);
					panic("DMA: condition not yet documented to occur\n");
#endif
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
				} else {
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
				}
			} else {
				if (state & DMACSR_SUPDATE) {
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
				} else {
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
				}
			}

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				slimit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if ((slimit < onext) || (slimit > olimit)) {
				next_dma_print(nd);
				panic("DMA: Unexpected limit registers in finish_xfer\n");
			}
#endif

#if (defined(ND_DEBUG))
			if (nextdma_debug > 2) next_dma_print(nd);
#endif

			nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit-onext;

			/* If we've reached the end of the current map, then inform
			 * that we've completed that map.
			 */
			if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;
		}
#ifdef DIAGNOSTIC
		else if ((state & DMACSR_ENABLE) || (state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			panic("DMA Unexpected dma state in interrupt\n");
		}
#endif

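		/* The channel is still enabled: queue the next segment and
		 * clear the completion condition.  SETSUPDATE is only set when
		 * more work remains beyond the segment now running.
		 */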
		if (state & DMACSR_ENABLE) {

			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			{
				u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
				}
			}

		} else {

			/* Cleanup incomplete transfers */
			if (nd->_nd_map) {
				DPRINTF(("DMA: shutting down with non null map\n"));
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0;
				if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
					if (nd->nd_completed_cb)
						(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
				}
				nd->_nd_map = 0;
				nd->_nd_idx = 0;
			}

			/* Cleanup more incomplete transfers */
			if (nd->_nd_map_cont) {
				DPRINTF(("DMA: shutting down with non null continue map\n"));

				while (nd->_nd_idx_cont < nd->_nd_map_cont->dm_nsegs) {
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len = 0;
					nd->_nd_idx_cont++;
				}
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);

				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}

			DPRINTF(("DMA: a shutdown occurred\n"));

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
			return(1);
		}
	}

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	return(1);
}

/*
 * Check to see if dma has finished for a channel
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

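/*
 * Kick off a transfer in the given direction.  The two rotations below
 * preload the current and continue maps, so nd_continue_cb must be able
 * to supply at least one map when this is called.
 */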
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;			/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug > 2) next_dma_print(nd);
#endif

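	/* Arm the channel.  As in the interrupt handler, SETSUPDATE is only
	 * set when another segment is queued behind the one being started.
	 */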
	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}