/*	$NetBSD: nextdma.c,v 1.3 1998/07/19 21:41:17 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <next68k/next68k/isr.h>

#define _GENERIC_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 0
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/* @@@ for debugging */
struct nextdma_config *debugernd;
struct nextdma_config *debugexnd;

int nextdma_intr __P((void *));
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void next_dma_print __P((struct nextdma_config *));

void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_get_tag = NULL;           /* lose */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = next_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* @@@ for debugging */
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		debugernd = nd;
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		debugexnd = nd;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
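
/*
 * Example: how a client driver might wire itself up to a DMA channel.
 * This is a minimal sketch for illustration only (kept under "#if 0"):
 * the fields and callbacks shown are the ones this file reads; the
 * hypothetical "xxattach"/"xxdma_*" routines and "struct xx_softc" are
 * not part of this driver, and the authoritative field list lives in
 * nextdmavar.h.
 */
#if 0	/* example only, not compiled */
void
xxattach(sc)
	struct xx_softc *sc;
{
	struct nextdma_config *nd = &sc->sc_rxdma;

	nd->nd_bst = sc->sc_bst;		/* bus space tag/handle of the */
	nd->nd_bsh = sc->sc_rxdma_bsh;		/* channel's register bank */
	nd->nd_intr = NEXT_I_ENETR_DMA;		/* which DMA interrupt to take */
	nd->nd_chaining_flag = 1;		/* use chained (SUPDATE) restarts */
	nd->nd_continue_cb = xxdma_continue;	/* supply the next dmamap */
	nd->nd_completed_cb = xxdma_completed;	/* map-finished notification */
	nd->nd_shutdown_cb = xxdma_shutdown;	/* channel went idle */
	nd->nd_cb_arg = sc;			/* passed back to the callbacks */

	nextdma_config(nd);			/* builds nd->nd_dmat, hooks the isr */
}
#endif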

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * Yesterday, another NeXT (milo) didn't have this problem.
	 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}

void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();					/* @@@ should this be splimp()? */
	nextdma_init(nd);
	splx(s);
}

/****************************************************************/

/* If the NeXT had multiple busses, this should probably
 * go elsewhere, but it is here anyway. */
void
next_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/* Flush/purge the data cache, one 16-byte line at a time.
	 * Assumes the segment addresses are cache-line aligned.
	 * @@@ should probably be fixed to use the offset and len args
	 *     (see the sketch after this function).
	 * @@@ should also be optimized to work on whole pages for larger regions.
	 */
	if (ops & BUS_DMASYNC_PREWRITE) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			while (p < e) {
				DCFL(p);			/* flush */
				p += 16;			/* cache line length */
			}
		}
	}

	if (ops & BUS_DMASYNC_POSTREAD) {
		int i;
		for (i = 0; i < map->dm_nsegs; i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p + map->dm_segs[i].ds_len;
			while (p < e) {
				DCPL(p);			/* purge */
				p += 16;			/* cache line length */
			}
		}
	}
}
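
/*
 * A possible shape for the "@@@ use offset and len" fix noted above: clip
 * each segment against the requested byte window of the map and round the
 * start down to a cache-line boundary before operating on it.  This is an
 * untested sketch, not part of the driver, and the function name
 * "next_dmamap_sync_ranged" is made up for illustration.
 */
#if 0	/* sketch only, not compiled */
static void
next_dmamap_sync_ranged(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	bus_addr_t off = 0;			/* running offset of segment in map */
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		bus_size_t slen = map->dm_segs[i].ds_len;
		bus_addr_t p, e;

		if (off + slen <= offset || off >= offset + len) {
			off += slen;
			continue;		/* segment entirely outside the window */
		}
		p = map->dm_segs[i].ds_addr;
		e = p + slen;
		if (offset > off)
			p += offset - off;
		if (offset + len < off + slen)
			e = map->dm_segs[i].ds_addr + (offset + len - off);
		p &= ~(bus_addr_t)0xf;		/* round down to a cache line */
		while (p < e) {
			if (ops & BUS_DMASYNC_PREWRITE)
				DCFL(p);	/* flush line */
			if (ops & BUS_DMASYNC_POSTREAD)
				DCPL(p);	/* purge line */
			p += 16;
		}
		off += slen;
	}
}
#endif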

/****************************************************************/


/* Call the completed and continue callbacks to try to keep the
 * DMA continue buffers filled.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* If we've reached the end of the current map, then inform
	 * the client that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
					(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}
}
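
/*
 * Summary of the rotation above: the engine keeps two (map, segment index)
 * pairs.  "_nd_map"/"_nd_idx" describe the segment the hardware is
 * currently working on; "_nd_map_cont"/"_nd_idx_cont" describe the segment
 * queued in the continue registers.  On each rotation the continue pair
 * becomes the current pair, and once the continue map runs out of segments
 * nd_continue_cb is asked for a fresh map (or the continue slot is left
 * empty if there is no callback or no more work).
 */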

void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			/* Ethernet transmit needs secret magic: the high bit
			 * set in the stop address. */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
							nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
							+ 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		}

	} else {

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0);
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0);
	}

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));

}

void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		/* Ethernet transmit needs secret magic: the high bit
		 * set in the limit address. */

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
							nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
							+ 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);
		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	} else {

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);
		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	}

}
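
/*
 * Note on the two setup routines above: the current transfer is described
 * by the DD_NEXT (or DD_NEXT_INITBUF)/DD_LIMIT pair, and the queued
 * "continue" transfer by the DD_START/DD_STOP pair; each value written is
 * also copied into the corresponding DD_SAVED_* location so that the
 * interrupt handler can later read back what was loaded (it uses
 * DD_SAVED_LIMIT to trim the completed segment).  This reading of the
 * register layout is inferred from the code here and from nextdmareg.h,
 * not from hardware documentation.
 */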


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes.
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %ld\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr,   DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
}

/****************************************************************/

int
nextdma_intr(arg)
	void *arg;
{
	struct nextdma_config *nd = arg;

	/* @@@ This is bogus: we can't be certain of arg's type
	 * unless the interrupt is for us.
	 */

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef DIAGNOSTIC
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		if (debugernd != nd) {
			panic("DMA incorrect handling of rx nd->nd_intr");
		}
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		if (debugexnd != nd) {
			panic("DMA incorrect handling of tx nd->nd_intr");
		}
	}
#endif

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state & DMACSR_BUSEXC) {
#if 0 /* This bit seems to get set periodically and I don't know why */
			next_dma_print(nd);
			panic("Bus exception in DMA ipl (%ld) intr(0x%b)\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
#endif
		}

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			next_dma_print(nd);
#if 0 /* Every once in a while this bit doesn't get set,
			 * and I don't know why.  Let's try treating it as a spurious
			 * interrupt, i.e. report it and ignore the interrupt.
			 */
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("DMA  ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
#else
			printf("DMA  ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
			return(1);
#endif
		}
#endif

		/* Set the length of the segment to match the actual transfer length.
		 * @@@ is it okay to resize dma segments here?
		 * I should probably ask Jason about this.
		 */
		if (nd->_nd_map) {

			bus_addr_t next;
			bus_addr_t limit;

#if 0
			if (state & DMACSR_ENABLE) {
				next  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
			} else {
				next  = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			}
#else
			next  = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
#endif
			limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				limit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if (next != nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr) {
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);

				panic("DMA  ipl (%ld) intr(0x%b), unexpected completed address\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
			}
#endif

			/* @@@ I observed a case where DMACSR_ENABLE wasn't set and
			 * DD_SAVED_LIMIT didn't contain the expected limit value.  This
			 * should be tested, fixed, and removed.  */

			if (((limit-next) > nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					|| (limit-next < 0)) {
#if 0
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("DMA packlen: next = 0x%08x limit = 0x%08x\n", next, limit);
#else
				DPRINTF(("DMA packlen: next = 0x%08x limit = 0x%08x", next, limit));
#endif

			} else {
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit - next;
			}
		}


		if ((state & DMACSR_ENABLE) == 0) {

			/* Non-chaining DMA shuts down immediately */
			if (!nd->nd_chaining_flag) {
				nd->_nd_map = nd->_nd_map_cont;
				nd->_nd_idx = nd->_nd_idx_cont;
				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}

			/* Call the completed callback for the last packet */
			if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;

			if (nd->_nd_map_cont) {
				DPRINTF(("DMA  ipl (%ld) intr(0x%b), restarting\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_SETENABLE);

			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | DMACSR_RESET);
				DPRINTF(("DMA: enable not set w/o continue map, shutting down dma\n"));
				if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
			}

		} else {
			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			if (nd->_nd_map_cont) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE);
			}

		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	return(1);
}
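
/*
 * Example of the callback trio a client driver supplies (a sketch only;
 * the exact prototypes live in nextdmavar.h).  The signatures below are
 * inferred from the call sites in this file, and the "xx" driver, its
 * softc, and its queue helpers are hypothetical.
 */
#if 0	/* example only, not compiled */
/* Called when the continue slot is empty; return the next loaded
 * dmamap, or NULL if there is nothing more to transfer. */
bus_dmamap_t
xxdma_continue(arg)
	void *arg;
{
	struct xx_softc *sc = arg;
	return (xx_next_loaded_map(sc));	/* hypothetical queue helper */
}

/* Called when the hardware has finished the last segment of a map;
 * nextdma_intr() has already trimmed the final segment length to the
 * actual transfer length. */
void
xxdma_completed(map, arg)
	bus_dmamap_t map;
	void *arg;
{
	struct xx_softc *sc = arg;
	xx_harvest_map(sc, map);		/* hypothetical: unload/recycle */
}

/* Called when the channel goes idle with no continue map queued. */
void
xxdma_shutdown(arg)
	void *arg;
{
	struct xx_softc *sc = arg;
	xx_channel_idle(sc);			/* hypothetical */
}
#endif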

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();					/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}
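
/*
 * Note: this predicate only becomes true after the interrupt handler's
 * shutdown path has run, since that is where both the current and
 * continue map slots are cleared; a client can poll it (or wait for its
 * shutdown callback) to know the channel has drained.
 */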

void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;				/* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
	if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
		panic("unaligned begin dma at start\n");
	}
	if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
			nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
		panic("unaligned end dma at start\n");
	}
#endif

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map_cont->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_cont_regs(nd);

	/* When starting DMA, we must load the continue map into the
	 * current registers.  We reset the nd->_nd_map pointer afterwards
	 * to avoid a duplicated completed callback for the first buffer.
	 */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;
	next_dma_setup_curr_regs(nd);
	nd->_nd_map = 0;
	nd->_nd_idx = 0;

	if (nd->nd_chaining_flag) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE);
	}

}
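
/*
 * Putting it together: a sketch of how a client might hand one buffer to
 * the engine.  The dmamap is created and loaded with the tag this file
 * builds (nd->nd_dmat), synced with the PREWRITE semantics implemented by
 * next_dmamap_sync() above, and handed back from the continue callback
 * once nextdma_start() asks for it.  "xxstart_dma", "struct xx_softc" and
 * its fields are hypothetical, error handling is omitted, and the mapping
 * of DMACSR_WRITE to the memory-to-device direction is an assumption to
 * be checked against nextdmareg.h.
 */
#if 0	/* example only, not compiled */
void
xxstart_dma(sc, nd)
	struct xx_softc *sc;			/* hypothetical driver softc */
	struct nextdma_config *nd;
{
	bus_dmamap_t map;

	/* The buffer must satisfy the DMA_BEGINALIGNED/DMA_ENDALIGNED
	 * checks enforced by nextdma_start() above. */
	bus_dmamap_create(nd->nd_dmat, MCLBYTES, 1, MCLBYTES, 0,
			BUS_DMA_NOWAIT, &map);
	bus_dmamap_load(nd->nd_dmat, map, sc->sc_txbuf, sc->sc_txlen,
			NULL, BUS_DMA_NOWAIT);
	bus_dmamap_sync(nd->nd_dmat, map, 0, sc->sc_txlen,
			BUS_DMASYNC_PREWRITE);

	/* Stash the map where the driver's continue callback will return
	 * it, then kick the channel. */
	sc->sc_pending_map = map;
	nextdma_start(nd, DMACSR_WRITE);

	/* Completion is signalled through the completed/shutdown
	 * callbacks, or can be polled with nextdma_finished(nd). */
}
#endif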