/*	$NetBSD: nextdma.c,v 1.25 2001/04/07 13:02:55 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _NEXT68K_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
                       bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

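	/* Reset the channel and program sane values into both register
	 * sets, then hook the DMA interrupt handler in at this channel's
	 * interrupt level and unmask it.
	 */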
	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

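	/* Forget any current and continue maps, hard-reset the channel,
	 * and load placeholder values into both register sets.
	 */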
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

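	/* Advance the continue slot to the next segment of its map; when
	 * the map runs out of segments (or there is no map), ask the
	 * client for a fresh one via the continue callback.
	 */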
	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
					(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len = 666666666;
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}

void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

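	/* Load the start/stop register pair from the pending continue
	 * segment.  When there is no continue map, park the registers at
	 * a recognizable placeholder value instead.
	 */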
	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop  = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

#ifdef DIAGNOSTIC
		nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len = 555555555;
#endif

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;		/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)
			) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}

void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

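	/* Load the next/limit register pair from the current segment, or
	 * park them at the placeholder value when no map is active.
	 */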
	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);

#ifdef DIAGNOSTIC
		nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 444444444;
#endif

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next)
			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)
			) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
		{
			int i;
			printf("NDMAP: Entire map:\n");
			for(i=0;i<nd->_nd_map->dm_nsegs;i++) {
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
						i,nd->_nd_map->dm_segs[i].ds_addr);
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_len = %d\n",
						i,nd->_nd_map->dm_segs[i].ds_len);
				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
						i,nd->_nd_map->dm_segs[i].ds_xfer_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
		if (nd->_nd_map_cont != nd->_nd_map) {
			int i;
			printf("NDMAP: Entire map:\n");
			for(i=0;i<nd->_nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_len);
				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_xfer_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
			 sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
}

/****************************************************************/

int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		unsigned int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

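		/* A buffer completed: work out how far the hardware actually
		 * got in the current segment, record that in ds_xfer_len, and
		 * hand the map back to the client once its last segment is
		 * done.
		 */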
		if (state & DMACSR_COMPLETE) {
			bus_addr_t onext;
			bus_addr_t olimit;
			bus_addr_t slimit;

			DPRINTF(("DMA: finishing xfer\n"));

			onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

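			/* Encode the interesting CSR bits so the cases below can
			 * pick which register holds the final transfer address:
			 * 0x01 = ENABLE still set, 0x02 = SUPDATE still set,
			 * 0x04 = we expected a shutdown (no SETSUPDATE last turn),
			 * 0x08 = BUSEXC was flagged.
			 */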
			{
				int result = 0;
				if (state & DMACSR_ENABLE) {
					/* enable bit was set */
					result |= 0x01;
				}
				if (state & DMACSR_SUPDATE) {
					/* supdate bit was set */
					result |= 0x02;
				}
				if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
					/* Expecting a shutdown, didn't SETSUPDATE last turn */
					result |= 0x04;
				}
				if (state & DMACSR_BUSEXC) {
					/* bus exception bit was set */
					result |= 0x08;
				}
				switch (result) {
				case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
				case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					break;
				case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
				case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, result);
					if ((slimit == 0x01) || (slimit == 0x09)) {
						slimit = olimit;
					}
					break;
				case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
				case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
					break;
				case 0x04: /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
				case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					break;
				default:
#ifdef DIAGNOSTIC
					printf("DMA: please send this output to port-next68k-maintainer (at) netbsd.org:\n");
					next_dma_print(nd);
					panic("DMA: condition 0x%02x not yet documented to occur\n",result);
#endif
					slimit = olimit;
					break;
				}
			}

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				slimit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if ((slimit < onext) || (slimit > olimit)) {
				next_dma_print(nd);
				panic("DMA: Unexpected limit register (0x%08x) in finish_xfer\n",slimit);
			}
#endif

#if (defined(ND_DEBUG))
			if (nextdma_debug > 2) next_dma_print(nd);
#endif

			nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit-onext;

			/* If we've reached the end of the current map, then inform
			 * the client that we've completed that map.
			 */
			if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;
		}
#ifdef DIAGNOSTIC
		else if ((state & DMACSR_ENABLE) || (state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			panic("DMA: Unexpected dma state in interrupt\n");
		}
#endif

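		/* If the channel is still enabled, rotate in the next buffer
		 * and refresh the continue registers while the current one
		 * runs; otherwise the channel has shut down and may need to be
		 * restarted by hand below.
		 */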
		if (state & DMACSR_ENABLE) {

			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			{
				u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
				}
			}

		} else {

			DPRINTF(("DMA: a shutdown occurred\n"));
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

#if 0
			/* Cleanup incomplete transfers */
			if (nd->_nd_map) {
				DPRINTF(("DMA: shutting down with non null map\n"));
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0;
				if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
					if (nd->nd_completed_cb)
						(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
				}
				nd->_nd_map = 0;
				nd->_nd_idx = 0;
			}

			/* Cleanup more incomplete transfers */
			if (nd->_nd_map_cont) {
				DPRINTF(("DMA: shutting down with non null continue map\n"));

				while(nd->_nd_idx_cont < nd->_nd_map_cont->dm_nsegs) {
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len = 0;
					nd->_nd_idx_cont++;
				}
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);

				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}
#else
			/* Do a dma restart */
			if (!nd->_nd_map && nd->_nd_map_cont) {
				next_dma_rotate(nd);
			}
			if (nd->_nd_map) {

				u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_INITBUF | DMACSR_RESET | dmadir);

				next_dma_setup_curr_regs(nd);
				next_dma_setup_cont_regs(nd);

				if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETENABLE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
				}
				return 1;
			}
#endif

			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
			return(1);
		}
	}

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();		/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

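	/* Prime the hardware: reset the channel, point both register sets
	 * at the freshly loaded maps, then enable the transfer (with
	 * SETSUPDATE unless this is the only remaining segment).
	 */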
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug > 2) next_dma_print(nd);
#endif

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}