      1 /*	$NetBSD: nextdma.c,v 1.26 2001/04/16 14:12:12 dbj Exp $	*/
      2 /*
      3  * Copyright (c) 1998 Darrin B. Jewell
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. All advertising materials mentioning features or use of this software
     15  *    must display the following acknowledgement:
     16  *      This product includes software developed by Darrin B. Jewell
     17  * 4. The name of the author may not be used to endorse or promote products
     18  *    derived from this software without specific prior written permission
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/param.h>
     33 #include <sys/systm.h>
     34 #include <sys/mbuf.h>
     35 #include <sys/syslog.h>
     36 #include <sys/socket.h>
     37 #include <sys/device.h>
     38 #include <sys/malloc.h>
     39 #include <sys/ioctl.h>
     40 #include <sys/errno.h>
     41 
     42 #include <machine/autoconf.h>
     43 #include <machine/cpu.h>
     44 #include <machine/intr.h>
     45 
     46 #include <m68k/cacheops.h>
     47 
     48 #include <next68k/next68k/isr.h>
     49 
     50 #define _NEXT68K_BUS_DMA_PRIVATE
     51 #include <machine/bus.h>
     52 
     53 #include "nextdmareg.h"
     54 #include "nextdmavar.h"
     55 
     56 #if 1
     57 #define ND_DEBUG
     58 #endif
     59 
     60 #if defined(ND_DEBUG)
     61 int nextdma_debug = 0;
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
     63 #else
     64 #define DPRINTF(x)
     65 #endif
     66 
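/*
 * When ND_DEBUG is defined, nextdma_debug_savestate() records a trace of
 * DMA CSR values in a small ring buffer (one buffer for the ethernet
 * receive channel, one for the SCSI channel) so that a wedged transfer
 * can be examined after the fact via the *_dumpstate() routines below.
 */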
     67 #if defined(ND_DEBUG)
     68 int nextdma_debug_enetr_idx = 0;
     69 unsigned int nextdma_debug_enetr_state[100] = { 0 };
     70 int nextdma_debug_scsi_idx = 0;
     71 unsigned int nextdma_debug_scsi_state[100] = { 0 };
     72 
     73 void nextdma_debug_initstate(struct nextdma_config *nd);
     74 void nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state);
     75 void nextdma_debug_scsi_dumpstate(void);
     76 void nextdma_debug_enetr_dumpstate(void);
     77 
     78 void
     79 nextdma_debug_initstate(struct nextdma_config *nd)
     80 {
     81 	switch(nd->nd_intr) {
     82 	case NEXT_I_ENETR_DMA:
     83 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
     84 		break;
     85 	case NEXT_I_SCSI_DMA:
     86 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
     87 		break;
     88 	}
     89 }
     90 
     91 void
     92 nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state)
     93 {
     94 	switch(nd->nd_intr) {
     95 	case NEXT_I_ENETR_DMA:
     96 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
     97 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
     98 		break;
     99 	case NEXT_I_SCSI_DMA:
    100 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
    101 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    102 		break;
    103 	}
    104 }
    105 
    106 void
    107 nextdma_debug_enetr_dumpstate(void)
    108 {
    109 	int i;
    110 	int s;
    111 	s = spldma();
    112 	i = nextdma_debug_enetr_idx;
    113 	do {
    114 		char sbuf[256];
    115 		if (nextdma_debug_enetr_state[i]) {
    116 			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
    117 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    118 		}
    119 		i++;
    120 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    121 	} while (i != nextdma_debug_enetr_idx);
    122 	splx(s);
    123 }
    124 
    125 void
    126 nextdma_debug_scsi_dumpstate(void)
    127 {
    128 	int i;
    129 	int s;
    130 	s = spldma();
    131 	i = nextdma_debug_scsi_idx;
    132 	do {
    133 		char sbuf[256];
    134 		if (nextdma_debug_scsi_state[i]) {
    135 			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
    136 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    137 		}
    138 		i++;
    139 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    140 	} while (i != nextdma_debug_scsi_idx);
    141 	splx(s);
    142 }
    143 #endif
    144 
    145 
    146 void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    147                        bus_size_t, int));
    148 int next_dma_continue __P((struct nextdma_config *));
    149 void next_dma_rotate __P((struct nextdma_config *));
    150 
    151 void next_dma_setup_cont_regs __P((struct nextdma_config *));
    152 void next_dma_setup_curr_regs __P((struct nextdma_config *));
    153 
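/*
 * Attach a DMA channel: build the bus_dma tag for the channel, reset the
 * hardware and hook nextdma_intr onto the channel's interrupt.
 *
 * Illustrative client usage (a sketch only -- see nextdmavar.h and the
 * drivers that call this code for the authoritative interface; the
 * mydrv_* names and "sc" below are hypothetical):
 *
 *	nd->nd_bst = ...;			(bus space tag)
 *	nd->nd_bsh = ...;			(bus space handle for the channel regs)
 *	nd->nd_intr = NEXT_I_SCSI_DMA;		(which DMA interrupt to service)
 *	nd->nd_continue_cb = mydrv_continue;	(returns the next loaded bus_dmamap_t)
 *	nd->nd_completed_cb = mydrv_completed;	(called as each map finishes)
 *	nd->nd_shutdown_cb = mydrv_shutdown;	(called when the channel goes idle)
 *	nd->nd_cb_arg = sc;
 *	nextdma_config(nd);
 *	...
 *	nextdma_start(nd, DMACSR_SETREAD);	(or DMACSR_SETWRITE)
 */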
    154 void
    155 nextdma_config(nd)
    156 	struct nextdma_config *nd;
    157 {
    158 	/* Initialize the dma_tag. As a hack, we currently
    159 	 * put the dma tag in the structure itself.  It shouldn't be there.
    160 	 */
    161 
    162 	{
    163 		bus_dma_tag_t t;
    164 		t = &nd->_nd_dmat;
    165 		t->_cookie = nd;
    166 		t->_dmamap_create = _bus_dmamap_create;
    167 		t->_dmamap_destroy = _bus_dmamap_destroy;
    168 		t->_dmamap_load = _bus_dmamap_load_direct;
    169 		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
    170 		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
    171 		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
    172 		t->_dmamap_unload = _bus_dmamap_unload;
    173 		t->_dmamap_sync = _bus_dmamap_sync;
    174 
    175 		t->_dmamem_alloc = _bus_dmamem_alloc;
    176 		t->_dmamem_free = _bus_dmamem_free;
    177 		t->_dmamem_map = _bus_dmamem_map;
    178 		t->_dmamem_unmap = _bus_dmamem_unmap;
    179 		t->_dmamem_mmap = _bus_dmamem_mmap;
    180 
    181 		nd->nd_dmat = t;
    182 	}
    183 
    184 	nextdma_init(nd);
    185 
    186 	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
    187 	INTR_ENABLE(nd->nd_intr);
    188 }
    189 
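/*
 * Reset the channel: clear the software map state, write RESET|INITBUF
 * to the CSR and park both register sets at their sentinel values.
 */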
    190 void
    191 nextdma_init(nd)
    192 	struct nextdma_config *nd;
    193 {
    194 #ifdef ND_DEBUG
    195 	if (nextdma_debug) {
    196 		char sbuf[256];
    197 
    198 		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    199 				 sbuf, sizeof(sbuf));
    200 		printf("DMA init ipl (%ld) intr(0x%s)\n",
    201 			NEXT_I_IPL(nd->nd_intr), sbuf);
    202 	}
    203 #endif
    204 
    205 	nd->_nd_map = NULL;
    206 	nd->_nd_idx = 0;
    207 	nd->_nd_map_cont = NULL;
    208 	nd->_nd_idx_cont = 0;
    209 
    210 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
    211 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    212 			DMACSR_RESET | DMACSR_INITBUF);
    213 
    214 	next_dma_setup_curr_regs(nd);
    215 	next_dma_setup_cont_regs(nd);
    216 
    217 #if defined(DIAGNOSTIC)
    218 	{
    219 		u_long state;
    220 		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
    221 
    222 #if 1
		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
		    DMACSR_SUPDATE | DMACSR_ENABLE);
    231 #endif
    232 		if (state) {
    233 			next_dma_print(nd);
    234 			panic("DMA did not reset");
    235 		}
    236 	}
    237 #endif
    238 }
    239 
    240 
    241 void
    242 nextdma_reset(nd)
    243 	struct nextdma_config *nd;
    244 {
    245 	int s;
    246 	s = spldma();
    247 
    248 	DPRINTF(("DMA reset\n"));
    249 
    250 #if (defined(ND_DEBUG))
    251 	if (nextdma_debug) next_dma_print(nd);
    252 #endif
    253 
    254 	if ((nd->_nd_map) || (nd->_nd_map_cont)) {
    255 		/* @@@ clean up dma maps */
    256 		panic("DMA abort not implemented\n");
    257 	}
    258 
    259 	nextdma_init(nd);
    260 	splx(s);
    261 }
    262 
    263 /****************************************************************/
    264 
    265 
/* Rotate the continue map into the current slot and call the client's
 * continue callback to try to refill the dma continue slot.
 */
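/*
 * The channel is double buffered: the "current" map/segment is programmed
 * into DD_NEXT/DD_LIMIT, while the "continue" map/segment is staged in
 * DD_START/DD_STOP; when a continue map exists the code arms chaining
 * with DMACSR_SETSUPDATE.  Rotation happens in nextdma_start() and from
 * the interrupt handler each time a segment completes.
 */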
    269 void
    270 next_dma_rotate(nd)
    271 	struct nextdma_config *nd;
    272 {
    273 
    274 	DPRINTF(("DMA next_dma_rotate()\n"));
    275 
    276 	/* Rotate the continue map into the current map */
    277 	nd->_nd_map = nd->_nd_map_cont;
    278 	nd->_nd_idx = nd->_nd_idx_cont;
    279 
	if ((nd->_nd_map_cont == NULL) ||
	    (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs)) {
    283 		if (nd->nd_continue_cb) {
    284 			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
    285 			if (nd->_nd_map_cont) {
    286 				nd->_nd_map_cont->dm_xfer_len = 0;
    287 			}
    288 		} else {
    289 			nd->_nd_map_cont = 0;
    290 		}
    291 		nd->_nd_idx_cont = 0;
    292 	}
    293 
    294 #ifdef DIAGNOSTIC
    295 	if (nd->_nd_map_cont) {
    296 		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
    297 			next_dma_print(nd);
    298 			panic("DMA request unaligned at start\n");
    299 		}
    300 		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
    301 				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
    302 			next_dma_print(nd);
    303 			panic("DMA request unaligned at end\n");
    304 		}
    305 	}
    306 #endif
    307 
    308 }
    309 
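/*
 * Program the continue ("chain") register set, DD_START/DD_STOP and their
 * saved copies, from the current continue segment.  If there is no
 * continue map, the registers are parked at the 0xdeadbeef sentinel.
 */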
    310 void
    311 next_dma_setup_cont_regs(nd)
    312 	struct nextdma_config *nd;
    313 {
    314 	bus_addr_t dd_start;
    315 	bus_addr_t dd_stop;
    316 	bus_addr_t dd_saved_start;
    317 	bus_addr_t dd_saved_stop;
    318 
	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));
    320 
    321 	if (nd->_nd_map_cont) {
    322 		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
    323 		dd_stop  = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
    324 				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
    325 
    326 		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
    327 			dd_stop |= 0x80000000;		/* Ethernet transmit needs secret magic */
    328 		}
    329 	} else {
    330 		dd_start = 0xdeadbeef;
    331 		dd_stop = 0xdeadbeef;
    332 	}
    333 
    334 	dd_saved_start = dd_start;
    335 	dd_saved_stop  = dd_stop;
    336 
    337 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
    338 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
    339 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
    340 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);
    341 
    342 #ifdef DIAGNOSTIC
    343 	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start)
    344 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop)
    345 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start)
    346 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)
    347 			) {
    348 		next_dma_print(nd);
    349 		panic("DMA failure writing to continue regs");
    350 	}
    351 #endif
    352 }
    353 
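/*
 * Program the current register set, DD_NEXT/DD_LIMIT and their saved
 * copies, from the current segment (the ethernet transmit channel takes
 * its next pointer through DD_NEXT_INITBUF instead).  With no current
 * map, the registers are parked at the 0xdeadbeef sentinel.
 */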
    354 void
    355 next_dma_setup_curr_regs(nd)
    356 	struct nextdma_config *nd;
    357 {
    358 	bus_addr_t dd_next;
    359 	bus_addr_t dd_limit;
    360 	bus_addr_t dd_saved_next;
    361 	bus_addr_t dd_saved_limit;
    362 
    363 	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));
    364 
    365 
    366 	if (nd->_nd_map) {
    367 		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
    368 		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
    369 				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
    370 
    371 		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
    372 			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
    373 		}
    374 	} else {
    375 		dd_next = 0xdeadbeef;
    376 		dd_limit = 0xdeadbeef;
    377 	}
    378 
    379 	dd_saved_next = dd_next;
    380 	dd_saved_limit = dd_limit;
    381 
    382 	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
    383 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
    384 	} else {
    385 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
    386 	}
    387 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
    388 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
    389 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);
    390 
    391 #ifdef DIAGNOSTIC
    392 	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next)
    393 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next)
    394 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit)
    395 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next)
    396 			|| (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)
    397 			) {
    398 		next_dma_print(nd);
    399 		panic("DMA failure writing to current regs");
    400 	}
    401 #endif
    402 }
    403 
    404 
    405 /* This routine is used for debugging */
    406 
    407 void
    408 next_dma_print(nd)
    409 	struct nextdma_config *nd;
    410 {
    411 	u_long dd_csr;
    412 	u_long dd_next;
    413 	u_long dd_next_initbuf;
    414 	u_long dd_limit;
    415 	u_long dd_start;
    416 	u_long dd_stop;
    417 	u_long dd_saved_next;
    418 	u_long dd_saved_limit;
    419 	u_long dd_saved_start;
    420 	u_long dd_saved_stop;
    421 	char sbuf[256];
    422 
    423 	/* Read all of the registers before we print anything out,
    424 	 * in case something changes
    425 	 */
    426 	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
    427 	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
    428 	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
    429 	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
    430 	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
    431 	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
    432 	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
    433 	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
    434 	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
    435 	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);
    436 
    437 	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
    438 			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
    439 	printf("NDMAP: *intrstat = 0x%s\n", sbuf);
    440 
    441 	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
    442 			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
    443 	printf("NDMAP: *intrmask = 0x%s\n", sbuf);
    444 
    445 	/* NDMAP is Next DMA Print (really!) */
    446 
    447 	if (nd->_nd_map) {
    448 		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
    449 				nd->_nd_map->dm_mapsize);
    450 		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
    451 				nd->_nd_map->dm_nsegs);
    452 		printf("NDMAP: nd->_nd_map->dm_xfer_len = %d\n",
    453 				nd->_nd_map->dm_xfer_len);
    454 		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    455 				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
    456 		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
    457 				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
    458 		{
    459 			int i;
    460 			printf("NDMAP: Entire map;\n");
    461 			for(i=0;i<nd->_nd_map->dm_nsegs;i++) {
    462 				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    463 						i,nd->_nd_map->dm_segs[i].ds_addr);
    464 				printf("NDMAP:   nd->_nd_map->dm_segs[%d].ds_len = %d\n",
    465 						i,nd->_nd_map->dm_segs[i].ds_len);
    466 			}
    467 		}
    468 	} else {
    469 		printf("NDMAP: nd->_nd_map = NULL\n");
    470 	}
    471 	if (nd->_nd_map_cont) {
    472 		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
    473 				nd->_nd_map_cont->dm_mapsize);
    474 		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
    475 				nd->_nd_map_cont->dm_nsegs);
    476 		printf("NDMAP: nd->_nd_map_cont->dm_xfer_len = %d\n",
    477 				nd->_nd_map_cont->dm_xfer_len);
    478 		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    479 				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
    480 		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
    481 				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
    482 		if (nd->_nd_map_cont != nd->_nd_map) {
    483 			int i;
    484 			printf("NDMAP: Entire map;\n");
    485 			for(i=0;i<nd->_nd_map_cont->dm_nsegs;i++) {
    486 				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    487 						i,nd->_nd_map_cont->dm_segs[i].ds_addr);
    488 				printf("NDMAP:   nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
    489 						i,nd->_nd_map_cont->dm_segs[i].ds_len);
    490 			}
    491 		}
    492 	} else {
    493 		printf("NDMAP: nd->_nd_map_cont = NULL\n");
    494 	}
    495 
    496 	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
    497 	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
    498 
	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);
    508 
    509 	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    510 			 sbuf, sizeof(sbuf));
    511 	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
    512 			NEXT_I_IPL(nd->nd_intr), sbuf);
    513 }
    514 
    515 /****************************************************************/
    516 
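/*
 * Interrupt handler.  The general flow is:
 *   - bail out if this channel's interrupt is not asserted;
 *   - read the CSR and (under DIAGNOSTIC) insist that COMPLETE is set;
 *   - work out how far the engine got in the current segment ("slimit"),
 *     based on which of ENABLE/SUPDATE/BUSEXC are set and on whether a
 *     shutdown was expected, and credit slimit - onext bytes to the
 *     current map's dm_xfer_len;
 *   - if that was the last segment of the current map, report the map
 *     via nd_completed_cb;
 *   - if ENABLE is still set, rotate maps, reload the continue registers
 *     and write CLRCOMPLETE (plus SETSUPDATE when another map is staged);
 *     otherwise write CLRCOMPLETE|RESET, flush any staged continue map
 *     and call nd_shutdown_cb.
 */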
    517 int
    518 nextdma_intr(arg)
    519      void *arg;
    520 {
	/* @@@ This is bogus; we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */
    530 
    531 #ifdef ND_DEBUG
    532 	if (nextdma_debug) {
    533 		char sbuf[256];
    534 
    535 		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    536 				 sbuf, sizeof(sbuf));
    537 		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
    538 			NEXT_I_IPL(nd->nd_intr), sbuf);
    539 	}
    540 #endif
    541 
    542 #ifdef DIAGNOSTIC
    543 	if (!nd->_nd_map) {
    544 		next_dma_print(nd);
    545 		panic("DMA missing current map in interrupt!\n");
    546 	}
    547 #endif
    548 
	{
		unsigned int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
    551 
    552 #if defined(ND_DEBUG)
    553 		nextdma_debug_savestate(nd,state);
    554 #endif
    555 
    556 #ifdef DIAGNOSTIC
    557 		if (!(state & DMACSR_COMPLETE)) {
    558 			char sbuf[256];
    559 			next_dma_print(nd);
    560 			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    561 			printf("DMA: state 0x%s\n",sbuf);
    562 			panic("DMA complete not set in interrupt\n");
    563 		}
    564 #endif
    565 
    566 		{
    567 			bus_addr_t onext;
    568 			bus_addr_t olimit;
    569 			bus_addr_t slimit;
    570 
    571 			DPRINTF(("DMA: finishing xfer\n"));
    572 
    573 			onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
    574 			olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
    575 
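			/* Classify the CSR state into a small bitmask so the
			 * switch below can pick where to read the transfer's
			 * stopping point from:
			 *   0x01 ENABLE still set     0x02 SUPDATE set
			 *   0x04 no continue map (a shutdown was expected)
			 *   0x08 BUSEXC (bus exception)
			 */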
    576 			{
    577 				int result = 0;
    578 				if (state & DMACSR_ENABLE) {
    579 					/* enable bit was set */
    580 					result |= 0x01;
    581 				}
    582 				if (state & DMACSR_SUPDATE) {
    583 					/* supdate bit was set */
    584 					result |= 0x02;
    585 				}
    586 				if (nd->_nd_map_cont == NULL) {
    587 					/* Expecting a shutdown, didn't SETSUPDATE last turn */
    588 					result |= 0x04;
    589 				}
    590 				if (state & DMACSR_BUSEXC) {
    591 					/* bus exception bit was set */
    592 					result |= 0x08;
    593 				}
    594 				switch (result) {
    595 				case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
    596 				case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
    597 					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
    598 					break;
    599 				case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
    600 				case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
    601 					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
    602 						bus_addr_t snext;
    603 						snext = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
    604 						if (snext != onext) {
    605 							slimit = olimit;
    606 						} else {
    607 							slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
    608 						}
    609 					} else {
    610 						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
    611 					}
    612 					break;
    613 				case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
    614 				case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
    615 					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
    616 					break;
    617 				case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
    618 				case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
    619 					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
    620 					break;
    621 				default:
    622 #ifdef DIAGNOSTIC
    623 					{
    624 						char sbuf[256];
						printf("DMA: please send this output to port-next68k-maintainer@netbsd.org:\n");
    626 						bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    627 						printf("DMA: state 0x%s\n",sbuf);
    628 						next_dma_print(nd);
    629 						panic("DMA: condition 0x%02x not yet documented to occur\n",result);
    630 					}
    631 #endif
    632 					slimit = olimit;
    633 					break;
    634 				}
    635 			}
    636 
    637 			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
    638 				slimit &= ~0x80000000;
    639 			}
    640 
    641 #ifdef DIAGNOSTIC
    642 			if ((slimit < onext) || (slimit > olimit)) {
    643 				char sbuf[256];
    644 				bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    645 				printf("DMA: state 0x%s\n",sbuf);
    646 				next_dma_print(nd);
    647 				panic("DMA: Unexpected limit register (0x%08x) in finish_xfer\n",slimit);
    648 			}
    649 #endif
    650 
    651 #ifdef DIAGNOSTIC
    652 			if ((state & DMACSR_ENABLE) && ((nd->_nd_idx+1) != nd->_nd_map->dm_nsegs)) {
    653 				if (slimit != olimit) {
    654 					char sbuf[256];
    655 					bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    656 					printf("DMA: state 0x%s\n",sbuf);
    657 					next_dma_print(nd);
    658 					panic("DMA: short limit register (0x%08x) w/o finishing map.\n",slimit);
    659 				}
    660 			}
    661 #endif
    662 
    663 #if (defined(ND_DEBUG))
    664 			if (nextdma_debug > 2) next_dma_print(nd);
    665 #endif
    666 
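			/* slimit - onext is how much of this segment the
			 * engine actually transferred.
			 */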
    667 			nd->_nd_map->dm_xfer_len += slimit-onext;
    668 
			/* If we've reached the end of the current map, notify
			 * the client that the map has completed.
			 */
    672 			if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
    673 				if (nd->nd_completed_cb)
    674 					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
    675 			}
    676 			nd->_nd_map = 0;
    677 			nd->_nd_idx = 0;
    678 		}
    679 
    680 		if (state & DMACSR_ENABLE) {
    681 
    682 			next_dma_rotate(nd);
    683 			next_dma_setup_cont_regs(nd);
    684 
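			/* Acknowledge the interrupt and let the engine continue.
			 * SETSUPDATE is armed only when another map is staged;
			 * without it, the next completion is expected to be the
			 * shutdown of the channel.
			 */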
    685 			{
    686 				u_long dmadir;								/* 	DMACSR_SETREAD or DMACSR_SETWRITE */
    687 
    688 				if (state & DMACSR_READ) {
    689 					dmadir = DMACSR_SETREAD;
    690 				} else {
    691 					dmadir = DMACSR_SETWRITE;
    692 				}
    693 
    694 				if (nd->_nd_map_cont == NULL) {
    695 					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    696 							DMACSR_CLRCOMPLETE | dmadir);
    697 				} else {
    698 					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    699 							DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
    700 				}
    701 			}
    702 
    703 		} else {
    704 
    705 			DPRINTF(("DMA: a shutdown occurred\n"));
    706 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    707 
    708 			/* Cleanup more incomplete transfers */
    709 #if 1
    710 			/* cleanup continue map */
    711 			if (nd->_nd_map_cont) {
    712 				DPRINTF(("DMA: shutting down with non null continue map\n"));
    713 				if (nd->nd_completed_cb)
    714 					(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);
    715 
    716 				nd->_nd_map_cont = 0;
    717 				nd->_nd_idx_cont = 0;
    718 			}
    719 #else
    720 			/* Do an automatic dma restart */
    721 			if (nd->_nd_map_cont) {
    722 				u_long dmadir;								/* 	DMACSR_SETREAD or DMACSR_SETWRITE */
    723 
    724 				next_dma_rotate(nd);
    725 
    726 				if (state & DMACSR_READ) {
    727 					dmadir = DMACSR_SETREAD;
    728 				} else {
    729 					dmadir = DMACSR_SETWRITE;
    730 				}
    731 
    732 				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
    733 				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    734 						DMACSR_INITBUF | DMACSR_RESET | dmadir);
    735 
    736 				next_dma_setup_curr_regs(nd);
    737 				next_dma_setup_cont_regs(nd);
    738 
    739 				if (nd->_nd_map_cont == NULL) {
    740 					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    741 							DMACSR_SETENABLE | dmadir);
    742 				} else {
    743 					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    744 							DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
    745 				}
    746 				return 1;
    747 			}
    748 #endif
    749 			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
    750 		}
    751 	}
    752 
    753 #ifdef ND_DEBUG
    754 	if (nextdma_debug) {
    755 		char sbuf[256];
    756 
    757 		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    758 				 sbuf, sizeof(sbuf));
    759 		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
    760 			NEXT_I_IPL(nd->nd_intr), sbuf);
    761 	}
    762 #endif
    763 
	return(1);
    765 }
    766 
/*
 * Check to see if dma has finished for a channel.
 */
    769 int
    770 nextdma_finished(nd)
    771 	struct nextdma_config *nd;
    772 {
    773 	int r;
    774 	int s;
    775 	s = spldma();									/* @@@ should this be splimp()? */
    776 	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
    777 	splx(s);
    778 	return(r);
    779 }
    780 
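/*
 * Kick off a transfer in the given direction (DMACSR_SETREAD or
 * DMACSR_SETWRITE).  The maps themselves are pulled in through
 * nd_continue_cb: the two next_dma_rotate() calls below preload the
 * current and continue slots, the engine is reset and both register sets
 * are programmed, and finally SETENABLE (plus SETSUPDATE when a second
 * map is available) starts the hardware.  Completion is reported through
 * nd_completed_cb/nd_shutdown_cb from the interrupt handler.
 */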
    781 void
    782 nextdma_start(nd, dmadir)
    783 	struct nextdma_config *nd;
    784 	u_long dmadir;								/* 	DMACSR_SETREAD or DMACSR_SETWRITE */
    785 {
    786 
    787 #ifdef DIAGNOSTIC
    788 	if (!nextdma_finished(nd)) {
    789 		char sbuf[256];
    790 
    791 		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    792 				 sbuf, sizeof(sbuf));
    793 		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
    794 	}
    795 #endif
    796 
    797 #ifdef ND_DEBUG
    798 	if (nextdma_debug) {
    799 		char sbuf[256];
    800 
    801 		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    802 				 sbuf, sizeof(sbuf));
    803 		printf("DMA start (%ld) intr(0x%s)\n",
    804 			NEXT_I_IPL(nd->nd_intr), sbuf);
    805 	}
    806 #endif
    807 
    808 #ifdef DIAGNOSTIC
    809 	if (nd->_nd_map) {
    810 		next_dma_print(nd);
    811 		panic("DMA: nextdma_start() with non null map\n");
    812 	}
    813 	if (nd->_nd_map_cont) {
    814 		next_dma_print(nd);
    815 		panic("DMA: nextdma_start() with non null continue map\n");
    816 	}
    817 #endif
    818 
    819 #ifdef DIAGNOSTIC
    820 	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
    821 		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
    822 	}
    823 #endif
    824 
    825 #if defined(ND_DEBUG)
    826 	nextdma_debug_initstate(nd);
    827 #endif
    828 
    829 	/* preload both the current and the continue maps */
    830 	next_dma_rotate(nd);
    831 
    832 #ifdef DIAGNOSTIC
    833 	if (!nd->_nd_map_cont) {
    834 		panic("No map available in nextdma_start()");
    835 	}
    836 #endif
    837 
    838 	next_dma_rotate(nd);
    839 
    840 #ifdef ND_DEBUG
    841 	if (nextdma_debug) {
    842 		char sbuf[256];
    843 
    844 		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
    845 				 sbuf, sizeof(sbuf));
    846 		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
    847 			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
    848 	}
    849 #endif
    850 
    851 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
    852 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    853 			DMACSR_INITBUF | DMACSR_RESET | dmadir);
    854 
    855 	next_dma_setup_curr_regs(nd);
    856 	next_dma_setup_cont_regs(nd);
    857 
    858 #if (defined(ND_DEBUG))
    859 	if (nextdma_debug > 2) next_dma_print(nd);
    860 #endif
    861 
    862 	if (nd->_nd_map_cont == NULL) {
    863 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    864 				DMACSR_SETENABLE | dmadir);
    865 	} else {
    866 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
    867 				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
    868 	}
    869 }
    870