/*	$NetBSD: nextdma.c,v 1.47.18.2 2014/08/20 00:03:16 tls Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.47.18.2 2014/08/20 00:03:16 tls Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#define _M68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>
#include <next68k/next68k/nextrom.h>

#include <next68k/dev/intiovar.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#include "esp.h"
#include "xe.h"

#if DEBUG
#define ND_DEBUG
#endif

extern int turbo;

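/*
 * Debugging hack: redefine panic() so that it first executes a
 * breakpoint trap ("trap #15", normally entering the kernel debugger
 * or ROM monitor) and only then prints the panic message.
 */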
#define panic		__asm volatile("trap  #15"); printf

#define NEXTDMA_DEBUG nextdma_debug
/* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
int ndtrace_show = 0;
char ndtrace_buf[8192+100];
size_t ndtrace_len = 0;
#define NDTRACEIF(x) if (10) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x

void
ndtrace_printf(const char *fmt, ...)
{
#ifdef ND_DEBUG
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(ndtrace_buf + ndtrace_len, sizeof(ndtrace_buf)
	    - ndtrace_len, fmt, ap);
	va_end(ap);
	/*
	 * vsnprintf returns the length the output would have had, so
	 * clamp ndtrace_len to the buffer when the trace is truncated.
	 */
	if (len > 0) {
		ndtrace_len += len;
		if (ndtrace_len > sizeof(ndtrace_buf) - 1)
			ndtrace_len = sizeof(ndtrace_buf) - 1;
	}
#endif
}

int
ndtrace_empty(void)
{
#ifdef ND_DEBUG
	return ndtrace_len == 0;
#else
	return 1;
#endif
}

void
ndtrace_reset(void)
{
#ifdef ND_DEBUG
	ndtrace_len = 0;
#endif
}

void
ndtrace_addc(int c)
{
#ifdef ND_DEBUG
	if (ndtrace_len < sizeof(ndtrace_buf) - 1) {
		ndtrace_buf[ndtrace_len++] = c;
		ndtrace_buf[ndtrace_len] = '\0';
	}
#endif
}

const char *
ndtrace_get(void)
{
#ifdef ND_DEBUG
	return ndtrace_buf;
#else
	return NULL;
#endif
}


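/*
 * Ring buffers recording recent DMA CSR values, used by the
 * *_dumpstate() routines below for post-mortem inspection.
 */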
#if defined(ND_DEBUG)
int nextdma_debug_enetr_idx = 0;
unsigned int nextdma_debug_enetr_state[100] = { 0 };
int nextdma_debug_scsi_idx = 0;
unsigned int nextdma_debug_scsi_state[100] = { 0 };

void nextdma_debug_initstate(struct nextdma_softc *);
void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
void nextdma_debug_scsi_dumpstate(void);
void nextdma_debug_enetr_dumpstate(void);
#endif


int	nextdma_match(device_t, cfdata_t, void *);
void	nextdma_attach(device_t, device_t, void *);

void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int nextdma_continue(struct nextdma_softc *);
void nextdma_rotate(struct nextdma_softc *);

void nextdma_setup_cont_regs(struct nextdma_softc *);
void nextdma_setup_curr_regs(struct nextdma_softc *);

#if NESP > 0
static int nextdma_esp_intr(void *);
#endif
#if NXE > 0
static int nextdma_enet_intr(void *);
#endif

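/*
 * Register access helpers.  Both expect a local "nsc" softc pointer
 * to be in scope at the point of use.
 */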
#define nd_bsr4(reg) \
	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) \
	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))

CFATTACH_DECL_NEW(nextdma, sizeof(struct nextdma_softc),
    nextdma_match, nextdma_attach, NULL, NULL);

static struct nextdma_channel nextdma_channel[] = {
#if NESP > 0
	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
#endif
#if NXE > 0
	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
#endif
};
static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));

static int attached = 0;

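/*
 * Look up an attached DMA channel by name ("scsi", "enetx" or "enetr")
 * and return its softc, or NULL if no matching channel has attached.
 */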
struct nextdma_softc *
nextdma_findchannel(const char *name)
{
	device_t dev;
	deviter_t di;

	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
	     dev != NULL;
	     dev = deviter_next(&di)) {
		if (strncmp(device_xname(dev), "nextdma", 7) == 0) {
			struct nextdma_softc *nsc = device_private(dev);
			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
				break;
		}
	}
	deviter_release(&di);
	if (dev == NULL)
		return NULL;
	return device_private(dev);
}

int
nextdma_match(device_t parent, cfdata_t match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return (0);

	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;

	return (1);
}

void
nextdma_attach(device_t parent, device_t self, void *aux)
{
	struct nextdma_softc *nsc = device_private(self);
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return;

	nsc->sc_dev = self;
	nsc->sc_chan = &nextdma_channel[attached];

	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		      device_xname(self), nsc->sc_chan->nd_name);
	}

	nextdma_init (nsc);

	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf (": channel %d (%s)\n", attached,
		nsc->sc_chan->nd_name);
	attached++;

	return;
}

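/*
 * Reset the channel hardware and clear the software transfer state.
 */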
void
nextdma_init(struct nextdma_softc *nsc)
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25 MHz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}

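/*
 * Abort any transfer in progress: reset the channel, hand an
 * outstanding continue map back through the completed callback,
 * and run the shutdown callback.
 */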
void
nextdma_reset(struct nextdma_softc *nsc)
{
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}

	splx(s);
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
nextdma_rotate(struct nextdma_softc *nsc)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (ndtrace_addc('r'));
	DPRINTF(("DMA nextdma_rotate()\n"));

	/* Rotate the continue map into the current map */
	stat->nd_map = stat->nd_map_cont;
	stat->nd_idx = stat->nd_idx_cont;

	if ((!stat->nd_map_cont) ||
	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
		if (nsc->sc_conf.nd_continue_cb) {
			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
				(nsc->sc_conf.nd_cb_arg);
			if (stat->nd_map_cont) {
				stat->nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			stat->nd_map_cont = 0;
		}
		stat->nd_idx_cont = 0;
	}

#if defined(DIAGNOSTIC) && 0
	if (stat->nd_map_cont) {
		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at end");
		}
	}
#endif

}

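/*
 * Load the "current" register set (next/limit and their saved copies)
 * from the segment currently selected by nd_map/nd_idx.
 */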
void
nextdma_setup_curr_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (ndtrace_addc('C'));
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	NDTRACEIF (if (stat->nd_map) {
		ndtrace_printf("%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
	});

	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to current regs");
	}
#endif
}

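/*
 * Load the "continue" register set (start/stop and their saved copies)
 * from the segment selected by nd_map_cont/nd_idx_cont.
 */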
void
nextdma_setup_cont_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (ndtrace_addc('c'));
	DPRINTF(("DMA nextdma_setup_cont_regs()\n"));

	if (stat->nd_map_cont) {
		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
		dd_stop = turbo ? 0 : 0xdeadbee0;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	NDTRACEIF (if (stat->nd_map_cont) {
		ndtrace_printf("%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
	});

	nd_bsw4 (DD_START, dd_start);
	nd_bsw4 (DD_STOP, dd_stop);
	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
		nd_bsw4 (DD_STOP - 0x40, dd_start);

#ifdef DIAGNOSTIC
	if ((nd_bsr4 (DD_START) != dd_start)
	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to continue regs");
	}
#endif
}

/****************************************************************/

#if NESP > 0
static int
nextdma_esp_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	int esp_dma_int(void *); /* XXX */

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	return esp_dma_int (nsc->sc_conf.nd_cb_arg);

}
#endif

#if NXE > 0
static int
nextdma_enet_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	unsigned int state;
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;
	int result;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (ndtrace_addc('D'));
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA missing current map in interrupt!");
	}
#endif

	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

#ifdef DIAGNOSTIC
	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];
		nextdma_print(nsc);
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		panic("DMA complete not set in interrupt");
	}
#endif

	DPRINTF(("DMA: finishing xfer\n"));

	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;

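	/*
	 * Summarize the CSR state and our own expectations as a bitmask:
	 * 0x01 = ENABLE still set, 0x02 = SUPDATE still set,
	 * 0x04 = no continue map, so a shutdown was expected,
	 * 0x08 = bus exception.  The switch below uses this value to
	 * pick the register that holds the final transfer limit.
	 */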
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}
	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
#ifdef DIAGNOSTIC
	{
		char sbuf[256];
		printf("DMA: please send this output to port-next68k-maintainer@NetBSD.org:\n");
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: condition 0x%02x not yet documented to occur",result);
	}
#endif
		slimit = olimit;
		break;
	}

	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
			  (state & DMACSR_READ) ? "read" : "write"));
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
	}
#endif

#ifdef DIAGNOSTIC
	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
			printf("DMA: state 0x%s\n",sbuf);
			nextdma_print(nsc);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
		}
	}
#endif

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
	stat->nd_map = 0;
	stat->nd_idx = 0;

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
	}
#endif
	if (state & DMACSR_ENABLE) {
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (ndtrace_addc('g'));
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (ndtrace_addc('G'));
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	return(1);
}
#endif

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(struct nextdma_softc *nsc)
{
	int r;
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();
	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
	splx(s);

	return(r);
}

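/*
 * Start a DMA transfer in the given direction (DMACSR_SETREAD or
 * DMACSR_SETWRITE).  Both the current and continue maps are preloaded
 * from the continue callback before the channel is enabled.
 */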
void
nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (ndtrace_addc('n'));
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

#ifdef DIAGNOSTIC
	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	nextdma_rotate(nsc);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}
#endif

	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
		 DMACSR_RESET | dmadir);
	nd_bsw4 (DD_CSR, 0);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}

/* This routine is used for debugging */
void
nextdma_print(struct nextdma_softc *nsc)
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (stat->nd_map) {
		int i;

		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		       stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		       stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		       stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			       i,stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
			       i,stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		       stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		       stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		       stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    NEXT_I_BIT(nsc->sc_chan->nd_intr));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}

#if defined(ND_DEBUG)
void
nextdma_debug_initstate(struct nextdma_softc *nsc)
{
	switch(nsc->sc_chan->nd_intr) {
	case NEXT_I_ENETR_DMA:
		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
		break;
	case NEXT_I_SCSI_DMA:
		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
		break;
	}
}

void
nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
{
	switch(nsc->sc_chan->nd_intr) {
	case NEXT_I_ENETR_DMA:
		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
		break;
	case NEXT_I_SCSI_DMA:
		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
		break;
	}
}

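/*
 * Dump the recorded CSR state rings, oldest entry first, skipping
 * entries that are still zero.
 */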
void
nextdma_debug_enetr_dumpstate(void)
{
	int i;
	int s;
	s = spldma();
	i = nextdma_debug_enetr_idx;
	do {
		char sbuf[256];
		if (nextdma_debug_enetr_state[i]) {
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_enetr_state[i]);
			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_enetr_idx);
	splx(s);
}

void
nextdma_debug_scsi_dumpstate(void)
{
	int i;
	int s;
	s = spldma();
	i = nextdma_debug_scsi_idx;
	do {
		char sbuf[256];
		if (nextdma_debug_scsi_state[i]) {
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_scsi_state[i]);
			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_scsi_idx);
	splx(s);
}
#endif