Home | History | Annotate | Line # | Download | only in dev
nextdma.c revision 1.48
      1 /*	$NetBSD: nextdma.c,v 1.48 2012/10/27 17:18:05 chs Exp $	*/
      2 /*
      3  * Copyright (c) 1998 Darrin B. Jewell
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.48 2012/10/27 17:18:05 chs Exp $");
     29 
     30 #include <sys/param.h>
     31 #include <sys/systm.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/syslog.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/malloc.h>
     37 #include <sys/ioctl.h>
     38 #include <sys/errno.h>
     39 
     40 #define _M68K_BUS_DMA_PRIVATE
     41 #include <machine/autoconf.h>
     42 #include <machine/cpu.h>
     43 #include <machine/intr.h>
     44 
     45 #include <m68k/cacheops.h>
     46 
     47 #include <next68k/next68k/isr.h>
     48 #include <next68k/next68k/nextrom.h>
     49 
     50 #include <next68k/dev/intiovar.h>
     51 
     52 #include "nextdmareg.h"
     53 #include "nextdmavar.h"
     54 
     55 #include "esp.h"
     56 #include "xe.h"
     57 
     58 #if DEBUG
     59 #define ND_DEBUG
     60 #endif
     61 
     62 extern int turbo;
     63 
     64 #define panic		__asm volatile("trap  #15"); printf
     65 
     66 #define NEXTDMA_DEBUG nextdma_debug
     67 /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
     68 #if defined(ND_DEBUG)
     69 int nextdma_debug = 0;
     70 #define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
     71 int ndtraceshow = 0;
     72 char ndtrace[8192+100];
     73 char *ndtracep = ndtrace;
     74 #define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
     75 #else
     76 #define DPRINTF(x)
     77 #define NDTRACEIF(x)
     78 #endif
     79 #define PRINTF(x) printf x
     80 
     81 #if defined(ND_DEBUG)
     82 int nextdma_debug_enetr_idx = 0;
     83 unsigned int nextdma_debug_enetr_state[100] = { 0 };
     84 int nextdma_debug_scsi_idx = 0;
     85 unsigned int nextdma_debug_scsi_state[100] = { 0 };
     86 
     87 void nextdma_debug_initstate(struct nextdma_softc *);
     88 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
     89 void nextdma_debug_scsi_dumpstate(void);
     90 void nextdma_debug_enetr_dumpstate(void);
     91 #endif
     92 
     93 
     94 int	nextdma_match(device_t, cfdata_t, void *);
     95 void	nextdma_attach(device_t, device_t, void *);
     96 
     97 void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
     98 int nextdma_continue(struct nextdma_softc *);
     99 void nextdma_rotate(struct nextdma_softc *);
    100 
    101 void nextdma_setup_cont_regs(struct nextdma_softc *);
    102 void nextdma_setup_curr_regs(struct nextdma_softc *);
    103 
    104 #if NESP > 0
    105 static int nextdma_esp_intr(void *);
    106 #endif
    107 #if NXE > 0
    108 static int nextdma_enet_intr(void *);
    109 #endif
    110 
    111 #define nd_bsr4(reg) \
    112 	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
    113 #define nd_bsw4(reg,val) \
    114 	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
    115 
    116 CFATTACH_DECL_NEW(nextdma, sizeof(struct nextdma_softc),
    117     nextdma_match, nextdma_attach, NULL, NULL);
    118 
/* Static table of the DMA channels this driver can drive; one
 * nextdma device instance is bound to each entry, in order, as
 * match/attach are called. */
static struct nextdma_channel nextdma_channel[] = {
#if NESP > 0
	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
#endif
#if NXE > 0
	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
#endif
};
static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));

/* number of channels already claimed by match/attach */
static int attached = 0;
    131 
    132 struct nextdma_softc *
    133 nextdma_findchannel(const char *name)
    134 {
    135 	device_t dev;
    136 	deviter_t di;
    137 
    138 	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
    139 	     dev != NULL;
    140 	     dev = deviter_next(&di)) {
    141 		if (strncmp(device_xname(dev), "nextdma", 7) == 0) {
    142 			struct nextdma_softc *nsc = device_private(dev);
    143 			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
    144 				break;
    145 		}
    146 	}
    147 	deviter_release(&di);
    148 	if (dev == NULL)
    149 		return NULL;
    150 	return device_private(dev);
    151 }
    152 
    153 int
    154 nextdma_match(device_t parent, cfdata_t match, void *aux)
    155 {
    156 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    157 
    158 	if (attached >= nnextdma_channels)
    159 		return (0);
    160 
    161 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
    162 
    163 	return (1);
    164 }
    165 
/*
 * Attach: bind this instance to the next unclaimed channel entry,
 * map its register window, reset the channel, and hook its
 * autovectored interrupt.
 */
void
nextdma_attach(device_t parent, device_t self, void *aux)
{
	struct nextdma_softc *nsc = device_private(self);
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	/* paranoia: match() already limits us to the channel table */
	if (attached >= nnextdma_channels)
		return;

	nsc->sc_dev = self;
	nsc->sc_chan = &nextdma_channel[attached];

	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		      device_xname(self), nsc->sc_chan->nd_name);
	}

	/* reset the channel hardware and clear software state */
	nextdma_init (nsc);

	/* hook the channel's interrupt and unmask it */
	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf (": channel %d (%s)\n", attached,
		nsc->sc_chan->nd_name);
	attached++;

	return;
}
    199 
/*
 * Put a DMA channel into a known idle state and clear the driver's
 * bookkeeping for it.  Called from attach and when (re)starting use
 * of the channel.
 */
void
nextdma_init(struct nextdma_softc *nsc)
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	/* forget any current/continue maps from a previous run */
	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	/* pulse RESET/CLRCOMPLETE, then drop the CSR back to idle */
	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25 MHz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell (at) mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* any of those bits still set means the reset didn't take */
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}
    250 
/*
 * Abort any transfer in progress: reset the channel hardware, hand
 * the pending continue map back through the completed callback, and
 * notify the client via the shutdown callback.
 */
void
nextdma_reset(struct nextdma_softc *nsc)
{
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	/* block DMA interrupts while we tear state down */
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			/* the continue map never ran; return it to the client */
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}

	splx(s);
}
    284 
    285 /****************************************************************/
    286 
    287 
    288 /* Call the completed and continue callbacks to try to fill
    289  * in the dma continue buffers.
    290  */
void
nextdma_rotate(struct nextdma_softc *nsc)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'r');
	DPRINTF(("DMA nextdma_rotate()\n"));

	/* Rotate the continue map into the current map */
	stat->nd_map = stat->nd_map_cont;
	stat->nd_idx = stat->nd_idx_cont;

	/* Advance to the next segment of the continue map; when there
	 * is none (or no map at all), ask the client for a fresh map
	 * via the continue callback.  Note the side-effecting ++ in
	 * the condition below. */
	if ((!stat->nd_map_cont) ||
	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
		if (nsc->sc_conf.nd_continue_cb) {
			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
				(nsc->sc_conf.nd_cb_arg);
			if (stat->nd_map_cont) {
				/* new map starts with nothing transferred yet */
				stat->nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			stat->nd_map_cont = 0;
		}
		stat->nd_idx_cont = 0;
	}

#if defined(DIAGNOSTIC) && 0
	if (stat->nd_map_cont) {
		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at end");
		}
	}
#endif

}
    332 
/*
 * Program the channel's "current" register set (NEXT/LIMIT and their
 * saved shadows) from the current map segment.  With no current map,
 * recognizable garbage (0xdeadbeef) is written on non-turbo machines.
 */
void
nextdma_setup_curr_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'C');
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	NDTRACEIF (if (stat->nd_map) {
		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
		ndtracep += strlen (ndtracep);
	});

	/* non-turbo ethernet transmit takes its start address via
	 * DD_NEXT_INITBUF rather than DD_NEXT */
	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* read back and verify what we just wrote */
	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to current regs");
	}
#endif
}
    388 
/*
 * Program the channel's "continue" register set (START/STOP and
 * their saved shadows) from the continue map segment, so the
 * hardware can chain into it when the current segment completes.
 */
void
nextdma_setup_cont_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'c');
	DPRINTF(("DMA nextdma_setup_regs()\n"));

	if (stat->nd_map_cont) {
		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		/* no continue map; write sentinel values */
		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
		dd_stop = turbo ? 0 : 0xdeadbee0;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	NDTRACEIF (if (stat->nd_map_cont) {
		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		ndtracep += strlen (ndtracep);
	});

	nd_bsw4 (DD_START, dd_start);
	nd_bsw4 (DD_STOP, dd_stop);
	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
	/* turbo ethernet receive wants the start address mirrored at
	 * this extra register offset as well */
	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
		nd_bsw4 (DD_STOP - 0x40, dd_start);

#ifdef DIAGNOSTIC
	/* read back and verify what we just wrote */
	if ((nd_bsr4 (DD_START) != dd_start)
	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to continue regs");
	}
#endif
}
    441 
    442 /****************************************************************/
    443 
#if NESP > 0
/*
 * Autovectored interrupt handler for the SCSI DMA channel.
 * @@@ This is bogus: we can't be certain of arg's type unless the
 * interrupt is for us.  We get away with it for now because DMA
 * interrupts are the only things hooked at this interrupt level.
 */
static int
nextdma_esp_intr(void *arg)
{
	struct nextdma_softc *nsc = arg;
	int esp_dma_int(void *); /* XXX */

	/* not our interrupt -- let other handlers claim it */
	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;

	/* hand the DMA completion off to the ESP driver */
	return esp_dma_int (nsc->sc_conf.nd_cb_arg);
}
#endif
    464 
#if NXE > 0
/*
 * Autovectored interrupt handler shared by the Ethernet transmit and
 * receive DMA channels.  Accounts for the bytes moved on the segment
 * that just completed, rotates the map pipeline, and either restarts
 * the channel or performs shutdown cleanup.
 */
static int
nextdma_enet_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	unsigned int state;
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;
	int result;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (*ndtracep++ = 'D');
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA missing current map in interrupt!");
	}
#endif

	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

#ifdef DIAGNOSTIC
	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];
		nextdma_print(nsc);
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		panic("DMA complete not set in interrupt");
	}
#endif

	DPRINTF(("DMA: finishing xfer\n"));

	/* bounds of the segment the hardware was working on */
	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;

	/* Encode the relevant CSR bits plus pipeline state into a
	 * small code used by the switch below to decide where the
	 * true transfer limit is to be read from. */
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}
	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			/* NOTE(review): magic address, presumably the turbo
			 * DMA saved-limit mirror -- taken on faith from the
			 * original author */
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
#ifdef DIAGNOSTIC
	{
		char sbuf[256];
		printf("DMA: please send this output to port-next68k-maintainer (at) NetBSD.org:\n");
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: condition 0x%02x not yet documented to occur",result);
	}
#endif
	slimit = olimit;
	break;
	}

	/* undo the enetx "secret magic" applied in setup_*_regs */
	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
			  (state & DMACSR_READ) ? "read" : "write"));
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
	}
#endif

#ifdef DIAGNOSTIC
	/* a short segment is only legal on the last segment of a map */
	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
			printf("DMA: state 0x%s\n",sbuf);
			nextdma_print(nsc);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
		}
	}
#endif

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* credit the bytes actually moved on this segment */
	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
	stat->nd_map = 0;
	stat->nd_idx = 0;

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
	}
#endif
	if (state & DMACSR_ENABLE) {
		/* channel still running: rotate in the next map segment
		 * and reprogram the continue registers */
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			/* last segment: don't set SUPDATE, expect shutdown next */
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (*ndtracep++ = 'g');
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (*ndtracep++ = 'G');
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	return(1);
}
#endif
    696 
    697 /*
    698  * Check to see if dma has finished for a channel */
    699 int
    700 nextdma_finished(struct nextdma_softc *nsc)
    701 {
    702 	int r;
    703 	int s;
    704 	struct nextdma_status *stat = &nsc->sc_stat;
    705 
    706 	s = spldma();
    707 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
    708 	splx(s);
    709 
    710 	return(r);
    711 }
    712 
/*
 * Start a new DMA transfer in the given direction (dmadir must be
 * DMACSR_SETREAD or DMACSR_SETWRITE).  The channel must be idle;
 * the current and continue maps are preloaded from the client via
 * the continue callback (through nextdma_rotate).
 */
void
nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'n');
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

#ifdef DIAGNOSTIC
	/* first rotate must have produced a continue map to become current */
	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* second rotate: continue map becomes current, refill continue */
	nextdma_rotate(nsc);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}
#endif

	/* reset the channel with the chosen direction, then idle the CSR */
	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
		 DMACSR_RESET | dmadir);
	nd_bsw4 (DD_CSR, 0);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* go: only request a supdate chain if there is a continue map */
	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
    800 
/* This routine is used for debugging: it dumps the channel's
 * register set, interrupt status/mask, and both DMA maps to the
 * console.  Safe to call from panic paths. */
void
nextdma_print(struct nextdma_softc *nsc)
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	/* dump the current map, then its full segment list */
	if (stat->nd_map) {
		int i;

		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		       stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		       stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		       stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			       i,stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
			       i,stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	/* dump the continue map (skip the segment list if it is the
	 * same map object as the current one) */
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		       stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		       stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		       stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    NEXT_I_BIT(nsc->sc_chan->nd_intr));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}
    909 
    910 #if defined(ND_DEBUG)
    911 void
    912 nextdma_debug_initstate(struct nextdma_softc *nsc)
    913 {
    914 	switch(nsc->sc_chan->nd_intr) {
    915 	case NEXT_I_ENETR_DMA:
    916 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
    917 		break;
    918 	case NEXT_I_SCSI_DMA:
    919 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
    920 		break;
    921 	}
    922 }
    923 
    924 void
    925 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
    926 {
    927 	switch(nsc->sc_chan->nd_intr) {
    928 	case NEXT_I_ENETR_DMA:
    929 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
    930 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    931 		break;
    932 	case NEXT_I_SCSI_DMA:
    933 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
    934 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    935 		break;
    936 	}
    937 }
    938 
    939 void
    940 nextdma_debug_enetr_dumpstate(void)
    941 {
    942 	int i;
    943 	int s;
    944 	s = spldma();
    945 	i = nextdma_debug_enetr_idx;
    946 	do {
    947 		char sbuf[256];
    948 		if (nextdma_debug_enetr_state[i]) {
    949 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_enetr_state[i]);
    950 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    951 		}
    952 		i++;
    953 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    954 	} while (i != nextdma_debug_enetr_idx);
    955 	splx(s);
    956 }
    957 
    958 void
    959 nextdma_debug_scsi_dumpstate(void)
    960 {
    961 	int i;
    962 	int s;
    963 	s = spldma();
    964 	i = nextdma_debug_scsi_idx;
    965 	do {
    966 		char sbuf[256];
    967 		if (nextdma_debug_scsi_state[i]) {
    968 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_scsi_state[i]);
    969 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    970 		}
    971 		i++;
    972 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    973 	} while (i != nextdma_debug_scsi_idx);
    974 	splx(s);
    975 }
    976 #endif
    977 
    978