nextdma.c revision 1.38
      1 /*	$NetBSD: nextdma.c,v 1.38 2005/06/05 11:35:09 he Exp $	*/
      2 /*
      3  * Copyright (c) 1998 Darrin B. Jewell
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. All advertising materials mentioning features or use of this software
     15  *    must display the following acknowledgement:
     16  *      This product includes software developed by Darrin B. Jewell
     17  * 4. The name of the author may not be used to endorse or promote products
     18  *    derived from this software without specific prior written permission
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.38 2005/06/05 11:35:09 he Exp $");
     34 
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/mbuf.h>
     38 #include <sys/syslog.h>
     39 #include <sys/socket.h>
     40 #include <sys/device.h>
     41 #include <sys/malloc.h>
     42 #include <sys/ioctl.h>
     43 #include <sys/errno.h>
     44 
     45 #define _M68K_BUS_DMA_PRIVATE
     46 #include <machine/autoconf.h>
     47 #include <machine/cpu.h>
     48 #include <machine/intr.h>
     49 
     50 #include <m68k/cacheops.h>
     51 
     52 #include <next68k/next68k/isr.h>
     53 #include <next68k/next68k/nextrom.h>
     54 
     55 #include <next68k/dev/intiovar.h>
     56 
     57 #include "nextdmareg.h"
     58 #include "nextdmavar.h"
     59 
     60 #include "esp.h"
     61 #include "xe.h"
     62 
     63 #if DEBUG
     64 #define ND_DEBUG
     65 #endif
     66 
     67 extern int turbo;
     68 
     69 #define panic		__asm __volatile("trap  #15"); printf
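         /*
          * Note: the redefinition above makes a panic() call execute
          * "trap #15" (presumably dropping into the ROM monitor/debugger)
          * and then fall through to printf with the original arguments.
          */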
     70 
     71 #define NEXTDMA_DEBUG nextdma_debug
     72 /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
     73 #if defined(ND_DEBUG)
     74 int nextdma_debug = 0;
     75 #define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
     76 int ndtraceshow = 0;
     77 char ndtrace[8192+100];
     78 char *ndtracep = ndtrace;
     79 #define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
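         /*
          * NDTRACEIF() appends short event markers to ndtrace[] through
          * ndtracep until roughly 8192 bytes have been logged; the leading
          * constant and the commented-out channel test are convenient knobs
          * for restricting tracing while debugging.
          */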
     80 #else
     81 #define DPRINTF(x)
     82 #define NDTRACEIF(x)
     83 #endif
     84 #define PRINTF(x) printf x
     85 
     86 #if defined(ND_DEBUG)
     87 int nextdma_debug_enetr_idx = 0;
     88 unsigned int nextdma_debug_enetr_state[100] = { 0 };
     89 int nextdma_debug_scsi_idx = 0;
     90 unsigned int nextdma_debug_scsi_state[100] = { 0 };
     91 
     92 void nextdma_debug_initstate(struct nextdma_softc *);
     93 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
     94 void nextdma_debug_scsi_dumpstate(void);
     95 void nextdma_debug_enetr_dumpstate(void);
     96 #endif
     97 
     98 
     99 int	nextdma_match(struct device *, struct cfdata *, void *);
    100 void	nextdma_attach(struct device *, struct device *, void *);
    101 
    102 void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
    103 int nextdma_continue(struct nextdma_softc *);
    104 void nextdma_rotate(struct nextdma_softc *);
    105 
    106 void nextdma_setup_cont_regs(struct nextdma_softc *);
    107 void nextdma_setup_curr_regs(struct nextdma_softc *);
    108 
    109 #if NESP > 0
    110 static int nextdma_esp_intr(void *);
    111 #endif
    112 #if NXE > 0
    113 static int nextdma_enet_intr(void *);
    114 #endif
    115 
    116 #define nd_bsr4(reg) \
    117 	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
    118 #define nd_bsw4(reg,val) \
    119 	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
    120 
    121 CFATTACH_DECL(nextdma, sizeof(struct nextdma_softc),
    122     nextdma_match, nextdma_attach, NULL, NULL);
    123 
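         /*
          * One entry per DMA channel this kernel is configured to drive:
          * channel name, CSR base address, register block size, interrupt
          * source, and interrupt handler.
          */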
    124 static struct nextdma_channel nextdma_channel[] = {
    125 #if NESP > 0
    126 	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
    127 #endif
    128 #if NXE > 0
    129 	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
    130 	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
    131 #endif
    132 };
    133 static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));
    134 
    135 static int attached = 0;
    136 
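         /*
          * Look up an attached DMA channel by name ("scsi", "enetx",
          * "enetr") by walking the kernel's alldevs list for nextdma
          * instances.
          */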
    137 struct nextdma_softc *
    138 nextdma_findchannel(const char *name)
    139 {
    140 	struct device *dev = TAILQ_FIRST(&alldevs);
    141 
    142 	while (dev != NULL) {
    143 		if (!strncmp(dev->dv_xname, "nextdma", 7)) {
    144 			struct nextdma_softc *nsc = (struct nextdma_softc *)dev;
    145 			if (!strcmp (nsc->sc_chan->nd_name, name))
    146 				return (nsc);
    147 		}
    148 		dev = TAILQ_NEXT(dev, dv_list);
    149 	}
    150 	return (NULL);
    151 }
    152 
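         /*
          * Match once per entry in nextdma_channel[]; ia_addr is pointed at
          * that channel's CSR base, presumably so the intio attachment code
          * records where the registers live.
          */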
    153 int
    154 nextdma_match(struct device *parent, struct cfdata *match, void *aux)
    155 {
    156 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    157 
    158 	if (attached >= nnextdma_channels)
    159 		return (0);
    160 
    161 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
    162 
    163 	return (1);
    164 }
    165 
    166 void
    167 nextdma_attach(struct device *parent, struct device *self, void *aux)
    168 {
    169 	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
    170 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    171 
    172 	if (attached >= nnextdma_channels)
    173 		return;
    174 
    175 	nsc->sc_chan = &nextdma_channel[attached];
    176 
    177 	nsc->sc_dmat = ia->ia_dmat;
    178 	nsc->sc_bst = ia->ia_bst;
    179 
    180 	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
    181 			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
    182 		panic("%s: can't map DMA registers for channel %s",
    183 		      nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
    184 	}
    185 
    186 	nextdma_init (nsc);
    187 
    188 	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
    189 			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
    190 	INTR_ENABLE(nsc->sc_chan->nd_intr);
    191 
    192 	printf (": channel %d (%s)\n", attached,
    193 		nsc->sc_chan->nd_name);
    194 	attached++;
    195 
    196 	return;
    197 }
    198 
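         /*
          * Reset the channel hardware, clear the software map state, and
          * reload the current and continue register sets.  The DIAGNOSTIC
          * block below verifies that the reset actually cleared the CSR.
          */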
    199 void
    200 nextdma_init(struct nextdma_softc *nsc)
    201 {
    202 #ifdef ND_DEBUG
    203 	if (NEXTDMA_DEBUG) {
    204 		char sbuf[256];
    205 
    206 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    207 				 sbuf, sizeof(sbuf));
    208 		printf("DMA init ipl (%ld) intr(0x%s)\n",
    209 			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    210 	}
    211 #endif
    212 
    213 	nsc->sc_stat.nd_map = NULL;
    214 	nsc->sc_stat.nd_idx = 0;
    215 	nsc->sc_stat.nd_map_cont = NULL;
    216 	nsc->sc_stat.nd_idx_cont = 0;
    217 	nsc->sc_stat.nd_exception = 0;
    218 
    219 	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
    220 	nd_bsw4 (DD_CSR, 0);
    221 
    222 #if 01
    223 	nextdma_setup_curr_regs(nsc);
    224 	nextdma_setup_cont_regs(nsc);
    225 #endif
    226 
    227 #if defined(DIAGNOSTIC)
    228 	{
    229 		u_long state;
    230 		state = nd_bsr4 (DD_CSR);
    231 
    232 #if 1
     233 		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
     234 		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
     235 		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
    236 		 */
    237 		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
    238 #else
    239 		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
    240 			  DMACSR_SUPDATE | DMACSR_ENABLE);
    241 #endif
    242 		if (state) {
    243 			nextdma_print(nsc);
    244 			panic("DMA did not reset");
    245 		}
    246 	}
    247 #endif
    248 }
    249 
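         /*
          * Abort any transfer in progress: reset the channel, hand a pending
          * continue map back through the completed callback, and notify the
          * client through the shutdown callback.
          */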
    250 void
    251 nextdma_reset(struct nextdma_softc *nsc)
    252 {
    253 	int s;
    254 	struct nextdma_status *stat = &nsc->sc_stat;
    255 
    256 	s = spldma();
    257 
    258 	DPRINTF(("DMA reset\n"));
    259 
    260 #if (defined(ND_DEBUG))
    261 	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
    262 #endif
    263 
    264 	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    265 	if ((stat->nd_map) || (stat->nd_map_cont)) {
    266 		if (stat->nd_map_cont) {
    267 			DPRINTF(("DMA: resetting with non null continue map\n"));
    268 			if (nsc->sc_conf.nd_completed_cb)
    269 				(*nsc->sc_conf.nd_completed_cb)
    270 					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    271 
    272 			stat->nd_map_cont = 0;
    273 			stat->nd_idx_cont = 0;
    274 		}
    275 		if (nsc->sc_conf.nd_shutdown_cb)
    276 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    277 		stat->nd_map = 0;
    278 		stat->nd_idx = 0;
    279 	}
    280 
    281 	splx(s);
    282 }
    283 
    284 /****************************************************************/
    285 
    286 
     287 /* Rotate the continue map into the current slot and call the continue
     288  * callback to try to refill the DMA continue buffer.
    289  */
    290 void
    291 nextdma_rotate(struct nextdma_softc *nsc)
    292 {
    293 	struct nextdma_status *stat = &nsc->sc_stat;
    294 
    295 	NDTRACEIF (*ndtracep++ = 'r');
    296 	DPRINTF(("DMA nextdma_rotate()\n"));
    297 
    298 	/* Rotate the continue map into the current map */
    299 	stat->nd_map = stat->nd_map_cont;
    300 	stat->nd_idx = stat->nd_idx_cont;
    301 
    302 	if ((!stat->nd_map_cont) ||
    303 	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
    304 		if (nsc->sc_conf.nd_continue_cb) {
    305 			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
    306 				(nsc->sc_conf.nd_cb_arg);
    307 			if (stat->nd_map_cont) {
    308 				stat->nd_map_cont->dm_xfer_len = 0;
    309 			}
    310 		} else {
    311 			stat->nd_map_cont = 0;
    312 		}
    313 		stat->nd_idx_cont = 0;
    314 	}
    315 
    316 #if defined(DIAGNOSTIC) && 0
    317 	if (stat->nd_map_cont) {
    318 		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
    319 			nextdma_print(nsc);
    320 			panic("DMA request unaligned at start");
    321 		}
    322 		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    323 				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
    324 			nextdma_print(nsc);
    325 			panic("DMA request unaligned at end");
    326 		}
    327 	}
    328 #endif
    329 
    330 }
    331 
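         /*
          * Load the "current" register set (next/limit and their saved
          * copies) from the active segment of the current map.  With no map,
          * non-turbo hardware gets recognizable poison values instead.
          * Non-turbo Ethernet transmit additionally needs the high bit set
          * and a 15-byte pad on the limit (the "secret magic" below).
          */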
    332 void
    333 nextdma_setup_curr_regs(struct nextdma_softc *nsc)
    334 {
    335 	bus_addr_t dd_next;
    336 	bus_addr_t dd_limit;
    337 	bus_addr_t dd_saved_next;
    338 	bus_addr_t dd_saved_limit;
    339 	struct nextdma_status *stat = &nsc->sc_stat;
    340 
    341 	NDTRACEIF (*ndtracep++ = 'C');
    342 	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));
    343 
    344 	if (stat->nd_map) {
    345 		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    346 		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
    347 			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    348 
    349 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    350 			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
    351 			dd_limit += 15;
    352 		}
    353 	} else {
    354 		dd_next = turbo ? 0 : 0xdeadbeef;
    355 		dd_limit = turbo ? 0 : 0xdeadbeef;
    356 	}
    357 
    358 	dd_saved_next = dd_next;
    359 	dd_saved_limit = dd_limit;
    360 
    361 	NDTRACEIF (if (stat->nd_map) {
    362 		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    363 		ndtracep += strlen (ndtracep);
    364 	});
    365 
    366 	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
    367 		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
    368 	} else {
    369 		nd_bsw4 (DD_NEXT, dd_next);
    370 	}
    371 	nd_bsw4 (DD_LIMIT, dd_limit);
    372 	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
    373 	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);
    374 
    375 #ifdef DIAGNOSTIC
    376 	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
    377 	    || (nd_bsr4 (DD_NEXT) != dd_next)
    378 	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
    379 	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
    380 	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
    381 		) {
    382 		nextdma_print(nsc);
    383 		panic("DMA failure writing to current regs");
    384 	}
    385 #endif
    386 }
    387 
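         /*
          * Load the "continue" register set (start/stop and their saved
          * copies) from the current segment of the continue map, mirroring
          * nextdma_setup_curr_regs() above.
          */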
    388 void
    389 nextdma_setup_cont_regs(struct nextdma_softc *nsc)
    390 {
    391 	bus_addr_t dd_start;
    392 	bus_addr_t dd_stop;
    393 	bus_addr_t dd_saved_start;
    394 	bus_addr_t dd_saved_stop;
    395 	struct nextdma_status *stat = &nsc->sc_stat;
    396 
    397 	NDTRACEIF (*ndtracep++ = 'c');
     398 	DPRINTF(("DMA nextdma_setup_cont_regs()\n"));
    399 
    400 	if (stat->nd_map_cont) {
    401 		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
    402 		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    403 			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    404 
    405 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    406 			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
    407 			dd_stop += 15;
    408 		}
    409 	} else {
    410 		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
    411 		dd_stop = turbo ? 0 : 0xdeadbee0;
    412 	}
    413 
    414 	dd_saved_start = dd_start;
    415 	dd_saved_stop  = dd_stop;
    416 
    417 	NDTRACEIF (if (stat->nd_map_cont) {
    418 		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    419 		ndtracep += strlen (ndtracep);
    420 	});
    421 
    422 	nd_bsw4 (DD_START, dd_start);
    423 	nd_bsw4 (DD_STOP, dd_stop);
    424 	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
    425 	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
    426 	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
    427 		nd_bsw4 (DD_STOP - 0x40, dd_start);
    428 
    429 #ifdef DIAGNOSTIC
    430 	if ((nd_bsr4 (DD_START) != dd_start)
    431 	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
    432 	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
    433 	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
    434 		) {
    435 		nextdma_print(nsc);
    436 		panic("DMA failure writing to continue regs");
    437 	}
    438 #endif
    439 }
    440 
    441 /****************************************************************/
    442 
    443 #if NESP > 0
    444 static int
    445 nextdma_esp_intr(void *arg)
    446 {
     447 	/* @@@ This is bogus; we can't be certain of arg's type
    448 	 * unless the interrupt is for us.  For now we successfully
    449 	 * cheat because DMA interrupts are the only things invoked
    450 	 * at this interrupt level.
    451 	 */
    452 	struct nextdma_softc *nsc = arg;
    453 	int esp_dma_int(void *); /* XXX */
    454 
    455 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    456 		return 0;
    457 	/* Handle dma interrupts */
    458 
    459 	return esp_dma_int (nsc->sc_conf.nd_cb_arg);
    460 
    461 }
    462 #endif
    463 
    464 #if NXE > 0
    465 static int
    466 nextdma_enet_intr(void *arg)
    467 {
     468 	/* @@@ This is bogus; we can't be certain of arg's type
    469 	 * unless the interrupt is for us.  For now we successfully
    470 	 * cheat because DMA interrupts are the only things invoked
    471 	 * at this interrupt level.
    472 	 */
    473 	struct nextdma_softc *nsc = arg;
    474 	unsigned int state;
    475 	bus_addr_t onext;
    476 	bus_addr_t olimit;
    477 	bus_addr_t slimit;
    478 	int result;
    479 	struct nextdma_status *stat = &nsc->sc_stat;
    480 
    481 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    482 		return 0;
    483 	/* Handle dma interrupts */
    484 
    485 	NDTRACEIF (*ndtracep++ = 'D');
    486 #ifdef ND_DEBUG
    487 	if (NEXTDMA_DEBUG) {
    488 		char sbuf[256];
    489 
    490 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    491 				 sbuf, sizeof(sbuf));
    492 		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
    493 		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    494 	}
    495 #endif
    496 
    497 #ifdef DIAGNOSTIC
    498 	if (!stat->nd_map) {
    499 		nextdma_print(nsc);
    500 		panic("DMA missing current map in interrupt!");
    501 	}
    502 #endif
    503 
    504 	state = nd_bsr4 (DD_CSR);
    505 
    506 #if defined(ND_DEBUG)
    507 	nextdma_debug_savestate(nsc, state);
    508 #endif
    509 
    510 #ifdef DIAGNOSTIC
    511 	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
    512 		char sbuf[256];
    513 		nextdma_print(nsc);
    514 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    515 		printf("DMA: state 0x%s\n",sbuf);
    516 		panic("DMA complete not set in interrupt");
    517 	}
    518 #endif
    519 
    520 	DPRINTF(("DMA: finishing xfer\n"));
    521 
    522 	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    523 	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;
    524 
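         	/*
         	 * Encode the channel state into a small bitmask: 0x01 = ENABLE
         	 * still set, 0x02 = SUPDATE set, 0x04 = no continue map (a
         	 * shutdown was expected), 0x08 = bus exception.  The switch
         	 * below uses this to pick the register that holds the address
         	 * at which the transfer actually stopped.
         	 */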
    525 	result = 0;
    526 	if (state & DMACSR_ENABLE) {
    527 		/* enable bit was set */
    528 		result |= 0x01;
    529 	}
    530 	if (state & DMACSR_SUPDATE) {
    531 		/* supdate bit was set */
    532 		result |= 0x02;
    533 	}
    534 	if (stat->nd_map_cont == NULL) {
    535 		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    536 		/* Expecting a shutdown, didn't SETSUPDATE last turn */
    537 		result |= 0x04;
    538 	}
    539 	if (state & DMACSR_BUSEXC) {
    540 		/* bus exception bit was set */
    541 		result |= 0x08;
    542 	}
    543 	switch (result) {
    544 	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
    545 	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
    546 		if (turbo) {
    547 			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
    548 			slimit = *limit;
    549 		} else {
    550 			slimit = nd_bsr4 (DD_SAVED_LIMIT);
    551 		}
    552 		break;
    553 	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
    554 	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
    555 		if (turbo) {
    556 			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
    557 			slimit = *limit;
    558 		} else {
    559 			slimit = nd_bsr4 (DD_SAVED_LIMIT);
    560 		}
    561 		break;
    562 	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
    563 	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
    564 		slimit = nd_bsr4 (DD_NEXT);
    565 		break;
    566 	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
    567 	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
    568 		slimit = nd_bsr4 (DD_LIMIT);
    569 		break;
    570 	default:
    571 #ifdef DIAGNOSTIC
    572 	{
    573 		char sbuf[256];
     574 		printf("DMA: please send this output to port-next68k-maintainer@NetBSD.org:\n");
    575 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    576 		printf("DMA: state 0x%s\n",sbuf);
    577 		nextdma_print(nsc);
    578 		panic("DMA: condition 0x%02x not yet documented to occur",result);
    579 	}
    580 #endif
    581 	slimit = olimit;
    582 	break;
    583 	}
    584 
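         	/*
         	 * Undo the Ethernet-transmit limit magic applied in
         	 * nextdma_setup_curr_regs().
         	 */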
    585 	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    586 		slimit &= ~0x80000000;
    587 		slimit -= 15;
    588 	}
    589 
    590 #ifdef DIAGNOSTIC
    591 	if ((state & DMACSR_READ))
    592 		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
    593 			  (state & DMACSR_READ) ? "read" : "write"));
    594 	if ((slimit < onext) || (slimit > olimit)) {
    595 		char sbuf[256];
    596 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    597 		printf("DMA: state 0x%s\n",sbuf);
    598 		nextdma_print(nsc);
    599 		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
    600 	}
    601 #endif
    602 
    603 #ifdef DIAGNOSTIC
    604 	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
    605 		if (slimit != olimit) {
    606 			char sbuf[256];
    607 			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    608 			printf("DMA: state 0x%s\n",sbuf);
    609 			nextdma_print(nsc);
    610 			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
    611 		}
    612 	}
    613 #endif
    614 
    615 #if (defined(ND_DEBUG))
    616 	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
    617 #endif
    618 
    619 	stat->nd_map->dm_xfer_len += slimit-onext;
    620 
     621 	/* If we've reached the end of the current map, report via the
     622 	 * completed callback that we've finished that map.
    623 	 */
    624 	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
    625 		if (nsc->sc_conf.nd_completed_cb)
    626 			(*nsc->sc_conf.nd_completed_cb)
    627 				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
    628 	} else {
    629 		KASSERT(stat->nd_map == stat->nd_map_cont);
    630 		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
    631 	}
    632 	stat->nd_map = 0;
    633 	stat->nd_idx = 0;
    634 
    635 #if (defined(ND_DEBUG))
    636 	if (NEXTDMA_DEBUG) {
    637 		char sbuf[256];
    638 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    639 		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
    640 	}
    641 #endif
    642 	if (state & DMACSR_ENABLE) {
    643 		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
    644 
    645 		nextdma_rotate(nsc);
    646 		nextdma_setup_cont_regs(nsc);
    647 
    648 		if (state & DMACSR_READ) {
    649 			dmadir = DMACSR_SETREAD;
    650 		} else {
    651 			dmadir = DMACSR_SETWRITE;
    652 		}
    653 
    654 		if (stat->nd_map_cont == NULL) {
    655 			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    656 			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
    657 			NDTRACEIF (*ndtracep++ = 'g');
    658 		} else {
    659 			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
    660 			NDTRACEIF (*ndtracep++ = 'G');
    661 		}
    662 	} else {
    663 		DPRINTF(("DMA: a shutdown occurred\n"));
    664 		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    665 
     666 		/* Clean up any remaining incomplete transfers */
     667 		/* clean up the continue map */
    668 		if (stat->nd_map_cont) {
    669 			DPRINTF(("DMA: shutting down with non null continue map\n"));
    670 			if (nsc->sc_conf.nd_completed_cb)
    671 				(*nsc->sc_conf.nd_completed_cb)
    672 					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    673 
    674 			stat->nd_map_cont = 0;
    675 			stat->nd_idx_cont = 0;
    676 		}
    677 		if (nsc->sc_conf.nd_shutdown_cb)
    678 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    679 	}
    680 
    681 #ifdef ND_DEBUG
    682 	if (NEXTDMA_DEBUG) {
    683 		char sbuf[256];
    684 
    685 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    686 				 sbuf, sizeof(sbuf));
    687 		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
    688 		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    689 	}
    690 #endif
    691 
    692 	return(1);
    693 }
    694 #endif
    695 
    696 /*
     697  * Check to see if DMA has finished for a channel. */
    698 int
    699 nextdma_finished(struct nextdma_softc *nsc)
    700 {
    701 	int r;
    702 	int s;
    703 	struct nextdma_status *stat = &nsc->sc_stat;
    704 
    705 	s = spldma();
    706 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
    707 	splx(s);
    708 
    709 	return(r);
    710 }
    711 
    712 void
    713 nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
    714 {
    715 	struct nextdma_status *stat = &nsc->sc_stat;
    716 
    717 	NDTRACEIF (*ndtracep++ = 'n');
    718 #ifdef DIAGNOSTIC
    719 	if (!nextdma_finished(nsc)) {
    720 		char sbuf[256];
    721 
    722 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    723 				 sbuf, sizeof(sbuf));
    724 		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
    725 	}
    726 #endif
    727 
    728 #ifdef ND_DEBUG
    729 	if (NEXTDMA_DEBUG) {
    730 		char sbuf[256];
    731 
    732 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    733 				 sbuf, sizeof(sbuf));
    734 		printf("DMA start (%ld) intr(0x%s)\n",
    735 		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    736 	}
    737 #endif
    738 
    739 #ifdef DIAGNOSTIC
    740 	if (stat->nd_map) {
    741 		nextdma_print(nsc);
    742 		panic("DMA: nextdma_start() with non null map");
    743 	}
    744 	if (stat->nd_map_cont) {
    745 		nextdma_print(nsc);
    746 		panic("DMA: nextdma_start() with non null continue map");
    747 	}
    748 #endif
    749 
    750 #ifdef DIAGNOSTIC
    751 	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
    752 		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
    753 	}
    754 #endif
    755 
    756 #if defined(ND_DEBUG)
    757 	nextdma_debug_initstate(nsc);
    758 #endif
    759 
    760 	/* preload both the current and the continue maps */
    761 	nextdma_rotate(nsc);
    762 
    763 #ifdef DIAGNOSTIC
    764 	if (!stat->nd_map_cont) {
    765 		panic("No map available in nextdma_start()");
    766 	}
    767 #endif
    768 
    769 	nextdma_rotate(nsc);
    770 
    771 #ifdef ND_DEBUG
    772 	if (NEXTDMA_DEBUG) {
    773 		char sbuf[256];
    774 
    775 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    776 				 sbuf, sizeof(sbuf));
    777 		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
    778 		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
    779 	}
    780 #endif
    781 
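         	/*
         	 * Reset the channel and latch the transfer direction;
         	 * DMACSR_INITBUF/INITBUFTURBO presumably (re)initializes the
         	 * channel's internal buffering before the current and continue
         	 * registers are loaded below.
         	 */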
    782 	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
    783 		 DMACSR_RESET | dmadir);
    784 	nd_bsw4 (DD_CSR, 0);
    785 
    786 	nextdma_setup_curr_regs(nsc);
    787 	nextdma_setup_cont_regs(nsc);
    788 
    789 #if (defined(ND_DEBUG))
    790 	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
    791 #endif
    792 
    793 	if (stat->nd_map_cont == NULL) {
    794 		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
    795 	} else {
    796 		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
    797 	}
    798 }
    799 
    800 /* This routine is used for debugging */
    801 void
    802 nextdma_print(struct nextdma_softc *nsc)
    803 {
    804 	u_long dd_csr;
    805 	u_long dd_next;
    806 	u_long dd_next_initbuf;
    807 	u_long dd_limit;
    808 	u_long dd_start;
    809 	u_long dd_stop;
    810 	u_long dd_saved_next;
    811 	u_long dd_saved_limit;
    812 	u_long dd_saved_start;
    813 	u_long dd_saved_stop;
    814 	char sbuf[256];
    815 	struct nextdma_status *stat = &nsc->sc_stat;
    816 
    817 	/* Read all of the registers before we print anything out,
    818 	 * in case something changes
    819 	 */
    820 	dd_csr          = nd_bsr4 (DD_CSR);
    821 	dd_next         = nd_bsr4 (DD_NEXT);
    822 	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
    823 	dd_limit        = nd_bsr4 (DD_LIMIT);
    824 	dd_start        = nd_bsr4 (DD_START);
    825 	dd_stop         = nd_bsr4 (DD_STOP);
    826 	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
    827 	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
    828 	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
    829 	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);
    830 
    831 	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
    832 			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
    833 	printf("NDMAP: *intrstat = 0x%s\n", sbuf);
    834 
    835 	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
    836 			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
    837 	printf("NDMAP: *intrmask = 0x%s\n", sbuf);
    838 
    839 	/* NDMAP is Next DMA Print (really!) */
    840 
    841 	if (stat->nd_map) {
    842 		int i;
    843 
    844 		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
    845 		       stat->nd_map->dm_mapsize);
    846 		printf("NDMAP: nd_map->dm_nsegs = %d\n",
    847 		       stat->nd_map->dm_nsegs);
    848 		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
    849 		       stat->nd_map->dm_xfer_len);
    850 		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    851 		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
    852 		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
    853 		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    854 
     855 		printf("NDMAP: Entire map:\n");
    856 		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
    857 			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    858 			       i,stat->nd_map->dm_segs[i].ds_addr);
    859 			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
    860 			       i,stat->nd_map->dm_segs[i].ds_len);
    861 		}
    862 	} else {
    863 		printf("NDMAP: nd_map = NULL\n");
    864 	}
    865 	if (stat->nd_map_cont) {
    866 		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
    867 		       stat->nd_map_cont->dm_mapsize);
    868 		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
    869 		       stat->nd_map_cont->dm_nsegs);
    870 		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
    871 		       stat->nd_map_cont->dm_xfer_len);
    872 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    873 		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
    874 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    875 		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    876 		if (stat->nd_map_cont != stat->nd_map) {
    877 			int i;
     878 			printf("NDMAP: Entire map:\n");
    879 			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
    880 				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    881 				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
    882 				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    883 				       i,stat->nd_map_cont->dm_segs[i].ds_len);
    884 			}
    885 		}
    886 	} else {
    887 		printf("NDMAP: nd_map_cont = NULL\n");
    888 	}
    889 
    890 	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
    891 	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
    892 
    893 	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
    894 	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
    895 	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
    896 	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
    897 	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
    898 	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
    899 	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
    900 	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
    901 	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);
    902 
    903 	bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    904 			 sbuf, sizeof(sbuf));
    905 	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
    906 			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    907 }
    908 
    909 #if defined(ND_DEBUG)
    910 void
    911 nextdma_debug_initstate(struct nextdma_softc *nsc)
    912 {
    913 	switch(nsc->sc_chan->nd_intr) {
    914 	case NEXT_I_ENETR_DMA:
    915 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
    916 		break;
    917 	case NEXT_I_SCSI_DMA:
    918 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
    919 		break;
    920 	}
    921 }
    922 
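         /*
          * Record each CSR state seen in the interrupt path in a small
          * per-channel ring buffer so it can be dumped after the fact.
          */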
    923 void
    924 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
    925 {
    926 	switch(nsc->sc_chan->nd_intr) {
    927 	case NEXT_I_ENETR_DMA:
    928 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
    929 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    930 		break;
    931 	case NEXT_I_SCSI_DMA:
    932 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
    933 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    934 		break;
    935 	}
    936 }
    937 
    938 void
    939 nextdma_debug_enetr_dumpstate(void)
    940 {
    941 	int i;
    942 	int s;
    943 	s = spldma();
    944 	i = nextdma_debug_enetr_idx;
    945 	do {
    946 		char sbuf[256];
    947 		if (nextdma_debug_enetr_state[i]) {
    948 			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
    949 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    950 		}
    951 		i++;
    952 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    953 	} while (i != nextdma_debug_enetr_idx);
    954 	splx(s);
    955 }
    956 
    957 void
    958 nextdma_debug_scsi_dumpstate(void)
    959 {
    960 	int i;
    961 	int s;
    962 	s = spldma();
    963 	i = nextdma_debug_scsi_idx;
    964 	do {
    965 		char sbuf[256];
    966 		if (nextdma_debug_scsi_state[i]) {
    967 			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
    968 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    969 		}
    970 		i++;
    971 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    972 	} while (i != nextdma_debug_scsi_idx);
    973 	splx(s);
    974 }
    975 #endif
    976 
    977