      1 /*	$NetBSD: nextdma.c,v 1.29.8.3 2002/09/17 21:16:29 nathanw Exp $	*/
      2 /*
      3  * Copyright (c) 1998 Darrin B. Jewell
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. All advertising materials mentioning features or use of this software
     15  *    must display the following acknowledgement:
     16  *      This product includes software developed by Darrin B. Jewell
     17  * 4. The name of the author may not be used to endorse or promote products
     18  *    derived from this software without specific prior written permission
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/param.h>
     33 #include <sys/systm.h>
     34 #include <sys/mbuf.h>
     35 #include <sys/syslog.h>
     36 #include <sys/socket.h>
     37 #include <sys/device.h>
     38 #include <sys/malloc.h>
     39 #include <sys/ioctl.h>
     40 #include <sys/errno.h>
     41 
     42 #define _M68K_BUS_DMA_PRIVATE
     43 #include <machine/autoconf.h>
     44 #include <machine/cpu.h>
     45 #include <machine/intr.h>
     46 
     47 #include <m68k/cacheops.h>
     48 
     49 #include <next68k/next68k/isr.h>
     50 #include <next68k/next68k/nextrom.h>
     51 
     52 #include <next68k/dev/intiovar.h>
     53 
     54 #include "nextdmareg.h"
     55 #include "nextdmavar.h"
     56 
     57 #include "esp.h"
     58 #include "xe.h"
     59 
     60 #if DEBUG
     61 #define ND_DEBUG
     62 #endif
     63 
     64 extern int turbo;
     65 
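/*
 * XXX: debugging aid: "panic" is overridden so that it first executes a
 * trap #15 (the m68k kernel breakpoint trap, dropping into the debugger
 * with machine state intact) and then falls through to printf with the
 * original arguments.
 */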
     66 #define panic		__asm __volatile("trap  #15"); printf
     67 
     68 #define NEXTDMA_DEBUG nextdma_debug
     69 /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
     70 #if defined(ND_DEBUG)
     71 int nextdma_debug = 0;
     72 #define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
     73 int ndtraceshow = 0;
     74 char ndtrace[8192+100];
     75 char *ndtracep = ndtrace;
     76 #define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
     77 #else
     78 #define DPRINTF(x)
     79 #define NDTRACEIF(x)
     80 #endif
     81 #define PRINTF(x) printf x
     82 
     83 #if defined(ND_DEBUG)
     84 int nextdma_debug_enetr_idx = 0;
     85 unsigned int nextdma_debug_enetr_state[100] = { 0 };
     86 int nextdma_debug_scsi_idx = 0;
     87 unsigned int nextdma_debug_scsi_state[100] = { 0 };
     88 
     89 void nextdma_debug_initstate(struct nextdma_softc *);
     90 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
     91 void nextdma_debug_scsi_dumpstate(void);
     92 void nextdma_debug_enetr_dumpstate(void);
     93 #endif
     94 
     95 
     96 int	nextdma_match		__P((struct device *, struct cfdata *, void *));
     97 void	nextdma_attach		__P((struct device *, struct device *, void *));
     98 
     99 void nextdmamap_sync		__P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    100 				     bus_size_t, int));
    101 int nextdma_continue		__P((struct nextdma_softc *));
    102 void nextdma_rotate		__P((struct nextdma_softc *));
    103 
    104 void nextdma_setup_cont_regs	__P((struct nextdma_softc *));
    105 void nextdma_setup_curr_regs	__P((struct nextdma_softc *));
    106 
    107 #if NESP > 0
    108 static int nextdma_esp_intr	__P((void *));
    109 #endif
    110 #if NXE > 0
    111 static int nextdma_enet_intr	__P((void *));
    112 #endif
    113 
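/* Shorthand for 32-bit reads and writes of this channel's DMA registers. */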
    114 #define nd_bsr4(reg) bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
    115 #define nd_bsw4(reg,val) bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
    116 
    117 struct cfattach nextdma_ca = {
    118 	sizeof(struct nextdma_softc), nextdma_match, nextdma_attach
    119 };
    120 
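/*
 * Table of the DMA channels handled by this driver; one nextdma device
 * attaches per entry, in this order.
 */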
    121 static struct nextdma_channel nextdma_channel[] = {
    122 #if NESP > 0
    123 	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
    124 #endif
    125 #if NXE > 0
    126 	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
    127 	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
    128 #endif
    129 };
    130 static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));
    131 
    132 static int attached = 0;
    133 
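/*
 * Look up an attached DMA channel by name ("scsi", "enetx" or "enetr")
 * by walking the kernel device list.  Returns NULL if no such channel
 * has attached.
 */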
    134 struct nextdma_softc *
    135 nextdma_findchannel(name)
    136 	char *name;
    137 {
    138 	struct device *dev = alldevs.tqh_first;
    139 
    140 	while (dev != NULL) {
    141 		if (!strncmp(dev->dv_xname, "nextdma", 7)) {
    142 			struct nextdma_softc *nsc = (struct nextdma_softc *)dev;
    143 			if (!strcmp (nsc->sc_chan->nd_name, name))
    144 				return (nsc);
    145 		}
    146 		dev = dev->dv_list.tqe_next;
    147 	}
    148 	return (NULL);
    149 }
    150 
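/*
 * Autoconfiguration glue: each match claims the next unattached entry of
 * nextdma_channel[] and advertises its register base through the intio
 * attach args.
 */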
    151 int
    152 nextdma_match(parent, match, aux)
    153 	struct device *parent;
    154 	struct cfdata *match;
    155 	void *aux;
    156 {
    157 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    158 
    159 	if (attached >= nnextdma_channels)
    160 		return (0);
    161 
    162 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
    163 
    164 	return (1);
    165 }
    166 
    167 void
    168 nextdma_attach(parent, self, aux)
    169 	struct device *parent, *self;
    170 	void *aux;
    171 {
    172 	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
    173 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    174 
    175 	if (attached >= nnextdma_channels)
    176 		return;
    177 
    178 	nsc->sc_chan = &nextdma_channel[attached];
    179 
    180 	nsc->sc_dmat = ia->ia_dmat;
    181 	nsc->sc_bst = ia->ia_bst;
    182 
    183 	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
    184 			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
    185 		panic("%s: can't map DMA registers for channel %s\n",
    186 		      nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
    187 	}
    188 
    189 	nextdma_init (nsc);
    190 
    191 	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
    192 			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
    193 	INTR_ENABLE(nsc->sc_chan->nd_intr);
    194 
    195 	printf (": channel %d (%s)\n", attached,
    196 		nsc->sc_chan->nd_name);
    197 	attached++;
    198 
    199 	return;
    200 }
    201 
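/*
 * Reset the channel hardware, clear the software map state and
 * (under DIAGNOSTIC) verify that the CSR really comes up idle.
 */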
    202 void
    203 nextdma_init(nsc)
    204 	struct nextdma_softc *nsc;
    205 {
    206 #ifdef ND_DEBUG
    207 	if (NEXTDMA_DEBUG) {
    208 		char sbuf[256];
    209 
    210 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    211 				 sbuf, sizeof(sbuf));
    212 		printf("DMA init ipl (%ld) intr(0x%s)\n",
    213 			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    214 	}
    215 #endif
    216 
    217 	nsc->sc_stat.nd_map = NULL;
    218 	nsc->sc_stat.nd_idx = 0;
    219 	nsc->sc_stat.nd_map_cont = NULL;
    220 	nsc->sc_stat.nd_idx_cont = 0;
    221 	nsc->sc_stat.nd_exception = 0;
    222 
    223 	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
    224 	nd_bsw4 (DD_CSR, 0);
    225 
    226 #if 01
    227 	nextdma_setup_curr_regs(nsc);
    228 	nextdma_setup_cont_regs(nsc);
    229 #endif
    230 
    231 #if defined(DIAGNOSTIC)
    232 	{
    233 		u_long state;
    234 		state = nd_bsr4 (DD_CSR);
    235 
    236 #if 1
     237 		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
     238 		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
     239 		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
     240 		 */
    241 		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
    242 #else
    243 		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
    244 			  DMACSR_SUPDATE | DMACSR_ENABLE);
    245 #endif
    246 		if (state) {
    247 			nextdma_print(nsc);
    248 			panic("DMA did not reset");
    249 		}
    250 	}
    251 #endif
    252 }
    253 
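/*
 * Hard-reset the channel and discard any current/continue maps still in
 * flight, reporting back through the completed and shutdown callbacks.
 */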
    254 void
    255 nextdma_reset(nsc)
    256 	struct nextdma_softc *nsc;
    257 {
    258 	int s;
    259 	struct nextdma_status *stat = &nsc->sc_stat;
    260 
    261 	s = spldma();
    262 
    263 	DPRINTF(("DMA reset\n"));
    264 
    265 #if (defined(ND_DEBUG))
    266 	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
    267 #endif
    268 
    269 	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    270 	if ((stat->nd_map) || (stat->nd_map_cont)) {
    271 		if (stat->nd_map_cont) {
    272 			DPRINTF(("DMA: resetting with non null continue map\n"));
    273 			if (nsc->sc_conf.nd_completed_cb)
    274 				(*nsc->sc_conf.nd_completed_cb)
    275 					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    276 
    277 			stat->nd_map_cont = 0;
    278 			stat->nd_idx_cont = 0;
    279 		}
    280 		if (nsc->sc_conf.nd_shutdown_cb)
    281 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    282 		stat->nd_map = 0;
    283 		stat->nd_idx = 0;
    284 	}
    285 
    286 	splx(s);
    287 }
    288 
    289 /****************************************************************/
    290 
    291 
    292 /* Call the completed and continue callbacks to try to fill
    293  * in the dma continue buffers.
    294  */
    295 void
    296 nextdma_rotate(nsc)
    297 	struct nextdma_softc *nsc;
    298 {
    299 	struct nextdma_status *stat = &nsc->sc_stat;
    300 
    301 	NDTRACEIF (*ndtracep++ = 'r');
    302 	DPRINTF(("DMA nextdma_rotate()\n"));
    303 
    304 	/* Rotate the continue map into the current map */
    305 	stat->nd_map = stat->nd_map_cont;
    306 	stat->nd_idx = stat->nd_idx_cont;
    307 
    308 	if ((!stat->nd_map_cont) ||
    309 	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
    310 		if (nsc->sc_conf.nd_continue_cb) {
    311 			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
    312 				(nsc->sc_conf.nd_cb_arg);
    313 			if (stat->nd_map_cont) {
    314 				stat->nd_map_cont->dm_xfer_len = 0;
    315 			}
    316 		} else {
    317 			stat->nd_map_cont = 0;
    318 		}
    319 		stat->nd_idx_cont = 0;
    320 	}
    321 
    322 #if defined(DIAGNOSTIC) && 0
    323 	if (stat->nd_map_cont) {
    324 		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
    325 			nextdma_print(nsc);
    326 			panic("DMA request unaligned at start\n");
    327 		}
    328 		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    329 				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
    330 			nextdma_print(nsc);
    331 			panic("DMA request unaligned at end\n");
    332 		}
    333 	}
    334 #endif
    335 
    336 }
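
/*
 * For reference, a minimal continue callback looks roughly like the sketch
 * below (the mydrv_* names are hypothetical, not part of this driver): it
 * returns the next loaded bus_dmamap to chain into the channel, or NULL
 * when there is nothing more to transfer.
 *
 *	bus_dmamap_t
 *	mydrv_dma_continue(void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		return sc->sc_next_loaded_map;
 *	}
 */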
    337 
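/*
 * Load the "current" register set (DD_NEXT/DD_LIMIT, plus the saved copies
 * on non-turbo hardware) from the current segment of the current map.
 */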
    338 void
    339 nextdma_setup_curr_regs(nsc)
    340 	struct nextdma_softc *nsc;
    341 {
    342 	bus_addr_t dd_next;
    343 	bus_addr_t dd_limit;
    344 	bus_addr_t dd_saved_next;
    345 	bus_addr_t dd_saved_limit;
    346 	struct nextdma_status *stat = &nsc->sc_stat;
    347 
    348 	NDTRACEIF (*ndtracep++ = 'C');
    349 	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));
    350 
    351 	if (stat->nd_map) {
    352 		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    353 		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
    354 			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    355 
    356 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    357 			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
    358 			dd_limit += 15;
    359 		}
    360 	} else {
    361 		dd_next = turbo ? 0 : 0xdeadbeef;
    362 		dd_limit = turbo ? 0 : 0xdeadbeef;
    363 	}
    364 
    365 	dd_saved_next = dd_next;
    366 	dd_saved_limit = dd_limit;
    367 
    368 	NDTRACEIF (if (stat->nd_map) {
    369 		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    370 		ndtracep += strlen (ndtracep);
    371 	});
    372 
    373 	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
    374 		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
    375 	} else {
    376 		nd_bsw4 (DD_NEXT, dd_next);
    377 	}
    378 	nd_bsw4 (DD_LIMIT, dd_limit);
    379 	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
    380 	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);
    381 
    382 #ifdef DIAGNOSTIC
    383 	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
    384 	    || (nd_bsr4 (DD_NEXT) != dd_next)
    385 	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
    386 	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
    387 	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
    388 		) {
    389 		nextdma_print(nsc);
    390 		panic("DMA failure writing to current regs");
    391 	}
    392 #endif
    393 }
    394 
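/*
 * Load the "continue" register set (DD_START/DD_STOP, plus the saved copies
 * on non-turbo hardware) from the current segment of the continue map.
 */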
    395 void
    396 nextdma_setup_cont_regs(nsc)
    397 	struct nextdma_softc *nsc;
    398 {
    399 	bus_addr_t dd_start;
    400 	bus_addr_t dd_stop;
    401 	bus_addr_t dd_saved_start;
    402 	bus_addr_t dd_saved_stop;
    403 	struct nextdma_status *stat = &nsc->sc_stat;
    404 
    405 	NDTRACEIF (*ndtracep++ = 'c');
     406 	DPRINTF(("DMA nextdma_setup_cont_regs()\n"));
    407 
    408 	if (stat->nd_map_cont) {
    409 		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
    410 		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    411 			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    412 
    413 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    414 			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
    415 			dd_stop += 15;
    416 		}
    417 	} else {
    418 		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
    419 		dd_stop = turbo ? 0 : 0xdeadbee0;
    420 	}
    421 
    422 	dd_saved_start = dd_start;
    423 	dd_saved_stop  = dd_stop;
    424 
    425 	NDTRACEIF (if (stat->nd_map_cont) {
    426 		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    427 		ndtracep += strlen (ndtracep);
    428 	});
    429 
    430 	nd_bsw4 (DD_START, dd_start);
    431 	nd_bsw4 (DD_STOP, dd_stop);
    432 	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
    433 	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
    434 	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
    435 		nd_bsw4 (DD_STOP - 0x40, dd_start);
    436 
    437 #ifdef DIAGNOSTIC
    438 	if ((nd_bsr4 (DD_START) != dd_start)
    439 	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
    440 	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
    441 	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
    442 		) {
    443 		nextdma_print(nsc);
    444 		panic("DMA failure writing to continue regs");
    445 	}
    446 #endif
    447 }
    448 
    449 /****************************************************************/
    450 
    451 #if NESP > 0
    452 static int
    453 nextdma_esp_intr(arg)
    454 	void *arg;
    455 {
    456 	/* @@@ This is bogus, we can't be certain of arg's type
    457 	 * unless the interrupt is for us.  For now we successfully
    458 	 * cheat because DMA interrupts are the only things invoked
    459 	 * at this interrupt level.
    460 	 */
    461 	struct nextdma_softc *nsc = arg;
    462 	int esp_dma_int __P((void *)); /* XXX */
    463 
    464 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    465 		return 0;
    466 	/* Handle dma interrupts */
    467 
    468 	return esp_dma_int (nsc->sc_conf.nd_cb_arg);
    469 
    470 }
    471 #endif
    472 
    473 #if NXE > 0
    474 static int
    475 nextdma_enet_intr(arg)
    476 	void *arg;
    477 {
    478 	/* @@@ This is bogus, we can't be certain of arg's type
    479 	 * unless the interrupt is for us.  For now we successfully
    480 	 * cheat because DMA interrupts are the only things invoked
    481 	 * at this interrupt level.
    482 	 */
    483 	struct nextdma_softc *nsc = arg;
    484 	unsigned int state;
    485 	bus_addr_t onext;
    486 	bus_addr_t olimit;
    487 	bus_addr_t slimit;
    488 	int result;
    489 	struct nextdma_status *stat = &nsc->sc_stat;
    490 
    491 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    492 		return 0;
    493 	/* Handle dma interrupts */
    494 
    495 	NDTRACEIF (*ndtracep++ = 'D');
    496 #ifdef ND_DEBUG
    497 	if (NEXTDMA_DEBUG) {
    498 		char sbuf[256];
    499 
    500 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    501 				 sbuf, sizeof(sbuf));
    502 		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
    503 		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    504 	}
    505 #endif
    506 
    507 #ifdef DIAGNOSTIC
    508 	if (!stat->nd_map) {
    509 		nextdma_print(nsc);
    510 		panic("DMA missing current map in interrupt!\n");
    511 	}
    512 #endif
    513 
    514 	state = nd_bsr4 (DD_CSR);
    515 
    516 #if defined(ND_DEBUG)
    517 	nextdma_debug_savestate(nsc, state);
    518 #endif
    519 
    520 #ifdef DIAGNOSTIC
    521 	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
    522 		char sbuf[256];
    523 		nextdma_print(nsc);
    524 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    525 		printf("DMA: state 0x%s\n",sbuf);
    526 		panic("DMA complete not set in interrupt\n");
    527 	}
    528 #endif
    529 
    530 	DPRINTF(("DMA: finishing xfer\n"));
    531 
    532 	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    533 	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;
    534 
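	/*
	 * Encode the interesting completion conditions into a small bitmask
	 * so the recovery of the saved limit below can switch on the
	 * combination: 0x01 ENABLE set, 0x02 SUPDATE set, 0x04 no continue
	 * map (a shutdown was expected), 0x08 BUSEXC set.
	 */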
    535 	result = 0;
    536 	if (state & DMACSR_ENABLE) {
    537 		/* enable bit was set */
    538 		result |= 0x01;
    539 	}
    540 	if (state & DMACSR_SUPDATE) {
    541 		/* supdate bit was set */
    542 		result |= 0x02;
    543 	}
    544 	if (stat->nd_map_cont == NULL) {
    545 		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    546 		/* Expecting a shutdown, didn't SETSUPDATE last turn */
    547 		result |= 0x04;
    548 	}
    549 	if (state & DMACSR_BUSEXC) {
    550 		/* bus exception bit was set */
    551 		result |= 0x08;
    552 	}
    553 	switch (result) {
    554 	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
    555 	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
    556 		if (turbo) {
    557 			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
    558 			slimit = *limit;
    559 		} else {
    560 			slimit = nd_bsr4 (DD_SAVED_LIMIT);
    561 		}
    562 		break;
    563 	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
    564 	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
    565 		if (turbo) {
    566 			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
    567 			slimit = *limit;
    568 		} else {
    569 			slimit = nd_bsr4 (DD_SAVED_LIMIT);
    570 		}
    571 		break;
    572 	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
    573 	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
    574 		slimit = nd_bsr4 (DD_NEXT);
    575 		break;
    576 	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
    577 	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
    578 		slimit = nd_bsr4 (DD_LIMIT);
    579 		break;
    580 	default:
    581 #ifdef DIAGNOSTIC
    582 	{
    583 		char sbuf[256];
     584 		printf("DMA: please send this output to port-next68k-maintainer@netbsd.org:\n");
    585 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    586 		printf("DMA: state 0x%s\n",sbuf);
    587 		nextdma_print(nsc);
    588 		panic("DMA: condition 0x%02x not yet documented to occur\n",result);
    589 	}
    590 #endif
    591 	slimit = olimit;
    592 	break;
    593 	}
    594 
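	/*
	 * Undo the Ethernet-transmit address mangling applied when the
	 * limit registers were set up in nextdma_setup_curr_regs().
	 */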
    595 	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    596 		slimit &= ~0x80000000;
    597 		slimit -= 15;
    598 	}
    599 
    600 #ifdef DIAGNOSTIC
    601 	if ((state & DMACSR_READ))
    602 		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
    603 			  (state & DMACSR_READ) ? "read" : "write"));
    604 	if ((slimit < onext) || (slimit > olimit)) {
    605 		char sbuf[256];
    606 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    607 		printf("DMA: state 0x%s\n",sbuf);
    608 		nextdma_print(nsc);
    609 		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer\n",slimit);
    610 	}
    611 #endif
    612 
    613 #ifdef DIAGNOSTIC
    614 	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
    615 		if (slimit != olimit) {
    616 			char sbuf[256];
    617 			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    618 			printf("DMA: state 0x%s\n",sbuf);
    619 			nextdma_print(nsc);
    620 			panic("DMA: short limit register (0x%08lx) w/o finishing map.\n",slimit);
    621 		}
    622 	}
    623 #endif
    624 
    625 #if (defined(ND_DEBUG))
    626 	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
    627 #endif
    628 
    629 	stat->nd_map->dm_xfer_len += slimit-onext;
    630 
     631 	/* If we've reached the end of the current map, notify the
     632 	 * client that this map has completed.
     633 	 */
    634 	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
    635 		if (nsc->sc_conf.nd_completed_cb)
    636 			(*nsc->sc_conf.nd_completed_cb)
    637 				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
    638 	} else {
    639 		KASSERT(stat->nd_map == stat->nd_map_cont);
    640 		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
    641 	}
    642 	stat->nd_map = 0;
    643 	stat->nd_idx = 0;
    644 
    645 #if (defined(ND_DEBUG))
    646 	if (NEXTDMA_DEBUG) {
    647 		char sbuf[256];
    648 		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
    649 		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
    650 	}
    651 #endif
    652 	if (state & DMACSR_ENABLE) {
    653 		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
    654 
    655 		nextdma_rotate(nsc);
    656 		nextdma_setup_cont_regs(nsc);
    657 
    658 		if (state & DMACSR_READ) {
    659 			dmadir = DMACSR_SETREAD;
    660 		} else {
    661 			dmadir = DMACSR_SETWRITE;
    662 		}
    663 
    664 		if (stat->nd_map_cont == NULL) {
    665 			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    666 			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
    667 			NDTRACEIF (*ndtracep++ = 'g');
    668 		} else {
    669 			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
    670 			NDTRACEIF (*ndtracep++ = 'G');
    671 		}
    672 	} else {
    673 		DPRINTF(("DMA: a shutdown occurred\n"));
    674 		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    675 
    676 		/* Cleanup more incomplete transfers */
    677 		/* cleanup continue map */
    678 		if (stat->nd_map_cont) {
    679 			DPRINTF(("DMA: shutting down with non null continue map\n"));
    680 			if (nsc->sc_conf.nd_completed_cb)
    681 				(*nsc->sc_conf.nd_completed_cb)
    682 					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    683 
    684 			stat->nd_map_cont = 0;
    685 			stat->nd_idx_cont = 0;
    686 		}
    687 		if (nsc->sc_conf.nd_shutdown_cb)
    688 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    689 	}
    690 
    691 #ifdef ND_DEBUG
    692 	if (NEXTDMA_DEBUG) {
    693 		char sbuf[256];
    694 
    695 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    696 				 sbuf, sizeof(sbuf));
    697 		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
    698 		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    699 	}
    700 #endif
    701 
    702 	return(1);
    703 }
    704 #endif
    705 
     706 /* Check to see if DMA has finished on a channel. */
    708 int
    709 nextdma_finished(nsc)
    710 	struct nextdma_softc *nsc;
    711 {
    712 	int r;
    713 	int s;
    714 	struct nextdma_status *stat = &nsc->sc_stat;
    715 
    716 	s = spldma();
    717 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
    718 	splx(s);
    719 
    720 	return(r);
    721 }
    722 
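/*
 * A rough sketch of how a client driver drives a channel (the mydrv_ and
 * sc names are hypothetical; the real users are the esp and xe drivers):
 *
 *	struct nextdma_softc *nsc = nextdma_findchannel("enetr");
 *
 *	nsc->sc_conf.nd_continue_cb  = mydrv_dma_continue;
 *	nsc->sc_conf.nd_completed_cb = mydrv_dma_completed;
 *	nsc->sc_conf.nd_shutdown_cb  = mydrv_dma_shutdown;
 *	nsc->sc_conf.nd_cb_arg       = sc;
 *	nextdma_init(nsc);
 *	...
 *	if (nextdma_finished(nsc))
 *		nextdma_start(nsc, DMACSR_SETREAD);
 *
 * nextdma_start() then pulls maps from the continue callback, and the
 * interrupt handler keeps the channel chained until the callback runs dry.
 */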
    723 void
    724 nextdma_start(nsc, dmadir)
    725 	struct nextdma_softc *nsc;
    726 	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
    727 {
    728 	struct nextdma_status *stat = &nsc->sc_stat;
    729 
    730 	NDTRACEIF (*ndtracep++ = 'n');
    731 #ifdef DIAGNOSTIC
    732 	if (!nextdma_finished(nsc)) {
    733 		char sbuf[256];
    734 
    735 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    736 				 sbuf, sizeof(sbuf));
    737 		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
    738 	}
    739 #endif
    740 
    741 #ifdef ND_DEBUG
    742 	if (NEXTDMA_DEBUG) {
    743 		char sbuf[256];
    744 
    745 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    746 				 sbuf, sizeof(sbuf));
    747 		printf("DMA start (%ld) intr(0x%s)\n",
    748 		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    749 	}
    750 #endif
    751 
    752 #ifdef DIAGNOSTIC
    753 	if (stat->nd_map) {
    754 		nextdma_print(nsc);
    755 		panic("DMA: nextdma_start() with non null map\n");
    756 	}
    757 	if (stat->nd_map_cont) {
    758 		nextdma_print(nsc);
    759 		panic("DMA: nextdma_start() with non null continue map\n");
    760 	}
    761 #endif
    762 
    763 #ifdef DIAGNOSTIC
    764 	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
    765 		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
    766 	}
    767 #endif
    768 
    769 #if defined(ND_DEBUG)
    770 	nextdma_debug_initstate(nsc);
    771 #endif
    772 
    773 	/* preload both the current and the continue maps */
    774 	nextdma_rotate(nsc);
    775 
    776 #ifdef DIAGNOSTIC
    777 	if (!stat->nd_map_cont) {
    778 		panic("No map available in nextdma_start()");
    779 	}
    780 #endif
    781 
    782 	nextdma_rotate(nsc);
    783 
    784 #ifdef ND_DEBUG
    785 	if (NEXTDMA_DEBUG) {
    786 		char sbuf[256];
    787 
    788 		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    789 				 sbuf, sizeof(sbuf));
    790 		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
    791 		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
    792 	}
    793 #endif
    794 
    795 	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
    796 		 DMACSR_RESET | dmadir);
    797 	nd_bsw4 (DD_CSR, 0);
    798 
    799 	nextdma_setup_curr_regs(nsc);
    800 	nextdma_setup_cont_regs(nsc);
    801 
    802 #if (defined(ND_DEBUG))
    803 	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
    804 #endif
    805 
    806 	if (stat->nd_map_cont == NULL) {
    807 		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
    808 	} else {
    809 		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
    810 	}
    811 }
    812 
    813 /* This routine is used for debugging */
    814 void
    815 nextdma_print(nsc)
    816 	struct nextdma_softc *nsc;
    817 {
    818 	u_long dd_csr;
    819 	u_long dd_next;
    820 	u_long dd_next_initbuf;
    821 	u_long dd_limit;
    822 	u_long dd_start;
    823 	u_long dd_stop;
    824 	u_long dd_saved_next;
    825 	u_long dd_saved_limit;
    826 	u_long dd_saved_start;
    827 	u_long dd_saved_stop;
    828 	char sbuf[256];
    829 	struct nextdma_status *stat = &nsc->sc_stat;
    830 
    831 	/* Read all of the registers before we print anything out,
    832 	 * in case something changes
    833 	 */
    834 	dd_csr          = nd_bsr4 (DD_CSR);
    835 	dd_next         = nd_bsr4 (DD_NEXT);
    836 	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
    837 	dd_limit        = nd_bsr4 (DD_LIMIT);
    838 	dd_start        = nd_bsr4 (DD_START);
    839 	dd_stop         = nd_bsr4 (DD_STOP);
    840 	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
    841 	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
    842 	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
    843 	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);
    844 
    845 	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
    846 			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
    847 	printf("NDMAP: *intrstat = 0x%s\n", sbuf);
    848 
    849 	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
    850 			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
    851 	printf("NDMAP: *intrmask = 0x%s\n", sbuf);
    852 
    853 	/* NDMAP is Next DMA Print (really!) */
    854 
    855 	if (stat->nd_map) {
    856 		int i;
    857 
    858 		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
    859 		       stat->nd_map->dm_mapsize);
    860 		printf("NDMAP: nd_map->dm_nsegs = %d\n",
    861 		       stat->nd_map->dm_nsegs);
    862 		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
    863 		       stat->nd_map->dm_xfer_len);
    864 		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    865 		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
    866 		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
    867 		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    868 
     869 		printf("NDMAP: Entire map:\n");
    870 		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
    871 			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    872 			       i,stat->nd_map->dm_segs[i].ds_addr);
    873 			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
    874 			       i,stat->nd_map->dm_segs[i].ds_len);
    875 		}
    876 	} else {
    877 		printf("NDMAP: nd_map = NULL\n");
    878 	}
    879 	if (stat->nd_map_cont) {
    880 		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
    881 		       stat->nd_map_cont->dm_mapsize);
    882 		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
    883 		       stat->nd_map_cont->dm_nsegs);
    884 		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
    885 		       stat->nd_map_cont->dm_xfer_len);
    886 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    887 		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
    888 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    889 		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    890 		if (stat->nd_map_cont != stat->nd_map) {
    891 			int i;
     892 			printf("NDMAP: Entire map:\n");
    893 			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
    894 				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    895 				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
    896 				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    897 				       i,stat->nd_map_cont->dm_segs[i].ds_len);
    898 			}
    899 		}
    900 	} else {
    901 		printf("NDMAP: nd_map_cont = NULL\n");
    902 	}
    903 
    904 	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
    905 	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
    906 
    907 	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
    908 	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
    909 	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
    910 	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
    911 	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
    912 	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
    913 	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
    914 	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
    915 	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);
    916 
    917 	bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
    918 			 sbuf, sizeof(sbuf));
    919 	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
    920 			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    921 }
    922 
    923 #if defined(ND_DEBUG)
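/*
 * Small ring buffers of recently observed CSR values, one per channel
 * type, kept for post-mortem dumps via the *_dumpstate() routines below.
 */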
    924 void
    925 nextdma_debug_initstate(struct nextdma_softc *nsc)
    926 {
    927 	switch(nsc->sc_chan->nd_intr) {
    928 	case NEXT_I_ENETR_DMA:
    929 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
    930 		break;
    931 	case NEXT_I_SCSI_DMA:
    932 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
    933 		break;
    934 	}
    935 }
    936 
    937 void
    938 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
    939 {
    940 	switch(nsc->sc_chan->nd_intr) {
    941 	case NEXT_I_ENETR_DMA:
    942 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
    943 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    944 		break;
    945 	case NEXT_I_SCSI_DMA:
    946 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
    947 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    948 		break;
    949 	}
    950 }
    951 
    952 void
    953 nextdma_debug_enetr_dumpstate(void)
    954 {
    955 	int i;
    956 	int s;
    957 	s = spldma();
    958 	i = nextdma_debug_enetr_idx;
    959 	do {
    960 		char sbuf[256];
    961 		if (nextdma_debug_enetr_state[i]) {
    962 			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
    963 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    964 		}
    965 		i++;
    966 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
    967 	} while (i != nextdma_debug_enetr_idx);
    968 	splx(s);
    969 }
    970 
    971 void
    972 nextdma_debug_scsi_dumpstate(void)
    973 {
    974 	int i;
    975 	int s;
    976 	s = spldma();
    977 	i = nextdma_debug_scsi_idx;
    978 	do {
    979 		char sbuf[256];
    980 		if (nextdma_debug_scsi_state[i]) {
    981 			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
    982 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
    983 		}
    984 		i++;
    985 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
    986 	} while (i != nextdma_debug_scsi_idx);
    987 	splx(s);
    988 }
    989 #endif
    990 
    991