      1 /*	$NetBSD: nextdma.c,v 1.51 2023/02/03 23:06:42 tsutsui Exp $	*/
      2 /*
      3  * Copyright (c) 1998 Darrin B. Jewell
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.51 2023/02/03 23:06:42 tsutsui Exp $");
     29 
     30 #include <sys/param.h>
     31 #include <sys/systm.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/syslog.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/malloc.h>
     37 #include <sys/ioctl.h>
     38 #include <sys/errno.h>
     39 
     40 #define _M68K_BUS_DMA_PRIVATE
     41 #include <machine/autoconf.h>
     42 #include <machine/cpu.h>
     43 #include <machine/intr.h>
     44 
     45 #include <m68k/cacheops.h>
     46 
     47 #include <next68k/next68k/isr.h>
     48 #include <next68k/next68k/nextrom.h>
     49 
     50 #include <next68k/dev/intiovar.h>
     51 
     52 #include "nextdmareg.h"
     53 #include "nextdmavar.h"
     54 
     55 #include "esp.h"
     56 #include "xe.h"
     57 
     58 #if DEBUG
     59 #define ND_DEBUG
     60 #endif
     61 
     62 extern int turbo;
     63 
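/*
 * Debugging hack: redefine panic() so that it first executes a trap #15
 * (which should drop into the ROM monitor/debugger) and only then prints
 * the panic message.
 */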
     64 #define panic		__asm volatile("trap  #15"); printf
     65 
     66 #define NEXTDMA_DEBUG nextdma_debug
     67 /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
     68 #if defined(ND_DEBUG)
     69 int nextdma_debug = 0;
     70 #define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
     71 int ndtrace_show = 0;
     72 char ndtrace_buf[8192+100];
     73 size_t ndtrace_len = 0;
     74 #define NDTRACEIF(x) if (10) do {x;} while (0)
     75 #else
     76 #define DPRINTF(x)
     77 #define NDTRACEIF(x)
     78 #endif
     79 #define PRINTF(x) printf x
     80 
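/*
 * ndtrace_*: a small in-kernel event trace, compiled in only under
 * ND_DEBUG.  Events are appended to ndtrace_buf as single characters or
 * short formatted strings (ndtrace_printf() assumes the formatted text
 * fits in the space remaining) and can be fetched later via ndtrace_get().
 */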
     81 void
     82 ndtrace_printf(const char *fmt, ...) {
     83 #ifdef ND_DEBUG
     84 	int len;
     85 	va_list ap;
     86 
     87 	va_start(ap, fmt);
     88 	len = vsnprintf(ndtrace_buf + ndtrace_len, sizeof(ndtrace_buf)
     89 	    - ndtrace_len, fmt, ap);
     90 	va_end(ap);
     91 	ndtrace_len += len;
     92 #endif
     93 }
     94 
     95 int
     96 ndtrace_empty(void) {
     97 #ifdef ND_DEBUG
     98 	return ndtrace_len == 0;
     99 #else
    100 	return 1;
    101 #endif
    102 }
    103 
    104 void
    105 ndtrace_reset(void) {
    106 #ifdef ND_DEBUG
    107 	ndtrace_len = 0;
    108 #endif
    109 }
    110 
    111 void
    112 ndtrace_addc(int c) {
    113 #ifdef ND_DEBUG
    114 	if (ndtrace_len < sizeof(ndtrace_buf) - 1) {
    115 		ndtrace_buf[ndtrace_len++] = c;
    116 		ndtrace_buf[ndtrace_len] = '\0';
    117 	}
    118 #endif
    119 }
    120 
    121 const char *
    122 ndtrace_get(void) {
    123 #ifdef ND_DEBUG
    124 	return ndtrace_buf;
    125 #else
    126 	return NULL;
    127 #endif
    128 }
    129 
    130 
    131 #if defined(ND_DEBUG)
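/*
 * Ring buffers of recently observed DMA CSR values, one per channel type,
 * filled by nextdma_debug_savestate() and printed by the *_dumpstate()
 * routines at the end of this file.
 */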
    132 int nextdma_debug_enetr_idx = 0;
    133 unsigned int nextdma_debug_enetr_state[100] = { 0 };
    134 int nextdma_debug_scsi_idx = 0;
    135 unsigned int nextdma_debug_scsi_state[100] = { 0 };
    136 
    137 void nextdma_debug_initstate(struct nextdma_softc *);
    138 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
    139 void nextdma_debug_scsi_dumpstate(void);
    140 void nextdma_debug_enetr_dumpstate(void);
    141 #endif
    142 
    143 
    144 int	nextdma_match(device_t, cfdata_t, void *);
    145 void	nextdma_attach(device_t, device_t, void *);
    146 
    147 void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
    148 int nextdma_continue(struct nextdma_softc *);
    149 void nextdma_rotate(struct nextdma_softc *);
    150 
    151 void nextdma_setup_cont_regs(struct nextdma_softc *);
    152 void nextdma_setup_curr_regs(struct nextdma_softc *);
    153 
    154 #if NESP > 0
    155 static int nextdma_esp_intr(void *);
    156 #endif
    157 #if NXE > 0
    158 static int nextdma_enet_intr(void *);
    159 #endif
    160 
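/* Register access shorthands; both expect a local "nsc" softc pointer. */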
    161 #define nd_bsr4(reg) \
    162 	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
    163 #define nd_bsw4(reg,val) \
    164 	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
    165 
    166 CFATTACH_DECL_NEW(nextdma, sizeof(struct nextdma_softc),
    167     nextdma_match, nextdma_attach, NULL, NULL);
    168 
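/*
 * Static description of the DMA channels this driver can attach: the
 * channel name, CSR base address, register window size, interrupt source,
 * and the interrupt handler to hook up.
 */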
    169 static struct nextdma_channel nextdma_channel[] = {
    170 #if NESP > 0
    171 	{
    172 		"scsi",
    173 		NEXT_P_SCSI_CSR,
    174 		DD_SIZE,
    175 		NEXT_I_SCSI_DMA,
    176 		&nextdma_esp_intr
    177 	},
    178 #endif
    179 #if NXE > 0
    180 	{
    181 		"enetx",
    182 		NEXT_P_ENETX_CSR,
    183 		DD_SIZE,
    184 		NEXT_I_ENETX_DMA,
    185 		&nextdma_enet_intr
    186 	},
    187 	{
    188 		"enetr",
    189 		NEXT_P_ENETR_CSR,
    190 		DD_SIZE,
    191 		NEXT_I_ENETR_DMA,
    192 		&nextdma_enet_intr
    193 	},
    194 #endif
    195 };
    196 static int nnextdma_channels = __arraycount(nextdma_channel);
    197 
    198 static int attached = 0;
    199 
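/*
 * Find an attached DMA channel by name ("scsi", "enetx" or "enetr") and
 * return its softc, or NULL if no such channel has attached.
 */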
    200 struct nextdma_softc *
    201 nextdma_findchannel(const char *name)
    202 {
    203 	device_t dev;
    204 	deviter_t di;
    205 
    206 	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
    207 	     dev != NULL;
    208 	     dev = deviter_next(&di)) {
    209 		if (strncmp(device_xname(dev), "nextdma", 7) == 0) {
    210 			struct nextdma_softc *nsc = device_private(dev);
    211 			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
    212 				break;
    213 		}
    214 	}
    215 	deviter_release(&di);
    216 	if (dev == NULL)
    217 		return NULL;
    218 	return device_private(dev);
    219 }
    220 
    221 int
    222 nextdma_match(device_t parent, cfdata_t match, void *aux)
    223 {
    224 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    225 
    226 	if (attached >= nnextdma_channels)
    227 		return 0;
    228 
    229 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
    230 
    231 	return 1;
    232 }
    233 
    234 void
    235 nextdma_attach(device_t parent, device_t self, void *aux)
    236 {
    237 	struct nextdma_softc *nsc = device_private(self);
    238 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    239 
    240 	if (attached >= nnextdma_channels)
    241 		return;
    242 
    243 	nsc->sc_dev = self;
    244 	nsc->sc_chan = &nextdma_channel[attached];
    245 
    246 	nsc->sc_dmat = ia->ia_dmat;
    247 	nsc->sc_bst = ia->ia_bst;
    248 
    249 	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
    250 	    nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
    251 		panic("%s: can't map DMA registers for channel %s",
    252 		    device_xname(self), nsc->sc_chan->nd_name);
    253 	}
    254 
    255 	nextdma_init(nsc);
    256 
    257 	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
    258 	    NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
    259 	INTR_ENABLE(nsc->sc_chan->nd_intr);
    260 
    261 	printf(": channel %d (%s)\n", attached,
    262 		nsc->sc_chan->nd_name);
    263 	attached++;
    264 }
    265 
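/*
 * Bring a channel to a known idle state: clear the software map
 * bookkeeping, reset the CSR and reload the current and continue
 * register sets.
 */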
    266 void
    267 nextdma_init(struct nextdma_softc *nsc)
    268 {
    269 #ifdef ND_DEBUG
    270 	if (NEXTDMA_DEBUG) {
    271 		char sbuf[256];
    272 
    273 		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    274 		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
    275 		printf("DMA init ipl (%ld) intr(%s)\n",
    276 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    277 	}
    278 #endif
    279 
    280 	nsc->sc_stat.nd_map = NULL;
    281 	nsc->sc_stat.nd_idx = 0;
    282 	nsc->sc_stat.nd_map_cont = NULL;
    283 	nsc->sc_stat.nd_idx_cont = 0;
    284 	nsc->sc_stat.nd_exception = 0;
    285 
    286 	nd_bsw4(DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
    287 	nd_bsw4(DD_CSR, 0);
    288 
    289 #if 01
    290 	nextdma_setup_curr_regs(nsc);
    291 	nextdma_setup_cont_regs(nsc);
    292 #endif
    293 
    294 #if defined(DIAGNOSTIC)
    295 	{
    296 		u_long state;
    297 		state = nd_bsr4 (DD_CSR);
    298 
    299 #if 1
     300 		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC;
     301 		 * milo (a 25 MHz 68040 mono cube) didn't have this problem.
     302 		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
    303 		 */
    304 		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
    305 #else
    306 		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
    307 			  DMACSR_SUPDATE | DMACSR_ENABLE);
    308 #endif
    309 		if (state != 0) {
    310 			nextdma_print(nsc);
    311 			panic("DMA did not reset");
    312 		}
    313 	}
    314 #endif
    315 }
    316 
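/*
 * Abort whatever the channel is doing: reset it, report a pending
 * continue map as completed and run the client's shutdown callback.
 */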
    317 void
    318 nextdma_reset(struct nextdma_softc *nsc)
    319 {
    320 	int s;
    321 	struct nextdma_status *stat = &nsc->sc_stat;
    322 
    323 	s = spldma();
    324 
    325 	DPRINTF(("DMA reset\n"));
    326 
    327 #if (defined(ND_DEBUG))
    328 	if (NEXTDMA_DEBUG > 1)
    329 		nextdma_print(nsc);
    330 #endif
    331 
    332 	nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    333 	if ((stat->nd_map) || (stat->nd_map_cont)) {
    334 		if (stat->nd_map_cont) {
    335 			DPRINTF(
    336 			    ("DMA: resetting with non null continue map\n"));
    337 			if (nsc->sc_conf.nd_completed_cb)
    338 				(*nsc->sc_conf.nd_completed_cb)(
    339 				    stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    340 
    341 			stat->nd_map_cont = 0;
    342 			stat->nd_idx_cont = 0;
    343 		}
    344 		if (nsc->sc_conf.nd_shutdown_cb)
    345 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    346 		stat->nd_map = 0;
    347 		stat->nd_idx = 0;
    348 	}
    349 
    350 	splx(s);
    351 }
    352 
    353 /****************************************************************/
    354 
    355 
    356 /*
    357  * Call the completed and continue callbacks to try to fill
    358  * in the dma continue buffers.
    359  */
    360 void
    361 nextdma_rotate(struct nextdma_softc *nsc)
    362 {
    363 	struct nextdma_status *stat = &nsc->sc_stat;
    364 
    365 	NDTRACEIF(ndtrace_addc('r'));
    366 	DPRINTF(("DMA nextdma_rotate()\n"));
    367 
    368 	/* Rotate the continue map into the current map */
    369 	stat->nd_map = stat->nd_map_cont;
    370 	stat->nd_idx = stat->nd_idx_cont;
    371 
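	/*
	 * If there is no continue map, or advancing its index would run past
	 * its last segment, ask the client for a fresh map via the continue
	 * callback and restart the continue index at 0; otherwise the
	 * incremented index now points at the map's next segment.
	 */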
    372 	if ((stat->nd_map_cont == NULL) ||
    373 	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
    374 		if (nsc->sc_conf.nd_continue_cb != NULL) {
    375 			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
    376 				(nsc->sc_conf.nd_cb_arg);
    377 			if (stat->nd_map_cont != NULL) {
    378 				stat->nd_map_cont->dm_xfer_len = 0;
    379 			}
    380 		} else {
    381 			stat->nd_map_cont = 0;
    382 		}
    383 		stat->nd_idx_cont = 0;
    384 	}
    385 
    386 #if defined(DIAGNOSTIC) && 0
    387 	if (stat->nd_map_cont) {
    388 		if (!DMA_BEGINALIGNED(
    389 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
    390 			nextdma_print(nsc);
    391 			panic("DMA request unaligned at start");
    392 		}
    393 		if (!DMA_ENDALIGNED(
    394 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    395 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
    396 			nextdma_print(nsc);
    397 			panic("DMA request unaligned at end");
    398 		}
    399 	}
    400 #endif
    401 
    402 }
    403 
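/*
 * Load the "current" register set (NEXT/LIMIT and their saved copies)
 * from the current segment of the current map.  On non-turbo hardware
 * the Ethernet transmit channel wants the limit tagged with 0x80000000
 * and padded by 15 bytes; with no current map the registers are set to 0
 * on turbo hardware or to a recognizable poison value (0xdeadbeef)
 * otherwise.
 */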
    404 void
    405 nextdma_setup_curr_regs(struct nextdma_softc *nsc)
    406 {
    407 	bus_addr_t dd_next;
    408 	bus_addr_t dd_limit;
    409 	bus_addr_t dd_saved_next;
    410 	bus_addr_t dd_saved_limit;
    411 	struct nextdma_status *stat = &nsc->sc_stat;
    412 
    413 	NDTRACEIF(ndtrace_addc('C'));
    414 	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));
    415 
    416 	if (stat->nd_map != NULL) {
    417 		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    418 		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
    419 			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    420 
    421 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    422 			/* Ethernet transmit needs secret magic */
    423 			dd_limit |= 0x80000000;
    424 			dd_limit += 15;
    425 		}
    426 	} else {
    427 		dd_next = turbo ? 0 : 0xdeadbeef;
    428 		dd_limit = turbo ? 0 : 0xdeadbeef;
    429 	}
    430 
    431 	dd_saved_next = dd_next;
    432 	dd_saved_limit = dd_limit;
    433 
    434 	NDTRACEIF(if (stat->nd_map) {
    435 		ndtrace_printf("%ld",
    436 		    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    437 	});
    438 
    439 	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
    440 		nd_bsw4(DD_NEXT_INITBUF, dd_next);
    441 	} else {
    442 		nd_bsw4(DD_NEXT, dd_next);
    443 	}
    444 	nd_bsw4(DD_LIMIT, dd_limit);
    445 	if (!turbo)
    446 		nd_bsw4(DD_SAVED_NEXT, dd_saved_next);
    447 	if (!turbo)
    448 		nd_bsw4(DD_SAVED_LIMIT, dd_saved_limit);
    449 
    450 #ifdef DIAGNOSTIC
    451 	if ((nd_bsr4(DD_NEXT_INITBUF) != dd_next)
    452 	    || (nd_bsr4(DD_NEXT) != dd_next)
    453 	    || (nd_bsr4(DD_LIMIT) != dd_limit)
    454 	    || (!turbo && (nd_bsr4(DD_SAVED_NEXT) != dd_saved_next))
    455 	    || (!turbo && (nd_bsr4(DD_SAVED_LIMIT) != dd_saved_limit))
    456 		) {
    457 		nextdma_print(nsc);
    458 		panic("DMA failure writing to current regs");
    459 	}
    460 #endif
    461 }
    462 
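/*
 * Load the "continue" register set (START/STOP and their saved copies)
 * from the current segment of the continue map, applying the same
 * Ethernet-transmit adjustment as above on non-turbo hardware.
 */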
    463 void
    464 nextdma_setup_cont_regs(struct nextdma_softc *nsc)
    465 {
    466 	bus_addr_t dd_start;
    467 	bus_addr_t dd_stop;
    468 	bus_addr_t dd_saved_start;
    469 	bus_addr_t dd_saved_stop;
    470 	struct nextdma_status *stat = &nsc->sc_stat;
    471 
    472 	NDTRACEIF(ndtrace_addc('c'));
     473 	DPRINTF(("DMA nextdma_setup_cont_regs()\n"));
    474 
    475 	if (stat->nd_map_cont != NULL) {
    476 		dd_start =
    477 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
    478 		dd_stop  =
    479 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    480 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len;
    481 
    482 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    483 			/* Ethernet transmit needs secret magic */
    484 			dd_stop |= 0x80000000;
    485 			dd_stop += 15;
    486 		}
    487 	} else {
    488 		dd_start = turbo ? nd_bsr4(DD_NEXT) : 0xdeadbee0;
    489 		dd_stop = turbo ? 0 : 0xdeadbee0;
    490 	}
    491 
    492 	dd_saved_start = dd_start;
    493 	dd_saved_stop  = dd_stop;
    494 
    495 	NDTRACEIF(if (stat->nd_map_cont != NULL) {
    496 		ndtrace_printf("%ld",
    497 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    498 	});
    499 
    500 	nd_bsw4(DD_START, dd_start);
    501 	nd_bsw4(DD_STOP, dd_stop);
    502 	if (!turbo)
    503 		nd_bsw4(DD_SAVED_START, dd_saved_start);
    504 	if (!turbo)
    505 		nd_bsw4(DD_SAVED_STOP, dd_saved_stop);
    506 	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
    507 		nd_bsw4(DD_STOP - 0x40, dd_start);
    508 
    509 #ifdef DIAGNOSTIC
    510 	if ((nd_bsr4(DD_START) != dd_start)
    511 	    || (dd_stop && (nd_bsr4(DD_STOP) != dd_stop))
    512 	    || (!turbo && (nd_bsr4(DD_SAVED_START) != dd_saved_start))
    513 	    || (!turbo && (nd_bsr4(DD_SAVED_STOP) != dd_saved_stop))
    514 		) {
    515 		nextdma_print(nsc);
    516 		panic("DMA failure writing to continue regs");
    517 	}
    518 #endif
    519 }
    520 
    521 /****************************************************************/
    522 
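/*
 * Interrupt handlers.  The SCSI channel just hands its interrupt to the
 * ESP driver; the Ethernet handler below does the real work of advancing
 * the current and continue maps.
 */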
    523 #if NESP > 0
    524 static int
    525 nextdma_esp_intr(void *arg)
    526 {
    527 	/* @@@ This is bogus, we can't be certain of arg's type
    528 	 * unless the interrupt is for us.  For now we successfully
    529 	 * cheat because DMA interrupts are the only things invoked
    530 	 * at this interrupt level.
    531 	 */
    532 	struct nextdma_softc *nsc = arg;
    533 	int esp_dma_int(void *); /* XXX */
    534 
    535 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    536 		return 0;
    537 	/* Handle dma interrupts */
    538 
    539 	return esp_dma_int(nsc->sc_conf.nd_cb_arg);
    540 }
    541 #endif
    542 
    543 #if NXE > 0
    544 static int
    545 nextdma_enet_intr(void *arg)
    546 {
    547 
    548 	/*
    549 	 * @@@ This is bogus, we can't be certain of arg's type
    550 	 * unless the interrupt is for us.  For now we successfully
    551 	 * cheat because DMA interrupts are the only things invoked
    552 	 * at this interrupt level.
    553 	 */
    554 	struct nextdma_softc *nsc = arg;
    555 	unsigned int state;
    556 	bus_addr_t onext;
    557 	bus_addr_t olimit;
    558 	bus_addr_t slimit;
    559 	int result;
    560 	struct nextdma_status *stat = &nsc->sc_stat;
    561 
    562 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    563 		return 0;
    564 	/* Handle dma interrupts */
    565 
    566 	NDTRACEIF(ndtrace_addc('D'));
    567 #ifdef ND_DEBUG
    568 	if (NEXTDMA_DEBUG) {
    569 		char sbuf[256];
    570 
    571 		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    572 		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
    573 		printf("DMA interrupt ipl (%ld) intr(%s)\n",
    574 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    575 	}
    576 #endif
    577 
    578 #ifdef DIAGNOSTIC
    579 	if (stat->nd_map == NULL) {
    580 		nextdma_print(nsc);
    581 		panic("DMA missing current map in interrupt!");
    582 	}
    583 #endif
    584 
    585 	state = nd_bsr4(DD_CSR);
    586 
    587 #if defined(ND_DEBUG)
    588 	nextdma_debug_savestate(nsc, state);
    589 #endif
    590 
    591 #ifdef DIAGNOSTIC
    592 	if (/* (state & DMACSR_READ) || */ (state & DMACSR_COMPLETE) == 0) {
    593 		char sbuf[256];
    594 		nextdma_print(nsc);
    595 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    596 		printf("DMA: state %s\n",sbuf);
    597 		panic("DMA complete not set in interrupt");
    598 	}
    599 #endif
    600 
    601 	DPRINTF(("DMA: finishing xfer\n"));
    602 
    603 	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    604 	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;
    605 
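	/*
	 * Encode the interesting state into a small bitmask so the cases
	 * below can be enumerated: 0x01 = ENABLE still set, 0x02 = SUPDATE
	 * set, 0x04 = no continue map (a shutdown was expected), 0x08 = bus
	 * exception.  The switch then decides where to read back how far
	 * the engine actually got (slimit) for each combination.
	 */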
    606 	result = 0;
    607 	if ((state & DMACSR_ENABLE) != 0) {
    608 		/* enable bit was set */
    609 		result |= 0x01;
    610 	}
    611 	if ((state & DMACSR_SUPDATE) != 0) {
    612 		/* supdate bit was set */
    613 		result |= 0x02;
    614 	}
    615 	if (stat->nd_map_cont == NULL) {
    616 		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    617 		/* Expecting a shutdown, didn't SETSUPDATE last turn */
    618 		result |= 0x04;
    619 	}
    620 	if ((state & DMACSR_BUSEXC) != 0) {
    621 		/* bus exception bit was set */
    622 		result |= 0x08;
    623 	}
    624 	switch (result) {
    625 	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
    626 	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
    627 		if (turbo) {
    628 			volatile u_int *limit =
    629 			    (volatile u_int *)IIOV(0x2000050 + 0x4000);
    630 			slimit = *limit;
    631 		} else {
    632 			slimit = nd_bsr4(DD_SAVED_LIMIT);
    633 		}
    634 		break;
    635 	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
    636 	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
    637 		if (turbo) {
    638 			volatile u_int *limit =
    639 			    (volatile u_int *)IIOV(0x2000050 + 0x4000);
    640 			slimit = *limit;
    641 		} else {
    642 			slimit = nd_bsr4(DD_SAVED_LIMIT);
    643 		}
    644 		break;
    645 	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
    646 	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
    647 		slimit = nd_bsr4(DD_NEXT);
    648 		break;
    649 	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
    650 	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
    651 		slimit = nd_bsr4(DD_LIMIT);
    652 		break;
    653 	default:
    654 #ifdef DIAGNOSTIC
    655 	{
    656 		char sbuf[256];
    657 		printf("DMA: please send this output to"
     658 		    " port-next68k-maintainer@NetBSD.org:\n");
    659 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    660 		printf("DMA: state %s\n",sbuf);
    661 		nextdma_print(nsc);
    662 		panic("DMA: condition 0x%02x not yet documented to occur",
    663 		    result);
    664 	}
    665 #endif
    666 	slimit = olimit;
    667 	break;
    668 	}
    669 
    670 	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    671 		slimit &= ~0x80000000;
    672 		slimit -= 15;
    673 	}
    674 
    675 #ifdef DIAGNOSTIC
    676 	if ((state & DMACSR_READ) != 0)
    677 		DPRINTF(("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n",
    678 		    onext, slimit, olimit,
    679 		    (state & DMACSR_READ) ? "read" : "write"));
    680 	if (slimit < onext || slimit > olimit) {
    681 		char sbuf[256];
    682 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    683 		printf("DMA: state %s\n",sbuf);
    684 		nextdma_print(nsc);
    685 		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",
    686 		    slimit);
    687 	}
    688 #endif
    689 
    690 #ifdef DIAGNOSTIC
    691 	if ((state & DMACSR_ENABLE) != 0 &&
    692 	    stat->nd_idx + 1 != stat->nd_map->dm_nsegs) {
    693 		if (slimit != olimit) {
    694 			char sbuf[256];
    695 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    696 			printf("DMA: state %s\n",sbuf);
    697 			nextdma_print(nsc);
    698 			panic("DMA: short limit register (0x%08lx)"
    699 			    " w/o finishing map.", slimit);
    700 		}
    701 	}
    702 #endif
    703 
    704 #if (defined(ND_DEBUG))
    705 	if (NEXTDMA_DEBUG > 2)
    706 		nextdma_print(nsc);
    707 #endif
    708 
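	/* Credit the bytes the engine actually moved in this segment. */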
     709 	stat->nd_map->dm_xfer_len += slimit - onext;
    710 
    711 	/* If we've reached the end of the current map, then inform
     712 	 * the client that we've completed that map.
    713 	 */
    714 	if (stat->nd_idx + 1 == stat->nd_map->dm_nsegs) {
    715 		if (nsc->sc_conf.nd_completed_cb)
    716 			(*nsc->sc_conf.nd_completed_cb)(stat->nd_map,
    717 			    nsc->sc_conf.nd_cb_arg);
    718 	} else {
    719 		KASSERT(stat->nd_map == stat->nd_map_cont);
    720 		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
    721 	}
    722 	stat->nd_map = 0;
    723 	stat->nd_idx = 0;
    724 
    725 #if (defined(ND_DEBUG))
    726 	if (NEXTDMA_DEBUG) {
    727 		char sbuf[256];
    728 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    729 		printf("CLNDMAP: dd->dd_csr          = %s\n", sbuf);
    730 	}
    731 #endif
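	/*
	 * If the engine is still enabled there is more to do: rotate in the
	 * next map, reload the continue registers and clear COMPLETE, adding
	 * SETSUPDATE when yet another map is queued behind it.  Otherwise
	 * the channel has shut down and we clean up and notify the client.
	 */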
    732 	if ((state & DMACSR_ENABLE) != 0) {
    733 		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
    734 
    735 		nextdma_rotate(nsc);
    736 		nextdma_setup_cont_regs(nsc);
    737 
    738 		if ((state & DMACSR_READ) != 0) {
    739 			dmadir = DMACSR_SETREAD;
    740 		} else {
    741 			dmadir = DMACSR_SETWRITE;
    742 		}
    743 
    744 		if (stat->nd_map_cont == NULL) {
    745 			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    746 			nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
    747 			NDTRACEIF(ndtrace_addc('g'));
    748 		} else {
    749 			nd_bsw4(DD_CSR,
    750 			    DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
    751 			NDTRACEIF(ndtrace_addc('G'));
    752 		}
    753 	} else {
    754 		DPRINTF(("DMA: a shutdown occurred\n"));
    755 		nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    756 
     757 		/* Clean up any remaining incomplete transfers */
     758 		/* clean up the continue map first */
    759 		if (stat->nd_map_cont) {
    760 			DPRINTF(("DMA: shutting down with"
    761 			    " non null continue map\n"));
    762 			if (nsc->sc_conf.nd_completed_cb != NULL)
    763 				(*nsc->sc_conf.nd_completed_cb)(
    764 				    stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    765 
    766 			stat->nd_map_cont = 0;
    767 			stat->nd_idx_cont = 0;
    768 		}
    769 		if (nsc->sc_conf.nd_shutdown_cb != NULL)
    770 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    771 	}
    772 
    773 #ifdef ND_DEBUG
    774 	if (NEXTDMA_DEBUG) {
    775 		char sbuf[256];
    776 
    777 		snprintb(sbuf, sizeof(sbuf),
    778 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    779 		printf("DMA exiting interrupt ipl (%ld) intr(%s)\n",
    780 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    781 	}
    782 #endif
    783 
    784 	return 1;
    785 }
    786 #endif
    787 
    788 /*
    789  * Check to see if dma has finished for a channel */
    790 int
    791 nextdma_finished(struct nextdma_softc *nsc)
    792 {
    793 	int r;
    794 	int s;
    795 	struct nextdma_status *stat = &nsc->sc_stat;
    796 
    797 	s = spldma();
    798 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
    799 	splx(s);
    800 
    801 	return r;
    802 }
    803 
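/*
 * Start a new transfer in the given direction (DMACSR_SETREAD or
 * DMACSR_SETWRITE).  Both the current and the continue maps are primed
 * with nextdma_rotate() before the channel is enabled.
 */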
    804 void
    805 nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
    806 {
    807 	struct nextdma_status *stat = &nsc->sc_stat;
    808 
    809 	NDTRACEIF(ndtrace_addc('n'));
    810 #ifdef DIAGNOSTIC
    811 	if (!nextdma_finished(nsc)) {
    812 		char sbuf[256];
    813 
    814 		snprintb(sbuf, sizeof(sbuf),
    815 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    816 		panic("DMA trying to start before previous finished"
    817 		    " on intr(%s)", sbuf);
    818 	}
    819 #endif
    820 
    821 #ifdef ND_DEBUG
    822 	if (NEXTDMA_DEBUG) {
    823 		char sbuf[256];
    824 
    825 		snprintb(sbuf, sizeof(sbuf),
    826 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    827 		printf("DMA start (%ld) intr(%s)\n",
    828 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    829 	}
    830 #endif
    831 
    832 #ifdef DIAGNOSTIC
    833 	if (stat->nd_map != NULL) {
    834 		nextdma_print(nsc);
    835 		panic("DMA: nextdma_start() with non null map");
    836 	}
    837 	if (stat->nd_map_cont != NULL) {
    838 		nextdma_print(nsc);
    839 		panic("DMA: nextdma_start() with non null continue map");
    840 	}
    841 #endif
    842 
    843 #ifdef DIAGNOSTIC
    844 	if (dmadir != DMACSR_SETREAD && dmadir != DMACSR_SETWRITE) {
    845 		panic("DMA: nextdma_start(), dmadir arg must be"
    846 		    " DMACSR_SETREAD or DMACSR_SETWRITE");
    847 	}
    848 #endif
    849 
    850 #if defined(ND_DEBUG)
    851 	nextdma_debug_initstate(nsc);
    852 #endif
    853 
    854 	/* preload both the current and the continue maps */
    855 	nextdma_rotate(nsc);
    856 
    857 #ifdef DIAGNOSTIC
    858 	if (stat->nd_map_cont == NULL) {
    859 		panic("No map available in nextdma_start()");
    860 	}
    861 #endif
    862 
    863 	nextdma_rotate(nsc);
    864 
    865 #ifdef ND_DEBUG
    866 	if (NEXTDMA_DEBUG) {
    867 		char sbuf[256];
    868 
    869 		snprintb(sbuf, sizeof(sbuf),
    870 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    871 		printf("DMA initiating DMA %s of %d segments on intr(%s)\n",
    872 		    (dmadir == DMACSR_SETREAD ? "read" : "write"),
    873 		    stat->nd_map->dm_nsegs, sbuf);
    874 	}
    875 #endif
    876 
    877 	nd_bsw4(DD_CSR, (turbo ?
    878 	    DMACSR_INITBUFTURBO : DMACSR_INITBUF) | DMACSR_RESET | dmadir);
    879 	nd_bsw4(DD_CSR, 0);
    880 
    881 	nextdma_setup_curr_regs(nsc);
    882 	nextdma_setup_cont_regs(nsc);
    883 
    884 #if (defined(ND_DEBUG))
    885 	if (NEXTDMA_DEBUG > 2)
    886 		nextdma_print(nsc);
    887 #endif
    888 
    889 	if (stat->nd_map_cont == NULL) {
    890 		nd_bsw4(DD_CSR, DMACSR_SETENABLE | dmadir);
    891 	} else {
    892 		nd_bsw4(DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
    893 	}
    894 }
    895 
    896 /* This routine is used for debugging */
    897 void
    898 nextdma_print(struct nextdma_softc *nsc)
    899 {
    900 	u_long dd_csr;
    901 	u_long dd_next;
    902 	u_long dd_next_initbuf;
    903 	u_long dd_limit;
    904 	u_long dd_start;
    905 	u_long dd_stop;
    906 	u_long dd_saved_next;
    907 	u_long dd_saved_limit;
    908 	u_long dd_saved_start;
    909 	u_long dd_saved_stop;
    910 	char sbuf[256];
    911 	struct nextdma_status *stat = &nsc->sc_stat;
    912 
    913 	/*
    914 	 * Read all of the registers before we print anything out,
    915 	 * in case something changes
    916 	 */
    917 	dd_csr          = nd_bsr4(DD_CSR);
    918 	dd_next         = nd_bsr4(DD_NEXT);
    919 	dd_next_initbuf = nd_bsr4(DD_NEXT_INITBUF);
    920 	dd_limit        = nd_bsr4(DD_LIMIT);
    921 	dd_start        = nd_bsr4(DD_START);
    922 	dd_stop         = nd_bsr4(DD_STOP);
    923 	dd_saved_next   = nd_bsr4(DD_SAVED_NEXT);
    924 	dd_saved_limit  = nd_bsr4(DD_SAVED_LIMIT);
    925 	dd_saved_start  = nd_bsr4(DD_SAVED_START);
    926 	dd_saved_stop   = nd_bsr4(DD_SAVED_STOP);
    927 
    928 	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    929 	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
    930 	printf("NDMAP: *intrstat = %s\n", sbuf);
    931 
    932 	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    933 	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
    934 	printf("NDMAP: *intrmask = %s\n", sbuf);
    935 
    936 	/* NDMAP is Next DMA Print (really!) */
    937 
    938 	if (stat->nd_map != NULL) {
    939 		int i;
    940 
    941 		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
    942 		    stat->nd_map->dm_mapsize);
    943 		printf("NDMAP: nd_map->dm_nsegs = %d\n",
    944 		    stat->nd_map->dm_nsegs);
    945 		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
    946 		    stat->nd_map->dm_xfer_len);
    947 		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    948 		    stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
    949 		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
    950 		    stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    951 
    952 		printf("NDMAP: Entire map;\n");
     953 		for (i = 0; i < stat->nd_map->dm_nsegs; i++) {
    954 			printf("NDMAP:   "
    955 			    "nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    956 			    i, stat->nd_map->dm_segs[i].ds_addr);
    957 			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
    958 			    i, stat->nd_map->dm_segs[i].ds_len);
    959 		}
    960 	} else {
    961 		printf("NDMAP: nd_map = NULL\n");
    962 	}
    963 	if (stat->nd_map_cont != NULL) {
    964 		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
    965 		    stat->nd_map_cont->dm_mapsize);
    966 		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
    967 		    stat->nd_map_cont->dm_nsegs);
    968 		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
    969 		    stat->nd_map_cont->dm_xfer_len);
    970 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    971 		    stat->nd_idx_cont,
    972 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
    973 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    974 		    stat->nd_idx_cont,
    975 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    976 		if (stat->nd_map_cont != stat->nd_map) {
    977 			int i;
    978 			printf("NDMAP: Entire map;\n");
     979 			for (i = 0; i < stat->nd_map_cont->dm_nsegs; i++) {
    980 				printf("NDMAP:   "
    981 				    "nd_map_cont->dm_segs[%d].ds_addr"
    982 				    " = 0x%08lx\n",
    983 				    i, stat->nd_map_cont->dm_segs[i].ds_addr);
    984 				printf("NDMAP:   "
    985 				    "nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    986 				    i, stat->nd_map_cont->dm_segs[i].ds_len);
    987 			}
    988 		}
    989 	} else {
    990 		printf("NDMAP: nd_map_cont = NULL\n");
    991 	}
    992 
    993 	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
    994 	printf("NDMAP: dd->dd_csr          = %s\n",   sbuf);
    995 
    996 	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
    997 	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
    998 	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
    999 	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
   1000 	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
   1001 	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
   1002 	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
   1003 	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
   1004 	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);
   1005 
   1006 	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
   1007 	    NEXT_I_BIT(nsc->sc_chan->nd_intr));
   1008 	printf("NDMAP: interrupt ipl (%ld) intr(%s)\n",
   1009 	    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
   1010 }
   1011 
   1012 #if defined(ND_DEBUG)
   1013 void
   1014 nextdma_debug_initstate(struct nextdma_softc *nsc)
   1015 {
   1016 	switch(nsc->sc_chan->nd_intr) {
   1017 	case NEXT_I_ENETR_DMA:
   1018 		memset(nextdma_debug_enetr_state, 0,
   1019 		    sizeof(nextdma_debug_enetr_state));
   1020 		break;
   1021 	case NEXT_I_SCSI_DMA:
   1022 		memset(nextdma_debug_scsi_state, 0,
   1023 		    sizeof(nextdma_debug_scsi_state));
   1024 		break;
   1025 	}
   1026 }
   1027 
   1028 void
   1029 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
   1030 {
   1031 
   1032 	switch(nsc->sc_chan->nd_intr) {
   1033 	case NEXT_I_ENETR_DMA:
   1034 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
   1035 		nextdma_debug_enetr_idx %=
   1036 		    (sizeof(nextdma_debug_enetr_state) / sizeof(unsigned int));
   1037 		break;
   1038 	case NEXT_I_SCSI_DMA:
   1039 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
   1040 		nextdma_debug_scsi_idx %=
   1041 		    (sizeof(nextdma_debug_scsi_state) / sizeof(unsigned int));
   1042 		break;
   1043 	}
   1044 }
   1045 
   1046 void
   1047 nextdma_debug_enetr_dumpstate(void)
   1048 {
   1049 	int i;
   1050 	int s;
   1051 	s = spldma();
   1052 	i = nextdma_debug_enetr_idx;
   1053 	do {
   1054 		char sbuf[256];
   1055 		if (nextdma_debug_enetr_state[i]) {
   1056 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS,
   1057 			    nextdma_debug_enetr_state[i]);
   1058 			printf("DMA: 0x%02x state %s\n", i, sbuf);
   1059 		}
   1060 		i++;
   1061 		i %= (sizeof(nextdma_debug_enetr_state) / sizeof(unsigned int));
   1062 	} while (i != nextdma_debug_enetr_idx);
   1063 	splx(s);
   1064 }
   1065 
   1066 void
   1067 nextdma_debug_scsi_dumpstate(void)
   1068 {
   1069 	int i;
   1070 	int s;
   1071 	s = spldma();
   1072 	i = nextdma_debug_scsi_idx;
   1073 	do {
   1074 		char sbuf[256];
   1075 		if (nextdma_debug_scsi_state[i]) {
   1076 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS,
   1077 			    nextdma_debug_scsi_state[i]);
   1078 			printf("DMA: 0x%02x state %s\n", i, sbuf);
   1079 		}
   1080 		i++;
   1081 		i %= (sizeof(nextdma_debug_scsi_state) / sizeof(unsigned int));
   1082 	} while (i != nextdma_debug_scsi_idx);
   1083 	splx(s);
   1084 }
   1085 #endif
   1086