      1 /*	$NetBSD: nextdma.c,v 1.52 2023/12/20 00:40:44 thorpej Exp $	*/
      2 /*
      3  * Copyright (c) 1998 Darrin B. Jewell
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  */
     26 
     27 #include <sys/cdefs.h>
     28 __KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.52 2023/12/20 00:40:44 thorpej Exp $");
     29 
     30 #include <sys/param.h>
     31 #include <sys/systm.h>
     32 #include <sys/mbuf.h>
     33 #include <sys/syslog.h>
     34 #include <sys/socket.h>
     35 #include <sys/device.h>
     36 #include <sys/ioctl.h>
     37 #include <sys/errno.h>
     38 
     39 #define _M68K_BUS_DMA_PRIVATE
     40 #include <machine/autoconf.h>
     41 #include <machine/cpu.h>
     42 #include <machine/intr.h>
     43 
     44 #include <m68k/cacheops.h>
     45 
     46 #include <next68k/next68k/isr.h>
     47 #include <next68k/next68k/nextrom.h>
     48 
     49 #include <next68k/dev/intiovar.h>
     50 
     51 #include "nextdmareg.h"
     52 #include "nextdmavar.h"
     53 
     54 #include "esp.h"
     55 #include "xe.h"
     56 
     57 #if DEBUG
     58 #define ND_DEBUG
     59 #endif
     60 
     61 extern int turbo;
     62 
      63 #define panic(...)	do { __asm volatile("trap  #15"); printf(__VA_ARGS__); } while (0)
     64 
     65 #define NEXTDMA_DEBUG nextdma_debug
     66 /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
     67 #if defined(ND_DEBUG)
     68 int nextdma_debug = 0;
      69 #define DPRINTF(x) do { if (NEXTDMA_DEBUG) printf x; } while (0)
     70 int ndtrace_show = 0;
     71 char ndtrace_buf[8192+100];
     72 size_t ndtrace_len = 0;
      73 #define NDTRACEIF(x) do { x; } while (0)
     74 #else
     75 #define DPRINTF(x)
     76 #define NDTRACEIF(x)
     77 #endif
     78 #define PRINTF(x) printf x
     79 
     80 void
     81 ndtrace_printf(const char *fmt, ...) {
     82 #ifdef ND_DEBUG
     83 	int len;
     84 	va_list ap;
     85 
     86 	va_start(ap, fmt);
      87 	len = vsnprintf(ndtrace_buf + ndtrace_len,
      88 	    sizeof(ndtrace_buf) - ndtrace_len, fmt, ap);
      89 	va_end(ap);
      90 	ndtrace_len = MIN(ndtrace_len + len, sizeof(ndtrace_buf) - 1);
     91 #endif
     92 }
     93 
     94 int
     95 ndtrace_empty(void) {
     96 #ifdef ND_DEBUG
     97 	return ndtrace_len == 0;
     98 #else
     99 	return 1;
    100 #endif
    101 }
    102 
    103 void
    104 ndtrace_reset(void) {
    105 #ifdef ND_DEBUG
    106 	ndtrace_len = 0;
    107 #endif
    108 }
    109 
    110 void
    111 ndtrace_addc(int c) {
    112 #ifdef ND_DEBUG
    113 	if (ndtrace_len < sizeof(ndtrace_buf) - 1) {
    114 		ndtrace_buf[ndtrace_len++] = c;
    115 		ndtrace_buf[ndtrace_len] = '\0';
    116 	}
    117 #endif
    118 }
    119 
    120 const char *
    121 ndtrace_get(void) {
    122 #ifdef ND_DEBUG
    123 	return ndtrace_buf;
    124 #else
    125 	return NULL;
    126 #endif
    127 }
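/*
 * A minimal sketch (not part of the driver) of how a debug path could drain
 * the trace buffer maintained by the helpers above.  The name ndtrace_dump()
 * is hypothetical; the calls it makes are the helpers defined in this file.
 */
#if 0
static void
ndtrace_dump(void)
{

	if (ndtrace_empty())
		return;			/* nothing recorded, or !ND_DEBUG */
	printf("nextdma trace: %s\n", ndtrace_get());
	ndtrace_reset();		/* start a fresh trace */
}
#endif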
    128 
    129 
    130 #if defined(ND_DEBUG)
    131 int nextdma_debug_enetr_idx = 0;
    132 unsigned int nextdma_debug_enetr_state[100] = { 0 };
    133 int nextdma_debug_scsi_idx = 0;
    134 unsigned int nextdma_debug_scsi_state[100] = { 0 };
    135 
    136 void nextdma_debug_initstate(struct nextdma_softc *);
    137 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
    138 void nextdma_debug_scsi_dumpstate(void);
    139 void nextdma_debug_enetr_dumpstate(void);
    140 #endif
    141 
    142 
    143 int	nextdma_match(device_t, cfdata_t, void *);
    144 void	nextdma_attach(device_t, device_t, void *);
    145 
    146 void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
    147 int nextdma_continue(struct nextdma_softc *);
    148 void nextdma_rotate(struct nextdma_softc *);
    149 
    150 void nextdma_setup_cont_regs(struct nextdma_softc *);
    151 void nextdma_setup_curr_regs(struct nextdma_softc *);
    152 
    153 #if NESP > 0
    154 static int nextdma_esp_intr(void *);
    155 #endif
    156 #if NXE > 0
    157 static int nextdma_enet_intr(void *);
    158 #endif
    159 
    160 #define nd_bsr4(reg) \
    161 	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
    162 #define nd_bsw4(reg,val) \
    163 	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
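/*
 * Note: these accessors expand to references to a local "nsc"
 * (struct nextdma_softc *), so a variable of that name must be in
 * scope wherever they are used, as it is in every caller below.
 */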
    164 
    165 CFATTACH_DECL_NEW(nextdma, sizeof(struct nextdma_softc),
    166     nextdma_match, nextdma_attach, NULL, NULL);
    167 
    168 static struct nextdma_channel nextdma_channel[] = {
    169 #if NESP > 0
    170 	{
    171 		"scsi",
    172 		NEXT_P_SCSI_CSR,
    173 		DD_SIZE,
    174 		NEXT_I_SCSI_DMA,
    175 		&nextdma_esp_intr
    176 	},
    177 #endif
    178 #if NXE > 0
    179 	{
    180 		"enetx",
    181 		NEXT_P_ENETX_CSR,
    182 		DD_SIZE,
    183 		NEXT_I_ENETX_DMA,
    184 		&nextdma_enet_intr
    185 	},
    186 	{
    187 		"enetr",
    188 		NEXT_P_ENETR_CSR,
    189 		DD_SIZE,
    190 		NEXT_I_ENETR_DMA,
    191 		&nextdma_enet_intr
    192 	},
    193 #endif
    194 };
    195 static int nnextdma_channels = __arraycount(nextdma_channel);
    196 
    197 static int attached = 0;
    198 
    199 struct nextdma_softc *
    200 nextdma_findchannel(const char *name)
    201 {
    202 	device_t dev;
    203 	deviter_t di;
    204 
    205 	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
    206 	     dev != NULL;
    207 	     dev = deviter_next(&di)) {
    208 		if (strncmp(device_xname(dev), "nextdma", 7) == 0) {
    209 			struct nextdma_softc *nsc = device_private(dev);
    210 			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
    211 				break;
    212 		}
    213 	}
    214 	deviter_release(&di);
    215 	if (dev == NULL)
    216 		return NULL;
    217 	return device_private(dev);
    218 }
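/*
 * A minimal sketch (not part of the driver) of how a client front end might
 * hook itself up to a channel.  The xxx_* names and struct xxx_softc are
 * hypothetical; the sc_conf fields, the channel names and the call sequence
 * follow this file, and the callback signatures are inferred from how the
 * callbacks are invoked below.
 */
#if 0
static bus_dmamap_t xxx_continue_cb(void *);		/* supply next map */
static void xxx_completed_cb(bus_dmamap_t, void *);	/* a map finished */
static void xxx_shutdown_cb(void *);			/* channel went idle */

static void
xxx_dma_setup(struct xxx_softc *sc)
{
	struct nextdma_softc *nsc;

	nsc = nextdma_findchannel("enetr");	/* or "enetx", "scsi" */
	if (nsc == NULL)
		return;

	nsc->sc_conf.nd_continue_cb = xxx_continue_cb;
	nsc->sc_conf.nd_completed_cb = xxx_completed_cb;
	nsc->sc_conf.nd_shutdown_cb = xxx_shutdown_cb;
	nsc->sc_conf.nd_cb_arg = sc;

	/* Kick off a transfer; the continue callback feeds it maps. */
	nextdma_start(nsc, DMACSR_SETREAD);
}
#endif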
    219 
    220 int
    221 nextdma_match(device_t parent, cfdata_t match, void *aux)
    222 {
    223 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    224 
    225 	if (attached >= nnextdma_channels)
    226 		return 0;
    227 
    228 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
    229 
    230 	return 1;
    231 }
    232 
    233 void
    234 nextdma_attach(device_t parent, device_t self, void *aux)
    235 {
    236 	struct nextdma_softc *nsc = device_private(self);
    237 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
    238 
    239 	if (attached >= nnextdma_channels)
    240 		return;
    241 
    242 	nsc->sc_dev = self;
    243 	nsc->sc_chan = &nextdma_channel[attached];
    244 
    245 	nsc->sc_dmat = ia->ia_dmat;
    246 	nsc->sc_bst = ia->ia_bst;
    247 
    248 	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
    249 	    nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
    250 		panic("%s: can't map DMA registers for channel %s",
    251 		    device_xname(self), nsc->sc_chan->nd_name);
    252 	}
    253 
    254 	nextdma_init(nsc);
    255 
    256 	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
    257 	    NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
    258 	INTR_ENABLE(nsc->sc_chan->nd_intr);
    259 
    260 	printf(": channel %d (%s)\n", attached,
    261 		nsc->sc_chan->nd_name);
    262 	attached++;
    263 }
    264 
    265 void
    266 nextdma_init(struct nextdma_softc *nsc)
    267 {
    268 #ifdef ND_DEBUG
    269 	if (NEXTDMA_DEBUG) {
    270 		char sbuf[256];
    271 
    272 		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    273 		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
    274 		printf("DMA init ipl (%ld) intr(%s)\n",
    275 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    276 	}
    277 #endif
    278 
    279 	nsc->sc_stat.nd_map = NULL;
    280 	nsc->sc_stat.nd_idx = 0;
    281 	nsc->sc_stat.nd_map_cont = NULL;
    282 	nsc->sc_stat.nd_idx_cont = 0;
    283 	nsc->sc_stat.nd_exception = 0;
    284 
    285 	nd_bsw4(DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
    286 	nd_bsw4(DD_CSR, 0);
    287 
     288 #if 1
    289 	nextdma_setup_curr_regs(nsc);
    290 	nextdma_setup_cont_regs(nsc);
    291 #endif
    292 
    293 #if defined(DIAGNOSTIC)
    294 	{
    295 		u_long state;
     296 		state = nd_bsr4(DD_CSR);
    297 
    298 #if 1
     299 		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC;
     300 		 * milo (a 25 MHz 68040 mono cube) didn't have this problem.
     301 		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
    302 		 */
    303 		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
    304 #else
    305 		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
    306 			  DMACSR_SUPDATE | DMACSR_ENABLE);
    307 #endif
    308 		if (state != 0) {
    309 			nextdma_print(nsc);
    310 			panic("DMA did not reset");
    311 		}
    312 	}
    313 #endif
    314 }
    315 
    316 void
    317 nextdma_reset(struct nextdma_softc *nsc)
    318 {
    319 	int s;
    320 	struct nextdma_status *stat = &nsc->sc_stat;
    321 
    322 	s = spldma();
    323 
    324 	DPRINTF(("DMA reset\n"));
    325 
    326 #if (defined(ND_DEBUG))
    327 	if (NEXTDMA_DEBUG > 1)
    328 		nextdma_print(nsc);
    329 #endif
    330 
    331 	nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    332 	if ((stat->nd_map) || (stat->nd_map_cont)) {
    333 		if (stat->nd_map_cont) {
    334 			DPRINTF(
    335 			    ("DMA: resetting with non null continue map\n"));
    336 			if (nsc->sc_conf.nd_completed_cb)
    337 				(*nsc->sc_conf.nd_completed_cb)(
    338 				    stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    339 
    340 			stat->nd_map_cont = 0;
    341 			stat->nd_idx_cont = 0;
    342 		}
    343 		if (nsc->sc_conf.nd_shutdown_cb)
    344 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    345 		stat->nd_map = 0;
    346 		stat->nd_idx = 0;
    347 	}
    348 
    349 	splx(s);
    350 }
    351 
    352 /****************************************************************/
    353 
    354 
     355 /*
     356  * Rotate the continue map into the current slot, then call the
     357  * continue callback to try to refill the DMA continue buffer.
     358  */
    359 void
    360 nextdma_rotate(struct nextdma_softc *nsc)
    361 {
    362 	struct nextdma_status *stat = &nsc->sc_stat;
    363 
    364 	NDTRACEIF(ndtrace_addc('r'));
    365 	DPRINTF(("DMA nextdma_rotate()\n"));
    366 
    367 	/* Rotate the continue map into the current map */
    368 	stat->nd_map = stat->nd_map_cont;
    369 	stat->nd_idx = stat->nd_idx_cont;
    370 
    371 	if ((stat->nd_map_cont == NULL) ||
    372 	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
    373 		if (nsc->sc_conf.nd_continue_cb != NULL) {
    374 			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
    375 				(nsc->sc_conf.nd_cb_arg);
    376 			if (stat->nd_map_cont != NULL) {
    377 				stat->nd_map_cont->dm_xfer_len = 0;
    378 			}
    379 		} else {
    380 			stat->nd_map_cont = 0;
    381 		}
    382 		stat->nd_idx_cont = 0;
    383 	}
    384 
    385 #if defined(DIAGNOSTIC) && 0
    386 	if (stat->nd_map_cont) {
    387 		if (!DMA_BEGINALIGNED(
    388 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
    389 			nextdma_print(nsc);
    390 			panic("DMA request unaligned at start");
    391 		}
    392 		if (!DMA_ENDALIGNED(
    393 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    394 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
    395 			nextdma_print(nsc);
    396 			panic("DMA request unaligned at end");
    397 		}
    398 	}
    399 #endif
    400 
    401 }
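/*
 * A minimal sketch (not part of the driver) of the shape of a continue
 * callback as nextdma_rotate() uses it: return the next DMA map to queue,
 * already loaded (dm_segs/dm_nsegs valid), or NULL when there is nothing
 * more to transfer.  struct xxx_softc and its fields are hypothetical.
 */
#if 0
static bus_dmamap_t
xxx_continue_cb(void *arg)
{
	struct xxx_softc *sc = arg;

	if (sc->sc_pending == NULL)
		return NULL;	/* no more work; the channel will shut down */
	return sc->sc_pending->xb_dmamap;
}
#endif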
    402 
    403 void
    404 nextdma_setup_curr_regs(struct nextdma_softc *nsc)
    405 {
    406 	bus_addr_t dd_next;
    407 	bus_addr_t dd_limit;
    408 	bus_addr_t dd_saved_next;
    409 	bus_addr_t dd_saved_limit;
    410 	struct nextdma_status *stat = &nsc->sc_stat;
    411 
    412 	NDTRACEIF(ndtrace_addc('C'));
    413 	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));
    414 
    415 	if (stat->nd_map != NULL) {
    416 		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    417 		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
    418 			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    419 
    420 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    421 			/* Ethernet transmit needs secret magic */
    422 			dd_limit |= 0x80000000;
    423 			dd_limit += 15;
    424 		}
    425 	} else {
    426 		dd_next = turbo ? 0 : 0xdeadbeef;
    427 		dd_limit = turbo ? 0 : 0xdeadbeef;
    428 	}
    429 
    430 	dd_saved_next = dd_next;
    431 	dd_saved_limit = dd_limit;
    432 
    433 	NDTRACEIF(if (stat->nd_map) {
    434 		ndtrace_printf("%ld",
    435 		    stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    436 	});
    437 
    438 	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
    439 		nd_bsw4(DD_NEXT_INITBUF, dd_next);
    440 	} else {
    441 		nd_bsw4(DD_NEXT, dd_next);
    442 	}
    443 	nd_bsw4(DD_LIMIT, dd_limit);
    444 	if (!turbo)
    445 		nd_bsw4(DD_SAVED_NEXT, dd_saved_next);
    446 	if (!turbo)
    447 		nd_bsw4(DD_SAVED_LIMIT, dd_saved_limit);
    448 
    449 #ifdef DIAGNOSTIC
    450 	if ((nd_bsr4(DD_NEXT_INITBUF) != dd_next)
    451 	    || (nd_bsr4(DD_NEXT) != dd_next)
    452 	    || (nd_bsr4(DD_LIMIT) != dd_limit)
    453 	    || (!turbo && (nd_bsr4(DD_SAVED_NEXT) != dd_saved_next))
    454 	    || (!turbo && (nd_bsr4(DD_SAVED_LIMIT) != dd_saved_limit))
    455 		) {
    456 		nextdma_print(nsc);
    457 		panic("DMA failure writing to current regs");
    458 	}
    459 #endif
    460 }
    461 
    462 void
    463 nextdma_setup_cont_regs(struct nextdma_softc *nsc)
    464 {
    465 	bus_addr_t dd_start;
    466 	bus_addr_t dd_stop;
    467 	bus_addr_t dd_saved_start;
    468 	bus_addr_t dd_saved_stop;
    469 	struct nextdma_status *stat = &nsc->sc_stat;
    470 
    471 	NDTRACEIF(ndtrace_addc('c'));
    472 	DPRINTF(("DMA nextdma_setup_regs()\n"));
    473 
    474 	if (stat->nd_map_cont != NULL) {
    475 		dd_start =
    476 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
    477 		dd_stop  =
    478 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
    479 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len;
    480 
    481 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    482 			/* Ethernet transmit needs secret magic */
    483 			dd_stop |= 0x80000000;
    484 			dd_stop += 15;
    485 		}
    486 	} else {
    487 		dd_start = turbo ? nd_bsr4(DD_NEXT) : 0xdeadbee0;
    488 		dd_stop = turbo ? 0 : 0xdeadbee0;
    489 	}
    490 
    491 	dd_saved_start = dd_start;
    492 	dd_saved_stop  = dd_stop;
    493 
    494 	NDTRACEIF(if (stat->nd_map_cont != NULL) {
    495 		ndtrace_printf("%ld",
    496 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    497 	});
    498 
    499 	nd_bsw4(DD_START, dd_start);
    500 	nd_bsw4(DD_STOP, dd_stop);
    501 	if (!turbo)
    502 		nd_bsw4(DD_SAVED_START, dd_saved_start);
    503 	if (!turbo)
    504 		nd_bsw4(DD_SAVED_STOP, dd_saved_stop);
    505 	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
    506 		nd_bsw4(DD_STOP - 0x40, dd_start);
    507 
    508 #ifdef DIAGNOSTIC
    509 	if ((nd_bsr4(DD_START) != dd_start)
    510 	    || (dd_stop && (nd_bsr4(DD_STOP) != dd_stop))
    511 	    || (!turbo && (nd_bsr4(DD_SAVED_START) != dd_saved_start))
    512 	    || (!turbo && (nd_bsr4(DD_SAVED_STOP) != dd_saved_stop))
    513 		) {
    514 		nextdma_print(nsc);
    515 		panic("DMA failure writing to continue regs");
    516 	}
    517 #endif
    518 }
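/*
 * The "secret magic" applied to the non-turbo Ethernet-transmit limit in the
 * two functions above is undone again in nextdma_enet_intr() before the saved
 * limit is compared against the map.  These hypothetical helpers (not part of
 * the driver) merely restate that pairing as it appears in this file; what the
 * top bit and the +15 fudge mean to the hardware is not documented here.
 */
#if 0
static inline bus_addr_t
enetx_limit_encode(bus_addr_t limit)
{

	return (limit | 0x80000000) + 15;	/* as in *_setup_*_regs() */
}

static inline bus_addr_t
enetx_limit_decode(bus_addr_t limit)
{

	return (limit & ~0x80000000) - 15;	/* as in nextdma_enet_intr() */
}
#endif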
    519 
    520 /****************************************************************/
    521 
    522 #if NESP > 0
    523 static int
    524 nextdma_esp_intr(void *arg)
    525 {
    526 	/* @@@ This is bogus, we can't be certain of arg's type
    527 	 * unless the interrupt is for us.  For now we successfully
    528 	 * cheat because DMA interrupts are the only things invoked
    529 	 * at this interrupt level.
    530 	 */
    531 	struct nextdma_softc *nsc = arg;
    532 	int esp_dma_int(void *); /* XXX */
    533 
    534 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    535 		return 0;
    536 	/* Handle dma interrupts */
    537 
    538 	return esp_dma_int(nsc->sc_conf.nd_cb_arg);
    539 }
    540 #endif
    541 
    542 #if NXE > 0
    543 static int
    544 nextdma_enet_intr(void *arg)
    545 {
    546 
    547 	/*
    548 	 * @@@ This is bogus, we can't be certain of arg's type
    549 	 * unless the interrupt is for us.  For now we successfully
    550 	 * cheat because DMA interrupts are the only things invoked
    551 	 * at this interrupt level.
    552 	 */
    553 	struct nextdma_softc *nsc = arg;
    554 	unsigned int state;
    555 	bus_addr_t onext;
    556 	bus_addr_t olimit;
    557 	bus_addr_t slimit;
    558 	int result;
    559 	struct nextdma_status *stat = &nsc->sc_stat;
    560 
    561 	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
    562 		return 0;
    563 	/* Handle dma interrupts */
    564 
    565 	NDTRACEIF(ndtrace_addc('D'));
    566 #ifdef ND_DEBUG
    567 	if (NEXTDMA_DEBUG) {
    568 		char sbuf[256];
    569 
    570 		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    571 		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
    572 		printf("DMA interrupt ipl (%ld) intr(%s)\n",
    573 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    574 	}
    575 #endif
    576 
    577 #ifdef DIAGNOSTIC
    578 	if (stat->nd_map == NULL) {
    579 		nextdma_print(nsc);
    580 		panic("DMA missing current map in interrupt!");
    581 	}
    582 #endif
    583 
    584 	state = nd_bsr4(DD_CSR);
    585 
    586 #if defined(ND_DEBUG)
    587 	nextdma_debug_savestate(nsc, state);
    588 #endif
    589 
    590 #ifdef DIAGNOSTIC
    591 	if (/* (state & DMACSR_READ) || */ (state & DMACSR_COMPLETE) == 0) {
    592 		char sbuf[256];
    593 		nextdma_print(nsc);
    594 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    595 		printf("DMA: state %s\n",sbuf);
    596 		panic("DMA complete not set in interrupt");
    597 	}
    598 #endif
    599 
    600 	DPRINTF(("DMA: finishing xfer\n"));
    601 
    602 	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
    603 	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;
    604 
    605 	result = 0;
    606 	if ((state & DMACSR_ENABLE) != 0) {
    607 		/* enable bit was set */
    608 		result |= 0x01;
    609 	}
    610 	if ((state & DMACSR_SUPDATE) != 0) {
    611 		/* supdate bit was set */
    612 		result |= 0x02;
    613 	}
    614 	if (stat->nd_map_cont == NULL) {
    615 		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    616 		/* Expecting a shutdown, didn't SETSUPDATE last turn */
    617 		result |= 0x04;
    618 	}
    619 	if ((state & DMACSR_BUSEXC) != 0) {
    620 		/* bus exception bit was set */
    621 		result |= 0x08;
    622 	}
    623 	switch (result) {
    624 	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
    625 	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
    626 		if (turbo) {
    627 			volatile u_int *limit =
    628 			    (volatile u_int *)IIOV(0x2000050 + 0x4000);
    629 			slimit = *limit;
    630 		} else {
    631 			slimit = nd_bsr4(DD_SAVED_LIMIT);
    632 		}
    633 		break;
    634 	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
    635 	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
    636 		if (turbo) {
    637 			volatile u_int *limit =
    638 			    (volatile u_int *)IIOV(0x2000050 + 0x4000);
    639 			slimit = *limit;
    640 		} else {
    641 			slimit = nd_bsr4(DD_SAVED_LIMIT);
    642 		}
    643 		break;
    644 	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
    645 	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
    646 		slimit = nd_bsr4(DD_NEXT);
    647 		break;
    648 	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
    649 	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
    650 		slimit = nd_bsr4(DD_LIMIT);
    651 		break;
    652 	default:
    653 #ifdef DIAGNOSTIC
    654 	{
    655 		char sbuf[256];
    656 		printf("DMA: please send this output to"
    657 		    " port-next68k-maintainer (at) NetBSD.org:\n");
    658 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    659 		printf("DMA: state %s\n",sbuf);
    660 		nextdma_print(nsc);
    661 		panic("DMA: condition 0x%02x not yet documented to occur",
    662 		    result);
    663 	}
    664 #endif
    665 	slimit = olimit;
    666 	break;
    667 	}
    668 
    669 	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
    670 		slimit &= ~0x80000000;
    671 		slimit -= 15;
    672 	}
    673 
    674 #ifdef DIAGNOSTIC
    675 	if ((state & DMACSR_READ) != 0)
    676 		DPRINTF(("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n",
    677 		    onext, slimit, olimit,
    678 		    (state & DMACSR_READ) ? "read" : "write"));
    679 	if (slimit < onext || slimit > olimit) {
    680 		char sbuf[256];
    681 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    682 		printf("DMA: state %s\n",sbuf);
    683 		nextdma_print(nsc);
    684 		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",
    685 		    slimit);
    686 	}
    687 #endif
    688 
    689 #ifdef DIAGNOSTIC
    690 	if ((state & DMACSR_ENABLE) != 0 &&
    691 	    stat->nd_idx + 1 != stat->nd_map->dm_nsegs) {
    692 		if (slimit != olimit) {
    693 			char sbuf[256];
    694 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    695 			printf("DMA: state %s\n",sbuf);
    696 			nextdma_print(nsc);
    697 			panic("DMA: short limit register (0x%08lx)"
    698 			    " w/o finishing map.", slimit);
    699 		}
    700 	}
    701 #endif
    702 
    703 #if (defined(ND_DEBUG))
    704 	if (NEXTDMA_DEBUG > 2)
    705 		nextdma_print(nsc);
    706 #endif
    707 
    708 	stat->nd_map->dm_xfer_len += slimit-onext;
    709 
     710 	/* If we've reached the end of the current map, tell the client
     711 	 * (via the completed callback) that the map is done.
     712 	 */
    713 	if (stat->nd_idx + 1 == stat->nd_map->dm_nsegs) {
    714 		if (nsc->sc_conf.nd_completed_cb)
    715 			(*nsc->sc_conf.nd_completed_cb)(stat->nd_map,
    716 			    nsc->sc_conf.nd_cb_arg);
    717 	} else {
    718 		KASSERT(stat->nd_map == stat->nd_map_cont);
    719 		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
    720 	}
    721 	stat->nd_map = 0;
    722 	stat->nd_idx = 0;
    723 
    724 #if (defined(ND_DEBUG))
    725 	if (NEXTDMA_DEBUG) {
    726 		char sbuf[256];
    727 		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
    728 		printf("CLNDMAP: dd->dd_csr          = %s\n", sbuf);
    729 	}
    730 #endif
    731 	if ((state & DMACSR_ENABLE) != 0) {
    732 		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
    733 
    734 		nextdma_rotate(nsc);
    735 		nextdma_setup_cont_regs(nsc);
    736 
    737 		if ((state & DMACSR_READ) != 0) {
    738 			dmadir = DMACSR_SETREAD;
    739 		} else {
    740 			dmadir = DMACSR_SETWRITE;
    741 		}
    742 
    743 		if (stat->nd_map_cont == NULL) {
    744 			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
    745 			nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
    746 			NDTRACEIF(ndtrace_addc('g'));
    747 		} else {
    748 			nd_bsw4(DD_CSR,
    749 			    DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
    750 			NDTRACEIF(ndtrace_addc('G'));
    751 		}
    752 	} else {
    753 		DPRINTF(("DMA: a shutdown occurred\n"));
    754 		nd_bsw4(DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
    755 
     756 		/* Clean up any incomplete transfers: */
     757 		/* first, the continue map, if one is pending */
    758 		if (stat->nd_map_cont) {
    759 			DPRINTF(("DMA: shutting down with"
    760 			    " non null continue map\n"));
    761 			if (nsc->sc_conf.nd_completed_cb != NULL)
    762 				(*nsc->sc_conf.nd_completed_cb)(
    763 				    stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);
    764 
    765 			stat->nd_map_cont = 0;
    766 			stat->nd_idx_cont = 0;
    767 		}
    768 		if (nsc->sc_conf.nd_shutdown_cb != NULL)
    769 			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
    770 	}
    771 
    772 #ifdef ND_DEBUG
    773 	if (NEXTDMA_DEBUG) {
    774 		char sbuf[256];
    775 
    776 		snprintb(sbuf, sizeof(sbuf),
    777 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    778 		printf("DMA exiting interrupt ipl (%ld) intr(%s)\n",
    779 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    780 	}
    781 #endif
    782 
    783 	return 1;
    784 }
    785 #endif
    786 
     787 /* Check whether DMA has finished on a channel. */
    789 int
    790 nextdma_finished(struct nextdma_softc *nsc)
    791 {
    792 	int r;
    793 	int s;
    794 	struct nextdma_status *stat = &nsc->sc_stat;
    795 
    796 	s = spldma();
    797 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
    798 	splx(s);
    799 
    800 	return r;
    801 }
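/*
 * A minimal sketch (not part of the driver) of a client waiting for the
 * channel to drain by polling nextdma_finished().  The DELAY() busy-wait
 * and the timeout value are placeholders, not taken from this file.
 */
#if 0
static int
xxx_dma_drain(struct nextdma_softc *nsc)
{
	int timo;

	for (timo = 10000; timo > 0; timo--) {
		if (nextdma_finished(nsc))
			return 0;
		DELAY(10);	/* spin; real code might prefer to sleep */
	}
	return ETIMEDOUT;	/* channel still busy */
}
#endif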
    802 
    803 void
    804 nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
    805 {
    806 	struct nextdma_status *stat = &nsc->sc_stat;
    807 
    808 	NDTRACEIF(ndtrace_addc('n'));
    809 #ifdef DIAGNOSTIC
    810 	if (!nextdma_finished(nsc)) {
    811 		char sbuf[256];
    812 
    813 		snprintb(sbuf, sizeof(sbuf),
    814 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    815 		panic("DMA trying to start before previous finished"
    816 		    " on intr(%s)", sbuf);
    817 	}
    818 #endif
    819 
    820 #ifdef ND_DEBUG
    821 	if (NEXTDMA_DEBUG) {
    822 		char sbuf[256];
    823 
    824 		snprintb(sbuf, sizeof(sbuf),
    825 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    826 		printf("DMA start (%ld) intr(%s)\n",
    827 		    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
    828 	}
    829 #endif
    830 
    831 #ifdef DIAGNOSTIC
    832 	if (stat->nd_map != NULL) {
    833 		nextdma_print(nsc);
    834 		panic("DMA: nextdma_start() with non null map");
    835 	}
    836 	if (stat->nd_map_cont != NULL) {
    837 		nextdma_print(nsc);
    838 		panic("DMA: nextdma_start() with non null continue map");
    839 	}
    840 #endif
    841 
    842 #ifdef DIAGNOSTIC
    843 	if (dmadir != DMACSR_SETREAD && dmadir != DMACSR_SETWRITE) {
    844 		panic("DMA: nextdma_start(), dmadir arg must be"
    845 		    " DMACSR_SETREAD or DMACSR_SETWRITE");
    846 	}
    847 #endif
    848 
    849 #if defined(ND_DEBUG)
    850 	nextdma_debug_initstate(nsc);
    851 #endif
    852 
    853 	/* preload both the current and the continue maps */
    854 	nextdma_rotate(nsc);
    855 
    856 #ifdef DIAGNOSTIC
    857 	if (stat->nd_map_cont == NULL) {
    858 		panic("No map available in nextdma_start()");
    859 	}
    860 #endif
    861 
    862 	nextdma_rotate(nsc);
    863 
    864 #ifdef ND_DEBUG
    865 	if (NEXTDMA_DEBUG) {
    866 		char sbuf[256];
    867 
    868 		snprintb(sbuf, sizeof(sbuf),
    869 		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
    870 		printf("DMA initiating DMA %s of %d segments on intr(%s)\n",
    871 		    (dmadir == DMACSR_SETREAD ? "read" : "write"),
    872 		    stat->nd_map->dm_nsegs, sbuf);
    873 	}
    874 #endif
    875 
    876 	nd_bsw4(DD_CSR, (turbo ?
    877 	    DMACSR_INITBUFTURBO : DMACSR_INITBUF) | DMACSR_RESET | dmadir);
    878 	nd_bsw4(DD_CSR, 0);
    879 
    880 	nextdma_setup_curr_regs(nsc);
    881 	nextdma_setup_cont_regs(nsc);
    882 
    883 #if (defined(ND_DEBUG))
    884 	if (NEXTDMA_DEBUG > 2)
    885 		nextdma_print(nsc);
    886 #endif
    887 
    888 	if (stat->nd_map_cont == NULL) {
    889 		nd_bsw4(DD_CSR, DMACSR_SETENABLE | dmadir);
    890 	} else {
    891 		nd_bsw4(DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
    892 	}
    893 }
    894 
    895 /* This routine is used for debugging */
    896 void
    897 nextdma_print(struct nextdma_softc *nsc)
    898 {
    899 	u_long dd_csr;
    900 	u_long dd_next;
    901 	u_long dd_next_initbuf;
    902 	u_long dd_limit;
    903 	u_long dd_start;
    904 	u_long dd_stop;
    905 	u_long dd_saved_next;
    906 	u_long dd_saved_limit;
    907 	u_long dd_saved_start;
    908 	u_long dd_saved_stop;
    909 	char sbuf[256];
    910 	struct nextdma_status *stat = &nsc->sc_stat;
    911 
    912 	/*
    913 	 * Read all of the registers before we print anything out,
    914 	 * in case something changes
    915 	 */
    916 	dd_csr          = nd_bsr4(DD_CSR);
    917 	dd_next         = nd_bsr4(DD_NEXT);
    918 	dd_next_initbuf = nd_bsr4(DD_NEXT_INITBUF);
    919 	dd_limit        = nd_bsr4(DD_LIMIT);
    920 	dd_start        = nd_bsr4(DD_START);
    921 	dd_stop         = nd_bsr4(DD_STOP);
    922 	dd_saved_next   = nd_bsr4(DD_SAVED_NEXT);
    923 	dd_saved_limit  = nd_bsr4(DD_SAVED_LIMIT);
    924 	dd_saved_start  = nd_bsr4(DD_SAVED_START);
    925 	dd_saved_stop   = nd_bsr4(DD_SAVED_STOP);
    926 
    927 	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    928 	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
    929 	printf("NDMAP: *intrstat = %s\n", sbuf);
    930 
    931 	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
    932 	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
    933 	printf("NDMAP: *intrmask = %s\n", sbuf);
    934 
    935 	/* NDMAP is Next DMA Print (really!) */
    936 
    937 	if (stat->nd_map != NULL) {
    938 		int i;
    939 
    940 		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
    941 		    stat->nd_map->dm_mapsize);
    942 		printf("NDMAP: nd_map->dm_nsegs = %d\n",
    943 		    stat->nd_map->dm_nsegs);
    944 		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
    945 		    stat->nd_map->dm_xfer_len);
    946 		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    947 		    stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
    948 		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
    949 		    stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);
    950 
    951 		printf("NDMAP: Entire map;\n");
    952 		for(i = 0; i < stat->nd_map->dm_nsegs; i++) {
    953 			printf("NDMAP:   "
    954 			    "nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
    955 			    i, stat->nd_map->dm_segs[i].ds_addr);
    956 			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
    957 			    i, stat->nd_map->dm_segs[i].ds_len);
    958 		}
    959 	} else {
    960 		printf("NDMAP: nd_map = NULL\n");
    961 	}
    962 	if (stat->nd_map_cont != NULL) {
    963 		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
    964 		    stat->nd_map_cont->dm_mapsize);
    965 		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
    966 		    stat->nd_map_cont->dm_nsegs);
    967 		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
    968 		    stat->nd_map_cont->dm_xfer_len);
    969 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
    970 		    stat->nd_idx_cont,
    971 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
    972 		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    973 		    stat->nd_idx_cont,
    974 		    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
    975 		if (stat->nd_map_cont != stat->nd_map) {
    976 			int i;
    977 			printf("NDMAP: Entire map;\n");
    978 			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
    979 				printf("NDMAP:   "
    980 				    "nd_map_cont->dm_segs[%d].ds_addr"
    981 				    " = 0x%08lx\n",
    982 				    i, stat->nd_map_cont->dm_segs[i].ds_addr);
    983 				printf("NDMAP:   "
    984 				    "nd_map_cont->dm_segs[%d].ds_len = %ld\n",
    985 				    i, stat->nd_map_cont->dm_segs[i].ds_len);
    986 			}
    987 		}
    988 	} else {
    989 		printf("NDMAP: nd_map_cont = NULL\n");
    990 	}
    991 
    992 	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
    993 	printf("NDMAP: dd->dd_csr          = %s\n",   sbuf);
    994 
    995 	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
    996 	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
    997 	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
    998 	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
    999 	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
   1000 	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
   1001 	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
   1002 	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
   1003 	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);
   1004 
   1005 	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
   1006 	    NEXT_I_BIT(nsc->sc_chan->nd_intr));
   1007 	printf("NDMAP: interrupt ipl (%ld) intr(%s)\n",
   1008 	    NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
   1009 }
   1010 
   1011 #if defined(ND_DEBUG)
   1012 void
   1013 nextdma_debug_initstate(struct nextdma_softc *nsc)
   1014 {
    1015 	switch (nsc->sc_chan->nd_intr) {
   1016 	case NEXT_I_ENETR_DMA:
   1017 		memset(nextdma_debug_enetr_state, 0,
   1018 		    sizeof(nextdma_debug_enetr_state));
   1019 		break;
   1020 	case NEXT_I_SCSI_DMA:
   1021 		memset(nextdma_debug_scsi_state, 0,
   1022 		    sizeof(nextdma_debug_scsi_state));
   1023 		break;
   1024 	}
   1025 }
   1026 
   1027 void
   1028 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
   1029 {
   1030 
    1031 	switch (nsc->sc_chan->nd_intr) {
   1032 	case NEXT_I_ENETR_DMA:
   1033 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
   1034 		nextdma_debug_enetr_idx %=
   1035 		    (sizeof(nextdma_debug_enetr_state) / sizeof(unsigned int));
   1036 		break;
   1037 	case NEXT_I_SCSI_DMA:
   1038 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
   1039 		nextdma_debug_scsi_idx %=
   1040 		    (sizeof(nextdma_debug_scsi_state) / sizeof(unsigned int));
   1041 		break;
   1042 	}
   1043 }
   1044 
   1045 void
   1046 nextdma_debug_enetr_dumpstate(void)
   1047 {
   1048 	int i;
   1049 	int s;
   1050 	s = spldma();
   1051 	i = nextdma_debug_enetr_idx;
   1052 	do {
   1053 		char sbuf[256];
   1054 		if (nextdma_debug_enetr_state[i]) {
   1055 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS,
   1056 			    nextdma_debug_enetr_state[i]);
   1057 			printf("DMA: 0x%02x state %s\n", i, sbuf);
   1058 		}
   1059 		i++;
   1060 		i %= (sizeof(nextdma_debug_enetr_state) / sizeof(unsigned int));
   1061 	} while (i != nextdma_debug_enetr_idx);
   1062 	splx(s);
   1063 }
   1064 
   1065 void
   1066 nextdma_debug_scsi_dumpstate(void)
   1067 {
   1068 	int i;
   1069 	int s;
   1070 	s = spldma();
   1071 	i = nextdma_debug_scsi_idx;
   1072 	do {
   1073 		char sbuf[256];
   1074 		if (nextdma_debug_scsi_state[i]) {
   1075 			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS,
   1076 			    nextdma_debug_scsi_state[i]);
   1077 			printf("DMA: 0x%02x state %s\n", i, sbuf);
   1078 		}
   1079 		i++;
   1080 		i %= (sizeof(nextdma_debug_scsi_state) / sizeof(unsigned int));
   1081 	} while (i != nextdma_debug_scsi_idx);
   1082 	splx(s);
   1083 }
   1084 #endif
   1085