      1 /*	$NetBSD: si_sebuf.c,v 1.33 2024/12/20 23:52:00 tsutsui Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Gordon W. Ross.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Sun3/E SCSI driver (machine-dependent portion).
     34  * The machine-independent parts are in ncr5380sbc.c
     35  *
     36  * XXX - Mostly from the si driver.  Merge?
     37  */
     38 
     39 #include <sys/cdefs.h>
     40 __KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.33 2024/12/20 23:52:00 tsutsui Exp $");
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/errno.h>
     45 #include <sys/kernel.h>
     46 #include <sys/kmem.h>
     47 #include <sys/device.h>
     48 #include <sys/buf.h>
     49 #include <sys/proc.h>
     50 
     51 #include <dev/scsipi/scsi_all.h>
     52 #include <dev/scsipi/scsipi_all.h>
     53 #include <dev/scsipi/scsipi_debug.h>
     54 #include <dev/scsipi/scsiconf.h>
     55 
     56 #include <machine/autoconf.h>
     57 
     58 /* #define DEBUG XXX */
     59 
     60 #include <dev/ic/ncr5380reg.h>
     61 #include <dev/ic/ncr5380var.h>
     62 
     63 #include "sereg.h"
     64 #include "sevar.h"
     65 
     66 /*
     67  * Transfers smaller than this are done using PIO
     68  * (on the assumption they're not worth the DMA overhead)
     69  */
     70 #define	MIN_DMA_LEN 128
     71 
     72 /*
     73  * Transfers larger than 65535 bytes need to be split up.
     74  * (Some of the FIFO logic has only 16-bit counters.)
     75  * Make the size an integer multiple of the page size
     76  * to avoid buf/cluster remap problems.  (paranoid?)
     77  */
     78 #define	MAX_DMA_LEN 0xE000
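
/*
 * Worked example of the bounds above (editorial note, assuming the
 * usual 8 KB sun3 page size): MAX_DMA_LEN = 0xE000 = 57344 = 7 * 8192,
 * so it is a whole number of pages and still fits in a 16-bit counter
 * (57344 < 65536), while MIN_DMA_LEN = 128 keeps tiny transfers on the
 * PIO path.  With __CTASSERT from <sys/cdefs.h>, the invariants could
 * be written out roughly as:
 *
 *	__CTASSERT(MAX_DMA_LEN < 0x10000);
 *	__CTASSERT((MAX_DMA_LEN & (8192 - 1)) == 0);
 *	__CTASSERT(MIN_DMA_LEN < MAX_DMA_LEN);
 */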
     79 
     80 /*
     81  * This structure is used to keep track of mapped DMA requests.
     82  */
     83 struct se_dma_handle {
     84 	int 		dh_flags;
     85 #define	SIDH_BUSY	1		/* This DH is in use */
     86 #define	SIDH_OUT	2		/* DMA does data out (write) */
     87 	u_char *	dh_addr;	/* KVA of start of buffer */
     88 	int 		dh_maplen;	/* Length of KVA mapping. */
     89 	long		dh_dma; 	/* Offset in DMA buffer. */
     90 };
     91 
     92 /*
     93  * The first structure member has to be the ncr5380_softc
     94  * so we can just cast to go back and forth between them.
     95  */
     96 struct se_softc {
     97 	struct ncr5380_softc	ncr_sc;
     98 	volatile struct se_regs	*sc_regs;
     99 	int		sc_adapter_type;
    100 	int		sc_adapter_iv;		/* int. vec */
    101 	int 	sc_options;			/* options for this instance */
    102 	int 	sc_reqlen;  		/* requested transfer length */
    103 	struct se_dma_handle *sc_dma;
    104 	/* DMA command block for the OBIO controller. */
    105 	void *sc_dmacmd;
    106 };
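
/*
 * Illustrative sketch (editorial addition, kept under "#if 0" so it is
 * never compiled): because ncr_sc is the first member, an se_softc
 * pointer and an ncr5380_softc pointer refer to the same storage, which
 * is what the casts throughout this file rely on.
 */
#if 0
static void
se_cast_example(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;	/* MI -> MD */
	struct ncr5380_softc *back = &sc->ncr_sc;		/* MD -> MI */

	KASSERT(back == ncr_sc);	/* same object, two views */
}
#endif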
    107 
    108 /* Options for disconnect/reselect, DMA, and interrupts. */
    109 #define SE_NO_DISCONNECT    0xff
    110 #define SE_NO_PARITY_CHK  0xff00
    111 #define SE_FORCE_POLLING 0x10000
    112 #define SE_DISABLE_DMA   0x20000
    113 
    114 void se_dma_alloc(struct ncr5380_softc *);
    115 void se_dma_free(struct ncr5380_softc *);
    116 void se_dma_poll(struct ncr5380_softc *);
    117 
    118 void se_dma_setup(struct ncr5380_softc *);
    119 void se_dma_start(struct ncr5380_softc *);
    120 void se_dma_eop(struct ncr5380_softc *);
    121 void se_dma_stop(struct ncr5380_softc *);
    122 
    123 void se_intr_on (struct ncr5380_softc *);
    124 void se_intr_off(struct ncr5380_softc *);
    125 
    126 static int  se_intr(void *);
    127 static void se_reset(struct ncr5380_softc *);
    128 
    129 /*
    130  * New-style autoconfig attachment
    131  */
    132 
    133 static int	se_match(device_t, cfdata_t, void *);
    134 static void	se_attach(device_t, device_t, void *);
    135 
    136 CFATTACH_DECL_NEW(si_sebuf, sizeof(struct se_softc),
    137     se_match, se_attach, NULL, NULL);
    138 
    139 static void	se_minphys(struct buf *);
    140 
    141 /* Options for disconnect/reselect, DMA, and interrupts. */
    142 int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
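
/*
 * Example decode of the default above (editorial note):
 * SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff == 0x300ff, i.e.
 * disconnect/reselect disabled for all eight targets (low byte 0xff),
 * parity checking left enabled (bits 8-15 clear), polled transfers
 * forced, and DMA disabled.  A per-instance override would come from
 * the config(5) flags, e.g. a hypothetical kernel config line:
 *
 *	se0 at sebuf0 flags 0x000ff
 */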
    143 
    144 /* How long to wait for DMA before declaring an error. */
    145 int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */
    146 
    147 int se_debug = 0;
    148 
    149 static int
    150 se_match(device_t parent, cfdata_t cf, void *args)
    151 {
    152 	struct sebuf_attach_args *aa = args;
    153 
    154 	/* Match by name. */
    155 	if (strcmp(aa->name, "se"))
    156 		return 0;
    157 
    158 	/* Anything else to check? */
    159 
    160 	return 1;
    161 }
    162 
    163 static void
    164 se_attach(device_t parent, device_t self, void *args)
    165 {
    166 	struct se_softc *sc = device_private(self);
    167 	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
    168 	struct cfdata *cf = device_cfdata(self);
    169 	struct sebuf_attach_args *aa = args;
    170 	volatile struct se_regs *regs;
    171 	int i;
    172 
    173 	ncr_sc->sc_dev = self;
    174 
    175 	/* Get options from config flags if specified. */
    176 	if (cf->cf_flags)
    177 		sc->sc_options = cf->cf_flags;
    178 	else
    179 		sc->sc_options = se_options;
    180 
    181 	aprint_normal(": options=0x%x\n", sc->sc_options);
    182 
    183 	sc->sc_adapter_type = aa->ca.ca_bustype;
    184 	sc->sc_adapter_iv = aa->ca.ca_intvec;
    185 	sc->sc_regs = regs = aa->regs;
    186 
    187 	/*
    188 	 * MD function pointers used by the MI code.
    189 	 */
    190 	ncr_sc->sc_pio_out = ncr5380_pio_out;
    191 	ncr_sc->sc_pio_in =  ncr5380_pio_in;
    192 
    193 #if 0	/* XXX - not yet... */
    194 	ncr_sc->sc_dma_alloc = se_dma_alloc;
    195 	ncr_sc->sc_dma_free  = se_dma_free;
    196 	ncr_sc->sc_dma_setup = se_dma_setup;
    197 	ncr_sc->sc_dma_start = se_dma_start;
    198 	ncr_sc->sc_dma_poll  = se_dma_poll;
    199 	ncr_sc->sc_dma_eop   = se_dma_eop;
    200 	ncr_sc->sc_dma_stop  = se_dma_stop;
    201 	ncr_sc->sc_intr_on   = se_intr_on;
    202 	ncr_sc->sc_intr_off  = se_intr_off;
    203 #endif	/* XXX */
    204 
    205 	/* Attach interrupt handler. */
    206 	isr_add_vectored(se_intr, (void *)sc,
    207 	    aa->ca.ca_intpri, aa->ca.ca_intvec);
    208 
    209 	/* Reset the hardware. */
    210 	se_reset(ncr_sc);
    211 
    212 	/* Do the common attach stuff. */
    213 
    214 	/*
    215 	 * Support the "options" (config file flags).
    216 	 * Disconnect/reselect is a per-target mask.
    217 	 * Interrupts and DMA are per-controller.
    218 	 */
    219 	ncr_sc->sc_no_disconnect =
    220 	    (sc->sc_options & SE_NO_DISCONNECT);
    221 	ncr_sc->sc_parity_disable =
    222 	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
    223 	if (sc->sc_options & SE_FORCE_POLLING)
    224 		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
    225 
    226 #if 1	/* XXX - Temporary */
    227 	/* XXX - In case we think DMA is completely broken... */
    228 	if (sc->sc_options & SE_DISABLE_DMA) {
    229 		/* Override this function pointer. */
    230 		ncr_sc->sc_dma_alloc = NULL;
    231 	}
    232 #endif
    233 	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;
    234 
    235 	/*
    236 	 * Initialize fields used by the MI code
    237 	 */
    238 	ncr_sc->sci_r0 = &regs->ncrregs[0];
    239 	ncr_sc->sci_r1 = &regs->ncrregs[1];
    240 	ncr_sc->sci_r2 = &regs->ncrregs[2];
    241 	ncr_sc->sci_r3 = &regs->ncrregs[3];
    242 	ncr_sc->sci_r4 = &regs->ncrregs[4];
    243 	ncr_sc->sci_r5 = &regs->ncrregs[5];
    244 	ncr_sc->sci_r6 = &regs->ncrregs[6];
    245 	ncr_sc->sci_r7 = &regs->ncrregs[7];
    246 
    247 	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;
    248 
    249 	/*
    250 	 * Allocate DMA handles.
    251 	 */
    252 	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
    253 	sc->sc_dma = kmem_alloc(i, KM_SLEEP);
    254 	for (i = 0; i < SCI_OPENINGS; i++)
    255 		sc->sc_dma[i].dh_flags = 0;
    256 
    257 	ncr_sc->sc_channel.chan_id = 7;
    258 	ncr_sc->sc_adapter.adapt_minphys = se_minphys;
    259 
    260 	/*
    261 	 *  Initialize the se board itself.
    262 	 */
    263 	ncr5380_attach(ncr_sc);
    264 }
    265 
    266 static void
    267 se_reset(struct ncr5380_softc *ncr_sc)
    268 {
    269 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    270 	volatile struct se_regs *se = sc->sc_regs;
    271 
    272 #ifdef	DEBUG
    273 	if (se_debug) {
    274 		printf("%s\n", __func__);
    275 	}
    276 #endif
    277 
    278 	/* The reset bits in the CSR are active low. */
    279 	se->se_csr = 0;
    280 	delay(10);
    281 	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
    282 	delay(10);
    283 
    284 	/* Make sure the DMA engine is stopped. */
    285 	se->dma_addr = 0;
    286 	se->dma_cntr = 0;
    287 	se->se_ivec = sc->sc_adapter_iv;
    288 }
    289 
    290 /*
    291  * This is called when the bus is going idle,
    292  * so we want to enable the SBC interrupts.
    293  * That is controlled by the DMA enable!
    294  * Who would have guessed!
    295  * What a NASTY trick!
    296  */
    297 void
    298 se_intr_on(struct ncr5380_softc *ncr_sc)
    299 {
    300 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    301 	volatile struct se_regs *se = sc->sc_regs;
    302 
    303 	/* receive mode should be safer */
    304 	se->se_csr &= ~SE_CSR_SEND;
    305 
    306 	/* Clear the count so nothing happens. */
    307 	se->dma_cntr = 0;
    308 
    309 	/* Clear the start address too. (paranoid?) */
    310 	se->dma_addr = 0;
    311 
    312 	/* Finally, enable the DMA engine. */
    313 	se->se_csr |= SE_CSR_INTR_EN;
    314 }
    315 
    316 /*
    317  * This is called when the bus is idle and we are
    318  * about to start playing with the SBC chip.
    319  */
    320 void
    321 se_intr_off(struct ncr5380_softc *ncr_sc)
    322 {
    323 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    324 	volatile struct se_regs *se = sc->sc_regs;
    325 
    326 	se->se_csr &= ~SE_CSR_INTR_EN;
    327 }
    328 
    329 /*
    330  * This function is called during the COMMAND or MSG_IN phase
    331  * that precedes a DATA_IN or DATA_OUT phase, in case we need
    332  * to setup the DMA engine before the bus enters a DATA phase.
    333  *
    334  * On the VME version, setup the start address, but clear the
    335  * count (to make sure it stays idle) and set that later.
    336  * XXX: The VME adapter appears to suppress SBC interrupts
    337  * when the FIFO is not empty or the FIFO count is non-zero!
    338  * XXX: Need to copy data into the DMA buffer...
    339  */
    340 void
    341 se_dma_setup(struct ncr5380_softc *ncr_sc)
    342 {
    343 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    344 	struct sci_req *sr = ncr_sc->sc_current;
    345 	struct se_dma_handle *dh = sr->sr_dma_hand;
    346 	volatile struct se_regs *se = sc->sc_regs;
    347 	long data_pa;
    348 	int xlen;
    349 
    350 	/*
    351 	 * Get the DMA mapping for this segment.
    352 	 * XXX - Should separate allocation and mapin.
    353 	 */
    354 	data_pa = 0; /* XXX se_dma_kvtopa(dh->dh_dma); */
    355 	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
    356 	if (data_pa & 1)
    357 		panic("%s: bad pa=0x%lx", __func__, data_pa);
    358 	xlen = ncr_sc->sc_datalen;
    359 	xlen &= ~1;				/* XXX: necessary? */
    360 	sc->sc_reqlen = xlen; 	/* XXX: or less? */
    361 
    362 #ifdef	DEBUG
    363 	if (se_debug & 2) {
    364 		printf("%s: dh=%p, pa=0x%lx, xlen=0x%x\n",
    365 		    __func__, dh, data_pa, xlen);
    366 	}
    367 #endif
    368 
    369 	/* Set direction (send/recv) */
    370 	if (dh->dh_flags & SIDH_OUT) {
    371 		se->se_csr |= SE_CSR_SEND;
    372 	} else {
    373 		se->se_csr &= ~SE_CSR_SEND;
    374 	}
    375 
    376 	/* Load the start address. */
    377 	se->dma_addr = (ushort)(data_pa & 0xFFFF);
    378 
    379 	/*
    380 	 * Keep the count zero or it may start early!
    381 	 */
    382 	se->dma_cntr = 0;
    383 }
    384 
    385 
    386 void
    387 se_dma_start(struct ncr5380_softc *ncr_sc)
    388 {
    389 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    390 	struct sci_req *sr = ncr_sc->sc_current;
    391 	struct se_dma_handle *dh = sr->sr_dma_hand;
    392 	volatile struct se_regs *se = sc->sc_regs;
    393 	int s, xlen;
    394 
    395 	xlen = sc->sc_reqlen;
    396 
    397 	/* This MAY be time critical (not sure). */
    398 	s = splhigh();
    399 
    400 	se->dma_cntr = (ushort)(xlen & 0xFFFF);
    401 
    402 	/*
    403 	 * Acknowledge the phase change.  (After DMA setup!)
    404 	 * Put the SBIC into DMA mode, and start the transfer.
    405 	 */
    406 	if (dh->dh_flags & SIDH_OUT) {
    407 		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
    408 		SCI_CLR_INTR(ncr_sc);
    409 		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
    410 		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
    411 		*ncr_sc->sci_dma_send = 0;	/* start it */
    412 	} else {
    413 		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
    414 		SCI_CLR_INTR(ncr_sc);
    415 		*ncr_sc->sci_icmd = 0;
    416 		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
    417 		*ncr_sc->sci_irecv = 0;	/* start it */
    418 	}
    419 
    420 	/* Let'er rip! */
    421 	se->se_csr |= SE_CSR_INTR_EN;
    422 
    423 	splx(s);
    424 	ncr_sc->sc_state |= NCR_DOINGDMA;
    425 
    426 #ifdef	DEBUG
    427 	if (se_debug & 2) {
    428 		printf("%s: started, flags=0x%x\n",
    429 		    __func__, ncr_sc->sc_state);
    430 	}
    431 #endif
    432 }
    433 
    434 
    435 void
    436 se_dma_eop(struct ncr5380_softc *ncr_sc)
    437 {
    438 
    439 	/* Not needed - DMA was stopped prior to examining sci_csr */
    440 }
    441 
    442 
    443 void
    444 se_dma_stop(struct ncr5380_softc *ncr_sc)
    445 {
    446 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    447 	struct sci_req *sr = ncr_sc->sc_current;
    448 	struct se_dma_handle *dh = sr->sr_dma_hand;
    449 	volatile struct se_regs *se = sc->sc_regs;
    450 	int resid, ntrans;
    451 
    452 	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
    453 #ifdef	DEBUG
    454 		printf("%s: DMA not running\n", __func__);
    455 #endif
    456 		return;
    457 	}
    458 	ncr_sc->sc_state &= ~NCR_DOINGDMA;
    459 
    460 	/* First, halt the DMA engine. */
    461 	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */
    462 
    463 	/* Set an impossible phase to prevent data movement? */
    464 	*ncr_sc->sci_tcmd = PHASE_INVALID;
    465 
    466 	/* Note that timeout may have set the error flag. */
    467 	if (ncr_sc->sc_state & NCR_ABORTING)
    468 		goto out;
    469 
    470 	/* XXX: Wait for DMA to actually finish? */
    471 
    472 	/*
    473 	 * Now try to figure out how much actually transferred
    474 	 */
    475 	resid = se->dma_cntr & 0xFFFF;
    476 	if (dh->dh_flags & SIDH_OUT)
    477 		if ((resid > 0) && (resid < sc->sc_reqlen))
    478 			resid++;
    479 	ntrans = sc->sc_reqlen - resid;
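
	/*
	 * Worked example (editorial note, hypothetical numbers): if a
	 * 0x2000-byte write stops with dma_cntr reading 0x10, resid is
	 * bumped to 0x11 for the byte still sitting in the FIFO, so
	 * ntrans = 0x2000 - 0x11 = 0x1fef bytes actually moved.
	 */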
    480 
    481 #ifdef	DEBUG
    482 	if (se_debug & 2) {
    483 		printf("%s: resid=0x%x ntrans=0x%x\n",
    484 		    __func__, resid, ntrans);
    485 	}
    486 #endif
    487 
    488 	if (ntrans < MIN_DMA_LEN) {
    489 		printf("se: fifo count: 0x%x\n", resid);
    490 		ncr_sc->sc_state |= NCR_ABORTING;
    491 		goto out;
    492 	}
    493 	if (ntrans > ncr_sc->sc_datalen)
    494 		panic("%s: excess transfer", __func__);
    495 
    496 	/* Adjust data pointer */
    497 	ncr_sc->sc_dataptr += ntrans;
    498 	ncr_sc->sc_datalen -= ntrans;
    499 
    500 out:
    501 	se->dma_addr = 0;
    502 	se->dma_cntr = 0;
    503 
    504 	/* Put SBIC back in PIO mode. */
    505 	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
    506 	*ncr_sc->sci_icmd = 0;
    507 }
    508 
    509 /*****************************************************************/
    510 
    511 static void
    512 se_minphys(struct buf *bp)
    513 {
    514 
    515 	if (bp->b_bcount > MAX_DMA_LEN)
    516 		bp->b_bcount = MAX_DMA_LEN;
    517 
    518 	minphys(bp);
    519 }
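
/*
 * Editorial note: with MAX_DMA_LEN = 0xE000 (57344 bytes) and assuming
 * the usual 64 KB MAXPHYS, the clamp above is the one that matters for
 * large requests, e.g. (hypothetical numbers):
 *
 *	bp->b_bcount = 0x10000;		64 KB request
 *	se_minphys(bp);			b_bcount is now 0xE000;
 *					minphys() leaves it alone
 */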
    520 
    521 
    522 int
    523 se_intr(void *arg)
    524 {
    525 	struct se_softc *sc = arg;
    526 	volatile struct se_regs *se = sc->sc_regs;
    527 	int claimed;
    528 	u_short csr;
    529 
    530 	claimed = 0;
    531 
    532 	/* SBC interrupt? DMA interrupt? */
    533 	csr = se->se_csr;
    534 	NCR_TRACE("se_intr: csr=0x%x\n", csr);
    535 
    536 	if (csr & SE_CSR_SBC_IP) {
    537 		claimed = ncr5380_intr(&sc->ncr_sc);
    538 #ifdef	DEBUG
    539 		if (!claimed) {
    540 			printf("%s: spurious from SBC\n", __func__);
    541 		}
    542 #endif
    543 		/* Yes, we DID cause this interrupt. */
    544 		claimed = 1;
    545 	}
    546 
    547 	return claimed;
    548 }
    549 
    550 
    551 /*****************************************************************
    552  * Common functions for DMA
    553  ****************************************************************/
    554 
    555 /*
    556  * Allocate a DMA handle from sc->sc_dma[] and record it in
    557  * sr->sr_dma_hand, preparing for a DMA transfer.  On the Sun3/E,
    558  * this means reserving space in the DMA buffer for the transfer.
    559  */
    560 void
    561 se_dma_alloc(struct ncr5380_softc *ncr_sc)
    562 {
    563 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    564 	struct sci_req *sr = ncr_sc->sc_current;
    565 	struct scsipi_xfer *xs = sr->sr_xs;
    566 	struct se_dma_handle *dh;
    567 	int i, xlen;
    568 	u_long addr;
    569 
    570 #ifdef	DIAGNOSTIC
    571 	if (sr->sr_dma_hand != NULL)
    572 		panic("%s: already have DMA handle", __func__);
    573 #endif
    574 
    575 	addr = (u_long)ncr_sc->sc_dataptr;
    576 	xlen = ncr_sc->sc_datalen;
    577 
    578 	/* If the DMA start addr is misaligned then do PIO */
    579 	if ((addr & 1) || (xlen & 1)) {
    580 		printf("%s: misaligned.\n", __func__);
    581 		return;
    582 	}
    583 
    584 	/* Make sure our caller checked sc_min_dma_len. */
    585 	if (xlen < MIN_DMA_LEN)
    586 		panic("%s: xlen=0x%x", __func__, xlen);
    587 
    588 	/*
    589 	 * Never attempt single transfers of more than MAX_DMA_LEN (56 KB),
    590 	 * because the DMA count registers are only 16 bits wide.
    591 	 * This should never happen, since se_minphys() already bounds requests.
    592 	 * XXX - Should just segment these...
    593 	 */
    594 	if (xlen > MAX_DMA_LEN) {
    595 		printf("%s: excessive xlen=0x%x\n", __func__, xlen);
    596 		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
    597 	}
    598 
    599 	/* Find a free DMA handle.  Guaranteed to find one, since we
    600 	   allocate as many DMA handles as the adapter has openings. */
    601 	for (i = 0; i < SCI_OPENINGS; i++) {
    602 		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
    603 			goto found;
    604 	}
    605 	panic("se: no free DMA handles.");
    606 found:
    607 
    608 	dh = &sc->sc_dma[i];
    609 	dh->dh_flags = SIDH_BUSY;
    610 
    611 	/* Copy the "write" flag for convenience. */
    612 	if (xs->xs_control & XS_CTL_DATA_OUT)
    613 		dh->dh_flags |= SIDH_OUT;
    614 
    615 	dh->dh_addr = (uint8_t *)addr;
    616 	dh->dh_maplen  = xlen;
    617 	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
    618 	/* XXX: dh->dh_dma = alloc(xlen) */
    619 	if (!dh->dh_dma) {
    620 		/* Can't remap segment */
    621 		printf("%s: can't remap %p/0x%x\n",
    622 		    __func__, dh->dh_addr, dh->dh_maplen);
    623 		dh->dh_flags = 0;
    624 		return;
    625 	}
    626 
    627 	/* success */
    628 	sr->sr_dma_hand = dh;
    629 }
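
/*
 * Editorial sketch (not part of the driver): if the DMA path were
 * finished, the XXX above would need a small allocator for space in the
 * board's DMA buffer.  The helpers below are invented names used only
 * for illustration -- se_buf_alloc()/se_buf_free() do not exist in this
 * driver -- and are kept under "#if 0" so they are never compiled.
 */
#if 0
static long
se_buf_alloc(struct se_softc *sc, int len)
{

	/* e.g. a first-fit search over a free map of the DMA buffer */
	return 0;	/* 0 means "no space", as se_dma_alloc() expects */
}

static void
se_buf_free(struct se_softc *sc, long offset, int len)
{

	/* mark [offset, offset + len) available again */
}
#endif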
    630 
    631 
    632 void
    633 se_dma_free(struct ncr5380_softc *ncr_sc)
    634 {
    635 	struct sci_req *sr = ncr_sc->sc_current;
    636 	struct se_dma_handle *dh = sr->sr_dma_hand;
    637 
    638 #ifdef	DIAGNOSTIC
    639 	if (dh == NULL)
    640 		panic("%s: no DMA handle", __func__);
    641 #endif
    642 
    643 	if (ncr_sc->sc_state & NCR_DOINGDMA)
    644 		panic("%s: free while in progress", __func__);
    645 
    646 	if (dh->dh_flags & SIDH_BUSY) {
    647 		/* XXX: Should separate allocation and mapping. */
    648 		/* XXX: Give back the DMA space. */
    649 		/* XXX: free((void *)dh->dh_dma, dh->dh_maplen); */
    650 		dh->dh_dma = 0;
    651 		dh->dh_flags = 0;
    652 	}
    653 	sr->sr_dma_hand = NULL;
    654 }
    655 
    656 
    657 #define	CSR_MASK SE_CSR_SBC_IP
    658 #define	POLL_TIMO	50000	/* x 100 usec. = 5 sec. */
    659 
    660 /*
    661  * Poll (spin-wait) for DMA completion.
    662  * Called right after xx_dma_start(), and
    663  * xx_dma_stop() will be called next.
    664  * Same for either VME or OBIO.
    665  */
    666 void
    667 se_dma_poll(struct ncr5380_softc *ncr_sc)
    668 {
    669 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    670 	struct sci_req *sr = ncr_sc->sc_current;
    671 	volatile struct se_regs *se = sc->sc_regs;
    672 	int tmo;
    673 
    674 	/* Make sure DMA started successfully. */
    675 	if (ncr_sc->sc_state & NCR_ABORTING)
    676 		return;
    677 
    678 	/*
    679 	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
    680 	 * XXX: (on obio) or even worse (on vme) a 10mS. delay!
    681 	 * XXX: I really doubt that is necessary...
    682 	 */
    683 
    684 	/* Wait for any "DMA complete" or error bits. */
    685 	tmo = POLL_TIMO;
    686 	for (;;) {
    687 		if (se->se_csr & CSR_MASK)
    688 			break;
    689 		if (--tmo <= 0) {
    690 			printf("se: DMA timeout (while polling)\n");
    691 			/* Indicate timeout as MI code would. */
    692 			sr->sr_flags |= SR_OVERDUE;
    693 			break;
    694 		}
    695 		delay(100);
    696 	}
    697 	NCR_TRACE("se_dma_poll: waited %d\n",
    698 			  POLL_TIMO - tmo);
    699 
    700 #ifdef	DEBUG
    701 	if (se_debug & 2) {
    702 		printf("%s: done, csr=0x%x\n", __func__, se->se_csr);
    703 	}
    704 #endif
    705 }
    706 
    707