      1 /*	$NetBSD: si_sebuf.c,v 1.26.2.1 2008/05/18 12:32:54 yamt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Gordon W. Ross.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Sun3/E SCSI driver (machine-dependent portion).
     34  * The machine-independent parts are in ncr5380sbc.c
     35  *
     36  * XXX - Mostly from the si driver.  Merge?
     37  */
     38 
     39 #include <sys/cdefs.h>
     40 __KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.26.2.1 2008/05/18 12:32:54 yamt Exp $");
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/errno.h>
     45 #include <sys/kernel.h>
     46 #include <sys/malloc.h>
     47 #include <sys/device.h>
     48 #include <sys/buf.h>
     49 #include <sys/proc.h>
     50 #include <sys/user.h>
     51 
     52 #include <dev/scsipi/scsi_all.h>
     53 #include <dev/scsipi/scsipi_all.h>
     54 #include <dev/scsipi/scsipi_debug.h>
     55 #include <dev/scsipi/scsiconf.h>
     56 
     57 #include <machine/autoconf.h>
     58 
     59 /* #define DEBUG XXX */
     60 
     61 #include <dev/ic/ncr5380reg.h>
     62 #include <dev/ic/ncr5380var.h>
     63 
     64 #include "sereg.h"
     65 #include "sevar.h"
     66 
     67 /*
     68  * Transfers smaller than this are done using PIO
      69  * (on the assumption that they're not worth the DMA overhead)
     70  */
     71 #define	MIN_DMA_LEN 128
     72 
     73 /*
      74  * Transfers larger than 65535 bytes need to be split up.
      75  * (Some of the FIFO logic has only 16-bit counters.)
     76  * Make the size an integer multiple of the page size
     77  * to avoid buf/cluster remap problems.  (paranoid?)
     78  */
     79 #define	MAX_DMA_LEN 0xE000
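         /*
          * Note: 0xE000 is 57344 bytes, i.e. seven 8 KB pages, so the limit
          * stays a whole number of pages and well below the 16-bit counter
          * maximum of 65535.
          */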
     80 
     81 /*
     82  * This structure is used to keep track of mapped DMA requests.
     83  */
     84 struct se_dma_handle {
     85 	int 		dh_flags;
     86 #define	SIDH_BUSY	1		/* This DH is in use */
     87 #define	SIDH_OUT	2		/* DMA does data out (write) */
     88 	u_char *	dh_addr;	/* KVA of start of buffer */
     89 	int 		dh_maplen;	/* Length of KVA mapping. */
     90 	long		dh_dma; 	/* Offset in DMA buffer. */
     91 };
     92 
     93 /*
     94  * The first structure member has to be the ncr5380_softc
      95  * so we can just cast to go back and forth between them
      96  * (see the example below).
     96  */
     97 struct se_softc {
     98 	struct ncr5380_softc	ncr_sc;
     99 	volatile struct se_regs	*sc_regs;
    100 	int		sc_adapter_type;
    101 	int		sc_adapter_iv;		/* int. vec */
    102 	int 	sc_options;			/* options for this instance */
    103 	int 	sc_reqlen;  		/* requested transfer length */
    104 	struct se_dma_handle *sc_dma;
    105 	/* DMA command block for the OBIO controller. */
    106 	void *sc_dmacmd;
    107 };
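         /*
          * The cast idiom this layout permits (as used throughout this file):
          *
          *	struct se_softc *sc = (struct se_softc *)ncr_sc;
          *	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
          */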
    108 
    109 /* Options for disconnect/reselect, DMA, and interrupts. */
    110 #define SE_NO_DISCONNECT    0xff
    111 #define SE_NO_PARITY_CHK  0xff00
    112 #define SE_FORCE_POLLING 0x10000
    113 #define SE_DISABLE_DMA   0x20000
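         /*
          * Example (a hypothetical config-file "flags" value, not taken from
          * this source): flags 0x2000f would disable DMA and disable
          * disconnect/reselect for targets 0-3 only, leaving parity checking
          * and interrupts enabled.
          */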
    114 
    115 void se_dma_alloc(struct ncr5380_softc *);
    116 void se_dma_free(struct ncr5380_softc *);
    117 void se_dma_poll(struct ncr5380_softc *);
    118 
    119 void se_dma_setup(struct ncr5380_softc *);
    120 void se_dma_start(struct ncr5380_softc *);
    121 void se_dma_eop(struct ncr5380_softc *);
    122 void se_dma_stop(struct ncr5380_softc *);
    123 
    124 void se_intr_on (struct ncr5380_softc *);
    125 void se_intr_off(struct ncr5380_softc *);
    126 
    127 static int  se_intr(void *);
    128 static void se_reset(struct ncr5380_softc *);
    129 
    130 /*
    131  * New-style autoconfig attachment
    132  */
    133 
    134 static int	se_match(device_t, cfdata_t, void *);
    135 static void	se_attach(device_t, device_t, void *);
    136 
    137 CFATTACH_DECL_NEW(si_sebuf, sizeof(struct se_softc),
    138     se_match, se_attach, NULL, NULL);
    139 
    140 static void	se_minphys(struct buf *);
    141 
    142 /* Options for disconnect/reselect, DMA, and interrupts. */
    143 int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
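         /*
          * (That is 0x300ff: DMA disabled, polled transfers forced, and
          * disconnect/reselect disabled for all targets.)
          */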
    144 
    145 /* How long to wait for DMA before declaring an error. */
    146 int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */
    147 
    148 int se_debug = 0;
    149 
    150 static int
    151 se_match(device_t parent, cfdata_t cf, void *args)
    152 {
    153 	struct sebuf_attach_args *aa = args;
    154 
    155 	/* Match by name. */
    156 	if (strcmp(aa->name, "se"))
    157 		return 0;
    158 
     159 	/* Anything else to check? */
    160 
    161 	return 1;
    162 }
    163 
    164 static void
    165 se_attach(device_t parent, device_t self, void *args)
    166 {
    167 	struct se_softc *sc = device_private(self);
    168 	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
    169 	struct cfdata *cf = device_cfdata(self);
    170 	struct sebuf_attach_args *aa = args;
    171 	volatile struct se_regs *regs;
    172 	int i;
    173 
    174 	ncr_sc->sc_dev = self;
    175 
    176 	/* Get options from config flags if specified. */
    177 	if (cf->cf_flags)
    178 		sc->sc_options = cf->cf_flags;
    179 	else
    180 		sc->sc_options = se_options;
    181 
    182 	aprint_normal(": options=0x%x\n", sc->sc_options);
    183 
    184 	sc->sc_adapter_type = aa->ca.ca_bustype;
    185 	sc->sc_adapter_iv = aa->ca.ca_intvec;
    186 	sc->sc_regs = regs = aa->regs;
    187 
    188 	/*
    189 	 * MD function pointers used by the MI code.
    190 	 */
    191 	ncr_sc->sc_pio_out = ncr5380_pio_out;
    192 	ncr_sc->sc_pio_in =  ncr5380_pio_in;
    193 
    194 #if 0	/* XXX - not yet... */
    195 	ncr_sc->sc_dma_alloc = se_dma_alloc;
    196 	ncr_sc->sc_dma_free  = se_dma_free;
    197 	ncr_sc->sc_dma_setup = se_dma_setup;
    198 	ncr_sc->sc_dma_start = se_dma_start;
    199 	ncr_sc->sc_dma_poll  = se_dma_poll;
    200 	ncr_sc->sc_dma_eop   = se_dma_eop;
    201 	ncr_sc->sc_dma_stop  = se_dma_stop;
    202 	ncr_sc->sc_intr_on   = se_intr_on;
    203 	ncr_sc->sc_intr_off  = se_intr_off;
    204 #endif	/* XXX */
    205 
    206 	/* Attach interrupt handler. */
    207 	isr_add_vectored(se_intr, (void *)sc,
    208 	    aa->ca.ca_intpri, aa->ca.ca_intvec);
    209 
    210 	/* Reset the hardware. */
    211 	se_reset(ncr_sc);
    212 
    213 	/* Do the common attach stuff. */
    214 
    215 	/*
    216 	 * Support the "options" (config file flags).
    217 	 * Disconnect/reselect is a per-target mask.
    218 	 * Interrupts and DMA are per-controller.
    219 	 */
    220 	ncr_sc->sc_no_disconnect =
    221 	    (sc->sc_options & SE_NO_DISCONNECT);
    222 	ncr_sc->sc_parity_disable =
    223 	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
    224 	if (sc->sc_options & SE_FORCE_POLLING)
    225 		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
    226 
    227 #if 1	/* XXX - Temporary */
    228 	/* XXX - In case we think DMA is completely broken... */
    229 	if (sc->sc_options & SE_DISABLE_DMA) {
    230 		/* Override this function pointer. */
    231 		ncr_sc->sc_dma_alloc = NULL;
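         		/*
         		 * With sc_dma_alloc left NULL, the MI ncr5380 code should
         		 * never attempt DMA, so all transfers fall back to PIO.
         		 */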
    232 	}
    233 #endif
    234 	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;
    235 
    236 	/*
    237 	 * Initialize fields used by the MI code
    238 	 */
    239 	ncr_sc->sci_r0 = &regs->ncrregs[0];
    240 	ncr_sc->sci_r1 = &regs->ncrregs[1];
    241 	ncr_sc->sci_r2 = &regs->ncrregs[2];
    242 	ncr_sc->sci_r3 = &regs->ncrregs[3];
    243 	ncr_sc->sci_r4 = &regs->ncrregs[4];
    244 	ncr_sc->sci_r5 = &regs->ncrregs[5];
    245 	ncr_sc->sci_r6 = &regs->ncrregs[6];
    246 	ncr_sc->sci_r7 = &regs->ncrregs[7];
    247 
    248 	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;
    249 
    250 	/*
    251 	 * Allocate DMA handles.
    252 	 */
    253 	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
    254 	sc->sc_dma = malloc(i, M_DEVBUF, M_WAITOK);
    255 	if (sc->sc_dma == NULL)
    256 		panic("se: dma_malloc failed");
    257 	for (i = 0; i < SCI_OPENINGS; i++)
    258 		sc->sc_dma[i].dh_flags = 0;
    259 
    260 	ncr_sc->sc_channel.chan_id = 7;
    261 	ncr_sc->sc_adapter.adapt_minphys = se_minphys;
    262 
    263 	/*
     264 	 * Initialize the se board itself.
    265 	 */
    266 	ncr5380_attach(ncr_sc);
    267 }
    268 
    269 static void
    270 se_reset(struct ncr5380_softc *ncr_sc)
    271 {
    272 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    273 	volatile struct se_regs *se = sc->sc_regs;
    274 
    275 #ifdef	DEBUG
    276 	if (se_debug) {
    277 		printf("%s\n", __func__);
    278 	}
    279 #endif
    280 
    281 	/* The reset bits in the CSR are active low. */
    282 	se->se_csr = 0;
    283 	delay(10);
    284 	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
    285 	delay(10);
    286 
    287 	/* Make sure the DMA engine is stopped. */
    288 	se->dma_addr = 0;
    289 	se->dma_cntr = 0;
    290 	se->se_ivec = sc->sc_adapter_iv;
    291 }
    292 
    293 /*
    294  * This is called when the bus is going idle,
    295  * so we want to enable the SBC interrupts.
    296  * That is controlled by the DMA enable!
    297  * Who would have guessed!
    298  * What a NASTY trick!
    299  */
    300 void
    301 se_intr_on(struct ncr5380_softc *ncr_sc)
    302 {
    303 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    304 	volatile struct se_regs *se = sc->sc_regs;
    305 
    306 	/* receive mode should be safer */
    307 	se->se_csr &= ~SE_CSR_SEND;
    308 
    309 	/* Clear the count so nothing happens. */
    310 	se->dma_cntr = 0;
    311 
    312 	/* Clear the start address too. (paranoid?) */
    313 	se->dma_addr = 0;
    314 
    315 	/* Finally, enable the DMA engine. */
    316 	se->se_csr |= SE_CSR_INTR_EN;
    317 }
    318 
    319 /*
    320  * This is called when the bus is idle and we are
    321  * about to start playing with the SBC chip.
    322  */
    323 void
    324 se_intr_off(struct ncr5380_softc *ncr_sc)
    325 {
    326 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    327 	volatile struct se_regs *se = sc->sc_regs;
    328 
    329 	se->se_csr &= ~SE_CSR_INTR_EN;
    330 }
    331 
    332 /*
    333  * This function is called during the COMMAND or MSG_IN phase
    334  * that precedes a DATA_IN or DATA_OUT phase, in case we need
     335  * to set up the DMA engine before the bus enters a DATA phase.
     336  *
     337  * On the VME version, set up the start address, but clear the
    338  * count (to make sure it stays idle) and set that later.
    339  * XXX: The VME adapter appears to suppress SBC interrupts
    340  * when the FIFO is not empty or the FIFO count is non-zero!
    341  * XXX: Need to copy data into the DMA buffer...
    342  */
    343 void
    344 se_dma_setup(struct ncr5380_softc *ncr_sc)
    345 {
    346 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    347 	struct sci_req *sr = ncr_sc->sc_current;
    348 	struct se_dma_handle *dh = sr->sr_dma_hand;
    349 	volatile struct se_regs *se = sc->sc_regs;
    350 	long data_pa;
    351 	int xlen;
    352 
    353 	/*
    354 	 * Get the DMA mapping for this segment.
    355 	 * XXX - Should separate allocation and mapin.
    356 	 */
    357 	data_pa = 0; /* XXX se_dma_kvtopa(dh->dh_dma); */
    358 	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
    359 	if (data_pa & 1)
    360 		panic("%s: bad pa=0x%lx", __func__, data_pa);
    361 	xlen = ncr_sc->sc_datalen;
    362 	xlen &= ~1;				/* XXX: necessary? */
    363 	sc->sc_reqlen = xlen; 	/* XXX: or less? */
    364 
    365 #ifdef	DEBUG
    366 	if (se_debug & 2) {
    367 		printf("%s: dh=%p, pa=0x%lx, xlen=0x%x\n",
    368 		    __func__, dh, data_pa, xlen);
    369 	}
    370 #endif
    371 
    372 	/* Set direction (send/recv) */
    373 	if (dh->dh_flags & SIDH_OUT) {
    374 		se->se_csr |= SE_CSR_SEND;
    375 	} else {
    376 		se->se_csr &= ~SE_CSR_SEND;
    377 	}
    378 
    379 	/* Load the start address. */
    380 	se->dma_addr = (ushort)(data_pa & 0xFFFF);
    381 
    382 	/*
    383 	 * Keep the count zero or it may start early!
    384 	 */
    385 	se->dma_cntr = 0;
    386 }
    387 
    388 
    389 void
    390 se_dma_start(struct ncr5380_softc *ncr_sc)
    391 {
    392 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    393 	struct sci_req *sr = ncr_sc->sc_current;
    394 	struct se_dma_handle *dh = sr->sr_dma_hand;
    395 	volatile struct se_regs *se = sc->sc_regs;
    396 	int s, xlen;
    397 
    398 	xlen = sc->sc_reqlen;
    399 
    400 	/* This MAY be time critical (not sure). */
    401 	s = splhigh();
    402 
    403 	se->dma_cntr = (ushort)(xlen & 0xFFFF);
    404 
    405 	/*
    406 	 * Acknowledge the phase change.  (After DMA setup!)
    407 	 * Put the SBIC into DMA mode, and start the transfer.
    408 	 */
    409 	if (dh->dh_flags & SIDH_OUT) {
    410 		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
    411 		SCI_CLR_INTR(ncr_sc);
    412 		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
    413 		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
    414 		*ncr_sc->sci_dma_send = 0;	/* start it */
    415 	} else {
    416 		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
    417 		SCI_CLR_INTR(ncr_sc);
    418 		*ncr_sc->sci_icmd = 0;
    419 		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
    420 		*ncr_sc->sci_irecv = 0;	/* start it */
    421 	}
    422 
    423 	/* Let'er rip! */
    424 	se->se_csr |= SE_CSR_INTR_EN;
    425 
    426 	splx(s);
    427 	ncr_sc->sc_state |= NCR_DOINGDMA;
    428 
    429 #ifdef	DEBUG
    430 	if (se_debug & 2) {
    431 		printf("%s: started, flags=0x%x\n",
    432 		    __func__, ncr_sc->sc_state);
    433 	}
    434 #endif
    435 }
    436 
    437 
    438 void
    439 se_dma_eop(struct ncr5380_softc *ncr_sc)
    440 {
    441 
    442 	/* Not needed - DMA was stopped prior to examining sci_csr */
    443 }
    444 
    445 
    446 void
    447 se_dma_stop(struct ncr5380_softc *ncr_sc)
    448 {
    449 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    450 	struct sci_req *sr = ncr_sc->sc_current;
    451 	struct se_dma_handle *dh = sr->sr_dma_hand;
    452 	volatile struct se_regs *se = sc->sc_regs;
    453 	int resid, ntrans;
    454 
    455 	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
    456 #ifdef	DEBUG
    457 		printf("%s: DMA not running\n", __func__);
    458 #endif
    459 		return;
    460 	}
    461 	ncr_sc->sc_state &= ~NCR_DOINGDMA;
    462 
    463 	/* First, halt the DMA engine. */
    464 	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */
    465 
    466 	/* Set an impossible phase to prevent data movement? */
    467 	*ncr_sc->sci_tcmd = PHASE_INVALID;
    468 
    469 	/* Note that timeout may have set the error flag. */
    470 	if (ncr_sc->sc_state & NCR_ABORTING)
    471 		goto out;
    472 
    473 	/* XXX: Wait for DMA to actually finish? */
    474 
    475 	/*
     476 	 * Now try to figure out how much was actually transferred
    477 	 */
    478 	resid = se->dma_cntr & 0xFFFF;
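         	/*
         	 * The adjustment below (resid++) presumably accounts for a byte
         	 * that may still be sitting in the FIFO on writes when the
         	 * counter stops; it is counted as not transferred.
         	 */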
    479 	if (dh->dh_flags & SIDH_OUT)
    480 		if ((resid > 0) && (resid < sc->sc_reqlen))
    481 			resid++;
    482 	ntrans = sc->sc_reqlen - resid;
    483 
    484 #ifdef	DEBUG
    485 	if (se_debug & 2) {
    486 		printf("%s: resid=0x%x ntrans=0x%x\n",
    487 		    __func__, resid, ntrans);
    488 	}
    489 #endif
    490 
    491 	if (ntrans < MIN_DMA_LEN) {
    492 		printf("se: fifo count: 0x%x\n", resid);
    493 		ncr_sc->sc_state |= NCR_ABORTING;
    494 		goto out;
    495 	}
    496 	if (ntrans > ncr_sc->sc_datalen)
    497 		panic("%s: excess transfer", __func__);
    498 
    499 	/* Adjust data pointer */
    500 	ncr_sc->sc_dataptr += ntrans;
    501 	ncr_sc->sc_datalen -= ntrans;
    502 
    503 out:
    504 	se->dma_addr = 0;
    505 	se->dma_cntr = 0;
    506 
    507 	/* Put SBIC back in PIO mode. */
    508 	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
    509 	*ncr_sc->sci_icmd = 0;
    510 }
    511 
    512 /*****************************************************************/
    513 
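         /*
          * Clamp a transfer to what the DMA engine can count (MAX_DMA_LEN),
          * then apply the generic minphys() limit.
          */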
    514 static void
    515 se_minphys(struct buf *bp)
    516 {
    517 
    518 	if (bp->b_bcount > MAX_DMA_LEN)
    519 		bp->b_bcount = MAX_DMA_LEN;
    520 
    521 	minphys(bp);
    522 }
    523 
    524 
    525 int
    526 se_intr(void *arg)
    527 {
    528 	struct se_softc *sc = arg;
    529 	volatile struct se_regs *se = sc->sc_regs;
    530 	int dma_error, claimed;
    531 	u_short csr;
    532 
    533 	claimed = 0;
    534 	dma_error = 0;
    535 
    536 	/* SBC interrupt? DMA interrupt? */
    537 	csr = se->se_csr;
    538 	NCR_TRACE("se_intr: csr=0x%x\n", csr);
    539 
    540 	if (csr & SE_CSR_SBC_IP) {
    541 		claimed = ncr5380_intr(&sc->ncr_sc);
    542 #ifdef	DEBUG
    543 		if (!claimed) {
    544 			printf("%s: spurious from SBC\n", __func__);
    545 		}
    546 #endif
    547 		/* Yes, we DID cause this interrupt. */
    548 		claimed = 1;
    549 	}
    550 
    551 	return claimed;
    552 }
    553 
    554 
    555 /*****************************************************************
    556  * Common functions for DMA
    557  ****************************************************************/
    558 
    559 /*
    560  * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
    561  * for DMA transfer.  On the Sun3/E, this means we have to
    562  * allocate space in the DMA buffer for this transfer.
    563  */
    564 void
    565 se_dma_alloc(struct ncr5380_softc *ncr_sc)
    566 {
    567 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    568 	struct sci_req *sr = ncr_sc->sc_current;
    569 	struct scsipi_xfer *xs = sr->sr_xs;
    570 	struct se_dma_handle *dh;
    571 	int i, xlen;
    572 	u_long addr;
    573 
    574 #ifdef	DIAGNOSTIC
    575 	if (sr->sr_dma_hand != NULL)
    576 		panic("%s: already have DMA handle", __func__);
    577 #endif
    578 
    579 	addr = (u_long)ncr_sc->sc_dataptr;
    580 	xlen = ncr_sc->sc_datalen;
    581 
    582 	/* If the DMA start addr is misaligned then do PIO */
    583 	if ((addr & 1) || (xlen & 1)) {
    584 		printf("%s: misaligned.\n", __func__);
    585 		return;
    586 	}
    587 
    588 	/* Make sure our caller checked sc_min_dma_len. */
    589 	if (xlen < MIN_DMA_LEN)
    590 		panic("%s: xlen=0x%x", __func__, xlen);
    591 
    592 	/*
    593 	 * Never attempt single transfers of more than 63k, because
    594 	 * our count register may be only 16 bits (an OBIO adapter).
     595  * This should never happen, since the length is already bounded by minphys().
    596 	 * XXX - Should just segment these...
    597 	 */
    598 	if (xlen > MAX_DMA_LEN) {
    599 		printf("%s: excessive xlen=0x%x\n", __func__, xlen);
    600 		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
    601 	}
    602 
    603 	/* Find free DMA handle.  Guaranteed to find one since we have
    604 	   as many DMA handles as the driver has processes. */
    605 	for (i = 0; i < SCI_OPENINGS; i++) {
    606 		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
    607 			goto found;
    608 	}
    609 	panic("se: no free DMA handles.");
    610 found:
    611 
    612 	dh = &sc->sc_dma[i];
    613 	dh->dh_flags = SIDH_BUSY;
    614 
    615 	/* Copy the "write" flag for convenience. */
    616 	if (xs->xs_control & XS_CTL_DATA_OUT)
    617 		dh->dh_flags |= SIDH_OUT;
    618 
    619 	dh->dh_addr = (uint8_t *)addr;
    620 	dh->dh_maplen  = xlen;
    621 	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
    622 	/* XXX: dh->dh_dma = alloc(xlen) */
    623 	if (!dh->dh_dma) {
    624 		/* Can't remap segment */
    625 		printf("%s: can't remap %p/0x%x\n",
    626 		    __func__, dh->dh_addr, dh->dh_maplen);
    627 		dh->dh_flags = 0;
    628 		return;
    629 	}
    630 
    631 	/* success */
    632 	sr->sr_dma_hand = dh;
    633 }
    634 
    635 
    636 void
    637 se_dma_free(struct ncr5380_softc *ncr_sc)
    638 {
    639 	struct sci_req *sr = ncr_sc->sc_current;
    640 	struct se_dma_handle *dh = sr->sr_dma_hand;
    641 
    642 #ifdef	DIAGNOSTIC
    643 	if (dh == NULL)
    644 		panic("%s: no DMA handle", __func__);
    645 #endif
    646 
    647 	if (ncr_sc->sc_state & NCR_DOINGDMA)
    648 		panic("%s: free while in progress", __func__);
    649 
    650 	if (dh->dh_flags & SIDH_BUSY) {
    651 		/* XXX: Should separate allocation and mapping. */
    652 		/* XXX: Give back the DMA space. */
    653 		/* XXX: free((void *)dh->dh_dma, dh->dh_maplen); */
    654 		dh->dh_dma = 0;
    655 		dh->dh_flags = 0;
    656 	}
    657 	sr->sr_dma_hand = NULL;
    658 }
    659 
    660 
    661 #define	CSR_MASK SE_CSR_SBC_IP
     662 #define	POLL_TIMO	50000	/* x 100 us delay() each = 5 sec. */
    663 
    664 /*
    665  * Poll (spin-wait) for DMA completion.
    666  * Called right after xx_dma_start(), and
    667  * xx_dma_stop() will be called next.
    668  * Same for either VME or OBIO.
    669  */
    670 void
    671 se_dma_poll(struct ncr5380_softc *ncr_sc)
    672 {
    673 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    674 	struct sci_req *sr = ncr_sc->sc_current;
    675 	volatile struct se_regs *se = sc->sc_regs;
    676 	int tmo;
    677 
    678 	/* Make sure DMA started successfully. */
    679 	if (ncr_sc->sc_state & NCR_ABORTING)
    680 		return;
    681 
    682 	/*
    683 	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
     684  * XXX: (on obio) or even worse (on vme) a 10 ms delay!
    685 	 * XXX: I really doubt that is necessary...
    686 	 */
    687 
    688 	/* Wait for any "DMA complete" or error bits. */
    689 	tmo = POLL_TIMO;
    690 	for (;;) {
    691 		if (se->se_csr & CSR_MASK)
    692 			break;
    693 		if (--tmo <= 0) {
    694 			printf("se: DMA timeout (while polling)\n");
    695 			/* Indicate timeout as MI code would. */
    696 			sr->sr_flags |= SR_OVERDUE;
    697 			break;
    698 		}
    699 		delay(100);
    700 	}
    701 	NCR_TRACE("se_dma_poll: waited %d\n",
    702 			  POLL_TIMO - tmo);
    703 
    704 #ifdef	DEBUG
    705 	if (se_debug & 2) {
    706 		printf("%s: done, csr=0x%x\n", __func__, se->se_csr);
    707 	}
    708 #endif
    709 }
    710 
    711