Home | History | Annotate | Line # | Download | only in dev
si_sebuf.c revision 1.28.26.1
      1 /*	$NetBSD: si_sebuf.c,v 1.28.26.1 2014/05/18 17:45:27 rmind Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Gordon W. Ross.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Sun3/E SCSI driver (machine-dependent portion).
     34  * The machine-independent parts are in ncr5380sbc.c
     35  *
     36  * XXX - Mostly from the si driver.  Merge?
     37  */
     38 
     39 #include <sys/cdefs.h>
     40 __KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.28.26.1 2014/05/18 17:45:27 rmind Exp $");
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/errno.h>
     45 #include <sys/kernel.h>
     46 #include <sys/malloc.h>
     47 #include <sys/device.h>
     48 #include <sys/buf.h>
     49 #include <sys/proc.h>
     50 
     51 #include <dev/scsipi/scsi_all.h>
     52 #include <dev/scsipi/scsipi_all.h>
     53 #include <dev/scsipi/scsipi_debug.h>
     54 #include <dev/scsipi/scsiconf.h>
     55 
     56 #include <machine/autoconf.h>
     57 
     58 /* #define DEBUG XXX */
     59 
     60 #include <dev/ic/ncr5380reg.h>
     61 #include <dev/ic/ncr5380var.h>
     62 
     63 #include "sereg.h"
     64 #include "sevar.h"
     65 
     66 /*
     67  * Transfers smaller than this are done using PIO
     68  * (on assumption they're not worth DMA overhead)
     69  */
     70 #define	MIN_DMA_LEN 128
     71 
     72 /*
 * Transfers larger than 65535 bytes need to be split-up.
     74  * (Some of the FIFO logic has only 16 bits counters.)
     75  * Make the size an integer multiple of the page size
     76  * to avoid buf/cluster remap problems.  (paranoid?)
     77  */
     78 #define	MAX_DMA_LEN 0xE000
     79 
     80 /*
     81  * This structure is used to keep track of mapped DMA requests.
     82  */
     83 struct se_dma_handle {
     84 	int 		dh_flags;
     85 #define	SIDH_BUSY	1		/* This DH is in use */
     86 #define	SIDH_OUT	2		/* DMA does data out (write) */
     87 	u_char *	dh_addr;	/* KVA of start of buffer */
     88 	int 		dh_maplen;	/* Length of KVA mapping. */
     89 	long		dh_dma; 	/* Offset in DMA buffer. */
     90 };
     91 
     92 /*
     93  * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
     95  */
     96 struct se_softc {
     97 	struct ncr5380_softc	ncr_sc;
     98 	volatile struct se_regs	*sc_regs;
     99 	int		sc_adapter_type;
    100 	int		sc_adapter_iv;		/* int. vec */
    101 	int 	sc_options;			/* options for this instance */
    102 	int 	sc_reqlen;  		/* requested transfer length */
    103 	struct se_dma_handle *sc_dma;
    104 	/* DMA command block for the OBIO controller. */
    105 	void *sc_dmacmd;
    106 };
    107 
    108 /* Options for disconnect/reselect, DMA, and interrupts. */
    109 #define SE_NO_DISCONNECT    0xff
    110 #define SE_NO_PARITY_CHK  0xff00
    111 #define SE_FORCE_POLLING 0x10000
    112 #define SE_DISABLE_DMA   0x20000
    113 
    114 void se_dma_alloc(struct ncr5380_softc *);
    115 void se_dma_free(struct ncr5380_softc *);
    116 void se_dma_poll(struct ncr5380_softc *);
    117 
    118 void se_dma_setup(struct ncr5380_softc *);
    119 void se_dma_start(struct ncr5380_softc *);
    120 void se_dma_eop(struct ncr5380_softc *);
    121 void se_dma_stop(struct ncr5380_softc *);
    122 
    123 void se_intr_on (struct ncr5380_softc *);
    124 void se_intr_off(struct ncr5380_softc *);
    125 
    126 static int  se_intr(void *);
    127 static void se_reset(struct ncr5380_softc *);
    128 
    129 /*
    130  * New-style autoconfig attachment
    131  */
    132 
    133 static int	se_match(device_t, cfdata_t, void *);
    134 static void	se_attach(device_t, device_t, void *);
    135 
    136 CFATTACH_DECL_NEW(si_sebuf, sizeof(struct se_softc),
    137     se_match, se_attach, NULL, NULL);
    138 
    139 static void	se_minphys(struct buf *);
    140 
    141 /* Options for disconnect/reselect, DMA, and interrupts. */
    142 int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
    143 
    144 /* How long to wait for DMA before declaring an error. */
    145 int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */
    146 
    147 int se_debug = 0;
    148 
    149 static int
    150 se_match(device_t parent, cfdata_t cf, void *args)
    151 {
    152 	struct sebuf_attach_args *aa = args;
    153 
    154 	/* Match by name. */
    155 	if (strcmp(aa->name, "se"))
    156 		return 0;
    157 
    158 	/* Anyting else to check? */
    159 
    160 	return 1;
    161 }
    162 
/*
 * Autoconf attach: wire up the MD hooks for the MI ncr5380 code,
 * hook the vectored interrupt, reset the board, apply the per-instance
 * option flags, and hand off to ncr5380_attach().
 */
static void
se_attach(device_t parent, device_t self, void *args)
{
	struct se_softc *sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = device_cfdata(self);
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	ncr_sc->sc_dev = self;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	aprint_normal(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 * Only PIO is hooked up; the DMA entry points below are
	 * still disabled (see the #if 0 block).
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;

#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */

	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
	    aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
	    (sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Initialize fields used by the MI code
	 * (the eight 5380 chip registers live in regs->ncrregs[]).
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * Allocate DMA handles, one per possible SCSI opening,
	 * and mark them all free.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = malloc(i, M_DEVBUF, M_WAITOK);
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	/* Host adapter is SCSI ID 7; clamp transfers in se_minphys(). */
	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = se_minphys;

	/*
	 *  Initialize se board itself.
	 */
	ncr5380_attach(ncr_sc);
}
    267 
/*
 * Reset the sebuf board: pulse the (active-low) reset bits in the
 * CSR, quiesce the DMA engine, and program the interrupt vector.
 */
static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("%s\n", __func__);
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	/* Release reset; interrupts stay disabled for now. */
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;
}
    291 
    292 /*
    293  * This is called when the bus is going idle,
    294  * so we want to enable the SBC interrupts.
    295  * That is controlled by the DMA enable!
    296  * Who would have guessed!
    297  * What a NASTY trick!
    298  */
void
se_intr_on(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine. */
	/* (On this board the DMA enable also gates SBC interrupts.) */
	se->se_csr |= SE_CSR_INTR_EN;
}
    317 
    318 /*
    319  * This is called when the bus is idle and we are
    320  * about to start playing with the SBC chip.
    321  */
    322 void
    323 se_intr_off(struct ncr5380_softc *ncr_sc)
    324 {
    325 	struct se_softc *sc = (struct se_softc *)ncr_sc;
    326 	volatile struct se_regs *se = sc->sc_regs;
    327 
    328 	se->se_csr &= ~SE_CSR_INTR_EN;
    329 }
    330 
    331 /*
    332  * This function is called during the COMMAND or MSG_IN phase
    333  * that precedes a DATA_IN or DATA_OUT phase, in case we need
    334  * to setup the DMA engine before the bus enters a DATA phase.
    335  *
 * On the VME version, setup the start address, but clear the
    337  * count (to make sure it stays idle) and set that later.
    338  * XXX: The VME adapter appears to suppress SBC interrupts
    339  * when the FIFO is not empty or the FIFO count is non-zero!
    340  * XXX: Need to copy data into the DMA buffer...
    341  */
void
se_dma_setup(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 * NOTE: still a placeholder -- the base is hard-coded to 0
	 * until se_dma_kvtopa()/buffer allocation is implemented.
	 */
	data_pa = 0; /* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("%s: bad pa=0x%lx", __func__, data_pa);
	/* Force an even length; remember it for se_dma_start/stop. */
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;				/* XXX: necessary? */
	sc->sc_reqlen = xlen; 	/* XXX: or less? */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: dh=%p, pa=0x%lx, xlen=0x%x\n",
		    __func__, dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 * (The real count is loaded in se_dma_start().)
	 */
	se->dma_cntr = 0;
}
    386 
    387 
/*
 * Start the DMA transfer set up by se_dma_setup(): load the count,
 * acknowledge the phase, switch the SBC into DMA mode and kick off
 * the transfer in the appropriate direction.
 */
void
se_dma_start(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	/* Load the transfer count (16-bit hardware counter). */
	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;	/* start it */
	}

	/* Let'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: started, flags=0x%x\n",
		    __func__, ncr_sc->sc_state);
	}
#endif
}
    435 
    436 
/*
 * End-of-process hook for the MI code.  Intentionally empty:
 * the DMA engine is already stopped before sci_csr is examined,
 * so there is nothing left to do here.
 */
void
se_dma_eop(struct ncr5380_softc *ncr_sc)
{
}
    443 
    444 
/*
 * Halt the DMA engine, compute how much data actually moved from
 * the hardware residual counter, advance the MI data pointers, and
 * put the SBC back into PIO mode.
 */
void
se_dma_stop(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("%s: DMA not running\n", __func__);
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much actually transferred
	 * (the counter holds the untransferred residual).
	 */
	resid = se->dma_cntr & 0xFFFF;
	/*
	 * For partial data-out transfers, count one extra byte as
	 * untransferred -- presumably still latched in the FIFO
	 * (NOTE(review): inherited from the si driver; confirm).
	 */
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: resid=0x%x ntrans=0x%x\n",
		    __func__, resid, ntrans);
	}
#endif

	/* A transfer shorter than the DMA threshold means trouble. */
	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("%s: excess transfer", __func__);

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	/* Always leave the DMA engine quiesced and the SBC in PIO mode. */
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}
    510 
    511 /*****************************************************************/
    512 
    513 static void
    514 se_minphys(struct buf *bp)
    515 {
    516 
    517 	if (bp->b_bcount > MAX_DMA_LEN)
    518 		bp->b_bcount = MAX_DMA_LEN;
    519 
    520 	minphys(bp);
    521 }
    522 
    523 
    524 int
    525 se_intr(void *arg)
    526 {
    527 	struct se_softc *sc = arg;
    528 	volatile struct se_regs *se = sc->sc_regs;
    529 	int claimed;
    530 	u_short csr;
    531 
    532 	claimed = 0;
    533 
    534 	/* SBC interrupt? DMA interrupt? */
    535 	csr = se->se_csr;
    536 	NCR_TRACE("se_intr: csr=0x%x\n", csr);
    537 
    538 	if (csr & SE_CSR_SBC_IP) {
    539 		claimed = ncr5380_intr(&sc->ncr_sc);
    540 #ifdef	DEBUG
    541 		if (!claimed) {
    542 			printf("%s: spurious from SBC\n", __func__);
    543 		}
    544 #endif
    545 		/* Yes, we DID cause this interrupt. */
    546 		claimed = 1;
    547 	}
    548 
    549 	return claimed;
    550 }
    551 
    552 
    553 /*****************************************************************
    554  * Common functions for DMA
    555  ****************************************************************/
    556 
    557 /*
    558  * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
    559  * for DMA transfer.  On the Sun3/E, this means we have to
    560  * allocate space in the DMA buffer for this transfer.
    561  */
/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 *
 * NOTE: the DMA-buffer allocation is not implemented yet, so
 * dh_dma stays 0 and this routine always takes the failure path,
 * leaving sr_dma_hand NULL (the MI code then falls back to PIO).
 */
void
se_dma_alloc(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("%s: already have DMA handle", __func__);
#endif

	addr = (u_long)ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("%s: misaligned.\n", __func__);
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("%s: xlen=0x%x", __func__, xlen);

	/*
	 * Never attempt single transfers of more than 63k, because
	 * our count register may be only 16 bits (an OBIO adapter).
	 * This should never happen since already bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("%s: excessive xlen=0x%x\n", __func__, xlen);
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (uint8_t *)addr;
	dh->dh_maplen  = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("%s: can't remap %p/0x%x\n",
		    __func__, dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;
}
    632 
    633 
/*
 * Release the DMA handle attached to the current request.
 * Counterpart of se_dma_alloc(); must not be called while a
 * DMA transfer is still in progress.
 */
void
se_dma_free(struct ncr5380_softc *ncr_sc)
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("%s: no DMA handle", __func__);
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("%s: free while in progress", __func__);

	if (dh->dh_flags & SIDH_BUSY) {
		/* XXX: Should separate allocation and mapping. */
		/* XXX: Give back the DMA space. */
		/* XXX: free((void *)dh->dh_dma, dh->dh_maplen); */
		dh->dh_dma = 0;
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
    657 
    658 
    659 #define	CSR_MASK SE_CSR_SBC_IP
    660 #define	POLL_TIMO	50000	/* X100 = 5 sec. */
    661 
    662 /*
    663  * Poll (spin-wait) for DMA completion.
    664  * Called right after xx_dma_start(), and
    665  * xx_dma_stop() will be called next.
    666  * Same for either VME or OBIO.
    667  */
void
se_dma_poll(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or even worse (on vme) a 10mS. delay!
	 * XXX: I really doubt that is necessary...
	 */

	/* Wait for any "DMA complete" or error bits. */
	/* Spin in 100us steps up to POLL_TIMO iterations (~5 sec). */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
			  POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("%s: done, csr=0x%x\n", __func__, se->se_csr);
	}
#endif
}
    708 
    709