/* $NetBSD: isp_sbus.c,v 1.67.2.1 2007/05/27 14:30:28 ad Exp $ */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000-2007 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_sbus.c,v 1.67.2.1 2007/05/27 14:30:28 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <dev/ic/isp_netbsd.h>
#include <machine/intr.h>
#include <machine/autoconf.h>
#include <dev/sbus/sbusvar.h>
#include <sys/reboot.h>

static void isp_sbus_reset0(struct ispsoftc *);
static void isp_sbus_reset1(struct ispsoftc *);
static int isp_sbus_intr(void *);
static int isp_sbus_rd_isr(struct ispsoftc *, uint32_t *, uint16_t *,
    uint16_t *);
static uint32_t isp_sbus_rd_reg(struct ispsoftc *, int);
static void isp_sbus_wr_reg(struct ispsoftc *, int, uint32_t);
static int isp_sbus_mbxdma(struct ispsoftc *);
static int isp_sbus_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, uint32_t *,
    uint32_t);
static void isp_sbus_dmateardown(struct ispsoftc *, XS_T *, uint32_t);

#ifndef	ISP_DISABLE_FW
#include <dev/microcode/isp/asm_sbus.h>
#else
#define	ISP_1000_RISC_CODE	NULL
#endif

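/*
 * Machine-dependent vector handed to the machine-independent ISP core:
 * register access, interrupt decoding, DMA setup/teardown and reset
 * hooks for an SBus-attached ISP1000, plus the (optional) firmware
 * image to download.
 */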
static const struct ispmdvec mdvec = {
	isp_sbus_rd_isr,
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	isp_sbus_reset0,
	isp_sbus_reset1,
	NULL,
	ISP_1000_RISC_CODE,
	0,
	0
};

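/*
 * Per-instance softc.  The machine-independent struct ispsoftc must be
 * the first member so that the core code's (struct ispsoftc *) can be
 * cast back to the bus-specific softc.  sbus_dmamap[] holds one DMA map
 * per outstanding command, indexed by request handle, and sbus_poff[]
 * maps the core driver's register blocks onto SBus register offsets.
 */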
struct isp_sbussoftc {
	struct ispsoftc	sbus_isp;
	struct sbusdev	sbus_sd;
	sdparam		sbus_dev;
	bus_space_tag_t	sbus_bustag;
	bus_space_handle_t sbus_reg;
	int		sbus_node;
	int		sbus_pri;
	struct ispmdvec	sbus_mdvec;
	bus_dmamap_t	*sbus_dmamap;
	int16_t		sbus_poff[_NREG_BLKS];
};


static int isp_match(struct device *, struct cfdata *, void *);
static void isp_sbus_attach(struct device *, struct device *, void *);
CFATTACH_DECL(isp_sbus, sizeof (struct isp_sbussoftc),
    isp_match, isp_sbus_attach, NULL, NULL);

static int
isp_match(struct device *parent, struct cfdata *cf, void *aux)
{
	int rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_name, sa->sa_name) == 0 ||
		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
		strcmp("ptisp", sa->sa_name) == 0 ||
		strcmp("SUNW,isp", sa->sa_name) == 0 ||
		strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}


static void
isp_sbus_attach(struct device *parent, struct device *self, void *aux)
{
	int freq, ispburst, sbusburst;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs) {
		sbus_promaddr_to_handle(sa->sa_bustag,
			sa->sa_promvaddrs[0], &sbc->sbus_reg);
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
			sa->sa_size, 0, &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = prom_getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000) / 1000000;
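		/*
		 * For example, a 25 MHz clock reported as 25000000 Hz
		 * yields (25000000 + 500000) / 1000000 == 25.
		 */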
#if	0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * Now figure out the proper burst sizes, etc., to use.
	 * Unfortunately, there is no ddi_dma_burstsizes here which
	 * walks up the tree finding the limiting burst size node (if
	 * any).
	 */
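	/*
	 * Bit n of a burst-sizes mask corresponds to a 2^n-byte burst.
	 * The device node's "burst-sizes" property is intersected with
	 * the parent bus's mask, 64- and 128-byte bursts are masked off,
	 * and the largest remaining size (32, 16 or 8 bytes) selects the
	 * FIFO threshold that will be programmed through dv_conf1.
	 */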
	sbusburst = ((struct sbus_softc *)parent)->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;
	ispburst = prom_getpropint(sa->sa_node, "burst-sizes", -1);
	if (ispburst == -1) {
		ispburst = sbusburst;
	}
	ispburst &= sbusburst;
	ispburst &= ~(1 << 7);
	ispburst &= ~(1 << 6);
	sbc->sbus_mdvec.dv_conf1 = 0;
	if (ispburst & (1 << 5)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32;
	} else if (ispburst & (1 << 4)) {
		sbc->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16;
	} else if (ispburst & (1 << 3)) {
		sbc->sbus_mdvec.dv_conf1 =
		    BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8;
	}
	if (sbc->sbus_mdvec.dv_conf1) {
		sbc->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	isp->isp_dmatag = sa->sa_dmatag;
	MEMZERO(isp->isp_param, sizeof (sdparam));

	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO,
	    isp_sbus_intr, sbc);
	sbus_establish(&sbc->sbus_sd, &sbc->sbus_isp.isp_osinfo._dev);

	/*
	 * Set up logging levels.
	 */
#ifdef	ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef	SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef	DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#endif

	isp->isp_confopts = device_cfdata(self)->cf_flags;
	isp->isp_role = ISP_DEFAULT_ROLES;

	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;

	/*
	 * Mark things if we're a PTI SBus adapter.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		SDPARAM(isp)->isp_ptisp = 1;
	}
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	ISP_ENABLE_INTS(isp);
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

	/*
	 * Do generic attach.
	 */
	ISP_UNLOCK(isp);
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
	}
}


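/*
 * Reset hooks called by the core: interrupts are disabled before the
 * chip is reset and re-enabled once the reset has completed.
 */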
static void
isp_sbus_reset0(struct ispsoftc *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_sbus_reset1(struct ispsoftc *isp)
{
	ISP_ENABLE_INTS(isp);
}

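/*
 * Interrupt handler.  If nothing is pending the interrupt is counted as
 * bogus and left unclaimed; otherwise isp_intr() runs with the
 * osinfo.onintstack flag set, so the NetBSD glue can tell that it is
 * being called from interrupt context.
 */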
static int
isp_sbus_intr(void *arg)
{
	uint32_t isr;
	uint16_t sema, mbox;
	struct ispsoftc *isp = arg;

	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
		return (0);
	} else {
		struct isp_sbussoftc *sbc = arg;
		sbc->sbus_isp.isp_osinfo.onintstack = 1;
		isp_intr(isp, isr, sema, mbox);
		sbc->sbus_isp.isp_osinfo.onintstack = 0;
		return (1);
	}
}

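/*
 * IspVirt2Off() turns one of the core driver's "virtual" register
 * offsets into an SBus offset: the block bits index sbus_poff[] (set up
 * at attach time) and the low byte is the offset within that block, so
 * a BIU register, for example, works out to roughly
 * BIU_REGS_OFF + (regoff & 0xff).  BXR2() is shorthand for a 16-bit
 * bus_space read at such an offset.
 */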
#define	IspVirt2Off(a, x)	\
	(((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(sbc, off)		\
	bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, off)

static int
isp_sbus_rd_isr(struct ispsoftc *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	uint32_t isr;
	uint16_t sema;

	isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR));
	sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA));
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		*mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0));
	}
	return (1);
}

static uint32_t
isp_sbus_rd_reg(struct ispsoftc *isp, int regoff)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(struct ispsoftc *isp, int regoff, uint32_t val)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

static int
isp_sbus_mbxdma(struct ispsoftc *isp)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_segment_t reqseg, rspseg;
	int reqrs, rsprs, i, progress;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = isp->isp_maxcmds * sizeof (XS_T *);
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	MEMZERO(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(isp->isp_dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(isp->isp_dmatag,
			    sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request and response queues
	 */
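	/*
	 * Each queue takes four steps: bus_dmamem_alloc, bus_dmamem_map,
	 * bus_dmamap_create and bus_dmamap_load.  "progress" counts the
	 * steps that have succeeded so that the dmafail path below can
	 * unwind exactly what was done.
	 */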
	progress = 0;
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &reqseg, 1, &reqrs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &reqseg, reqrs, len,
	    (void *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rqdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rqdmap,
	    isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	progress++;
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(isp->isp_dmatag, len, 0, 0, &rspseg, 1, &rsprs,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamem_map(isp->isp_dmatag, &rspseg, rsprs, len,
	    (void *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_create(isp->isp_dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rsdmap) != 0) {
		goto dmafail;
	}
	progress++;
	if (bus_dmamap_load(isp->isp_dmatag, isp->isp_rsdmap,
	    isp->isp_result, len, NULL, BUS_DMA_NOWAIT) != 0) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	isp_prt(isp, ISP_LOGERR, "Mailbox DMA Setup Failure");

	if (progress >= 8) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 7) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rsdmap);
	}
	if (progress >= 6) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_result, ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)));
	}
	if (progress >= 5) {
		bus_dmamem_free(isp->isp_dmatag, &rspseg, rsprs);
	}

	if (progress >= 4) {
		bus_dmamap_unload(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 3) {
		bus_dmamap_destroy(isp->isp_dmatag, isp->isp_rqdmap);
	}
	if (progress >= 2) {
		bus_dmamem_unmap(isp->isp_dmatag,
		    isp->isp_rquest, ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)));
	}
	if (progress >= 1) {
		bus_dmamem_free(isp->isp_dmatag, &reqseg, reqrs);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(isp->isp_dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */
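/*
 * The handle indexes the per-command DMA map created in isp_sbus_mbxdma().
 * Any data transfer is loaded into that map and synced for its direction,
 * the request's data segment descriptor is filled in, and the request is
 * copied into the queue entry via isp_put_request() or, for CDBs longer
 * than 12 bytes, isp_put_extended_request(); in the latter case the single
 * data segment travels in a separate continuation entry.
 */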

static int
isp_sbus_dmasetup(struct ispsoftc *isp, XS_T *xs, ispreq_t *rq,
    uint32_t *nxtip, uint32_t optr)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispreq_t *qep;
	int error, cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: DMA map already allocated", isp->isp_name);
		/* NOTREACHED */
	}
	error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
	    NULL, (cansleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) |
	    BUS_DMA_STREAMING);
	if (error != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		if (error == EAGAIN || error == ENOMEM)
			return (CMD_EAGAIN);
		else
			return (CMD_COMPLETE);
	}

	bus_dmamap_sync(isp->isp_dmatag, dmap, 0, xs->datalen,
	    in ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	if (XS_CDBLEN(xs) > 12) {
		uint32_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		onxti = *nxtip;
		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, onxti);
		*nxtip = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (*nxtip == optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(isp->isp_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	} else {
		rq->req_seg_count = 1;
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
	}

mbxsync:
	if (XS_CDBLEN(xs) > 12) {
		isp_put_extended_request(isp,
		    (ispextreq_t *)rq, (ispextreq_t *) qep);
	} else {
		isp_put_request(isp, rq, qep);
	}
	return (CMD_QUEUED);
}

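/*
 * Undo isp_sbus_dmasetup() for a completed transfer: sync the
 * per-command DMA map for the direction of the transfer and unload it
 * so that it can be reused.
 */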
static void
isp_sbus_dmateardown(struct ispsoftc *isp, XS_T *xs, uint32_t handle)
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: DMA map not already allocated", isp->isp_name);
		/* NOTREACHED */
	}
	bus_dmamap_sync(isp->isp_dmatag, dmap, 0,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(isp->isp_dmatag, dmap);
}
    592