dpt.c revision 1.24
      1 /*	$NetBSD: dpt.c,v 1.24 2000/11/14 18:21:01 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
      9  * Aerospace Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Portions of this code fall under the following copyright:
     42  *
     43  * Originally written by Julian Elischer (julian (at) tfs.com)
     44  * for TRW Financial Systems for use under the MACH(2.5) operating system.
     45  *
     46  * TRW Financial Systems, in accordance with their agreement with Carnegie
     47  * Mellon University, makes this software available to CMU to distribute
     48  * or use in any manner that they see fit as long as this message is kept with
     49  * the software. For this reason TFS also grants any other persons or
     50  * organisations permission to use or modify this software.
     51  *
     52  * TFS supplies this software to be publicly redistributed
     53  * on the understanding that TFS is not responsible for the correct
     54  * functioning of this software in any circumstances.
     55  */
     56 
     57 #include <sys/cdefs.h>
     58 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.24 2000/11/14 18:21:01 thorpej Exp $");
     59 
     60 #include <sys/param.h>
     61 #include <sys/systm.h>
     62 #include <sys/kernel.h>
     63 #include <sys/device.h>
     64 #include <sys/queue.h>
     65 #include <sys/proc.h>
     66 #include <sys/buf.h>
     67 #include <sys/endian.h>
     68 
     69 #include <uvm/uvm_extern.h>
     70 
     71 #include <machine/bswap.h>
     72 #include <machine/bus.h>
     73 
     74 #include <dev/scsipi/scsi_all.h>
     75 #include <dev/scsipi/scsipi_all.h>
     76 #include <dev/scsipi/scsiconf.h>
     77 
     78 #include <dev/ic/dptreg.h>
     79 #include <dev/ic/dptvar.h>
     80 
     81 /* A default for our link struct */
     82 static struct scsipi_device dpt_dev = {
     83 	NULL,			/* Use default error handler */
     84 	NULL,			/* have a queue, served by this */
     85 	NULL,			/* have no async handler */
     86 	NULL,			/* Use default 'done' routine */
     87 };
     88 
     89 static char *dpt_cname[] = {
     90 	"PM3334", "SmartRAID IV",
     91 	"PM3332", "SmartRAID IV",
     92 	"PM2144", "SmartCache IV",
     93 	"PM2044", "SmartCache IV",
     94 	"PM2142", "SmartCache IV",
     95 	"PM2042", "SmartCache IV",
     96 	"PM2041", "SmartCache IV",
     97 	"PM3224", "SmartRAID III",
     98 	"PM3222", "SmartRAID III",
     99 	"PM3021", "SmartRAID III",
    100 	"PM2124", "SmartCache III",
    101 	"PM2024", "SmartCache III",
    102 	"PM2122", "SmartCache III",
    103 	"PM2022", "SmartCache III",
    104 	"PM2021", "SmartCache III",
    105 	"SK2012", "SmartCache Plus",
    106 	"SK2011", "SmartCache Plus",
    107 	NULL,     "unknown adapter, please report using send-pr(1)",
    108 };
    109 
    110 void *dpt_sdh;				/* shutdown hook */
    111 
    112 /*
    113  * Handle an interrupt from the HBA.
    114  */
    115 int
    116 dpt_intr(xxx_sc)
    117 	void *xxx_sc;
    118 {
    119 	struct dpt_softc *sc;
    120 	struct dpt_ccb *ccb;
    121 	struct eata_sp *sp;
    122 	volatile int junk;
    123 
    124 	sc = xxx_sc;
    125 	sp = sc->sc_stp;
    126 
    127 #ifdef DEBUG
    128 	if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0) {
    129 		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
    130 		return (1);
    131 	}
    132 #endif
    133 
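         	/*
         	 * The HBA posts completion status for each finished command in
         	 * the EATA status packet (sc_stp); sp_ccbid identifies the CCB.
         	 */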
    134 	for (;;) {
    135 		/*
    136 		 * HBA might have interrupted while we were dealing with the
    137 		 * last completed command, since we ACK before we deal; keep
    138 		 * polling.
    139 		 */
    140 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    141 			break;
    142 
    143 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    144 		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
    145 
     146 		/* Might have looped before the HBA could clear HA_AUX_INTR */
    147 		if (sp->sp_ccbid == -1) {
    148 			DELAY(50);
    149 
    150 			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    151 				return (0);
    152 
    153 			printf("%s: no status\n", sc->sc_dv.dv_xname);
    154 
    155 			/* Re-sync DMA map */
    156 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    157 			    sc->sc_stpoff, sizeof(struct eata_sp),
    158 			    BUS_DMASYNC_POSTREAD);
    159 		}
    160 
    161 		/* Make sure CCB ID from status packet is realistic */
    162 		if (sp->sp_ccbid >= 0 && sp->sp_ccbid < sc->sc_nccbs) {
    163 			/* Sync up DMA map and cache cmd status */
    164 			ccb = sc->sc_ccbs + sp->sp_ccbid;
    165 
    166 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    167 			    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
    168 			    BUS_DMASYNC_POSTWRITE);
    169 
    170 			ccb->ccb_hba_status = sp->sp_hba_status & 0x7F;
    171 			ccb->ccb_scsi_status = sp->sp_scsi_status;
    172 
    173 			/*
    174 			 * Ack the interrupt and process the CCB. If this
    175 			 * is a private CCB it's up to dpt_poll() to notice.
    176 			 */
    177 			sp->sp_ccbid = -1;
    178 			ccb->ccb_flg |= CCB_INTR;
    179 			junk = dpt_inb(sc, HA_STATUS);
    180 			if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    181 				dpt_done_ccb(sc, ccb);
    182 		} else {
    183 			printf("%s: bogus status (returned CCB id %d)\n",
    184 			    sc->sc_dv.dv_xname, sp->sp_ccbid);
    185 
    186 			/* Ack the interrupt */
    187 			sp->sp_ccbid = -1;
    188 			junk = dpt_inb(sc, HA_STATUS);
    189 		}
    190 	}
    191 
    192 	return (1);
    193 }
    194 
    195 /*
    196  * Initialize and attach the HBA. This is the entry point from bus
    197  * specific probe-and-attach code.
    198  */
    199 void
    200 dpt_init(sc, intrstr)
    201 	struct dpt_softc *sc;
    202 	const char *intrstr;
    203 {
    204 	struct eata_inquiry_data *ei;
    205 	int i, j, error, rseg, maxchannel, maxtarget;
    206 	bus_dma_segment_t seg;
    207 	struct eata_cfg *ec;
    208 	struct scsipi_link *link;
    209 	char model[16];
    210 
    211 	ec = &sc->sc_ec;
    212 
    213 	/* Allocate the CCB/status packet/scratch DMA map and load */
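         	/*
         	 * The CCB array, EATA status packet and inquiry scratch buffer
         	 * share a single DMA-safe allocation, laid out in that order.
         	 */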
    214 	sc->sc_nccbs =
    215 	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
    216 	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
    217 	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
    218 	sc->sc_scrlen = DPT_SCRATCH_SIZE;
    219 	sc->sc_dmamapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
    220 	    sc->sc_scrlen + sizeof(struct eata_sp);
    221 
    222 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dmamapsize,
    223 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    224 		printf("%s: unable to allocate CCBs, error = %d\n",
    225 		    sc->sc_dv.dv_xname, error);
    226 		return;
    227 	}
    228 
    229 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_dmamapsize,
    230 	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    231 		printf("%s: unable to map CCBs, error = %d\n",
    232 		    sc->sc_dv.dv_xname, error);
    233 		return;
    234 	}
    235 
    236 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_dmamapsize,
    237 	    sc->sc_dmamapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
    238 		printf("%s: unable to create CCB DMA map, error = %d\n",
    239 		    sc->sc_dv.dv_xname, error);
    240 		return;
    241 	}
    242 
    243 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
    244 	    sc->sc_ccbs, sc->sc_dmamapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
    245 		printf("%s: unable to load CCB DMA map, error = %d\n",
    246 		    sc->sc_dv.dv_xname, error);
    247 		return;
    248 	}
    249 
    250 	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
    251 	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
    252 	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
    253 	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
    254 	sc->sc_stp->sp_ccbid = -1;
    255 
    256 	/* Initialize the CCBs */
    257 	TAILQ_INIT(&sc->sc_free_ccb);
    258 	i = dpt_create_ccbs(sc, sc->sc_ccbs, sc->sc_nccbs);
    259 
    260 	if (i == 0) {
    261 		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
    262 		return;
    263 	} else if (i != sc->sc_nccbs) {
    264 		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
    265 		    sc->sc_nccbs);
    266 		sc->sc_nccbs = i;
    267 	}
    268 
    269 	/* Set shutdownhook before we start any device activity */
    270 	if (dpt_sdh == NULL)
    271 		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
    272 
    273 	/* Get the page 0 inquiry data from the HBA */
    274 	dpt_hba_inquire(sc, &ei);
    275 
    276 	/*
    277 	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
    278 	 * dpt0: interrupting at irq 10
    279 	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
    280 	 */
     281 	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
     282 		;
     283 	ei->ei_vendor[i] = '\0';
     284 
     285 	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
     286 		model[i] = ei->ei_model[i];
     287 	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; j++)
     288 		model[i++] = ei->ei_suffix[j];
    289 	model[i] = '\0';
    290 
     291 	/* Find the canonical name for the board */
    292 	for (i = 0; dpt_cname[i] != NULL; i += 2)
    293 		if (memcmp(ei->ei_model, dpt_cname[i], 6) == 0)
    294 			break;
    295 
    296 	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
    297 
    298 	if (intrstr != NULL)
    299 		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);
    300 
    301 	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
    302 	    EC_F3_MAX_CHANNEL_SHIFT;
    303 	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
    304 	    EC_F3_MAX_TARGET_SHIFT;
    305 
    306 	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
    307 	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
    308 
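         	/* The EATA config stores the HBA IDs in reverse channel order. */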
    309 	for (i = 0; i <= maxchannel; i++) {
    310 		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
    311 		printf(" %d", sc->sc_hbaid[i]);
    312 	}
    313 	printf("\n");
    314 
    315 	/* Reset the SCSI bus */
    316 	if (dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_BUS_RESET))
    317 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
    318 
    319 	/* Fill in the adapter, each link and attach in turn */
    320 	sc->sc_adapter.scsipi_cmd = dpt_scsi_cmd;
    321 	sc->sc_adapter.scsipi_minphys = dpt_minphys;
    322 
    323 	for (i = 0; i <= maxchannel; i++) {
    324 		link = &sc->sc_link[i];
    325 		link->scsipi_scsi.channel = i;
    326 		link->scsipi_scsi.adapter_target = sc->sc_hbaid[i];
    327 		link->scsipi_scsi.max_lun = ec->ec_maxlun;
    328 		link->scsipi_scsi.max_target = maxtarget;
    329 		link->type = BUS_SCSI;
    330 		link->device = &dpt_dev;
    331 		link->adapter = &sc->sc_adapter;
    332 		link->adapter_softc = sc;
    333 		link->openings = sc->sc_nccbs;	/* XXX */
    334 		config_found(&sc->sc_dv, link, scsiprint);
    335 	}
    336 }
    337 
    338 /*
    339  * Our 'shutdownhook' to cleanly shut down the HBA. The HBA must flush
     340  * all data from its cache and mark array groups as clean.
    341  */
    342 void
    343 dpt_shutdown(xxx_sc)
    344 	void *xxx_sc;
    345 {
    346 	extern struct cfdriver dpt_cd;
    347 	struct dpt_softc *sc;
    348 	int i;
    349 
    350 	printf("shutting down dpt devices...");
    351 
    352 	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
    353 		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
    354 			continue;
    355 		dpt_cmd(sc, NULL, 0, CP_IMMEDIATE, CPI_POWEROFF_WARN);
    356 	}
    357 
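         	/* Allow the HBAs five seconds to flush their caches. */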
    358 	DELAY(5000*1000);
    359 	printf(" done\n");
    360 }
    361 
    362 /*
    363  * Send an EATA command to the HBA.
    364  */
    365 int
    366 dpt_cmd(sc, cp, addr, eatacmd, icmd)
    367 	struct dpt_softc *sc;
    368 	struct eata_cp *cp;
    369 	u_int32_t addr;
    370 	int eatacmd, icmd;
    371 {
    372 	int i;
    373 
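         	/* Wait up to one second for the HBA to accept a new command. */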
    374 	for (i = 20000; i; i--) {
    375 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
    376 			break;
    377 		DELAY(50);
    378 	}
    379 
    380 	/* Not the most graceful way to handle this */
    381 	if (i == 0) {
    382 		printf("%s: HBA timeout on EATA command issue; aborting\n",
    383 		    sc->sc_dv.dv_xname);
    384 		return (-1);
    385 	}
    386 
    387 	if (cp == NULL)
    388 		addr = 0;
    389 
    390 	dpt_outl(sc, HA_DMA_BASE, (u_int32_t)addr);
    391 
    392 	if (eatacmd == CP_IMMEDIATE) {
    393 		if (cp == NULL) {
    394 			/* XXX should really pass meaningful values */
    395 			dpt_outb(sc, HA_ICMD_CODE2, 0);
    396 			dpt_outb(sc, HA_ICMD_CODE1, 0);
    397 		}
    398 		dpt_outb(sc, HA_ICMD, icmd);
    399 	}
    400 
    401 	dpt_outb(sc, HA_COMMAND, eatacmd);
    402 	return (0);
    403 }
    404 
    405 /*
    406  * Wait for the HBA status register to reach a specific state.
    407  */
    408 int
    409 dpt_wait(sc, mask, state, ms)
    410 	struct dpt_softc *sc;
    411 	u_int8_t mask, state;
    412 	int ms;
    413 {
    414 
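         	/* Poll the status register every 100us; `ms' is in milliseconds. */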
    415 	for (ms *= 10; ms; ms--) {
    416 		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
    417 			return (0);
    418 		DELAY(100);
    419 	}
    420 	return (-1);
    421 }
    422 
    423 /*
    424  * Wait for the specified CCB to finish. This is used when we may not be
     425 	 * able to sleep and/or interrupts are disabled (e.g. autoconfiguration).
    426  * The timeout value from the CCB is used. This should only be used for
    427  * CCB_PRIVATE requests; otherwise the CCB will get recycled before we get
    428  * a look at it.
    429  */
    430 int
    431 dpt_poll(sc, ccb)
    432 	struct dpt_softc *sc;
    433 	struct dpt_ccb *ccb;
    434 {
    435 	int i;
    436 
    437 #ifdef DEBUG
    438 	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    439 		panic("dpt_poll: called for non-CCB_PRIVATE request\n");
    440 #endif
    441 
    442  	if ((ccb->ccb_flg & CCB_INTR) != 0)
    443 		return (0);
    444 
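         	/* ccb_timeout is in milliseconds; we poll every 50us. */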
    445 	for (i = ccb->ccb_timeout * 20; i; i--) {
    446 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0) {
    447 			dpt_intr(sc);
    448 			if ((ccb->ccb_flg & CCB_INTR) != 0)
    449 				return (0);
    450 		}
    451 		DELAY(50);
    452 	}
    453 
    454 	return (-1);
    455 }
    456 
    457 /*
    458  * Read the EATA configuration from the HBA and perform some sanity checks.
    459  */
    460 int
    461 dpt_readcfg(sc)
    462 	struct dpt_softc *sc;
    463 {
    464 	struct eata_cfg *ec;
    465 	int i, j, stat;
    466 	u_int16_t *p;
    467 
    468 	ec = &sc->sc_ec;
    469 
    470 	/* Older firmware may puke if we talk to it too soon after reset */
    471 	dpt_outb(sc, HA_COMMAND, CP_RESET);
    472 	DELAY(750000);
    473 
    474 	for (i = 1000; i; i--) {
    475 		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
    476 			break;
    477 		DELAY(2000);
    478 	}
    479 
    480 	if (i == 0) {
    481 		printf("%s: HBA not ready after reset (hba status:%02x)\n",
    482 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    483 		return (-1);
    484 	}
    485 
    486 	while((((stat = dpt_inb(sc, HA_STATUS))
    487 	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
    488 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
    489 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
    490 	    && dpt_wait(sc, HA_ST_BUSY, 0, 2000)) {
    491 		/* RAID drives still spinning up? */
    492 		if((dpt_inb(sc, HA_ERROR) != 'D')
    493 		    || (dpt_inb(sc, HA_ERROR + 1) != 'P')
    494 		    || (dpt_inb(sc, HA_ERROR + 2) != 'T')) {
    495 		    	printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
    496 			return (-1);
    497 		}
    498 	}
    499 
    500 	/*
    501 	 * Issue the read-config command and wait for the data to appear.
    502 	 * XXX we shouldn't be doing this with PIO, but it makes it a lot
    503 	 * easier as no DMA setup is required.
    504 	 */
    505 	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
    506 	memset(ec, 0, sizeof(*ec));
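         	/*
         	 * Read up to and including the ec_cfglen field first, so that we
         	 * know how much configuration data follows.
         	 */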
    507 	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
    508 	    sizeof(ec->ec_cfglen)) >> 1;
    509 	p = (u_int16_t *)ec;
    510 
    511 	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
    512 		printf("%s: cfg data didn't appear (hba status:%02x)\n",
    513 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    514   		return (-1);
    515   	}
    516 
    517 	/* Begin reading */
    518  	while (i--)
    519 		*p++ = dpt_inw(sc, HA_DATA);
    520 
    521 	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
    522 	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    523 	    - sizeof(ec->ec_cfglen)))
    524 		i = sizeof(struct eata_cfg)
    525 		  - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    526 		  - sizeof(ec->ec_cfglen);
    527 
    528 	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
    529 	    sizeof(ec->ec_cfglen);
    530 	i >>= 1;
    531 
    532 	while (i--)
    533 		*p++ = dpt_inw(sc, HA_DATA);
    534 
    535 	/* Flush until we have read 512 bytes. */
    536 	i = (512 - j + 1) >> 1;
    537 	while (i--)
    538  		dpt_inw(sc, HA_DATA);
    539 
    540 	/* Defaults for older Firmware */
    541 	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
    542 		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
    543 
    544 	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
    545 		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
    546 		return (-1);
    547 	}
    548 
    549 	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
    550 	        printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
    551 		return (-1);
    552 	}
    553 
    554 	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
    555 		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
    556 		return (-1);
    557 	}
    558 
    559 	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
    560 	        printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
    561 		return (-1);
    562 	}
    563 
    564 	return (0);
    565 }
    566 
    567 /*
    568  * Adjust the size of each I/O before it passes to the SCSI layer.
    569  */
    570 void
    571 dpt_minphys(bp)
    572 	struct buf *bp;
    573 {
    574 
    575 	if (bp->b_bcount > DPT_MAX_XFER)
    576 		bp->b_bcount = DPT_MAX_XFER;
    577 	minphys(bp);
    578 }
    579 
    580 /*
    581  * Put a CCB onto the freelist.
    582  */
    583 void
    584 dpt_free_ccb(sc, ccb)
    585 	struct dpt_softc *sc;
    586 	struct dpt_ccb *ccb;
    587 {
    588 	int s;
    589 
    590 	s = splbio();
    591 	ccb->ccb_flg = 0;
    592 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, ccb_chain);
    593 
    594 	/* Wake anybody waiting for a free ccb */
    595 	if (ccb->ccb_chain.tqe_next == 0)
    596 		wakeup(&sc->sc_free_ccb);
    597 	splx(s);
    598 }
    599 
    600 /*
    601  * Initialize the specified CCB.
    602  */
    603 int
    604 dpt_init_ccb(sc, ccb)
    605 	struct dpt_softc *sc;
    606 	struct dpt_ccb *ccb;
    607 {
    608 	int error;
    609 
    610 	/* Create the DMA map for this CCB's data */
    611 	error = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER, DPT_SG_SIZE,
    612 	    DPT_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    613 	    &ccb->ccb_dmamap_xfer);
    614 
    615 	if (error) {
    616 		printf("%s: can't create ccb dmamap (%d)\n",
    617 		    sc->sc_dv.dv_xname, error);
    618 		return (error);
    619 	}
    620 
    621 	ccb->ccb_flg = 0;
    622 	ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
    623 	    CCB_OFF(sc, ccb);
    624 	return (0);
    625 }
    626 
    627 /*
    628  * Create a set of CCBs and add them to the free list.
    629  */
    630 int
    631 dpt_create_ccbs(sc, ccbstore, count)
    632 	struct dpt_softc *sc;
    633 	struct dpt_ccb *ccbstore;
    634 	int count;
    635 {
    636 	struct dpt_ccb *ccb;
    637 	int i, error;
    638 
    639 	memset(ccbstore, 0, sizeof(struct dpt_ccb) * count);
    640 
    641 	for (i = 0, ccb = ccbstore; i < count; i++, ccb++) {
    642 		if ((error = dpt_init_ccb(sc, ccb)) != 0) {
    643 			printf("%s: unable to init ccb, error = %d\n",
    644 			    sc->sc_dv.dv_xname, error);
    645 			break;
    646 		}
    647 		ccb->ccb_id = i;
    648 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_chain);
    649 	}
    650 
    651 	return (i);
    652 }
    653 
    654 /*
     655  * Get a free CCB from the free list. If none are available right now
     656  * and we are permitted to sleep, wait until one becomes free; otherwise
     657  * return NULL.
    658  */
    659 struct dpt_ccb *
    660 dpt_alloc_ccb(sc, flg)
    661 	struct dpt_softc *sc;
    662 	int flg;
    663 {
    664 	struct dpt_ccb *ccb;
    665 	int s;
    666 
    667 	s = splbio();
    668 
    669 	for (;;) {
    670 		if ((ccb = TAILQ_FIRST(&sc->sc_free_ccb)) != NULL) {
    671 			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_chain);
    672 			break;
    673 		}
    674 		if ((flg & XS_CTL_NOSLEEP) != 0) {
    675 			ccb = NULL;
    676 			break;
    677 		}
    678 		tsleep(&sc->sc_free_ccb, PRIBIO, "dptccb", 0);
    679 	}
    680 
    681 	splx(s);
    682 	return (ccb);
    683 }
    684 
    685 /*
     686  * We have a CCB which has been processed by the HBA; now we look to see how
    687  * the operation went. CCBs marked with CCB_PRIVATE are not automatically
    688  * passed here by dpt_intr().
    689  */
    690 void
    691 dpt_done_ccb(sc, ccb)
    692 	struct dpt_softc *sc;
    693 	struct dpt_ccb *ccb;
    694 {
    695 	struct scsipi_sense_data *s1, *s2;
    696 	struct scsipi_xfer *xs;
    697 	bus_dma_tag_t dmat;
    698 
    699 	dmat = sc->sc_dmat;
    700 	xs = ccb->ccb_xs;
    701 
    702 	SC_DEBUG(xs->sc_link, SDEV_DB2, ("dpt_done_ccb\n"));
    703 
    704 	/*
    705 	 * If we were a data transfer, unload the map that described the
    706 	 * data buffer.
    707 	 */
    708 	if (xs->datalen != 0) {
    709 		bus_dmamap_sync(dmat, ccb->ccb_dmamap_xfer, 0,
    710 		    ccb->ccb_dmamap_xfer->dm_mapsize,
    711 		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
    712 		    BUS_DMASYNC_POSTWRITE);
    713 		bus_dmamap_unload(dmat, ccb->ccb_dmamap_xfer);
    714 	}
    715 
    716 	if (xs->error == XS_NOERROR) {
    717 		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
    718 			switch (ccb->ccb_hba_status) {
    719 			case SP_HBA_ERROR_SEL_TO:
    720 				xs->error = XS_SELTIMEOUT;
    721 				break;
    722 			case SP_HBA_ERROR_RESET:
    723 				xs->error = XS_RESET;
    724 				break;
    725 			default:	/* Other scsi protocol messes */
    726 				printf("%s: HBA status %x\n",
    727 				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
    728 				xs->error = XS_DRIVER_STUFFUP;
    729 			}
    730 		} else if (ccb->ccb_scsi_status != SCSI_OK) {
    731 			switch (ccb->ccb_scsi_status) {
    732 			case SCSI_CHECK:
    733 				s1 = &ccb->ccb_sense;
    734 				s2 = &xs->sense.scsi_sense;
    735 				*s2 = *s1;
    736 				xs->error = XS_SENSE;
    737 				break;
    738 			case SCSI_BUSY:
    739 				xs->error = XS_BUSY;
    740 				break;
    741 			default:
    742 				printf("%s: SCSI status %x\n",
    743 				    sc->sc_dv.dv_xname, ccb->ccb_scsi_status);
    744 				xs->error = XS_DRIVER_STUFFUP;
    745 			}
    746 		} else
    747 			xs->resid = 0;
    748 
    749 		xs->status = ccb->ccb_scsi_status;
    750 	}
    751 
    752 	/* Free up the CCB and mark the command as done */
    753 	dpt_free_ccb(sc, ccb);
    754 	xs->xs_status |= XS_STS_DONE;
    755 	scsipi_done(xs);
    756 
    757 	/*
    758 	 * If there are entries in the software queue, try to run the first
    759 	 * one. We should be more or less guaranteed to succeed, since we
    760 	 * just freed an CCB. NOTE: dpt_scsi_cmd() relies on our calling it
     761 	 * just freed a CCB. NOTE: dpt_scsi_cmd() relies on our calling it
    762 	 */
    763 	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
    764 		dpt_scsi_cmd(xs);
    765 }
    766 
    767 /*
    768  * Start a SCSI command.
    769  */
    770 int
    771 dpt_scsi_cmd(xs)
    772 	struct scsipi_xfer *xs;
    773 {
    774 	int error, i, flags, s, fromqueue, dontqueue, nowait;
    775 	struct scsipi_link *sc_link;
    776 	struct dpt_softc *sc;
    777 	struct dpt_ccb *ccb;
    778 	struct eata_sg *sg;
    779 	struct eata_cp *cp;
    780 	bus_dma_tag_t dmat;
    781 	bus_dmamap_t xfer;
    782 
    783 	sc_link = xs->sc_link;
    784 	flags = xs->xs_control;
    785 	sc = sc_link->adapter_softc;
    786 	dmat = sc->sc_dmat;
    787 	fromqueue = 0;
    788 	dontqueue = 0;
    789 	nowait = 0;
    790 
    791 	SC_DEBUG(sc_link, SDEV_DB2, ("dpt_scsi_cmd\n"));
    792 
    793 	/* Protect the queue */
    794 	s = splbio();
    795 
    796 	/*
    797 	 * If we're running the queue from dpt_done_ccb(), we've been called
    798 	 * with the first queue entry as our argument.
    799 	 */
    800 	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
    801 		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
    802 		fromqueue = 1;
    803 		nowait = 1;
    804 	} else {
    805 		/* Cmds must be no more than 12 bytes for us */
    806 		if (xs->cmdlen > 12) {
    807 			splx(s);
    808 			xs->error = XS_DRIVER_STUFFUP;
    809 			return (COMPLETE);
    810 		}
    811 
    812 		/*
    813 		 * XXX we can't reset devices just yet. Apparently some
    814 		 * older firmware revisions don't even support it.
    815 		 */
    816 		if ((flags & XS_CTL_RESET) != 0) {
    817 			xs->error = XS_DRIVER_STUFFUP;
    818 			return (COMPLETE);
    819 		}
    820 
    821 		/* Polled requests can't be queued for later */
    822 		dontqueue = flags & XS_CTL_POLL;
    823 
    824 		/* If there are jobs in the queue, run them first */
    825 		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
    826 			/*
    827 			 * If we can't queue we abort, since we must
    828 			 * preserve the queue order.
    829 			 */
    830 			if (dontqueue) {
    831 				splx(s);
    832 				xs->error = XS_DRIVER_STUFFUP;
    833 				return (TRY_AGAIN_LATER);
    834 			}
    835 
    836 			/* Swap with the first queue entry. */
    837 			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
    838 			xs = TAILQ_FIRST(&sc->sc_queue);
    839 			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
    840 			fromqueue = 1;
    841 		}
    842 	}
    843 
    844 	/* Get a CCB */
    845 	if (nowait)
    846 		flags |= XS_CTL_NOSLEEP;
    847 	if ((ccb = dpt_alloc_ccb(sc, flags)) == NULL) {
    848 		/* If we can't queue, we lose */
    849 		if (dontqueue) {
    850 			splx(s);
    851 			xs->error = XS_DRIVER_STUFFUP;
    852 			return (TRY_AGAIN_LATER);
    853 		}
    854 
    855 		/*
    856 		 * Stuff request into the queue, in front if we came off
    857 		 * it in the first place.
    858 		 */
    859 		if (fromqueue)
    860 			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
    861 		else
    862 			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
    863 		splx(s);
    864 		return (SUCCESSFULLY_QUEUED);
    865 	}
    866 
    867 	splx(s);
    868 
    869 	ccb->ccb_xs = xs;
    870 	ccb->ccb_timeout = xs->timeout;
    871 
    872 	cp = &ccb->ccb_eata_cp;
    873 	memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
    874 	cp->cp_ccbid = ccb->ccb_id;
    875 	cp->cp_senselen = sizeof(ccb->ccb_sense);
    876 	cp->cp_stataddr = htobe32(sc->sc_stppa);
    877 	cp->cp_ctl0 = CP_C0_AUTO_SENSE;
    878 	cp->cp_ctl1 = 0;
    879 	cp->cp_ctl2 = 0;
    880 	cp->cp_ctl3 = sc_link->scsipi_scsi.target << CP_C3_ID_SHIFT;
    881 	cp->cp_ctl3 |= sc_link->scsipi_scsi.channel << CP_C3_CHANNEL_SHIFT;
    882 	cp->cp_ctl4 = sc_link->scsipi_scsi.lun << CP_C4_LUN_SHIFT;
    883 	cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
    884 
    885 	if ((flags & XS_CTL_DATA_IN) != 0)
    886 		cp->cp_ctl0 |= CP_C0_DATA_IN;
    887 	if ((flags & XS_CTL_DATA_OUT) != 0)
    888 		cp->cp_ctl0 |= CP_C0_DATA_OUT;
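         	/* Commands addressed to the HBA itself are interpreted by its firmware */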
    889 	if (sc->sc_hbaid[sc_link->scsipi_scsi.channel] ==
    890 	    sc_link->scsipi_scsi.target)
    891 	    	cp->cp_ctl0 |= CP_C0_INTERPRET;
    892 
     893 	/* Synchronous xfers mustn't write back through the cache */
    894 	if (xs->bp != NULL && (xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
    895 		cp->cp_ctl2 |= CP_C2_NO_CACHE;
    896 
    897 	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
    898 	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
    899 
    900 	if (xs->datalen != 0) {
    901 		xfer = ccb->ccb_dmamap_xfer;
    902 #ifdef TFS
    903 		if ((flags & XS_CTL_DATA_UIO) != 0) {
    904 			error = bus_dmamap_load_uio(dmat, xfer,
    905 			    (struct uio *)xs->data, (flags & XS_CTL_NOSLEEP) ?
    906 			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
    907 		} else
    908 #endif	/* TFS */
    909 		{
    910 			error = bus_dmamap_load(dmat, xfer, xs->data,
    911 			    xs->datalen, NULL, (flags & XS_CTL_NOSLEEP) ?
    912 			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
    913 		}
    914 
    915 		if (error) {
    916 			printf("%s: dpt_scsi_cmd: ", sc->sc_dv.dv_xname);
    917 			if (error == EFBIG)
    918 				printf("more than %d dma segs\n", DPT_SG_SIZE);
    919 			else
    920 				printf("error %d loading dma map\n", error);
    921 
    922 			xs->error = XS_DRIVER_STUFFUP;
    923 			dpt_free_ccb(sc, ccb);
    924 			return (COMPLETE);
    925 		}
    926 
    927 		bus_dmamap_sync(dmat, xfer, 0, xfer->dm_mapsize,
    928 		    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
    929 		    BUS_DMASYNC_PREWRITE);
    930 
    931 		/* Don't bother using scatter/gather for just 1 segment */
    932 		if (xfer->dm_nsegs == 1) {
    933 			cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
    934 			cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
    935 		} else {
    936 			/*
    937 			 * Load the hardware scatter/gather map with the
    938 			 * contents of the DMA map.
    939 			 */
    940 			sg = ccb->ccb_sg;
    941 			for (i = 0; i < xfer->dm_nsegs; i++, sg++) {
    942 				sg->sg_addr = htobe32(xfer->dm_segs[i].ds_addr);
    943 				sg->sg_len = htobe32(xfer->dm_segs[i].ds_len);
    944 			}
    945 			cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
    946 			    sc->sc_dmamap->dm_segs[0].ds_addr +
    947 			    offsetof(struct dpt_ccb, ccb_sg));
    948 			cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
    949 			cp->cp_ctl0 |= CP_C0_SCATTER;
    950 		}
    951 	} else {
    952 		cp->cp_dataaddr = 0;
    953 		cp->cp_datalen = 0;
    954 	}
    955 
    956 	/* Sync up CCB and status packet */
    957 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
    958 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
    959 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    960 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
    961 
    962 	/*
    963 	 * Start the command. If we are polling on completion, mark it
    964 	 * private so that dpt_intr/dpt_done_ccb don't recycle the CCB
    965 	 * without us noticing.
    966 	 */
    967 	if (dontqueue != 0)
    968 		ccb->ccb_flg |= CCB_PRIVATE;
    969 
    970 	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0)) {
    971 		printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
    972 		xs->error = XS_DRIVER_STUFFUP;
    973 		dpt_free_ccb(sc, ccb);
    974 		return (TRY_AGAIN_LATER);
    975 	}
    976 
    977 	if (dontqueue == 0)
    978 		return (SUCCESSFULLY_QUEUED);
    979 
    980 	/* Don't wait longer than this single command wants to wait */
    981 	if (dpt_poll(sc, ccb)) {
    982 		dpt_timeout(ccb);
    983 		/* Wait for abort to complete */
    984 		if (dpt_poll(sc, ccb))
    985 			dpt_timeout(ccb);
    986 	}
    987 
    988 	dpt_done_ccb(sc, ccb);
    989 	return (COMPLETE);
    990 }
    991 
    992 /*
     993  * Specified CCB has timed out; abort it.
    994  */
    995 void
    996 dpt_timeout(arg)
    997 	void *arg;
    998 {
    999 	struct scsipi_link *sc_link;
   1000 	struct scsipi_xfer *xs;
   1001 	struct dpt_softc *sc;
   1002  	struct dpt_ccb *ccb;
   1003 	int s;
   1004 
   1005 	ccb = arg;
   1006 	xs = ccb->ccb_xs;
   1007 	sc_link = xs->sc_link;
   1008 	sc = sc_link->adapter_softc;
   1009 
   1010 	scsi_print_addr(sc_link);
   1011 	printf("timed out (status:%02x aux status:%02x)",
   1012 	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
   1013 
   1014 	s = splbio();
   1015 
   1016 	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
   1017 		/* Abort timed out, reset the HBA */
   1018 		printf(" AGAIN, resetting HBA\n");
   1019 		dpt_outb(sc, HA_COMMAND, CP_RESET);
   1020 		DELAY(750000);
   1021 	} else {
   1022 		/* Abort the operation that has timed out */
   1023 		printf("\n");
   1024 		ccb->ccb_xs->error = XS_TIMEOUT;
   1025 		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
   1026 		ccb->ccb_flg |= CCB_ABORT;
   1027 		/* Start the abort */
   1028 		if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa,
   1029 		    CP_IMMEDIATE, CPI_SPEC_ABORT))
   1030 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
   1031 	}
   1032 
   1033 	splx(s);
   1034 }
   1035 
   1036 /*
   1037  * Get inquiry data from the adapter.
   1038  */
   1039 void
   1040 dpt_hba_inquire(sc, ei)
   1041 	struct dpt_softc *sc;
   1042 	struct eata_inquiry_data **ei;
   1043 {
   1044 	struct dpt_ccb *ccb;
   1045 	struct eata_cp *cp;
   1046 	bus_dma_tag_t dmat;
   1047 
   1048 	*ei = (struct eata_inquiry_data *)sc->sc_scr;
   1049 	dmat = sc->sc_dmat;
   1050 
   1051 	/* Get a CCB and mark as private */
   1052 	if ((ccb = dpt_alloc_ccb(sc, 0)) == NULL)
   1053 		panic("%s: no CCB for inquiry", sc->sc_dv.dv_xname);
   1054 
   1055 	ccb->ccb_flg |= CCB_PRIVATE;
   1056 	ccb->ccb_timeout = 200;
   1057 
   1058 	/* Put all the arguments into the CCB */
   1059 	cp = &ccb->ccb_eata_cp;
   1060 	cp->cp_ccbid = ccb->ccb_id;
   1061 	cp->cp_senselen = sizeof(ccb->ccb_sense);
   1062  	cp->cp_senseaddr = 0;
   1063 	cp->cp_stataddr = htobe32(sc->sc_stppa);
   1064 	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
   1065 	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
   1066 	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
   1067 	cp->cp_ctl1 = 0;
   1068 	cp->cp_ctl2 = 0;
   1069 	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
   1070 	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
   1071 
   1072 	/* Put together the SCSI inquiry command */
   1073 	memset(&cp->cp_cdb_cmd, 0, 12);	/* XXX */
   1074 	cp->cp_cdb_cmd = INQUIRY;
   1075 	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
   1076 
   1077 	/* Sync up CCB, status packet and scratch area */
   1078 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
   1079 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
   1080 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
   1081 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
   1082 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
   1083 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
   1084 
   1085 	/* Start the command and poll on completion */
   1086 	if (dpt_cmd(sc, &ccb->ccb_eata_cp, ccb->ccb_ccbpa, CP_DMA_CMD, 0))
   1087 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
   1088 
   1089 	if (dpt_poll(sc, ccb))
   1090 		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
   1091 
   1092 	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
   1093 	    ccb->ccb_scsi_status != SCSI_OK)
   1094 	    	panic("%s: inquiry failed (hba:%02x scsi:%02x)",
   1095 	    	    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
   1096 	    	    ccb->ccb_scsi_status);
   1097 
    1098 	/* Sync up the DMA map and free the CCB before returning */
   1099 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
   1100 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
   1101 	dpt_free_ccb(sc, ccb);
   1102 }
   1103