      1 /*	$NetBSD: dpt.c,v 1.57 2007/07/09 21:00:35 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
      9  * Aerospace Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
     42  * Copyright (c) 2000 Adaptec Corporation
     43  * All rights reserved.
     44  *
     45  * TERMS AND CONDITIONS OF USE
     46  *
     47  * Redistribution and use in source form, with or without modification, are
     48  * permitted provided that redistributions of source code must retain the
     49  * above copyright notice, this list of conditions and the following disclaimer.
     50  *
     51  * This software is provided `as is' by Adaptec and any express or implied
     52  * warranties, including, but not limited to, the implied warranties of
     53  * merchantability and fitness for a particular purpose, are disclaimed. In no
     54  * event shall Adaptec be liable for any direct, indirect, incidental, special,
     55  * exemplary or consequential damages (including, but not limited to,
     56  * procurement of substitute goods or services; loss of use, data, or profits;
     57  * or business interruptions) however caused and on any theory of liability,
     58  * whether in contract, strict liability, or tort (including negligence or
     59  * otherwise) arising in any way out of the use of this driver software, even
     60  * if advised of the possibility of such damage.
     61  */
     62 
     63 /*
     64  * Portions of this code fall under the following copyright:
     65  *
     66  * Originally written by Julian Elischer (julian (at) tfs.com)
     67  * for TRW Financial Systems for use under the MACH(2.5) operating system.
     68  *
     69  * TRW Financial Systems, in accordance with their agreement with Carnegie
     70  * Mellon University, makes this software available to CMU to distribute
     71  * or use in any manner that they see fit as long as this message is kept with
     72  * the software. For this reason TFS also grants any other persons or
     73  * organisations permission to use or modify this software.
     74  *
     75  * TFS supplies this software to be publicly redistributed
     76  * on the understanding that TFS is not responsible for the correct
     77  * functioning of this software in any circumstances.
     78  */
     79 
     80 #include <sys/cdefs.h>
     81 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.57 2007/07/09 21:00:35 ad Exp $");
     82 
     83 #include <sys/param.h>
     84 #include <sys/systm.h>
     85 #include <sys/device.h>
     86 #include <sys/queue.h>
     87 #include <sys/buf.h>
     88 #include <sys/endian.h>
     89 #include <sys/conf.h>
     90 #include <sys/kauth.h>
     91 #include <sys/proc.h>
     92 
     93 #include <uvm/uvm_extern.h>
     94 
     95 #include <machine/bus.h>
     96 #ifdef i386
     97 #include <machine/pio.h>
     98 #endif
     99 
    100 #include <dev/scsipi/scsi_all.h>
    101 #include <dev/scsipi/scsipi_all.h>
    102 #include <dev/scsipi/scsiconf.h>
    103 
    104 #include <dev/ic/dptreg.h>
    105 #include <dev/ic/dptvar.h>
    106 
    107 #include <dev/i2o/dptivar.h>
    108 
    109 #ifdef DEBUG
    110 #define	DPRINTF(x)		printf x
    111 #else
    112 #define	DPRINTF(x)
    113 #endif
    114 
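         /*
          * Single-byte register access helpers.  For example, dpt_readcfg()
          * below resets the HBA and then polls for readiness with:
          *
          *	dpt_outb(sc, HA_COMMAND, CP_RESET);
          *	...
          *	if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
          */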
    115 #define dpt_inb(x, o)		\
    116     bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
    117 #define dpt_outb(x, o, d)	\
    118     bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
    119 
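         /*
          * Marketing names for the supported boards, keyed by the model
          * number reported in the EATA inquiry data.  The list is
          * terminated by a NULL key that maps to "<unknown>".
          */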
    120 static const char * const dpt_cname[] = {
    121 	"3334", "SmartRAID IV",
    122 	"3332", "SmartRAID IV",
    123 	"2144", "SmartCache IV",
    124 	"2044", "SmartCache IV",
    125 	"2142", "SmartCache IV",
    126 	"2042", "SmartCache IV",
    127 	"2041", "SmartCache IV",
    128 	"3224", "SmartRAID III",
    129 	"3222", "SmartRAID III",
    130 	"3021", "SmartRAID III",
    131 	"2124", "SmartCache III",
    132 	"2024", "SmartCache III",
    133 	"2122", "SmartCache III",
    134 	"2022", "SmartCache III",
    135 	"2021", "SmartCache III",
    136 	"2012", "SmartCache Plus",
    137 	"2011", "SmartCache Plus",
    138 	NULL,   "<unknown>",
    139 };
    140 
    141 static void	*dpt_sdh;
    142 
    143 dev_type_open(dptopen);
    144 dev_type_ioctl(dptioctl);
    145 
    146 const struct cdevsw dpt_cdevsw = {
    147 	dptopen, nullclose, noread, nowrite, dptioctl,
    148 	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
    149 };
    150 
    151 extern struct cfdriver dpt_cd;
    152 
    153 static struct dpt_sig dpt_sig = {
    154 	{ 'd', 'P', 't', 'S', 'i', 'G'},
    155 	SIG_VERSION,
    156 #if defined(i386)
    157 	PROC_INTEL,
    158 #elif defined(powerpc)
    159 	PROC_POWERPC,
    160 #elif defined(alpha)
    161 	PROC_ALPHA,
    162 #elif defined(__mips__)
    163 	PROC_MIPS,
    164 #elif defined(sparc64)
    165 	PROC_ULTRASPARC,
    166 #else
    167 	0xff,
    168 #endif
    169 #if defined(i386)
    170 	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
    171 #else
    172 	0,
    173 #endif
    174 	FT_HBADRVR,
    175 	0,
    176 	OEM_DPT,
    177 	OS_FREE_BSD,	/* XXX */
    178 	CAP_ABOVE16MB,
    179 	DEV_ALL,
    180 	ADF_ALL_EATA,
    181 	0,
    182 	0,
    183 	DPT_VERSION,
    184 	DPT_REVISION,
    185 	DPT_SUBREVISION,
    186 	DPT_MONTH,
    187 	DPT_DAY,
    188 	DPT_YEAR,
    189 	""		/* Will be filled later */
    190 };
    191 
    192 static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
    193 static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
    194 static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
    195 static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
    196 static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
    197 static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
    198 static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
    199 static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
    200 static void	dpt_minphys(struct buf *);
    201 static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
    202 				struct lwp *);
    203 static void	dpt_scsipi_request(struct scsipi_channel *,
    204 				   scsipi_adapter_req_t, void *);
    205 static void	dpt_shutdown(void *);
    206 static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
    207 static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
    208 
    209 static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
    210 static inline void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
    211 
    212 static inline struct dpt_ccb *
    213 dpt_ccb_alloc(struct dpt_softc *sc)
    214 {
    215 	struct dpt_ccb *ccb;
    216 	int s;
    217 
    218 	s = splbio();
    219 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
    220 	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
    221 	splx(s);
    222 
    223 	return (ccb);
    224 }
    225 
    226 static inline void
    227 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
    228 {
    229 	int s;
    230 
    231 	ccb->ccb_flg = 0;
    232 	ccb->ccb_savesp = NULL;
    233 	s = splbio();
    234 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
    235 	splx(s);
    236 }
    237 
    238 /*
    239  * Handle an interrupt from the HBA.
    240  */
    241 int
    242 dpt_intr(void *cookie)
    243 {
    244 	struct dpt_softc *sc;
    245 	struct dpt_ccb *ccb;
    246 	struct eata_sp *sp;
    247 	volatile int junk;
    248 	int forus;
    249 
    250 	sc = cookie;
    251 	sp = sc->sc_stp;
    252 	forus = 0;
    253 
    254 	for (;;) {
    255 		/*
    256 		 * HBA might have interrupted while we were dealing with the
    257 		 * last completed command, since we ACK before we deal; keep
    258 		 * polling.
    259 		 */
    260 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    261 			break;
    262 		forus = 1;
    263 
    264 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    265 		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
    266 
     267 		/* Might have looped before the HBA could clear HA_AUX_INTR. */
    268 		if (sp->sp_ccbid == -1) {
    269 			DELAY(50);
    270 
    271 			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    272 				return (0);
    273 
    274 			printf("%s: no status\n", sc->sc_dv.dv_xname);
    275 
    276 			/* Re-sync DMA map */
    277 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    278 			    sc->sc_stpoff, sizeof(struct eata_sp),
    279 			    BUS_DMASYNC_POSTREAD);
    280 		}
    281 
    282 		/* Make sure CCB ID from status packet is realistic. */
    283 		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
    284 			printf("%s: bogus status (returned CCB id %d)\n",
    285 			    sc->sc_dv.dv_xname, sp->sp_ccbid);
    286 
    287 			/* Ack the interrupt */
    288 			sp->sp_ccbid = -1;
    289 			junk = dpt_inb(sc, HA_STATUS);
    290 			continue;
    291 		}
    292 
    293 		/* Sync up DMA map and cache cmd status. */
    294 		ccb = sc->sc_ccbs + sp->sp_ccbid;
    295 
    296 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
    297 		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
    298 
    299 		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
    300 		ccb->ccb_scsi_status = sp->sp_scsi_status;
    301 		if (ccb->ccb_savesp != NULL)
    302 			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));
    303 
    304 		/*
    305 		 * Ack the interrupt and process the CCB.  If this
    306 		 * is a private CCB it's up to dpt_ccb_poll() to
    307 		 * notice.
    308 		 */
    309 		sp->sp_ccbid = -1;
    310 		ccb->ccb_flg |= CCB_INTR;
    311 		junk = dpt_inb(sc, HA_STATUS);
    312 		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    313 			dpt_ccb_done(sc, ccb);
    314 		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
    315 			wakeup(ccb);
    316 	}
    317 
    318 	return (forus);
    319 }
    320 
    321 /*
    322  * Initialize and attach the HBA.  This is the entry point from bus
    323  * specific probe-and-attach code.
    324  */
    325 void
    326 dpt_init(struct dpt_softc *sc, const char *intrstr)
    327 {
    328 	struct scsipi_adapter *adapt;
    329 	struct scsipi_channel *chan;
    330 	struct eata_inquiry_data *ei;
    331 	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
    332 	bus_dma_segment_t seg;
    333 	struct eata_cfg *ec;
    334 	struct dpt_ccb *ccb;
    335 	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
    336 	char vendor[__arraycount(ei->ei_vendor) + 1];
    337 
    338 	ec = &sc->sc_ec;
    339 	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
    340 	    "NetBSD %s DPT driver", osrelease);
    341 
    342 	/*
    343 	 * Allocate the CCB/status packet/scratch DMA map and load.
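         	 * The single region is laid out as all of the CCBs, followed
         	 * by the status packet, followed by the scratch area used for
         	 * HBA inquiry data; sc_stpoff and sc_scroff are byte offsets
         	 * into it.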
    344 	 */
    345 	sc->sc_nccbs =
    346 	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
    347 	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
    348 	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
    349 	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
    350 	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
    351 
    352 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
    353 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    354 		aprint_error("%s: unable to allocate CCBs, rv = %d\n",
    355 		    sc->sc_dv.dv_xname, rv);
    356 		return;
    357 	}
    358 
    359 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
    360 	    (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    361 		aprint_error("%s: unable to map CCBs, rv = %d\n",
    362 		    sc->sc_dv.dv_xname, rv);
    363 		return;
    364 	}
    365 
    366 	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
    367 	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
    368 		aprint_error("%s: unable to create CCB DMA map, rv = %d\n",
    369 		    sc->sc_dv.dv_xname, rv);
    370 		return;
    371 	}
    372 
    373 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
    374 	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
    375 		aprint_error("%s: unable to load CCB DMA map, rv = %d\n",
    376 		    sc->sc_dv.dv_xname, rv);
    377 		return;
    378 	}
    379 
    380 	sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
    381 	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
    382 	sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
    383 	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
    384 	sc->sc_stp->sp_ccbid = -1;
    385 
    386 	/*
    387 	 * Create the CCBs.
    388 	 */
    389 	SLIST_INIT(&sc->sc_ccb_free);
    390 	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
    391 
    392 	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
    393 		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
    394 		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
    395 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    396 		    &ccb->ccb_dmamap_xfer);
    397 		if (rv) {
    398 			aprint_error("%s: can't create ccb dmamap (%d)\n",
    399 			    sc->sc_dv.dv_xname, rv);
    400 			break;
    401 		}
    402 
    403 		ccb->ccb_id = i;
    404 		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
    405 		    CCB_OFF(sc, ccb);
    406 		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
    407 	}
    408 
    409 	if (i == 0) {
    410 		aprint_error("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
    411 		return;
    412 	} else if (i != sc->sc_nccbs) {
    413 		aprint_error("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname,
    414 		    i, sc->sc_nccbs);
    415 		sc->sc_nccbs = i;
    416 	}
    417 
    418 	/* Set shutdownhook before we start any device activity. */
    419 	if (dpt_sdh == NULL)
    420 		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
    421 
    422 	/* Get the inquiry data from the HBA. */
    423 	dpt_hba_inquire(sc, &ei);
    424 
    425 	/*
    426 	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
    427 	 * dpt0: interrupting at irq 10
    428 	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
    429 	 */
     430 	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
     431 	    i++)
     432 		vendor[i] = ei->ei_vendor[i];
     433 	vendor[i] = '\0';
     434 
     435 	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
     436 	    i++)
     437 		model[i] = ei->ei_model[i];
     438 	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
     439 	    i++, j++)
     440 		model[i] = ei->ei_suffix[j];
     441 	model[i] = '\0';
    442 
    443 	/* Find the marketing name for the board. */
    444 	for (i = 0; dpt_cname[i] != NULL; i += 2)
    445 		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
    446 			break;
    447 
    448 	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);
    449 
    450 	if (intrstr != NULL)
    451 		aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
    452 		    intrstr);
    453 
    454 	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
    455 	    EC_F3_MAX_CHANNEL_SHIFT;
    456 	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
    457 	    EC_F3_MAX_TARGET_SHIFT;
    458 
    459 	aprint_normal("%s: %d queued commands, %d channel(s), adapter on ID(s)",
    460 	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
    461 
    462 	for (i = 0; i <= maxchannel; i++) {
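         		/*
         		 * The EATA config reports the per-channel HBA SCSI IDs
         		 * in reverse channel order, hence the 3 - i index.
         		 */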
    463 		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
    464 		aprint_normal(" %d", sc->sc_hbaid[i]);
    465 	}
    466 	aprint_normal("\n");
    467 
    468 	/*
    469 	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
    470 	 * this for each bus?
    471 	 */
    472 	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
    473 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
    474 
    475 	/* Fill in the scsipi_adapter. */
    476 	adapt = &sc->sc_adapt;
    477 	memset(adapt, 0, sizeof(*adapt));
    478 	adapt->adapt_dev = &sc->sc_dv;
    479 	adapt->adapt_nchannels = maxchannel + 1;
    480 	adapt->adapt_openings = sc->sc_nccbs - 1;
    481 	adapt->adapt_max_periph = sc->sc_nccbs - 1;
    482 	adapt->adapt_request = dpt_scsipi_request;
    483 	adapt->adapt_minphys = dpt_minphys;
    484 
    485 	for (i = 0; i <= maxchannel; i++) {
    486 		/* Fill in the scsipi_channel. */
    487 		chan = &sc->sc_chans[i];
    488 		memset(chan, 0, sizeof(*chan));
    489 		chan->chan_adapter = adapt;
    490 		chan->chan_bustype = &scsi_bustype;
    491 		chan->chan_channel = i;
    492 		chan->chan_ntargets = maxtarget + 1;
    493 		chan->chan_nluns = ec->ec_maxlun + 1;
    494 		chan->chan_id = sc->sc_hbaid[i];
    495 		config_found(&sc->sc_dv, chan, scsiprint);
    496 	}
    497 }
    498 
    499 /*
    500  * Read the EATA configuration from the HBA and perform some sanity checks.
    501  */
    502 int
    503 dpt_readcfg(struct dpt_softc *sc)
    504 {
    505 	struct eata_cfg *ec;
    506 	int i, j, stat;
    507 	u_int16_t *p;
    508 
    509 	ec = &sc->sc_ec;
    510 
    511 	/* Older firmware may puke if we talk to it too soon after reset. */
    512 	dpt_outb(sc, HA_COMMAND, CP_RESET);
    513 	DELAY(750000);
    514 
    515 	for (i = 1000; i; i--) {
    516 		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
    517 			break;
    518 		DELAY(2000);
    519 	}
    520 
    521 	if (i == 0) {
    522 		printf("%s: HBA not ready after reset (hba status:%02x)\n",
    523 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    524 		return (-1);
    525 	}
    526 
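         	/*
         	 * Wait for the HBA to settle into a sane status after the
         	 * reset.  If it reports an error, check whether the error
         	 * register spells out "DPT"; if so, the board is simply still
         	 * busy and we keep waiting.
         	 */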
    527 	while((((stat = dpt_inb(sc, HA_STATUS))
    528 	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
    529 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
    530 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
    531 	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
    532 		/* RAID drives still spinning up? */
    533 		if(dpt_inb(sc, HA_ERROR) != 'D' ||
    534 		   dpt_inb(sc, HA_ERROR + 1) != 'P' ||
    535 		   dpt_inb(sc, HA_ERROR + 2) != 'T') {
    536 			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
    537 			return (-1);
    538 		}
    539 	}
    540 
    541 	/*
    542 	 * Issue the read-config command and wait for the data to appear.
    543 	 *
     544 	 * Apparently certain firmware revisions won't DMA later on if we
    545 	 * request the config data using PIO, but it makes it a lot easier
    546 	 * as no DMA setup is required.
    547 	 */
    548 	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
    549 	memset(ec, 0, sizeof(*ec));
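         	/*
         	 * First read the words up to and including ec_cfglen; that
         	 * field then tells us how much additional config data follows.
         	 */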
    550 	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
    551 	    sizeof(ec->ec_cfglen)) >> 1;
    552 	p = (u_int16_t *)ec;
    553 
    554 	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
    555 		printf("%s: cfg data didn't appear (hba status:%02x)\n",
    556 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    557 		return (-1);
    558 	}
    559 
    560 	/* Begin reading. */
    561 	while (i--)
    562 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    563 
    564 	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
    565 	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    566 	    - sizeof(ec->ec_cfglen)))
    567 		i = sizeof(struct eata_cfg)
    568 		  - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    569 		  - sizeof(ec->ec_cfglen);
    570 
    571 	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
    572 	    sizeof(ec->ec_cfglen);
    573 	i >>= 1;
    574 
    575 	while (i--)
    576 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    577 
    578 	/* Flush until we have read 512 bytes. */
    579 	i = (512 - j + 1) >> 1;
    580 	while (i--)
    581 		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    582 
    583 	/* Defaults for older firmware... */
    584 	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
    585 		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
    586 
    587 	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
    588 		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
    589 		return (-1);
    590 	}
    591 
    592 	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
    593 		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
    594 		return (-1);
    595 	}
    596 
    597 	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
    598 		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
    599 		return (-1);
    600 	}
    601 
    602 	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
    603 		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
    604 		return (-1);
    605 	}
    606 
    607 	return (0);
    608 }
    609 
    610 /*
    611  * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
     612  * data from its cache and mark array groups as clean.
    613  *
    614  * XXX This doesn't always work (i.e., the HBA may still be flushing after
    615  * we tell root that it's safe to power off).
    616  */
    617 static void
    618 dpt_shutdown(void *cookie)
    619 {
    620 	extern struct cfdriver dpt_cd;
    621 	struct dpt_softc *sc;
    622 	int i;
    623 
    624 	printf("shutting down dpt devices...");
    625 
    626 	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
    627 		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
    628 			continue;
    629 		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
    630 	}
    631 
    632 	delay(10000*1000);
    633 	printf(" done\n");
    634 }
    635 
    636 /*
    637  * Send an EATA command to the HBA.
    638  */
    639 static int
    640 dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
    641 {
    642 	u_int32_t pa;
    643 	int i, s;
    644 
    645 	s = splbio();
    646 
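         	/* Wait up to roughly one second for the HBA to go non-busy. */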
    647 	for (i = 20000; i != 0; i--) {
    648 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
    649 			break;
    650 		DELAY(50);
    651 	}
    652 	if (i == 0) {
    653 		splx(s);
    654 		return (-1);
    655 	}
    656 
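         	/*
         	 * Hand the HBA the physical address of the CCB, if any, least
         	 * significant byte first, then write the command code to
         	 * start it.
         	 */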
    657 	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
    658 	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
    659 	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
    660 	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
    661 	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);
    662 
    663 	if (eatacmd == CP_IMMEDIATE)
    664 		dpt_outb(sc, HA_ICMD, icmd);
    665 
    666 	dpt_outb(sc, HA_COMMAND, eatacmd);
    667 
    668 	splx(s);
    669 	return (0);
    670 }
    671 
    672 /*
    673  * Wait for the HBA status register to reach a specific state.
    674  */
    675 static int
    676 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
    677 {
    678 
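         	/* `ms' is in milliseconds; poll the status register every 100us. */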
    679 	for (ms *= 10; ms != 0; ms--) {
    680 		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
    681 			return (0);
    682 		DELAY(100);
    683 	}
    684 
    685 	return (-1);
    686 }
    687 
    688 /*
    689  * Spin waiting for a command to finish.  The timeout value from the CCB is
     690  * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
    691  * recycled before we get a look at it.
    692  */
    693 static int
    694 dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
    695 {
    696 	int i, s;
    697 
    698 #ifdef DEBUG
    699 	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    700 		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
    701 #endif
    702 
    703 	s = splbio();
    704 
    705 	if ((ccb->ccb_flg & CCB_INTR) != 0) {
    706 		splx(s);
    707 		return (0);
    708 	}
    709 
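         	/* ccb_timeout is in milliseconds; we poll every 50us, hence the * 20. */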
    710 	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
    711 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
    712 			dpt_intr(sc);
    713 		if ((ccb->ccb_flg & CCB_INTR) != 0)
    714 			break;
    715 		DELAY(50);
    716 	}
    717 
    718 	splx(s);
    719 	return (i == 0);
    720 }
    721 
    722 /*
    723  * We have a command which has been processed by the HBA, so now we look to
    724  * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
    725  * by dpt_intr().
    726  */
    727 static void
    728 dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
    729 {
    730 	struct scsipi_xfer *xs;
    731 
    732 	xs = ccb->ccb_xs;
    733 
    734 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));
    735 
    736 	/*
    737 	 * If we were a data transfer, unload the map that described the
    738 	 * data buffer.
    739 	 */
    740 	if (xs->datalen != 0)
    741 		dpt_ccb_unmap(sc, ccb);
    742 
    743 	if (xs->error == XS_NOERROR) {
    744 		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
    745 			switch (ccb->ccb_hba_status) {
    746 			case SP_HBA_ERROR_SEL_TO:
    747 				xs->error = XS_SELTIMEOUT;
    748 				break;
    749 			case SP_HBA_ERROR_RESET:
    750 				xs->error = XS_RESET;
    751 				break;
    752 			default:
    753 				printf("%s: HBA status %x\n",
    754 				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
    755 				xs->error = XS_DRIVER_STUFFUP;
    756 				break;
    757 			}
    758 		} else if (ccb->ccb_scsi_status != SCSI_OK) {
    759 			switch (ccb->ccb_scsi_status) {
    760 			case SCSI_CHECK:
    761 				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
    762 				    sizeof(xs->sense.scsi_sense));
    763 				xs->error = XS_SENSE;
    764 				break;
    765 			case SCSI_BUSY:
    766 			case SCSI_QUEUE_FULL:
    767 				xs->error = XS_BUSY;
    768 				break;
    769 			default:
    770 				scsipi_printaddr(xs->xs_periph);
    771 				printf("SCSI status %x\n",
    772 				    ccb->ccb_scsi_status);
    773 				xs->error = XS_DRIVER_STUFFUP;
    774 				break;
    775 			}
    776 		} else
    777 			xs->resid = 0;
    778 
    779 		xs->status = ccb->ccb_scsi_status;
    780 	}
    781 
    782 	/* Free up the CCB and mark the command as done. */
    783 	dpt_ccb_free(sc, ccb);
    784 	scsipi_done(xs);
    785 }
    786 
    787 /*
    788  * Specified CCB has timed out, abort it.
     789  * Specified CCB has timed out; abort it.
    790 static void
    791 dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
    792 {
    793 	struct scsipi_periph *periph;
    794 	struct scsipi_xfer *xs;
    795 	int s;
    796 
    797 	xs = ccb->ccb_xs;
    798 	periph = xs->xs_periph;
    799 
    800 	scsipi_printaddr(periph);
    801 	printf("timed out (status:%02x aux status:%02x)",
    802 	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
    803 
    804 	s = splbio();
    805 
    806 	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
    807 		/* Abort timed out, reset the HBA */
    808 		printf(" AGAIN, resetting HBA\n");
    809 		dpt_outb(sc, HA_COMMAND, CP_RESET);
    810 		DELAY(750000);
    811 	} else {
    812 		/* Abort the operation that has timed out */
    813 		printf("\n");
    814 		xs->error = XS_TIMEOUT;
    815 		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
    816 		ccb->ccb_flg |= CCB_ABORT;
    817 		/* Start the abort */
    818 		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
    819 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
    820 	}
    821 
    822 	splx(s);
    823 }
    824 
    825 /*
    826  * Map a data transfer.
    827  */
    828 static int
    829 dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
    830 {
    831 	struct scsipi_xfer *xs;
    832 	bus_dmamap_t xfer;
    833 	bus_dma_segment_t *ds;
    834 	struct eata_sg *sg;
    835 	struct eata_cp *cp;
    836 	int rv, i;
    837 
    838 	xs = ccb->ccb_xs;
    839 	xfer = ccb->ccb_dmamap_xfer;
    840 	cp = &ccb->ccb_eata_cp;
    841 
    842 	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
    843 	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
    844 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    845 	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
    846 
    847 	switch (rv) {
    848 	case 0:
    849 		break;
    850 	case ENOMEM:
    851 	case EAGAIN:
    852 		xs->error = XS_RESOURCE_SHORTAGE;
    853 		break;
    854 	default:
    855 		xs->error = XS_DRIVER_STUFFUP;
    856 		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
    857 		break;
    858 	}
    859 
    860 	if (xs->error != XS_NOERROR) {
    861 		dpt_ccb_free(sc, ccb);
    862 		scsipi_done(xs);
    863 		return (-1);
    864 	}
    865 
    866 	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
    867 	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
    868 	    BUS_DMASYNC_PREWRITE);
    869 
    870 	/* Don't bother using scatter/gather for just 1 seg */
    871 	if (xfer->dm_nsegs == 1) {
    872 		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
    873 		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
    874 	} else {
    875 		/*
    876 		 * Load the hardware scatter/gather map with
    877 		 * the contents of the DMA map.
    878 		 */
    879 		sg = ccb->ccb_sg;
    880 		ds = xfer->dm_segs;
    881 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
     882 			sg->sg_addr = htobe32(ds->ds_addr);
     883 			sg->sg_len = htobe32(ds->ds_len);
     884 		}
     885 		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
    886 		    sc->sc_dmamap->dm_segs[0].ds_addr +
    887 		    offsetof(struct dpt_ccb, ccb_sg));
    888 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
    889 		cp->cp_ctl0 |= CP_C0_SCATTER;
    890 	}
    891 
    892 	return (0);
    893 }
    894 
    895 /*
    896  * Unmap a transfer.
    897  */
    898 static void
    899 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
    900 {
    901 
    902 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
    903 	    ccb->ccb_dmamap_xfer->dm_mapsize,
    904 	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
    905 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    906 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
    907 }
    908 
    909 /*
    910  * Adjust the size of each I/O before it passes to the SCSI layer.
    911  */
    912 static void
    913 dpt_minphys(struct buf *bp)
    914 {
    915 
    916 	if (bp->b_bcount > DPT_MAX_XFER)
    917 		bp->b_bcount = DPT_MAX_XFER;
    918 	minphys(bp);
    919 }
    920 
    921 /*
    922  * Start a SCSI command.
    923  */
    924 static void
    925 dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    926 		   void *arg)
    927 {
    928 	struct dpt_softc *sc;
    929 	struct scsipi_xfer *xs;
    930 	int flags;
    931 	struct scsipi_periph *periph;
    932 	struct dpt_ccb *ccb;
    933 	struct eata_cp *cp;
    934 
    935 	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;
    936 
    937 	switch (req) {
    938 	case ADAPTER_REQ_RUN_XFER:
    939 		xs = arg;
    940 		periph = xs->xs_periph;
    941 		flags = xs->xs_control;
    942 
    943 #ifdef DIAGNOSTIC
    944 		/* Cmds must be no more than 12 bytes for us. */
    945 		if (xs->cmdlen > 12) {
    946 			xs->error = XS_DRIVER_STUFFUP;
    947 			scsipi_done(xs);
    948 			break;
    949 		}
    950 #endif
    951 		/*
    952 		 * XXX We can't reset devices just yet.  Apparently some
    953 		 * older firmware revisions don't even support it.
    954 		 */
    955 		if ((flags & XS_CTL_RESET) != 0) {
    956 			xs->error = XS_DRIVER_STUFFUP;
    957 			scsipi_done(xs);
    958 			break;
    959 		}
    960 
    961 		/*
    962 		 * Get a CCB and fill it.
    963 		 */
    964 		ccb = dpt_ccb_alloc(sc);
    965 		ccb->ccb_xs = xs;
    966 		ccb->ccb_timeout = xs->timeout;
    967 
    968 		cp = &ccb->ccb_eata_cp;
    969 		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
    970 		cp->cp_ccbid = ccb->ccb_id;
    971 		cp->cp_senselen = sizeof(ccb->ccb_sense);
    972 		cp->cp_stataddr = htobe32(sc->sc_stppa);
    973 		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
    974 		cp->cp_ctl1 = 0;
    975 		cp->cp_ctl2 = 0;
    976 		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
    977 		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
    978 		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
    979 		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
    980 
    981 		if ((flags & XS_CTL_DATA_IN) != 0)
    982 			cp->cp_ctl0 |= CP_C0_DATA_IN;
    983 		if ((flags & XS_CTL_DATA_OUT) != 0)
    984 			cp->cp_ctl0 |= CP_C0_DATA_OUT;
    985 		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
    986 			cp->cp_ctl0 |= CP_C0_INTERPRET;
    987 
     988 		/* Synchronous xfers mustn't write back through the cache. */
    989 		if (xs->bp != NULL)
    990 			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
    991 				cp->cp_ctl2 |= CP_C2_NO_CACHE;
    992 
    993 		cp->cp_senseaddr =
    994 		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
    995 		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
    996 
    997 		if (xs->datalen != 0) {
    998 			if (dpt_ccb_map(sc, ccb))
    999 				break;
   1000 		} else {
   1001 			cp->cp_dataaddr = 0;
   1002 			cp->cp_datalen = 0;
   1003 		}
   1004 
   1005 		/* Sync up CCB and status packet. */
   1006 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
   1007 		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
   1008 		    BUS_DMASYNC_PREWRITE);
   1009 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
   1010 		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
   1011 
   1012 		/*
   1013 		 * Start the command.
   1014 		 */
   1015 		if ((xs->xs_control & XS_CTL_POLL) != 0)
   1016 			ccb->ccb_flg |= CCB_PRIVATE;
   1017 
   1018 		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
   1019 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
   1020 			xs->error = XS_DRIVER_STUFFUP;
   1021 			if (xs->datalen != 0)
   1022 				dpt_ccb_unmap(sc, ccb);
   1023 			dpt_ccb_free(sc, ccb);
   1024 			break;
   1025 		}
   1026 
   1027 		if ((xs->xs_control & XS_CTL_POLL) == 0)
   1028 			break;
   1029 
   1030 		if (dpt_ccb_poll(sc, ccb)) {
   1031 			dpt_ccb_abort(sc, ccb);
   1032 			/* Wait for abort to complete... */
   1033 			if (dpt_ccb_poll(sc, ccb))
   1034 				dpt_ccb_abort(sc, ccb);
   1035 		}
   1036 
   1037 		dpt_ccb_done(sc, ccb);
   1038 		break;
   1039 
   1040 	case ADAPTER_REQ_GROW_RESOURCES:
   1041 		/*
   1042 		 * Not supported, since we allocate the maximum number of
   1043 		 * CCBs up front.
   1044 		 */
   1045 		break;
   1046 
   1047 	case ADAPTER_REQ_SET_XFER_MODE:
   1048 		/*
   1049 		 * This will be handled by the HBA itself, and we can't
   1050 		 * modify that (ditto for tagged queueing).
   1051 		 */
   1052 		break;
   1053 	}
   1054 }
   1055 
   1056 /*
   1057  * Get inquiry data from the adapter.
   1058  */
   1059 static void
   1060 dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
   1061 {
   1062 	struct dpt_ccb *ccb;
   1063 	struct eata_cp *cp;
   1064 
   1065 	*ei = (struct eata_inquiry_data *)sc->sc_scr;
   1066 
   1067 	/* Get a CCB and mark as private */
   1068 	ccb = dpt_ccb_alloc(sc);
   1069 	ccb->ccb_flg |= CCB_PRIVATE;
   1070 	ccb->ccb_timeout = 200;
   1071 
   1072 	/* Put all the arguments into the CCB. */
   1073 	cp = &ccb->ccb_eata_cp;
   1074 	cp->cp_ccbid = ccb->ccb_id;
   1075 	cp->cp_senselen = sizeof(ccb->ccb_sense);
   1076 	cp->cp_senseaddr = 0;
   1077 	cp->cp_stataddr = htobe32(sc->sc_stppa);
   1078 	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
   1079 	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
   1080 	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
   1081 	cp->cp_ctl1 = 0;
   1082 	cp->cp_ctl2 = 0;
   1083 	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
   1084 	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
   1085 
   1086 	/* Put together the SCSI inquiry command. */
   1087 	memset(&cp->cp_cdb_cmd, 0, 12);
   1088 	cp->cp_cdb_cmd = INQUIRY;
   1089 	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
   1090 
   1091 	/* Sync up CCB, status packet and scratch area. */
   1092 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
   1093 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
   1094 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
   1095 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
   1096 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
   1097 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
   1098 
   1099 	/* Start the command and poll on completion. */
   1100 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
   1101 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
   1102 
   1103 	if (dpt_ccb_poll(sc, ccb))
   1104 		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
   1105 
   1106 	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
   1107 	    ccb->ccb_scsi_status != SCSI_OK)
   1108 		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
   1109 		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
   1110 		    ccb->ccb_scsi_status);
   1111 
    1112 	/* Sync up the DMA map and free the CCB before returning. */
   1113 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
   1114 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
   1115 	dpt_ccb_free(sc, ccb);
   1116 }
   1117 
   1118 int
   1119 dptopen(dev_t dev, int flag, int mode, struct lwp *l)
   1120 {
   1121 
   1122 	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
   1123 		return (ENXIO);
   1124 
   1125 	return (0);
   1126 }
   1127 
   1128 int
   1129 dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
   1130 {
   1131 	struct dpt_softc *sc;
   1132 	int rv;
   1133 
   1134 	sc = device_lookup(&dpt_cd, minor(dev));
   1135 
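         	/*
         	 * Mask off the IOCPARM size and direction bits; the DPT
         	 * ioctls are matched on the group and command number only.
         	 */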
   1136 	switch (cmd & 0xffff) {
   1137 	case DPT_SIGNATURE:
   1138 		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
   1139 		break;
   1140 
   1141 	case DPT_CTRLINFO:
   1142 		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
   1143 		break;
   1144 
   1145 	case DPT_SYSINFO:
   1146 		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
   1147 		break;
   1148 
   1149 	case DPT_BLINKLED:
   1150 		/*
   1151 		 * XXX Don't know how to get this from EATA boards.  I think
   1152 		 * it involves waiting for a "DPT" sequence from HA_ERROR
   1153 		 * and then reading one of the HA_ICMD registers.
   1154 		 */
   1155 		*(int *)data = 0;
   1156 		break;
   1157 
   1158 	case DPT_EATAUSRCMD:
   1159 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
   1160 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
   1161 		if (rv)
   1162 			return (rv);
   1163 
   1164 		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
   1165 			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
   1166 			    sc->sc_dv.dv_xname, IOCPARM_LEN(cmd),
   1167 			    (unsigned long int)sizeof(struct eata_ucp)));
   1168 			return (EINVAL);
   1169 		}
   1170 
   1171 		if (sc->sc_uactive++)
   1172 			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);
   1173 
   1174 		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);
   1175 
   1176 		sc->sc_uactive--;
   1177 		wakeup_one(&sc->sc_uactive);
   1178 		return (rv);
   1179 
   1180 	default:
   1181 		DPRINTF(("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd));
   1182 		return (ENOTTY);
   1183 	}
   1184 
   1185 	return (0);
   1186 }
   1187 
   1188 void
   1189 dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
   1190 {
   1191 
   1192 	memset(info, 0, sizeof(*info));
   1193 	info->id = sc->sc_hbaid[0];
   1194 	info->vect = sc->sc_isairq;
   1195 	info->base = sc->sc_isaport;
   1196 	info->qdepth = sc->sc_nccbs;
   1197 	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
   1198 	info->heads = 16;
   1199 	info->sectors = 63;
   1200 	info->do_drive32 = 1;
   1201 	info->primary = 1;
   1202 	info->cpLength = sizeof(struct eata_cp);
   1203 	info->spLength = sizeof(struct eata_sp);
   1204 	info->drqNum = sc->sc_isadrq;
   1205 }
   1206 
   1207 void
   1208 dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
   1209 {
   1210 #ifdef i386
   1211 	int i, j;
   1212 #endif
   1213 
   1214 	memset(info, 0, sizeof(*info));
   1215 
   1216 #ifdef i386
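         	/*
         	 * CMOS register 0x12 holds the drive types: high nibble for
         	 * drive 0, low nibble for drive 1.  A nibble of 0xf means the
         	 * real type lives in the extended registers 0x19/0x1a.
         	 */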
   1217 	outb (0x70, 0x12);
   1218 	i = inb(0x71);
   1219 	j = i >> 4;
    1220 	if (j == 0x0f) {
   1221 		outb (0x70, 0x19);
   1222 		j = inb (0x71);
   1223 	}
   1224 	info->drive0CMOS = j;
   1225 
   1226 	j = i & 0x0f;
    1227 	if (j == 0x0f) {
   1228 		outb (0x70, 0x1a);
   1229 		j = inb (0x71);
   1230 	}
   1231 	info->drive1CMOS = j;
   1232 	info->processorFamily = dpt_sig.dsProcessorFamily;
   1233 
   1234 	/*
   1235 	 * Get the conventional memory size from CMOS.
   1236 	 */
   1237 	outb(0x70, 0x16);
   1238 	j = inb(0x71);
   1239 	j <<= 8;
   1240 	outb(0x70, 0x15);
   1241 	j |= inb(0x71);
   1242 	info->conventionalMemSize = j;
   1243 
   1244 	/*
   1245 	 * Get the extended memory size from CMOS.
   1246 	 */
   1247 	outb(0x70, 0x31);
   1248 	j = inb(0x71);
   1249 	j <<= 8;
   1250 	outb(0x70, 0x30);
   1251 	j |= inb(0x71);
   1252 	info->extendedMemSize = j;
   1253 
   1254 	switch (cpu_class) {
   1255 	case CPUCLASS_386:
   1256 		info->processorType = PROC_386;
   1257 		break;
   1258 	case CPUCLASS_486:
   1259 		info->processorType = PROC_486;
   1260 		break;
   1261 	case CPUCLASS_586:
   1262 		info->processorType = PROC_PENTIUM;
   1263 		break;
   1264 	case CPUCLASS_686:
   1265 	default:
   1266 		info->processorType = PROC_SEXIUM;
   1267 		break;
   1268 	}
   1269 
   1270 	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
   1271 	    SI_MemorySizeValid | SI_NO_SmartROM;
   1272 #else
   1273 	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
   1274 #endif
   1275 
   1276 	info->busType = sc->sc_bustype;
   1277 }
   1278 
   1279 int
   1280 dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
   1281 {
   1282 	struct dpt_ccb *ccb;
   1283 	struct eata_sp sp;
   1284 	struct eata_cp *cp;
   1285 	struct eata_sg *sg;
   1286 	bus_dmamap_t xfer = 0; /* XXX: gcc */
   1287 	bus_dma_segment_t *ds;
   1288 	int datain = 0, s, rv = 0, i, uslen; /* XXX: gcc */
   1289 
   1290 	/*
    1291 	 * Get a CCB and fill it.
   1292 	 */
   1293 	ccb = dpt_ccb_alloc(sc);
   1294 	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
   1295 	ccb->ccb_timeout = 0;
   1296 	ccb->ccb_savesp = &sp;
   1297 
   1298 	cp = &ccb->ccb_eata_cp;
   1299 	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
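         	/*
         	 * Remember how much sense data the caller asked for; the HBA
         	 * deposits sense into our own buffer and we copy out at most
         	 * that much later on.
         	 */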
   1300 	uslen = cp->cp_senselen;
   1301 	cp->cp_ccbid = ccb->ccb_id;
   1302 	cp->cp_senselen = sizeof(ccb->ccb_sense);
   1303 	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
   1304 	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
   1305 	cp->cp_stataddr = htobe32(sc->sc_stppa);
   1306 
   1307 	/*
   1308 	 * Map data transfers.
   1309 	 */
   1310 	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
   1311 		xfer = ccb->ccb_dmamap_xfer;
   1312 		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);
   1313 
   1314 		if (ucp->ucp_datalen > DPT_MAX_XFER) {
   1315 			DPRINTF(("%s: xfer too big\n", sc->sc_dv.dv_xname));
   1316 			dpt_ccb_free(sc, ccb);
   1317 			return (EFBIG);
   1318 		}
   1319 		rv = bus_dmamap_load(sc->sc_dmat, xfer,
   1320 		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
   1321 		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
   1322 		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
   1323 		if (rv != 0) {
   1324 			DPRINTF(("%s: map failed; %d\n", sc->sc_dv.dv_xname,
   1325 			    rv));
   1326 			dpt_ccb_free(sc, ccb);
   1327 			return (rv);
   1328 		}
   1329 
   1330 		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
   1331 		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
   1332 
   1333 		sg = ccb->ccb_sg;
   1334 		ds = xfer->dm_segs;
   1335 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
    1336 			sg->sg_addr = htobe32(ds->ds_addr);
    1337 			sg->sg_len = htobe32(ds->ds_len);
    1338 		}
   1339 		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
   1340 		    sc->sc_dmamap->dm_segs[0].ds_addr +
   1341 		    offsetof(struct dpt_ccb, ccb_sg));
   1342 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
   1343 		cp->cp_ctl0 |= CP_C0_SCATTER;
   1344 	} else {
   1345 		cp->cp_dataaddr = 0;
   1346 		cp->cp_datalen = 0;
   1347 	}
   1348 
   1349 	/*
   1350 	 * Start the command and sleep on completion.
   1351 	 */
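         	/*
         	 * Hold the LWP so that it isn't swapped out while the command
         	 * is outstanding.
         	 */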
   1352 	uvm_lwp_hold(curlwp);
   1353 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
   1354 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
   1355 	s = splbio();
   1356 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
   1357 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
   1358 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
   1359 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
   1360 	tsleep(ccb, PWAIT, "dptucmd", 0);
   1361 	splx(s);
   1362 	uvm_lwp_rele(curlwp);
   1363 
   1364 	/*
   1365 	 * Sync up the DMA map and copy out results.
   1366 	 */
   1367 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
   1368 	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
   1369 
   1370 	if (cp->cp_datalen != 0) {
   1371 		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
   1372 		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
   1373 		bus_dmamap_unload(sc->sc_dmat, xfer);
   1374 	}
   1375 
   1376 	if (ucp->ucp_stataddr != NULL) {
   1377 		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
   1378 		if (rv != 0) {
   1379 			DPRINTF(("%s: sp copyout() failed\n",
   1380 			    sc->sc_dv.dv_xname));
   1381 		}
   1382 	}
   1383 	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
   1384 		i = min(uslen, sizeof(ccb->ccb_sense));
   1385 		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
   1386 		if (rv != 0) {
   1387 			DPRINTF(("%s: sense copyout() failed\n",
   1388 			    sc->sc_dv.dv_xname));
   1389 		}
   1390 	}
   1391 
   1392 	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
   1393 	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
   1394 	dpt_ccb_free(sc, ccb);
   1395 	return (rv);
   1396 }
   1397