      1 /*	$NetBSD: dpt.c,v 1.30 2001/07/19 16:25:24 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
      9  * Aerospace Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Portions of this code fall under the following copyright:
     42  *
     43  * Originally written by Julian Elischer (julian (at) tfs.com)
     44  * for TRW Financial Systems for use under the MACH(2.5) operating system.
     45  *
     46  * TRW Financial Systems, in accordance with their agreement with Carnegie
     47  * Mellon University, makes this software available to CMU to distribute
     48  * or use in any manner that they see fit as long as this message is kept with
     49  * the software. For this reason TFS also grants any other persons or
     50  * organisations permission to use or modify this software.
     51  *
     52  * TFS supplies this software to be publicly redistributed
     53  * on the understanding that TFS is not responsible for the correct
     54  * functioning of this software in any circumstances.
     55  */
     56 
     57 #include <sys/param.h>
     58 #include <sys/systm.h>
     59 #include <sys/device.h>
     60 #include <sys/queue.h>
     61 #include <sys/buf.h>
     62 #include <sys/endian.h>
     63 
     64 #include <uvm/uvm_extern.h>
     65 
     66 #include <machine/bus.h>
     67 
     68 #include <dev/scsipi/scsi_all.h>
     69 #include <dev/scsipi/scsipi_all.h>
     70 #include <dev/scsipi/scsiconf.h>
     71 
     72 #include <dev/ic/dptreg.h>
     73 #include <dev/ic/dptvar.h>
     74 
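        /*
         * Shorthand for single-byte reads and writes of the HBA's I/O
         * registers.
         */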
     75 #define dpt_inb(x, o)		\
     76     bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
     77 #define dpt_outb(x, o, d)	\
     78     bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
     79 
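        /*
         * Board numbers and their marketing names, scanned in pairs by
         * dpt_init(); the NULL entry terminates the list and supplies the
         * "<unknown>" fallback.
         */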
     80 static const char * const dpt_cname[] = {
     81 	"3334", "SmartRAID IV",
     82 	"3332", "SmartRAID IV",
     83 	"2144", "SmartCache IV",
     84 	"2044", "SmartCache IV",
     85 	"2142", "SmartCache IV",
     86 	"2042", "SmartCache IV",
     87 	"2041", "SmartCache IV",
     88 	"3224", "SmartRAID III",
     89 	"3222", "SmartRAID III",
     90 	"3021", "SmartRAID III",
     91 	"2124", "SmartCache III",
     92 	"2024", "SmartCache III",
     93 	"2122", "SmartCache III",
     94 	"2022", "SmartCache III",
     95 	"2021", "SmartCache III",
     96 	"2012", "SmartCache Plus",
     97 	"2011", "SmartCache Plus",
     98 	NULL,   "<unknown>",
     99 };
    100 
    101 static void	*dpt_sdh;
    102 
    103 static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
    104 static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
    105 static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
    106 static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
    107 static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
    108 static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
    109 static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
    110 static void	dpt_minphys(struct buf *);
    111 static void	dpt_scsipi_request(struct scsipi_channel *,
    112 				   scsipi_adapter_req_t, void *);
    113 static void	dpt_shutdown(void *);
    114 static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
    115 
    116 static __inline__ struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
    117 static __inline__ void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
    118 
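        /*
         * Allocate a CCB from the free list.  The list is assumed never to
         * be empty here, since the mid-layer is told (via adapt_openings)
         * that at most sc_nccbs commands can be outstanding at once.
         */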
    119 static __inline__ struct dpt_ccb *
    120 dpt_ccb_alloc(struct dpt_softc *sc)
    121 {
    122 	struct dpt_ccb *ccb;
    123 	int s;
    124 
    125 	s = splbio();
    126 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
    127 	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
    128 	splx(s);
    129 
    130 	return (ccb);
    131 }
    132 
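        /*
         * Return a CCB to the free list, clearing its flags.
         */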
    133 static __inline__ void
    134 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
    135 {
    136 	int s;
    137 
    138 	ccb->ccb_flg = 0;
    139 	s = splbio();
    140 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
    141 	splx(s);
    142 }
    143 
    144 /*
    145  * Handle an interrupt from the HBA.
    146  */
    147 int
    148 dpt_intr(void *cookie)
    149 {
    150 	struct dpt_softc *sc;
    151 	struct dpt_ccb *ccb;
    152 	struct eata_sp *sp;
    153 	volatile int junk;
    154 	int forus;
    155 
    156 	sc = cookie;
    157 	sp = sc->sc_stp;
    158 	forus = 0;
    159 
    160 	for (;;) {
    161 		/*
    162 		 * HBA might have interrupted while we were dealing with the
    163 		 * last completed command, since we ACK before we deal; keep
    164 		 * polling.
    165 		 */
    166 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    167 			break;
    168 		forus = 1;
    169 
    170 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    171 		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
    172 
    173 		/* Might have looped before HBA can reset HA_AUX_INTR. */
    174 		if (sp->sp_ccbid == -1) {
    175 			DELAY(50);
    176 
    177 			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    178 				return (0);
    179 
    180 			printf("%s: no status\n", sc->sc_dv.dv_xname);
    181 
    182 			/* Re-sync DMA map */
    183 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    184 			    sc->sc_stpoff, sizeof(struct eata_sp),
    185 			    BUS_DMASYNC_POSTREAD);
    186 		}
    187 
    188 		/* Make sure CCB ID from status packet is realistic. */
    189 		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
    190 			printf("%s: bogus status (returned CCB id %d)\n",
    191 			    sc->sc_dv.dv_xname, sp->sp_ccbid);
    192 
    193 			/* Ack the interrupt */
    194 			sp->sp_ccbid = -1;
    195 			junk = dpt_inb(sc, HA_STATUS);
    196 			continue;
    197 		}
    198 
    199 		/* Sync up DMA map and cache cmd status. */
    200 		ccb = sc->sc_ccbs + sp->sp_ccbid;
    201 
    202 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
    203 		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
    204 
    205 		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
    206 		ccb->ccb_scsi_status = sp->sp_scsi_status;
    207 
    208 		/*
    209 		 * Ack the interrupt and process the CCB.  If this
    210 		 * is a private CCB it's up to dpt_ccb_poll() to
    211 		 * notice.
    212 		 */
    213 		sp->sp_ccbid = -1;
    214 		ccb->ccb_flg |= CCB_INTR;
    215 		junk = dpt_inb(sc, HA_STATUS);
    216 		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    217 			dpt_ccb_done(sc, ccb);
    218 	}
    219 
    220 	return (forus);
    221 }
    222 
    223 /*
    224  * Initialize and attach the HBA.  This is the entry point from bus
    225  * specific probe-and-attach code.
    226  */
    227 void
    228 dpt_init(struct dpt_softc *sc, const char *intrstr)
    229 {
    230 	struct scsipi_adapter *adapt;
    231 	struct scsipi_channel *chan;
    232 	struct eata_inquiry_data *ei;
    233 	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
    234 	bus_dma_segment_t seg;
    235 	struct eata_cfg *ec;
    236 	struct dpt_ccb *ccb;
    237 	char model[16];
    238 
    239 	ec = &sc->sc_ec;
    240 
    241 	/*
    242 	 * Allocate the CCB/status packet/scratch DMA map and load.
    243 	 */
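        	/*
        	 * The single DMA area is laid out as: sc_nccbs CCBs, then the
        	 * EATA status packet, then DPT_SCRATCH_SIZE bytes of scratch
        	 * space; sc_stpoff and sc_scroff are the offsets of the latter
        	 * two within the map.
        	 */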
    244 	sc->sc_nccbs =
    245 	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
    246 	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
    247 	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
    248 	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
    249 	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
    250 
    251 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
    252 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    253 		printf("%s: unable to allocate CCBs, rv = %d\n",
    254 		    sc->sc_dv.dv_xname, rv);
    255 		return;
    256 	}
    257 
    258 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
    259 	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    260 		printf("%s: unable to map CCBs, rv = %d\n",
    261 		    sc->sc_dv.dv_xname, rv);
    262 		return;
    263 	}
    264 
    265 	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
    266 	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
    267 		printf("%s: unable to create CCB DMA map, rv = %d\n",
    268 		    sc->sc_dv.dv_xname, rv);
    269 		return;
    270 	}
    271 
    272 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
    273 	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
    274 		printf("%s: unable to load CCB DMA map, rv = %d\n",
    275 		    sc->sc_dv.dv_xname, rv);
    276 		return;
    277 	}
    278 
    279 	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
    280 	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
    281 	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
    282 	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
    283 	sc->sc_stp->sp_ccbid = -1;
    284 
    285 	/*
    286 	 * Create the CCBs.
    287 	 */
    288 	SLIST_INIT(&sc->sc_ccb_free);
    289 	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
    290 
    291 	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
    292 		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
    293 		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
    294 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    295 		    &ccb->ccb_dmamap_xfer);
    296 		if (rv) {
    297 			printf("%s: can't create ccb dmamap (%d)\n",
    298 			    sc->sc_dv.dv_xname, rv);
    299 			break;
    300 		}
    301 
    302 		ccb->ccb_id = i;
    303 		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
    304 		    CCB_OFF(sc, ccb);
    305 		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
    306 	}
    307 
    308 	if (i == 0) {
    309 		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
    310 		return;
    311 	} else if (i != sc->sc_nccbs) {
    312 		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
    313 		    sc->sc_nccbs);
    314 		sc->sc_nccbs = i;
    315 	}
    316 
    317 	/* Set shutdownhook before we start any device activity. */
    318 	if (dpt_sdh == NULL)
    319 		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
    320 
    321 	/* Get the inquiry data from the HBA. */
    322 	dpt_hba_inquire(sc, &ei);
    323 
    324 	/*
    325 	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
    326 	 * dpt0: interrupting at irq 10
    327 	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
    328 	 */
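        	/*
        	 * NUL-terminate the vendor name at the first space and build
        	 * the model string from the model and suffix fields.
        	 */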
    329 	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
    330 		;
    331 	ei->ei_vendor[i] = '\0';
    332 
    333 	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
    334 		model[i] = ei->ei_model[i];
    335 	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; i++, j++)
    336 		model[i] = ei->ei_suffix[j];
    337 	model[i] = '\0';
    338 
    339 	/* Find the marketing name for the board. */
    340 	for (i = 0; dpt_cname[i] != NULL; i += 2)
    341 		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
    342 			break;
    343 
    344 	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
    345 
    346 	if (intrstr != NULL)
    347 		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
    348 		    intrstr);
    349 
    350 	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
    351 	    EC_F3_MAX_CHANNEL_SHIFT;
    352 	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
    353 	    EC_F3_MAX_TARGET_SHIFT;
    354 
    355 	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
    356 	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
    357 
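        	/* The HBA's SCSI IDs are stored in ec_hba[] in reverse order. */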
    358 	for (i = 0; i <= maxchannel; i++) {
    359 		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
    360 		printf(" %d", sc->sc_hbaid[i]);
    361 	}
    362 	printf("\n");
    363 
    364 	/*
    365 	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
    366 	 * this for each bus?
    367 	 */
    368 	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
    369 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
    370 
    371 	/* Fill in the scsipi_adapter. */
    372 	adapt = &sc->sc_adapt;
    373 	memset(adapt, 0, sizeof(*adapt));
    374 	adapt->adapt_dev = &sc->sc_dv;
    375 	adapt->adapt_nchannels = maxchannel + 1;
    376 	adapt->adapt_openings = sc->sc_nccbs;
    377 	adapt->adapt_max_periph = sc->sc_nccbs;
    378 	adapt->adapt_request = dpt_scsipi_request;
    379 	adapt->adapt_minphys = dpt_minphys;
    380 
    381 	for (i = 0; i <= maxchannel; i++) {
    382 		/* Fill in the scsipi_channel. */
    383 		chan = &sc->sc_chans[i];
    384 		memset(chan, 0, sizeof(*chan));
    385 		chan->chan_adapter = adapt;
    386 		chan->chan_bustype = &scsi_bustype;
    387 		chan->chan_channel = i;
    388 		chan->chan_ntargets = maxtarget + 1;
    389 		chan->chan_nluns = ec->ec_maxlun + 1;
    390 		chan->chan_id = sc->sc_hbaid[i];
    391 		config_found(&sc->sc_dv, chan, scsiprint);
    392 	}
    393 }
    394 
    395 /*
    396  * Read the EATA configuration from the HBA and perform some sanity checks.
    397  */
    398 int
    399 dpt_readcfg(struct dpt_softc *sc)
    400 {
    401 	struct eata_cfg *ec;
    402 	int i, j, stat;
    403 	u_int16_t *p;
    404 
    405 	ec = &sc->sc_ec;
    406 
    407 	/* Older firmware may puke if we talk to it too soon after reset. */
    408 	dpt_outb(sc, HA_COMMAND, CP_RESET);
    409 	DELAY(750000);
    410 
    411 	for (i = 1000; i; i--) {
    412 		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
    413 			break;
    414 		DELAY(2000);
    415 	}
    416 
    417 	if (i == 0) {
    418 		printf("%s: HBA not ready after reset (hba status:%02x)\n",
    419 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    420 		return (-1);
    421 	}
    422 
    423 	while((((stat = dpt_inb(sc, HA_STATUS))
    424 	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
    425 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
    426 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
    427 	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
    428 		/* RAID drives still spinning up? */
    429 		if(dpt_inb(sc, HA_ERROR) != 'D' ||
    430 		   dpt_inb(sc, HA_ERROR + 1) != 'P' ||
    431 		   dpt_inb(sc, HA_ERROR + 2) != 'T') {
    432 			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
    433 			return (-1);
    434 		}
    435 	}
    436 
    437 	/*
    438 	 * Issue the read-config command and wait for the data to appear.
    439 	 *
    440 	 * Apparently certain firmware revisions won't DMA later on if we
    441 	 * request the config data using PIO, but it makes it a lot easier
    442 	 * as no DMA setup is required.
    443 	 */
    444 	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
    445 	memset(ec, 0, sizeof(*ec));
    446 	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
    447 	    sizeof(ec->ec_cfglen)) >> 1;
    448 	p = (u_int16_t *)ec;
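        	/*
        	 * `i' is the number of 16-bit words up to and including the
        	 * ec_cfglen field; once that much has been read, ec_cfglen
        	 * tells us how much more data to expect.
        	 */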
    449 
    450 	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
    451 		printf("%s: cfg data didn't appear (hba status:%02x)\n",
    452 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    453 		return (-1);
    454 	}
    455 
    456 	/* Begin reading. */
    457 	while (i--)
    458 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    459 
    460 	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
    461 	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    462 	    - sizeof(ec->ec_cfglen)))
    463 		i = sizeof(struct eata_cfg)
    464 		  - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    465 		  - sizeof(ec->ec_cfglen);
    466 
    467 	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
    468 	    sizeof(ec->ec_cfglen);
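        	/* `j' is the total number of bytes that will have been read. */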
    469 	i >>= 1;
    470 
    471 	while (i--)
    472 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    473 
    474 	/* Flush until we have read 512 bytes. */
    475 	i = (512 - j + 1) >> 1;
    476 	while (i--)
    477 		bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    478 
    479 	/* Defaults for older firmware... */
    480 	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
    481 		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
    482 
    483 	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
    484 		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
    485 		return (-1);
    486 	}
    487 
    488 	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
    489 		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
    490 		return (-1);
    491 	}
    492 
    493 	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
    494 		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
    495 		return (-1);
    496 	}
    497 
    498 	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
    499 		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
    500 		return (-1);
    501 	}
    502 
    503 	return (0);
    504 }
    505 
    506 /*
    507  * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
    508  * data from its cache and mark array groups as clean.
    509  *
    510  * XXX This doesn't always work (i.e., the HBA may still be flushing after
    511  * we tell root that it's safe to power off).
    512  */
    513 static void
    514 dpt_shutdown(void *cookie)
    515 {
    516 	extern struct cfdriver dpt_cd;
    517 	struct dpt_softc *sc;
    518 	int i;
    519 
    520 	printf("shutting down dpt devices...");
    521 
    522 	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
    523 		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
    524 			continue;
    525 		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
    526 	}
    527 
    528 	delay(10000*1000);
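        	/* Give the HBAs roughly ten seconds to finish flushing. */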
    529 	printf(" done\n");
    530 }
    531 
    532 /*
    533  * Send an EATA command to the HBA.
    534  */
    535 static int
    536 dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
    537 {
    538 	u_int32_t pa;
    539 	int i, s;
    540 
    541 	s = splbio();
    542 
    543 	for (i = 20000; i != 0; i--) {
    544 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
    545 			break;
    546 		DELAY(50);
    547 	}
    548 	if (i == 0) {
    549 		splx(s);
    550 		return (-1);
    551 	}
    552 
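        	/*
        	 * Hand the CCB's physical address (or 0 for an immediate
        	 * command with no CCB) to the HBA one byte at a time, least
        	 * significant byte first.
        	 */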
    553 	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
    554 	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
    555 	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
    556 	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
    557 	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);
    558 
    559 	if (eatacmd == CP_IMMEDIATE)
    560 		dpt_outb(sc, HA_ICMD, icmd);
    561 
    562 	dpt_outb(sc, HA_COMMAND, eatacmd);
    563 
    564 	splx(s);
    565 	return (0);
    566 }
    567 
    568 /*
    569  * Wait for the HBA status register to reach a specific state.
    570  */
    571 static int
    572 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
    573 {
    574 
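        	/* `ms' is in milliseconds; poll the status register in 100us steps. */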
    575 	for (ms *= 10; ms != 0; ms--) {
    576 		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
    577 			return (0);
    578 		DELAY(100);
    579 	}
    580 
    581 	return (-1);
    582 }
    583 
    584 /*
    585  * Spin waiting for a command to finish.  The timeout value from the CCB is
    586  * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
    587  * recycled before we get a look at it.
    588  */
    589 static int
    590 dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
    591 {
    592 	int i, s;
    593 
    594 #ifdef DEBUG
    595 	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    596 		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
    597 #endif
    598 
    599 	s = splbio();
    600 
    601 	if ((ccb->ccb_flg & CCB_INTR) != 0) {
    602 		splx(s);
    603 		return (0);
    604 	}
    605 
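        	/* ccb_timeout is in milliseconds; poll in 50us steps, hence * 20. */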
    606 	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
    607 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
    608 			dpt_intr(sc);
    609 		if ((ccb->ccb_flg & CCB_INTR) != 0)
    610 			break;
    611 		DELAY(50);
    612 	}
    613 
    614 	splx(s);
    615 	return (i == 0);
    616 }
    617 
    618 /*
    619  * We have a command which has been processed by the HBA, so now we look to
    620  * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
    621  * by dpt_intr().
    622  */
    623 static void
    624 dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
    625 {
    626 	struct scsipi_xfer *xs;
    627 
    628 	xs = ccb->ccb_xs;
    629 
    630 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));
    631 
    632 	/*
    633 	 * If we were a data transfer, unload the map that described the
    634 	 * data buffer.
    635 	 */
    636 	if (xs->datalen != 0)
    637 		dpt_ccb_unmap(sc, ccb);
    638 
    639 	if (xs->error == XS_NOERROR) {
    640 		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
    641 			switch (ccb->ccb_hba_status) {
    642 			case SP_HBA_ERROR_SEL_TO:
    643 				xs->error = XS_SELTIMEOUT;
    644 				break;
    645 			case SP_HBA_ERROR_RESET:
    646 				xs->error = XS_RESET;
    647 				break;
    648 			default:
    649 				printf("%s: HBA status %x\n",
    650 				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
    651 				xs->error = XS_DRIVER_STUFFUP;
    652 				break;
    653 			}
    654 		} else if (ccb->ccb_scsi_status != SCSI_OK) {
    655 			switch (ccb->ccb_scsi_status) {
    656 			case SCSI_CHECK:
    657 				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
    658 				    sizeof(xs->sense.scsi_sense));
    659 				xs->error = XS_SENSE;
    660 				break;
    661 			case SCSI_BUSY:
    662 			case SCSI_QUEUE_FULL:
    663 				xs->error = XS_BUSY;
    664 				break;
    665 			default:
    666 				scsipi_printaddr(xs->xs_periph);
    667 				printf("SCSI status %x\n",
    668 				    ccb->ccb_scsi_status);
    669 				xs->error = XS_DRIVER_STUFFUP;
    670 				break;
    671 			}
    672 		} else
    673 			xs->resid = 0;
    674 
    675 		xs->status = ccb->ccb_scsi_status;
    676 	}
    677 
    678 	/* Free up the CCB and mark the command as done. */
    679 	dpt_ccb_free(sc, ccb);
    680 	scsipi_done(xs);
    681 }
    682 
    683 /*
    684  * Specified CCB has timed out, abort it.
    685  */
    686 static void
    687 dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
    688 {
    689 	struct scsipi_periph *periph;
    690 	struct scsipi_xfer *xs;
    691 	int s;
    692 
    693 	xs = ccb->ccb_xs;
    694 	periph = xs->xs_periph;
    695 
    696 	scsipi_printaddr(periph);
    697 	printf("timed out (status:%02x aux status:%02x)",
    698 	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
    699 
    700 	s = splbio();
    701 
    702 	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
    703 		/* Abort timed out, reset the HBA */
    704 		printf(" AGAIN, resetting HBA\n");
    705 		dpt_outb(sc, HA_COMMAND, CP_RESET);
    706 		DELAY(750000);
    707 	} else {
    708 		/* Abort the operation that has timed out */
    709 		printf("\n");
    710 		xs->error = XS_TIMEOUT;
    711 		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
    712 		ccb->ccb_flg |= CCB_ABORT;
    713 		/* Start the abort */
    714 		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
    715 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
    716 	}
    717 
    718 	splx(s);
    719 }
    720 
    721 /*
    722  * Map a data transfer.
    723  */
    724 static int
    725 dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
    726 {
    727 	struct scsipi_xfer *xs;
    728 	bus_dmamap_t xfer;
    729 	bus_dma_segment_t *ds;
    730 	struct eata_sg *sg;
    731 	struct eata_cp *cp;
    732 	int rv, i;
    733 
    734 	xs = ccb->ccb_xs;
    735 	xfer = ccb->ccb_dmamap_xfer;
    736 	cp = &ccb->ccb_eata_cp;
    737 
    738 	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
    739 	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
    740 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    741 	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
    742 
    743 	switch (rv) {
    744 	case 0:
    745 		break;
    746 	case ENOMEM:
    747 	case EAGAIN:
    748 		xs->error = XS_RESOURCE_SHORTAGE;
    749 		break;
    750 	default:
    751 		xs->error = XS_DRIVER_STUFFUP;
    752 		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
    753 		break;
    754 	}
    755 
    756 	if (xs->error != XS_NOERROR) {
    757 		dpt_ccb_free(sc, ccb);
    758 		scsipi_done(xs);
    759 		return (-1);
    760 	}
    761 
    762 	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
    763 	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
    764 	    BUS_DMASYNC_PREWRITE);
    765 
    766 	/* Don't bother using scatter/gather for just 1 seg */
    767 	if (xfer->dm_nsegs == 1) {
    768 		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
    769 		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
    770 	} else {
    771 		/*
    772 		 * Load the hardware scatter/gather map with
    773 		 * the contents of the DMA map.
    774 		 */
    775 		sg = ccb->ccb_sg;
    776 		ds = xfer->dm_segs;
    777 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
    778  			sg->sg_addr = htobe32(ds->ds_addr);
    779  			sg->sg_len =  htobe32(ds->ds_len);
    780  		}
    781 	 	cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
    782 		    sc->sc_dmamap->dm_segs[0].ds_addr +
    783 		    offsetof(struct dpt_ccb, ccb_sg));
    784 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
    785 		cp->cp_ctl0 |= CP_C0_SCATTER;
    786 	}
    787 
    788 	return (0);
    789 }
    790 
    791 /*
    792  * Unmap a transfer.
    793  */
    794 static void
    795 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
    796 {
    797 
    798 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
    799 	    ccb->ccb_dmamap_xfer->dm_mapsize,
    800 	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
    801 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    802 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
    803 }
    804 
    805 /*
    806  * Adjust the size of each I/O before it passes to the SCSI layer.
    807  */
    808 static void
    809 dpt_minphys(struct buf *bp)
    810 {
    811 
    812 	if (bp->b_bcount > DPT_MAX_XFER)
    813 		bp->b_bcount = DPT_MAX_XFER;
    814 	minphys(bp);
    815 }
    816 
    817 /*
    818  * Start a SCSI command.
    819  */
    820 static void
    821 dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    822 		   void *arg)
    823 {
    824 	struct dpt_softc *sc;
    825 	struct scsipi_xfer *xs;
    826 	int flags;
    827 	struct scsipi_periph *periph;
    828 	struct dpt_ccb *ccb;
    829 	struct eata_cp *cp;
    830 
    831 	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;
    832 
    833 	switch (req) {
    834 	case ADAPTER_REQ_RUN_XFER:
    835 		xs = arg;
    836 		periph = xs->xs_periph;
    837 		flags = xs->xs_control;
    838 
    839 #ifdef DIAGNOSTIC
    840 		/* Cmds must be no more than 12 bytes for us. */
    841 		if (xs->cmdlen > 12) {
    842 			xs->error = XS_DRIVER_STUFFUP;
    843 			scsipi_done(xs);
    844 			break;
    845 		}
    846 #endif
    847 		/*
    848 		 * XXX We can't reset devices just yet.  Apparently some
    849 		 * older firmware revisions don't even support it.
    850 		 */
    851 		if ((flags & XS_CTL_RESET) != 0) {
    852 			xs->error = XS_DRIVER_STUFFUP;
    853 			scsipi_done(xs);
    854 			break;
    855 		}
    856 
    857 		/*
    858 		 * Get a CCB and fill it.
    859 		 */
    860 		ccb = dpt_ccb_alloc(sc);
    861 		ccb->ccb_xs = xs;
    862 		ccb->ccb_timeout = xs->timeout;
    863 
    864 		cp = &ccb->ccb_eata_cp;
    865 		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
    866 		cp->cp_ccbid = ccb->ccb_id;
    867 		cp->cp_senselen = sizeof(ccb->ccb_sense);
    868 		cp->cp_stataddr = htobe32(sc->sc_stppa);
    869 		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
    870 		cp->cp_ctl1 = 0;
    871 		cp->cp_ctl2 = 0;
    872 		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
    873 		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
    874 		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
    875 		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
    876 
    877 		if ((flags & XS_CTL_DATA_IN) != 0)
    878 			cp->cp_ctl0 |= CP_C0_DATA_IN;
    879 		if ((flags & XS_CTL_DATA_OUT) != 0)
    880 			cp->cp_ctl0 |= CP_C0_DATA_OUT;
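        		/*
        		 * Commands addressed to the HBA's own ID are interpreted
        		 * by the controller itself rather than being sent out on
        		 * the SCSI bus.
        		 */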
    881 		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
    882 			cp->cp_ctl0 |= CP_C0_INTERPRET;
    883 
    884 		/* Synchronous xfers mustn't write-back through the cache. */
    885 		if (xs->bp != NULL)
    886 			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
    887 				cp->cp_ctl2 |= CP_C2_NO_CACHE;
    888 
    889 		cp->cp_senseaddr =
    890 		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
    891 		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
    892 
    893 		if (xs->datalen != 0) {
    894 			if (dpt_ccb_map(sc, ccb))
    895 				break;
    896 		} else {
    897 			cp->cp_dataaddr = 0;
    898 			cp->cp_datalen = 0;
    899 		}
    900 
    901 		/* Sync up CCB and status packet. */
    902 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    903 		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
    904 		    BUS_DMASYNC_PREWRITE);
    905 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    906 		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
    907 
    908 		/*
    909 		 * Start the command.
    910 		 */
    911 		if ((xs->xs_control & XS_CTL_POLL) != 0)
    912 			ccb->ccb_flg |= CCB_PRIVATE;
    913 
    914 		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
    915 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
    916 			xs->error = XS_DRIVER_STUFFUP;
    917 			if (xs->datalen != 0)
    918 				dpt_ccb_unmap(sc, ccb);
    919 			dpt_ccb_free(sc, ccb);
    920 			break;
    921 		}
    922 
    923 		if ((xs->xs_control & XS_CTL_POLL) == 0)
    924 			break;
    925 
    926 		if (dpt_ccb_poll(sc, ccb)) {
    927 			dpt_ccb_abort(sc, ccb);
    928 			/* Wait for abort to complete... */
    929 			if (dpt_ccb_poll(sc, ccb))
    930 				dpt_ccb_abort(sc, ccb);
    931 		}
    932 
    933 		dpt_ccb_done(sc, ccb);
    934 		break;
    935 
    936 	case ADAPTER_REQ_GROW_RESOURCES:
    937 		/*
    938 		 * Not supported, since we allocate the maximum number of
    939 		 * CCBs up front.
    940 		 */
    941 		break;
    942 
    943 	case ADAPTER_REQ_SET_XFER_MODE:
    944 		/*
    945 		 * This will be handled by the HBA itself, and we can't
    946 		 * modify that (ditto for tagged queueing).
    947 		 */
    948 		break;
    949 	}
    950 }
    951 
    952 /*
    953  * Get inquiry data from the adapter.
    954  */
    955 static void
    956 dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
    957 {
    958 	struct dpt_ccb *ccb;
    959 	struct eata_cp *cp;
    960 
    961 	*ei = (struct eata_inquiry_data *)sc->sc_scr;
    962 
    963 	/* Get a CCB and mark as private */
    964 	ccb = dpt_ccb_alloc(sc);
    965 	ccb->ccb_flg |= CCB_PRIVATE;
    966 	ccb->ccb_timeout = 200;
    967 
    968 	/* Put all the arguments into the CCB. */
    969 	cp = &ccb->ccb_eata_cp;
    970 	cp->cp_ccbid = ccb->ccb_id;
    971 	cp->cp_senselen = sizeof(ccb->ccb_sense);
    972 	cp->cp_senseaddr = 0;
    973 	cp->cp_stataddr = htobe32(sc->sc_stppa);
    974 	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
    975 	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
    976 	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
    977 	cp->cp_ctl1 = 0;
    978 	cp->cp_ctl2 = 0;
    979 	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
    980 	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
    981 
    982 	/* Put together the SCSI inquiry command. */
    983 	memset(&cp->cp_cdb_cmd, 0, 12);
    984 	cp->cp_cdb_cmd = INQUIRY;
    985 	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
    986 
    987 	/* Sync up CCB, status packet and scratch area. */
    988 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
    989 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
    990 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    991 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
    992 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
    993 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
    994 
    995 	/* Start the command and poll on completion. */
    996 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
    997 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
    998 
    999 	if (dpt_ccb_poll(sc, ccb))
   1000 		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
   1001 
   1002 	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
   1003 	    ccb->ccb_scsi_status != SCSI_OK)
   1004 		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
   1005 		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
   1006 		    ccb->ccb_scsi_status);
   1007 
   1008 	/* Sync up the DMA map and free CCB, returning. */
   1009 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
   1010 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
   1011 	dpt_ccb_free(sc, ccb);
   1012 }
   1013