      1 /*	$NetBSD: dpt.c,v 1.31 2001/11/13 13:14:36 lukem Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
      9  * Aerospace Simulation Facility, NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Portions of this code fall under the following copyright:
     42  *
     43  * Originally written by Julian Elischer (julian (at) tfs.com)
     44  * for TRW Financial Systems for use under the MACH(2.5) operating system.
     45  *
     46  * TRW Financial Systems, in accordance with their agreement with Carnegie
     47  * Mellon University, makes this software available to CMU to distribute
     48  * or use in any manner that they see fit as long as this message is kept with
     49  * the software. For this reason TFS also grants any other persons or
     50  * organisations permission to use or modify this software.
     51  *
     52  * TFS supplies this software to be publicly redistributed
     53  * on the understanding that TFS is not responsible for the correct
     54  * functioning of this software in any circumstances.
     55  */
     56 
     57 #include <sys/cdefs.h>
     58 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.31 2001/11/13 13:14:36 lukem Exp $");
     59 
     60 #include <sys/param.h>
     61 #include <sys/systm.h>
     62 #include <sys/device.h>
     63 #include <sys/queue.h>
     64 #include <sys/buf.h>
     65 #include <sys/endian.h>
     66 
     67 #include <uvm/uvm_extern.h>
     68 
     69 #include <machine/bus.h>
     70 
     71 #include <dev/scsipi/scsi_all.h>
     72 #include <dev/scsipi/scsipi_all.h>
     73 #include <dev/scsipi/scsiconf.h>
     74 
     75 #include <dev/ic/dptreg.h>
     76 #include <dev/ic/dptvar.h>
     77 
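         /* Single-byte access to the HBA's register space. */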
     78 #define dpt_inb(x, o)		\
     79     bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
     80 #define dpt_outb(x, o, d)	\
     81     bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
     82 
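         /*
          * Pairs of four-character board codes (matched against the model
          * string returned by the HBA inquiry) and their marketing names.
          * The NULL entry terminates the table and supplies the fallback
          * name.
          */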
     83 static const char * const dpt_cname[] = {
     84 	"3334", "SmartRAID IV",
     85 	"3332", "SmartRAID IV",
     86 	"2144", "SmartCache IV",
     87 	"2044", "SmartCache IV",
     88 	"2142", "SmartCache IV",
     89 	"2042", "SmartCache IV",
     90 	"2041", "SmartCache IV",
     91 	"3224", "SmartRAID III",
     92 	"3222", "SmartRAID III",
     93 	"3021", "SmartRAID III",
     94 	"2124", "SmartCache III",
     95 	"2024", "SmartCache III",
     96 	"2122", "SmartCache III",
     97 	"2022", "SmartCache III",
     98 	"2021", "SmartCache III",
     99 	"2012", "SmartCache Plus",
    100 	"2011", "SmartCache Plus",
    101 	NULL,   "<unknown>",
    102 };
    103 
    104 static void	*dpt_sdh;
    105 
    106 static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
    107 static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
    108 static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
    109 static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
    110 static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
    111 static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
    112 static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
    113 static void	dpt_minphys(struct buf *);
    114 static void	dpt_scsipi_request(struct scsipi_channel *,
    115 				   scsipi_adapter_req_t, void *);
    116 static void	dpt_shutdown(void *);
    117 static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
    118 
    119 static __inline__ struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
    120 static __inline__ void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
    121 
    122 static __inline__ struct dpt_ccb *
    123 dpt_ccb_alloc(struct dpt_softc *sc)
    124 {
    125 	struct dpt_ccb *ccb;
    126 	int s;
    127 
    128 	s = splbio();
    129 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
    130 	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
    131 	splx(s);
    132 
    133 	return (ccb);
    134 }
    135 
    136 static __inline__ void
    137 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
    138 {
    139 	int s;
    140 
    141 	ccb->ccb_flg = 0;
    142 	s = splbio();
    143 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
    144 	splx(s);
    145 }
    146 
    147 /*
    148  * Handle an interrupt from the HBA.
    149  */
    150 int
    151 dpt_intr(void *cookie)
    152 {
    153 	struct dpt_softc *sc;
    154 	struct dpt_ccb *ccb;
    155 	struct eata_sp *sp;
    156 	volatile int junk;
    157 	int forus;
    158 
    159 	sc = cookie;
    160 	sp = sc->sc_stp;
    161 	forus = 0;
    162 
    163 	for (;;) {
    164 		/*
    165 		 * HBA might have interrupted while we were dealing with the
    166 		 * last completed command, since we ACK before we deal; keep
    167 		 * polling.
    168 		 */
    169 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    170 			break;
    171 		forus = 1;
    172 
    173 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    174 		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
    175 
     176 		/* Might have looped before the HBA could clear HA_AUX_INTR. */
    177 		if (sp->sp_ccbid == -1) {
    178 			DELAY(50);
    179 
    180 			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
    181 				return (0);
    182 
    183 			printf("%s: no status\n", sc->sc_dv.dv_xname);
    184 
    185 			/* Re-sync DMA map */
    186 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    187 			    sc->sc_stpoff, sizeof(struct eata_sp),
    188 			    BUS_DMASYNC_POSTREAD);
    189 		}
    190 
    191 		/* Make sure CCB ID from status packet is realistic. */
    192 		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
    193 			printf("%s: bogus status (returned CCB id %d)\n",
    194 			    sc->sc_dv.dv_xname, sp->sp_ccbid);
    195 
    196 			/* Ack the interrupt */
    197 			sp->sp_ccbid = -1;
    198 			junk = dpt_inb(sc, HA_STATUS);
    199 			continue;
    200 		}
    201 
    202 		/* Sync up DMA map and cache cmd status. */
    203 		ccb = sc->sc_ccbs + sp->sp_ccbid;
    204 
    205 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
    206 		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
    207 
    208 		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
    209 		ccb->ccb_scsi_status = sp->sp_scsi_status;
    210 
    211 		/*
    212 		 * Ack the interrupt and process the CCB.  If this
    213 		 * is a private CCB it's up to dpt_ccb_poll() to
    214 		 * notice.
    215 		 */
    216 		sp->sp_ccbid = -1;
    217 		ccb->ccb_flg |= CCB_INTR;
    218 		junk = dpt_inb(sc, HA_STATUS);
    219 		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    220 			dpt_ccb_done(sc, ccb);
    221 	}
    222 
    223 	return (forus);
    224 }
    225 
    226 /*
    227  * Initialize and attach the HBA.  This is the entry point from bus
    228  * specific probe-and-attach code.
    229  */
    230 void
    231 dpt_init(struct dpt_softc *sc, const char *intrstr)
    232 {
    233 	struct scsipi_adapter *adapt;
    234 	struct scsipi_channel *chan;
    235 	struct eata_inquiry_data *ei;
    236 	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
    237 	bus_dma_segment_t seg;
    238 	struct eata_cfg *ec;
    239 	struct dpt_ccb *ccb;
    240 	char model[16];
    241 
    242 	ec = &sc->sc_ec;
    243 
    244 	/*
    245 	 * Allocate the CCB/status packet/scratch DMA map and load.
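         	 * The single map is laid out as sc_nccbs CCBs, followed by one
         	 * EATA status packet, followed by DPT_SCRATCH_SIZE bytes of
         	 * scratch space.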
    246 	 */
    247 	sc->sc_nccbs =
    248 	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
    249 	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
    250 	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
    251 	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
    252 	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
    253 
    254 	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
    255 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    256 		printf("%s: unable to allocate CCBs, rv = %d\n",
    257 		    sc->sc_dv.dv_xname, rv);
    258 		return;
    259 	}
    260 
    261 	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
    262 	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    263 		printf("%s: unable to map CCBs, rv = %d\n",
    264 		    sc->sc_dv.dv_xname, rv);
    265 		return;
    266 	}
    267 
    268 	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
    269 	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
    270 		printf("%s: unable to create CCB DMA map, rv = %d\n",
    271 		    sc->sc_dv.dv_xname, rv);
    272 		return;
    273 	}
    274 
    275 	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
    276 	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
    277 		printf("%s: unable to load CCB DMA map, rv = %d\n",
    278 		    sc->sc_dv.dv_xname, rv);
    279 		return;
    280 	}
    281 
    282 	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
    283 	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
    284 	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
    285 	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
    286 	sc->sc_stp->sp_ccbid = -1;
    287 
    288 	/*
    289 	 * Create the CCBs.
    290 	 */
    291 	SLIST_INIT(&sc->sc_ccb_free);
    292 	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
    293 
    294 	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
    295 		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
    296 		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
    297 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    298 		    &ccb->ccb_dmamap_xfer);
    299 		if (rv) {
    300 			printf("%s: can't create ccb dmamap (%d)\n",
    301 			    sc->sc_dv.dv_xname, rv);
    302 			break;
    303 		}
    304 
    305 		ccb->ccb_id = i;
    306 		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
    307 		    CCB_OFF(sc, ccb);
    308 		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
    309 	}
    310 
    311 	if (i == 0) {
    312 		printf("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
    313 		return;
    314 	} else if (i != sc->sc_nccbs) {
    315 		printf("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname, i,
    316 		    sc->sc_nccbs);
    317 		sc->sc_nccbs = i;
    318 	}
    319 
    320 	/* Set shutdownhook before we start any device activity. */
    321 	if (dpt_sdh == NULL)
    322 		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
    323 
    324 	/* Get the inquiry data from the HBA. */
    325 	dpt_hba_inquire(sc, &ei);
    326 
    327 	/*
    328 	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
    329 	 * dpt0: interrupting at irq 10
    330 	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
    331 	 */
     332 	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
     333 		;
     334 	ei->ei_vendor[i] = '\0';
     335 
     336 	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
     337 		model[i] = ei->ei_model[i];
     338 	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; i++, j++)
     339 		model[i] = ei->ei_suffix[j];
    340 	model[i] = '\0';
    341 
    342 	/* Find the marketing name for the board. */
    343 	for (i = 0; dpt_cname[i] != NULL; i += 2)
    344 		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
    345 			break;
    346 
    347 	printf("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);
    348 
    349 	if (intrstr != NULL)
    350 		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
    351 		    intrstr);
    352 
    353 	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
    354 	    EC_F3_MAX_CHANNEL_SHIFT;
    355 	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
    356 	    EC_F3_MAX_TARGET_SHIFT;
    357 
    358 	printf("%s: %d queued commands, %d channel(s), adapter on ID(s)",
    359 	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
    360 
    361 	for (i = 0; i <= maxchannel; i++) {
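         		/* ec_hba[] apparently holds the IDs in reverse channel order. */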
    362 		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
    363 		printf(" %d", sc->sc_hbaid[i]);
    364 	}
    365 	printf("\n");
    366 
    367 	/*
    368 	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
    369 	 * this for each bus?
    370 	 */
    371 	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
    372 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
    373 
    374 	/* Fill in the scsipi_adapter. */
    375 	adapt = &sc->sc_adapt;
    376 	memset(adapt, 0, sizeof(*adapt));
    377 	adapt->adapt_dev = &sc->sc_dv;
    378 	adapt->adapt_nchannels = maxchannel + 1;
    379 	adapt->adapt_openings = sc->sc_nccbs;
    380 	adapt->adapt_max_periph = sc->sc_nccbs;
    381 	adapt->adapt_request = dpt_scsipi_request;
    382 	adapt->adapt_minphys = dpt_minphys;
    383 
    384 	for (i = 0; i <= maxchannel; i++) {
    385 		/* Fill in the scsipi_channel. */
    386 		chan = &sc->sc_chans[i];
    387 		memset(chan, 0, sizeof(*chan));
    388 		chan->chan_adapter = adapt;
    389 		chan->chan_bustype = &scsi_bustype;
    390 		chan->chan_channel = i;
    391 		chan->chan_ntargets = maxtarget + 1;
    392 		chan->chan_nluns = ec->ec_maxlun + 1;
    393 		chan->chan_id = sc->sc_hbaid[i];
    394 		config_found(&sc->sc_dv, chan, scsiprint);
    395 	}
    396 }
    397 
    398 /*
    399  * Read the EATA configuration from the HBA and perform some sanity checks.
    400  */
    401 int
    402 dpt_readcfg(struct dpt_softc *sc)
    403 {
    404 	struct eata_cfg *ec;
    405 	int i, j, stat;
    406 	u_int16_t *p;
    407 
    408 	ec = &sc->sc_ec;
    409 
    410 	/* Older firmware may puke if we talk to it too soon after reset. */
    411 	dpt_outb(sc, HA_COMMAND, CP_RESET);
    412 	DELAY(750000);
    413 
    414 	for (i = 1000; i; i--) {
    415 		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
    416 			break;
    417 		DELAY(2000);
    418 	}
    419 
    420 	if (i == 0) {
    421 		printf("%s: HBA not ready after reset (hba status:%02x)\n",
    422 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    423 		return (-1);
    424 	}
    425 
    426 	while((((stat = dpt_inb(sc, HA_STATUS))
    427 	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
    428 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
    429 	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
    430 	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
    431 		/* RAID drives still spinning up? */
    432 		if(dpt_inb(sc, HA_ERROR) != 'D' ||
    433 		   dpt_inb(sc, HA_ERROR + 1) != 'P' ||
    434 		   dpt_inb(sc, HA_ERROR + 2) != 'T') {
    435 			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
    436 			return (-1);
    437 		}
    438 	}
    439 
    440 	/*
    441 	 * Issue the read-config command and wait for the data to appear.
    442 	 *
     443  * Apparently certain firmware revisions won't DMA later on if we
    444 	 * request the config data using PIO, but it makes it a lot easier
    445 	 * as no DMA setup is required.
    446 	 */
    447 	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
    448 	memset(ec, 0, sizeof(*ec));
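         	/*
         	 * `i' is the number of 16-bit words up to and including
         	 * ec_cfglen; that much must be read before the config length
         	 * itself is known.
         	 */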
    449 	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
    450 	    sizeof(ec->ec_cfglen)) >> 1;
    451 	p = (u_int16_t *)ec;
    452 
    453 	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
    454 		printf("%s: cfg data didn't appear (hba status:%02x)\n",
    455 		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
    456 		return (-1);
    457 	}
    458 
    459 	/* Begin reading. */
    460 	while (i--)
    461 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    462 
    463 	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
    464 	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    465 	    - sizeof(ec->ec_cfglen)))
    466 		i = sizeof(struct eata_cfg)
    467 		  - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
    468 		  - sizeof(ec->ec_cfglen);
    469 
    470 	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
    471 	    sizeof(ec->ec_cfglen);
    472 	i >>= 1;
    473 
    474 	while (i--)
    475 		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    476 
    477 	/* Flush until we have read 512 bytes. */
    478 	i = (512 - j + 1) >> 1;
    479 	while (i--)
    480 		bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
    481 
    482 	/* Defaults for older firmware... */
    483 	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
    484 		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
    485 
    486 	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
    487 		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
    488 		return (-1);
    489 	}
    490 
    491 	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
    492 		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
    493 		return (-1);
    494 	}
    495 
    496 	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
    497 		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
    498 		return (-1);
    499 	}
    500 
    501 	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
    502 		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
    503 		return (-1);
    504 	}
    505 
    506 	return (0);
    507 }
    508 
    509 /*
    510  * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
     511  * data from its cache and mark array groups as clean.
    512  *
    513  * XXX This doesn't always work (i.e., the HBA may still be flushing after
    514  * we tell root that it's safe to power off).
    515  */
    516 static void
    517 dpt_shutdown(void *cookie)
    518 {
    519 	extern struct cfdriver dpt_cd;
    520 	struct dpt_softc *sc;
    521 	int i;
    522 
    523 	printf("shutting down dpt devices...");
    524 
    525 	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
    526 		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
    527 			continue;
    528 		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
    529 	}
    530 
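         	/* Allow the HBAs roughly ten seconds to finish flushing. */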
    531 	delay(10000*1000);
    532 	printf(" done\n");
    533 }
    534 
    535 /*
    536  * Send an EATA command to the HBA.
    537  */
    538 static int
    539 dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
    540 {
    541 	u_int32_t pa;
    542 	int i, s;
    543 
    544 	s = splbio();
    545 
    546 	for (i = 20000; i != 0; i--) {
    547 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
    548 			break;
    549 		DELAY(50);
    550 	}
    551 	if (i == 0) {
    552 		splx(s);
    553 		return (-1);
    554 	}
    555 
    556 	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
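         	/*
         	 * Pass the CCB's physical address to the HBA least significant
         	 * byte first, then kick off the command via HA_COMMAND.
         	 */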
    557 	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
    558 	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
    559 	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
    560 	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);
    561 
    562 	if (eatacmd == CP_IMMEDIATE)
    563 		dpt_outb(sc, HA_ICMD, icmd);
    564 
    565 	dpt_outb(sc, HA_COMMAND, eatacmd);
    566 
    567 	splx(s);
    568 	return (0);
    569 }
    570 
    571 /*
    572  * Wait for the HBA status register to reach a specific state.
    573  */
    574 static int
    575 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
    576 {
    577 
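         	/* Poll in 100us steps; `ms' milliseconds is ms * 10 iterations. */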
    578 	for (ms *= 10; ms != 0; ms--) {
    579 		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
    580 			return (0);
    581 		DELAY(100);
    582 	}
    583 
    584 	return (-1);
    585 }
    586 
    587 /*
    588  * Spin waiting for a command to finish.  The timeout value from the CCB is
     589  * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
    590  * recycled before we get a look at it.
    591  */
    592 static int
    593 dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
    594 {
    595 	int i, s;
    596 
    597 #ifdef DEBUG
    598 	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
    599 		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request\n");
    600 #endif
    601 
    602 	s = splbio();
    603 
    604 	if ((ccb->ccb_flg & CCB_INTR) != 0) {
    605 		splx(s);
    606 		return (0);
    607 	}
    608 
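         	/* ccb_timeout is in milliseconds: 20 iterations of 50us each per ms. */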
    609 	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
    610 		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
    611 			dpt_intr(sc);
    612 		if ((ccb->ccb_flg & CCB_INTR) != 0)
    613 			break;
    614 		DELAY(50);
    615 	}
    616 
    617 	splx(s);
    618 	return (i == 0);
    619 }
    620 
    621 /*
    622  * We have a command which has been processed by the HBA, so now we look to
    623  * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
    624  * by dpt_intr().
    625  */
    626 static void
    627 dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
    628 {
    629 	struct scsipi_xfer *xs;
    630 
    631 	xs = ccb->ccb_xs;
    632 
    633 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));
    634 
    635 	/*
    636 	 * If we were a data transfer, unload the map that described the
    637 	 * data buffer.
    638 	 */
    639 	if (xs->datalen != 0)
    640 		dpt_ccb_unmap(sc, ccb);
    641 
    642 	if (xs->error == XS_NOERROR) {
    643 		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
    644 			switch (ccb->ccb_hba_status) {
    645 			case SP_HBA_ERROR_SEL_TO:
    646 				xs->error = XS_SELTIMEOUT;
    647 				break;
    648 			case SP_HBA_ERROR_RESET:
    649 				xs->error = XS_RESET;
    650 				break;
    651 			default:
    652 				printf("%s: HBA status %x\n",
    653 				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
    654 				xs->error = XS_DRIVER_STUFFUP;
    655 				break;
    656 			}
    657 		} else if (ccb->ccb_scsi_status != SCSI_OK) {
    658 			switch (ccb->ccb_scsi_status) {
    659 			case SCSI_CHECK:
    660 				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
    661 				    sizeof(xs->sense.scsi_sense));
    662 				xs->error = XS_SENSE;
    663 				break;
    664 			case SCSI_BUSY:
    665 			case SCSI_QUEUE_FULL:
    666 				xs->error = XS_BUSY;
    667 				break;
    668 			default:
    669 				scsipi_printaddr(xs->xs_periph);
    670 				printf("SCSI status %x\n",
    671 				    ccb->ccb_scsi_status);
    672 				xs->error = XS_DRIVER_STUFFUP;
    673 				break;
    674 			}
    675 		} else
    676 			xs->resid = 0;
    677 
    678 		xs->status = ccb->ccb_scsi_status;
    679 	}
    680 
    681 	/* Free up the CCB and mark the command as done. */
    682 	dpt_ccb_free(sc, ccb);
    683 	scsipi_done(xs);
    684 }
    685 
    686 /*
    687  * Specified CCB has timed out, abort it.
    688  */
    689 static void
    690 dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
    691 {
    692 	struct scsipi_periph *periph;
    693 	struct scsipi_xfer *xs;
    694 	int s;
    695 
    696 	xs = ccb->ccb_xs;
    697 	periph = xs->xs_periph;
    698 
    699 	scsipi_printaddr(periph);
    700 	printf("timed out (status:%02x aux status:%02x)",
    701 	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
    702 
    703 	s = splbio();
    704 
    705 	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
    706 		/* Abort timed out, reset the HBA */
    707 		printf(" AGAIN, resetting HBA\n");
    708 		dpt_outb(sc, HA_COMMAND, CP_RESET);
    709 		DELAY(750000);
    710 	} else {
    711 		/* Abort the operation that has timed out */
    712 		printf("\n");
    713 		xs->error = XS_TIMEOUT;
    714 		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
    715 		ccb->ccb_flg |= CCB_ABORT;
    716 		/* Start the abort */
    717 		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
    718 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
    719 	}
    720 
    721 	splx(s);
    722 }
    723 
    724 /*
    725  * Map a data transfer.
    726  */
    727 static int
    728 dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
    729 {
    730 	struct scsipi_xfer *xs;
    731 	bus_dmamap_t xfer;
    732 	bus_dma_segment_t *ds;
    733 	struct eata_sg *sg;
    734 	struct eata_cp *cp;
    735 	int rv, i;
    736 
    737 	xs = ccb->ccb_xs;
    738 	xfer = ccb->ccb_dmamap_xfer;
    739 	cp = &ccb->ccb_eata_cp;
    740 
    741 	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
    742 	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
    743 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    744 	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
    745 
    746 	switch (rv) {
    747 	case 0:
    748 		break;
    749 	case ENOMEM:
    750 	case EAGAIN:
    751 		xs->error = XS_RESOURCE_SHORTAGE;
    752 		break;
    753 	default:
    754 		xs->error = XS_DRIVER_STUFFUP;
    755 		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
    756 		break;
    757 	}
    758 
    759 	if (xs->error != XS_NOERROR) {
    760 		dpt_ccb_free(sc, ccb);
    761 		scsipi_done(xs);
    762 		return (-1);
    763 	}
    764 
    765 	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
    766 	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
    767 	    BUS_DMASYNC_PREWRITE);
    768 
    769 	/* Don't bother using scatter/gather for just 1 seg */
    770 	if (xfer->dm_nsegs == 1) {
    771 		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
    772 		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
    773 	} else {
    774 		/*
    775 		 * Load the hardware scatter/gather map with
    776 		 * the contents of the DMA map.
    777 		 */
    778 		sg = ccb->ccb_sg;
    779 		ds = xfer->dm_segs;
    780 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
     781 			sg->sg_addr = htobe32(ds->ds_addr);
     782 			sg->sg_len = htobe32(ds->ds_len);
     783 		}
     784 		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
    785 		    sc->sc_dmamap->dm_segs[0].ds_addr +
    786 		    offsetof(struct dpt_ccb, ccb_sg));
    787 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
    788 		cp->cp_ctl0 |= CP_C0_SCATTER;
    789 	}
    790 
    791 	return (0);
    792 }
    793 
    794 /*
    795  * Unmap a transfer.
    796  */
    797 static void
    798 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
    799 {
    800 
    801 	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
    802 	    ccb->ccb_dmamap_xfer->dm_mapsize,
    803 	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
    804 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    805 	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
    806 }
    807 
    808 /*
    809  * Adjust the size of each I/O before it passes to the SCSI layer.
    810  */
    811 static void
    812 dpt_minphys(struct buf *bp)
    813 {
    814 
    815 	if (bp->b_bcount > DPT_MAX_XFER)
    816 		bp->b_bcount = DPT_MAX_XFER;
    817 	minphys(bp);
    818 }
    819 
    820 /*
    821  * Start a SCSI command.
    822  */
    823 static void
    824 dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    825 		   void *arg)
    826 {
    827 	struct dpt_softc *sc;
    828 	struct scsipi_xfer *xs;
    829 	int flags;
    830 	struct scsipi_periph *periph;
    831 	struct dpt_ccb *ccb;
    832 	struct eata_cp *cp;
    833 
    834 	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;
    835 
    836 	switch (req) {
    837 	case ADAPTER_REQ_RUN_XFER:
    838 		xs = arg;
    839 		periph = xs->xs_periph;
    840 		flags = xs->xs_control;
    841 
    842 #ifdef DIAGNOSTIC
    843 		/* Cmds must be no more than 12 bytes for us. */
    844 		if (xs->cmdlen > 12) {
    845 			xs->error = XS_DRIVER_STUFFUP;
    846 			scsipi_done(xs);
    847 			break;
    848 		}
    849 #endif
    850 		/*
    851 		 * XXX We can't reset devices just yet.  Apparently some
    852 		 * older firmware revisions don't even support it.
    853 		 */
    854 		if ((flags & XS_CTL_RESET) != 0) {
    855 			xs->error = XS_DRIVER_STUFFUP;
    856 			scsipi_done(xs);
    857 			break;
    858 		}
    859 
    860 		/*
    861 		 * Get a CCB and fill it.
    862 		 */
    863 		ccb = dpt_ccb_alloc(sc);
    864 		ccb->ccb_xs = xs;
    865 		ccb->ccb_timeout = xs->timeout;
    866 
    867 		cp = &ccb->ccb_eata_cp;
    868 		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
    869 		cp->cp_ccbid = ccb->ccb_id;
    870 		cp->cp_senselen = sizeof(ccb->ccb_sense);
    871 		cp->cp_stataddr = htobe32(sc->sc_stppa);
    872 		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
    873 		cp->cp_ctl1 = 0;
    874 		cp->cp_ctl2 = 0;
    875 		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
    876 		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
    877 		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
    878 		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
    879 
    880 		if ((flags & XS_CTL_DATA_IN) != 0)
    881 			cp->cp_ctl0 |= CP_C0_DATA_IN;
    882 		if ((flags & XS_CTL_DATA_OUT) != 0)
    883 			cp->cp_ctl0 |= CP_C0_DATA_OUT;
    884 		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
    885 			cp->cp_ctl0 |= CP_C0_INTERPRET;
    886 
     887 		/* Synchronous xfers mustn't write back through the cache. */
    888 		if (xs->bp != NULL)
    889 			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
    890 				cp->cp_ctl2 |= CP_C2_NO_CACHE;
    891 
    892 		cp->cp_senseaddr =
    893 		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
    894 		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
    895 
    896 		if (xs->datalen != 0) {
    897 			if (dpt_ccb_map(sc, ccb))
    898 				break;
    899 		} else {
    900 			cp->cp_dataaddr = 0;
    901 			cp->cp_datalen = 0;
    902 		}
    903 
    904 		/* Sync up CCB and status packet. */
    905 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
    906 		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
    907 		    BUS_DMASYNC_PREWRITE);
    908 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    909 		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
    910 
    911 		/*
    912 		 * Start the command.
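         		 * Polled requests are marked CCB_PRIVATE so that dpt_intr()
         		 * leaves their completion to dpt_ccb_poll().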
    913 		 */
    914 		if ((xs->xs_control & XS_CTL_POLL) != 0)
    915 			ccb->ccb_flg |= CCB_PRIVATE;
    916 
    917 		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
    918 			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
    919 			xs->error = XS_DRIVER_STUFFUP;
    920 			if (xs->datalen != 0)
    921 				dpt_ccb_unmap(sc, ccb);
    922 			dpt_ccb_free(sc, ccb);
    923 			break;
    924 		}
    925 
    926 		if ((xs->xs_control & XS_CTL_POLL) == 0)
    927 			break;
    928 
    929 		if (dpt_ccb_poll(sc, ccb)) {
    930 			dpt_ccb_abort(sc, ccb);
    931 			/* Wait for abort to complete... */
    932 			if (dpt_ccb_poll(sc, ccb))
    933 				dpt_ccb_abort(sc, ccb);
    934 		}
    935 
    936 		dpt_ccb_done(sc, ccb);
    937 		break;
    938 
    939 	case ADAPTER_REQ_GROW_RESOURCES:
    940 		/*
    941 		 * Not supported, since we allocate the maximum number of
    942 		 * CCBs up front.
    943 		 */
    944 		break;
    945 
    946 	case ADAPTER_REQ_SET_XFER_MODE:
    947 		/*
    948 		 * This will be handled by the HBA itself, and we can't
    949 		 * modify that (ditto for tagged queueing).
    950 		 */
    951 		break;
    952 	}
    953 }
    954 
    955 /*
    956  * Get inquiry data from the adapter.
    957  */
    958 static void
    959 dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
    960 {
    961 	struct dpt_ccb *ccb;
    962 	struct eata_cp *cp;
    963 
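         	/* The inquiry data is DMA'd into the shared scratch area. */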
    964 	*ei = (struct eata_inquiry_data *)sc->sc_scr;
    965 
    966 	/* Get a CCB and mark as private */
    967 	ccb = dpt_ccb_alloc(sc);
    968 	ccb->ccb_flg |= CCB_PRIVATE;
    969 	ccb->ccb_timeout = 200;
    970 
    971 	/* Put all the arguments into the CCB. */
    972 	cp = &ccb->ccb_eata_cp;
    973 	cp->cp_ccbid = ccb->ccb_id;
    974 	cp->cp_senselen = sizeof(ccb->ccb_sense);
    975 	cp->cp_senseaddr = 0;
    976 	cp->cp_stataddr = htobe32(sc->sc_stppa);
    977 	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
    978 	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
    979 	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
    980 	cp->cp_ctl1 = 0;
    981 	cp->cp_ctl2 = 0;
    982 	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
    983 	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
    984 
    985 	/* Put together the SCSI inquiry command. */
    986 	memset(&cp->cp_cdb_cmd, 0, 12);
    987 	cp->cp_cdb_cmd = INQUIRY;
    988 	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
    989 
    990 	/* Sync up CCB, status packet and scratch area. */
    991 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
    992 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
    993 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
    994 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
    995 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
    996 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
    997 
    998 	/* Start the command and poll on completion. */
    999 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
   1000 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
   1001 
   1002 	if (dpt_ccb_poll(sc, ccb))
   1003 		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
   1004 
   1005 	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
   1006 	    ccb->ccb_scsi_status != SCSI_OK)
   1007 		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
   1008 		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
   1009 		    ccb->ccb_scsi_status);
   1010 
    1011 	/* Sync up the DMA map and free the CCB before returning. */
   1012 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
   1013 	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
   1014 	dpt_ccb_free(sc, ccb);
   1015 }
   1016