      1 /*	$NetBSD: aic7xxx_osm.c,v 1.13 2003/10/30 01:58:17 simonb Exp $	*/
      2 
      3 /*
      4  * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
      5  *
      6  * Copyright (c) 1994-2001 Justin T. Gibbs.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions, and the following disclaimer,
     14  *    without modification.
     15  * 2. The name of the author may not be used to endorse or promote products
     16  *    derived from this software without specific prior written permission.
     17  *
     18  * Alternatively, this software may be distributed under the terms of the
     19  * GNU Public License ("GPL").
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     24  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
     25  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     31  * SUCH DAMAGE.
     32  *
     33  * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
     34  *
     35  * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
     36  */
     37 /*
     38  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
     39  */
     40 
     41 #include <sys/cdefs.h>
     42 __KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.13 2003/10/30 01:58:17 simonb Exp $");
     43 
     44 #include <dev/ic/aic7xxx_osm.h>
     45 #include <dev/ic/aic7xxx_inline.h>
     46 
     47 #ifndef AHC_TMODE_ENABLE
     48 #define AHC_TMODE_ENABLE 0
     49 #endif
     50 
     51 
     52 static void	ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
     53 static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
     54 static int	ahc_poll(struct ahc_softc *ahc, int wait);
     55 static void	ahc_setup_data(struct ahc_softc *ahc,
     56 			       struct scsipi_xfer *xs, struct scb *scb);
     57 static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
     58 static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
     59 			  struct proc *p);
     60 
     61 
     62 
     63 /*
     64  * Attach all the sub-devices we can find
     65  */
     66 int
     67 ahc_attach(struct ahc_softc *ahc)
     68 {
     69 	u_long 	s;
     70 	int i;
     71 	char ahc_info[256];
     72 
     73 	LIST_INIT(&ahc->pending_scbs);
     74 	for (i = 0; i < AHC_NUM_TARGETS; i++)
     75 		TAILQ_INIT(&ahc->untagged_queues[i]);
     76 
     77         ahc_lock(ahc, &s);
     78 
     79 	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
     80 	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;
     81 
     82 	ahc->sc_adapter.adapt_openings = AHC_MAX_QUEUE;
     83 	ahc->sc_adapter.adapt_max_periph = 16;
     84 
     85 	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
     86 	ahc->sc_adapter.adapt_minphys = ahc_minphys;
     87 	ahc->sc_adapter.adapt_request = ahc_action;
     88 
     89 	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
     90         ahc->sc_channel.chan_bustype = &scsi_bustype;
     91         ahc->sc_channel.chan_channel = 0;
     92         ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
     93         ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
     94         ahc->sc_channel.chan_id = ahc->our_id;
     95 
     96 	if (ahc->features & AHC_TWIN) {
     97 		ahc->sc_channel_b = ahc->sc_channel;
     98 		ahc->sc_channel_b.chan_id = ahc->our_id_b;
     99 		ahc->sc_channel_b.chan_channel = 1;
    100 	}
    101 
    102 	ahc_controller_info(ahc, ahc_info);
    103 	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
    104 
    105 	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
    106 		ahc->sc_child = config_found((void *)&ahc->sc_dev,
    107 		    &ahc->sc_channel, scsiprint);
    108 		if (ahc->features & AHC_TWIN)
    109 			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
    110 			    &ahc->sc_channel_b, scsiprint);
    111 	} else {
    112 		if (ahc->features & AHC_TWIN)
    113 			ahc->sc_child = config_found((void *)&ahc->sc_dev,
    114 			    &ahc->sc_channel_b, scsiprint);
    115 		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
    116 		    &ahc->sc_channel, scsiprint);
    117 	}
    118 
    119 	ahc_intr_enable(ahc, TRUE);
    120 
    121 	if (ahc->flags & AHC_RESET_BUS_A)
    122 		ahc_reset_channel(ahc, 'A', TRUE);
    123 	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
    124 		ahc_reset_channel(ahc, 'B', TRUE);
    125 
    126 	ahc_unlock(ahc, &s);
    127 	return (1);
    128 }
    129 
    130 /*
    131  * Catch an interrupt from the adapter
    132  */
    133 void
    134 ahc_platform_intr(void *arg)
    135 {
    136 	struct	ahc_softc *ahc;
    137 
    138 	ahc = (struct ahc_softc *)arg;
    139 	ahc_intr(ahc);
    140 }
    141 
     142 /*
     143  * We have an SCB that has been processed by the
     144  * adapter; now check to see how the operation
     145  * went.
     146  */
    147 void
    148 ahc_done(struct ahc_softc *ahc, struct scb *scb)
    149 {
    150 	struct scsipi_xfer *xs;
    151   	struct scsipi_periph *periph;
    152 	u_long s;
    153 
    154 	xs = scb->xs;
    155 	periph = xs->xs_periph;
    156 	LIST_REMOVE(scb, pending_links);
    157 	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
    158 		struct scb_tailq *untagged_q;
    159 		int target_offset;
    160 
    161 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
    162 		untagged_q = &ahc->untagged_queues[target_offset];
    163 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
    164 		scb->flags &= ~SCB_UNTAGGEDQ;
    165 		ahc_run_untagged_queue(ahc, untagged_q);
    166 	}
    167 
    168 	callout_stop(&scb->xs->xs_callout);
    169 
    170 	if (xs->datalen) {
    171 		int op;
    172 
    173 		if (xs->xs_control & XS_CTL_DATA_IN)
    174 			op = BUS_DMASYNC_POSTREAD;
    175 		else
    176 			op = BUS_DMASYNC_POSTWRITE;
    177 		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
    178 				scb->dmamap->dm_mapsize, op);
    179 		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
    180 	}
    181 
     182 	/*
     183 	 * If this was the recovery SCB, then we have
     184 	 * successfully recovered from the timeout.
     185 	 */
    186 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
    187 		struct	scb *list_scb;
    188 
    189 		/*
    190 		 * We were able to complete the command successfully,
    191 		 * so reinstate the timeouts for all other pending
    192 		 * commands.
    193 		 */
    194 		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
    195 			struct scsipi_xfer *xs = list_scb->xs;
    196 
    197 			if (!(xs->xs_control & XS_CTL_POLL)) {
    198 				callout_reset(&list_scb->xs->xs_callout,
    199 				    (list_scb->xs->timeout > 1000000) ?
    200 				    (list_scb->xs->timeout / 1000) * hz :
    201 				    (list_scb->xs->timeout * hz) / 1000,
    202 				    ahc_timeout, list_scb);
    203 			}
    204 		}
    205 
    206 		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
    207 		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
    208 			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
    209 		scsipi_printaddr(xs->xs_periph);
    210 		printf("%s: no longer in timeout, status = %x\n",
    211 		       ahc_name(ahc), xs->status);
    212 	}
    213 
     214 	if (xs->error != XS_NOERROR) {
     215 		/* Don't clobber any existing error state */
     216 	} else if ((scb->flags & SCB_SENSE) != 0) {
    218 		/*
    219 		 * We performed autosense retrieval.
    220 		 *
    221 		 * Zero any sense not transferred by the
    222 		 * device.  The SCSI spec mandates that any
     223 		 * untransferred data should be assumed to be
     224 		 * zero.  Complete the 'bounce' of sense information
     225 		 * through buffers accessible via bus-space by
     226 		 * copying it into the client's scsipi_xfer.
    227 		 */
    228 		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
    229 		memcpy(&xs->sense.scsi_sense,
    230 		       ahc_get_sense_buf(ahc, scb),
    231 		       sizeof(xs->sense.scsi_sense));
    232 		xs->error = XS_SENSE;
    233 	}
    234 	if (scb->flags & SCB_FREEZE_QUEUE) {
    235 		scsipi_periph_thaw(periph, 1);
    236 		scb->flags &= ~SCB_FREEZE_QUEUE;
    237 	}
    238 
    239         ahc_lock(ahc, &s);
    240 	ahc_free_scb(ahc, scb);
    241         ahc_unlock(ahc, &s);
    242 
    243 	scsipi_done(xs);
    244 }
    245 
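         /*
          * Adapter ioctl entry point.  Only SCBUSIORESET is handled;
          * it resets the addressed SCSI channel.
          */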
    246 static int
    247 ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
    248 	  struct proc *p)
    249 {
    250 	struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
    251 	int s, ret = ENOTTY;
    252 
    253 	switch (cmd) {
    254 	case SCBUSIORESET:
    255 		s = splbio();
    256 		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
    257 		    TRUE);
    258 		splx(s);
    259 		ret = 0;
    260 		break;
    261 	default:
    262 		break;
    263 	}
    264 
    265 	return ret;
    266 }
    267 
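         /*
          * scsipi "adapt_request" entry point.  Start transfers and
          * handle transfer mode (wide/sync/tagged queueing) change
          * requests from the mid-layer.
          */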
    268 static void
    269 ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    270 {
    271 	struct ahc_softc *ahc;
    272 	int s;
    273 	struct ahc_initiator_tinfo *tinfo;
    274 	struct ahc_tmode_tstate *tstate;
    275 
    276 	ahc  = (void *)chan->chan_adapter->adapt_dev;
    277 
    278 	switch (req) {
    279 
    280 	case ADAPTER_REQ_RUN_XFER:
    281 	  {
    282 		struct scsipi_xfer *xs;
    283 		struct scsipi_periph *periph;
    284 	        struct scb *scb;
    285         	struct hardware_scb *hscb;
    286 		u_int target_id;
    287 		u_int our_id;
    288 		u_long s;
    289 
    290 		xs = arg;
    291 		periph = xs->xs_periph;
    292 
    293 		target_id = periph->periph_target;
    294                 our_id = ahc->our_id;
    295 
    296 		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));
    297 
    298 		/*
    299 		 * get an scb to use.
    300 		 */
    301 		ahc_lock(ahc, &s);
    302 		if ((scb = ahc_get_scb(ahc)) == NULL) {
    303 			xs->error = XS_RESOURCE_SHORTAGE;
    304 			ahc_unlock(ahc, &s);
    305 			scsipi_done(xs);
    306 			return;
    307 		}
    308 		ahc_unlock(ahc, &s);
    309 
    310 		hscb = scb->hscb;
    311 
    312 		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
    313 		scb->xs = xs;
    314 
    315 		/*
    316 		 * Put all the arguments for the xfer in the scb
    317 		 */
    318 		hscb->control = 0;
    319 		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
    320 		hscb->lun = periph->periph_lun;
     321 		if (xs->xs_control & XS_CTL_RESET) {
     322 			hscb->cdb_len = 0;
     323 			scb->flags |= SCB_DEVICE_RESET;
     324 			hscb->control |= MK_MESSAGE;
     325 			ahc_execute_scb(scb, NULL, 0);
         			/* The reset SCB is already queued; don't queue it again. */
         			break;
     326 		}
    327 
    328 		ahc_setup_data(ahc, xs, scb);
    329 
    330 		break;
    331 	  }
    332 	case ADAPTER_REQ_GROW_RESOURCES:
    333   		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
    334 		return;
    335 
    336 	case ADAPTER_REQ_SET_XFER_MODE:
    337 	    {
    338 		struct scsipi_xfer_mode *xm = arg;
    339 		struct ahc_devinfo devinfo;
    340 		int target_id, our_id, first;
    341 		u_int width;
    342 		char channel;
    343 		u_int ppr_options, period, offset;
    344 		struct ahc_syncrate *syncrate;
    345 		uint16_t old_autoneg;
    346 
    347 		target_id = xm->xm_target;
    348 		our_id = chan->chan_id;
    349 		channel = (chan->chan_channel == 1) ? 'B' : 'A';
    350 		s = splbio();
    351 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
    352 		    &tstate);
    353 		ahc_compile_devinfo(&devinfo, our_id, target_id,
    354 		    0, channel, ROLE_INITIATOR);
    355 
    356 		old_autoneg = tstate->auto_negotiate;
    357 
    358 		/*
    359 		 * XXX since the period and offset are not provided here,
    360 		 * fake things by forcing a renegotiation using the user
    361 		 * settings if this is called for the first time (i.e.
    362 		 * during probe). Also, cap various values at the user
    363 		 * values, assuming that the user set it up that way.
    364 		 */
    365 		if (ahc->inited_target[target_id] == 0) {
    366 			period = tinfo->user.period;
    367 			offset = tinfo->user.offset;
    368 			ppr_options = tinfo->user.ppr_options;
    369 			width = tinfo->user.width;
    370 			tstate->tagenable |=
    371 			    (ahc->user_tagenable & devinfo.target_mask);
    372 			tstate->discenable |=
    373 			    (ahc->user_discenable & devinfo.target_mask);
    374 			ahc->inited_target[target_id] = 1;
    375 			first = 1;
    376 		} else
    377 			first = 0;
    378 
    379 		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
    380 			width = MSG_EXT_WDTR_BUS_16_BIT;
    381 		else
    382 			width = MSG_EXT_WDTR_BUS_8_BIT;
    383 
    384 		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
    385 		if (width > tinfo->user.width)
    386 			width = tinfo->user.width;
    387 		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
    388 
    389 		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
    390 			period = 0;
    391 			offset = 0;
    392 			ppr_options = 0;
    393 		}
    394 
    395 		if ((xm->xm_mode & PERIPH_CAP_DT) &&
    396 		    (ppr_options & MSG_EXT_PPR_DT_REQ))
    397 			ppr_options |= MSG_EXT_PPR_DT_REQ;
    398 		else
    399 			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
    400 		if ((tstate->discenable & devinfo.target_mask) == 0 ||
    401 		    (tstate->tagenable & devinfo.target_mask) == 0)
    402 			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
    403 
    404 		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
    405 		    (ahc->user_tagenable & devinfo.target_mask))
    406 			tstate->tagenable |= devinfo.target_mask;
    407 		else
    408 			tstate->tagenable &= ~devinfo.target_mask;
    409 
    410 		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
    411 		    AHC_SYNCRATE_MAX);
    412 		ahc_validate_offset(ahc, NULL, syncrate, &offset,
    413 		    width, ROLE_UNKNOWN);
    414 
    415 		if (offset == 0) {
    416 			period = 0;
    417 			ppr_options = 0;
    418 		}
    419 
    420 		if (ppr_options != 0
    421 		    && tinfo->user.transport_version >= 3) {
    422 			tinfo->goal.transport_version =
    423 			    tinfo->user.transport_version;
    424 			tinfo->curr.transport_version =
    425 			    tinfo->user.transport_version;
    426 		}
    427 
    428 		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
    429 		    ppr_options, AHC_TRANS_GOAL, FALSE);
    430 
    431 		/*
    432 		 * If this is the first request, and no negotiation is
    433 		 * needed, just confirm the state to the scsipi layer,
    434 		 * so that it can print a message.
    435 		 */
    436 		if (old_autoneg == tstate->auto_negotiate && first) {
    437 			xm->xm_mode = 0;
    438 			xm->xm_period = tinfo->curr.period;
    439 			xm->xm_offset = tinfo->curr.offset;
    440 			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
    441 				xm->xm_mode |= PERIPH_CAP_WIDE16;
    442 			if (tinfo->curr.period)
    443 				xm->xm_mode |= PERIPH_CAP_SYNC;
    444 			if (tstate->tagenable & devinfo.target_mask)
    445 				xm->xm_mode |= PERIPH_CAP_TQING;
    446 			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
    447 				xm->xm_mode |= PERIPH_CAP_DT;
    448 			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
    449 		}
    450 		splx(s);
    451 	    }
    452 	}
    453 
    454 	return;
    455 }
    456 
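         /*
          * Finish setting up an SCB (S/G list, negotiation and tagging
          * flags) and hand it to the sequencer.  Called once the data
          * buffer, if any, has been DMA-mapped.
          */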
    457 static void
    458 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
    459 {
    460 	struct	scb *scb;
    461 	struct scsipi_xfer *xs;
    462 	struct	ahc_softc *ahc;
    463 	struct	ahc_initiator_tinfo *tinfo;
    464 	struct	ahc_tmode_tstate *tstate;
    465 
    466 	u_int	mask;
    467 	long	s;
    468 
    469 	scb = (struct scb *)arg;
    470 	xs = scb->xs;
    471 	xs->error = 0;
    472 	xs->status = 0;
    473 	xs->xs_status = 0;
    474 	ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
    475 
    476 	if (nsegments != 0) {
    477 		struct	  ahc_dma_seg *sg;
    478 		bus_dma_segment_t *end_seg;
    479 		int op;
    480 
    481 		end_seg = dm_segs + nsegments;
    482 
    483 		/* Copy the segments into our SG list */
    484 		sg = scb->sg_list;
    485 		while (dm_segs < end_seg) {
    486 			uint32_t len;
    487 
    488 			sg->addr = ahc_htole32(dm_segs->ds_addr);
    489 			len = dm_segs->ds_len
    490 			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
    491 			sg->len = ahc_htole32(len);
    492 			sg++;
    493 			dm_segs++;
    494 		}
    495 
    496 		/*
    497 		 * Note where to find the SG entries in bus space.
    498 		 * We also set the full residual flag which the
    499 		 * sequencer will clear as soon as a data transfer
    500 		 * occurs.
    501 		 */
    502 		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);
    503 
    504 		if (xs->xs_control & XS_CTL_DATA_IN)
    505 			op = BUS_DMASYNC_PREREAD;
    506 		else
    507 			op = BUS_DMASYNC_PREWRITE;
    508 
    509 		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
    510 				scb->dmamap->dm_mapsize, op);
    511 
    512 		sg--;
    513 		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
    514 
    515 		/* Copy the first SG into the "current" data pointer area */
    516 		scb->hscb->dataptr = scb->sg_list->addr;
    517 		scb->hscb->datacnt = scb->sg_list->len;
    518 	} else {
    519 		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
    520 		scb->hscb->dataptr = 0;
    521 		scb->hscb->datacnt = 0;
    522 	}
    523 
    524 	scb->sg_count = nsegments;
    525 
    526 	ahc_lock(ahc, &s);
    527 
     528 	/*
     529 	 * Last chance to check whether this SCB needs to
     530 	 * be aborted.
     531 	 */
    532 	if (xs->xs_status & XS_STS_DONE) {
    533 		if (nsegments != 0)
    534 			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
    535 		ahc_free_scb(ahc, scb);
    536 		ahc_unlock(ahc, &s);
    537 		scsipi_done(xs);
    538 		return;
    539 	}
    540 
    541 	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
    542 				    SCSIID_OUR_ID(scb->hscb->scsiid),
    543 				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
    544 				    &tstate);
    545 
    546 	mask = SCB_GET_TARGET_MASK(ahc, scb);
    547 	scb->hscb->scsirate = tinfo->scsirate;
    548 	scb->hscb->scsioffset = tinfo->curr.offset;
    549 
    550 	if ((tstate->ultraenb & mask) != 0)
    551 		scb->hscb->control |= ULTRAENB;
    552 
    553 	if ((tstate->discenable & mask) != 0)
    554 	    	scb->hscb->control |= DISCENB;
    555 
    556 	if (xs->xs_tag_type)
    557 		scb->hscb->control |= xs->xs_tag_type;
    558 
    559 #if 1	/* This looks like it makes sense at first, but it can loop */
    560 	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
    561 	     && tinfo->goal.offset == 0
    562 	     && tinfo->goal.ppr_options == 0)) {
    563 		scb->flags |= SCB_NEGOTIATE;
    564 		scb->hscb->control |= MK_MESSAGE;
    565 	} else
    566 #endif
    567 	if ((tstate->auto_negotiate & mask) != 0) {
    568 		scb->flags |= SCB_AUTO_NEGOTIATE;
    569 		scb->hscb->control |= MK_MESSAGE;
    570 	}
    571 
    572 	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
    573 
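         	/*
         	 * xs->timeout is in milliseconds; convert it to ticks,
         	 * dividing first for very long timeouts so that the
         	 * multiplication by hz cannot overflow.
         	 */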
    574 	if (!(xs->xs_control & XS_CTL_POLL)) {
    575 		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
    576 			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
    577 			      ahc_timeout, scb);
    578 	}
    579 
    580 	/*
    581 	 * We only allow one untagged transaction
    582 	 * per target in the initiator role unless
    583 	 * we are storing a full busy target *lun*
    584 	 * table in SCB space.
    585 	 */
    586 	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
    587 	    && (ahc->flags & AHC_SCB_BTT) == 0) {
    588 		struct scb_tailq *untagged_q;
    589 		int target_offset;
    590 
    591 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
    592 		untagged_q = &(ahc->untagged_queues[target_offset]);
    593 		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
    594 		scb->flags |= SCB_UNTAGGEDQ;
    595 		if (TAILQ_FIRST(untagged_q) != scb) {
    596 			ahc_unlock(ahc, &s);
    597 			return;
    598 		}
    599 	}
    600 	scb->flags |= SCB_ACTIVE;
    601 
    602 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
    603 		/* Define a mapping from our tag to the SCB. */
    604 		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
    605 		ahc_pause(ahc);
    606 		if ((ahc->flags & AHC_PAGESCBS) == 0)
    607 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
    608 		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
    609 		ahc_unpause(ahc);
    610 	} else {
    611 		ahc_queue_scb(ahc, scb);
    612 	}
    613 
    614 	if (!(xs->xs_control & XS_CTL_POLL)) {
    615 		ahc_unlock(ahc, &s);
    616 		return;
    617 	}
    618 
    619 	/*
    620 	 * If we can't use interrupts, poll for completion
    621 	 */
    622 	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
    623 	do {
    624 		if (ahc_poll(ahc, xs->timeout)) {
    625 			if (!(xs->xs_control & XS_CTL_SILENT))
    626 				printf("cmd fail\n");
    627 			ahc_timeout(scb);
    628 			break;
    629 		}
    630 	} while (!(xs->xs_status & XS_STS_DONE));
    631 	ahc_unlock(ahc, &s);
    632 
    633 	return;
    634 }
    635 
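         /*
          * Poll for an interrupt from the controller and service it.
          * "wait" is the maximum time to wait, in milliseconds.
          */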
    636 static int
    637 ahc_poll(struct ahc_softc *ahc, int wait)
    638 {
    639 	while (--wait) {
    640 		DELAY(1000);
    641 		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
    642 			break;
    643 	}
    644 
    645 	if (wait == 0) {
    646 		printf("%s: board is not responding\n", ahc_name(ahc));
    647 		return (EIO);
    648 	}
    649 
    650 	ahc_intr((void *)ahc);
    651 	return (0);
    652 }
    653 
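         /*
          * Copy the CDB into the hardware SCB and DMA-map the data
          * buffer, then hand the SCB to ahc_execute_scb().
          */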
    654 static void
    655 ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    656 	       struct scb *scb)
    657 {
    658 	struct hardware_scb *hscb;
    659 
    660 	hscb = scb->hscb;
    661 	xs->resid = xs->status = 0;
    662 
    663 	hscb->cdb_len = xs->cmdlen;
    664 	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
    665 		u_long s;
    666 
    667 		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
    668 		ahc_lock(ahc, &s);
    669 		ahc_free_scb(ahc, scb);
    670 		ahc_unlock(ahc, &s);
    671 		scsipi_done(xs);
    672 		return;
    673 	}
    674 
    675 	if (hscb->cdb_len > 12) {
    676 		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
    677 		scb->flags |= SCB_CDB32_PTR;
    678 	} else {
    679 		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
    680 	}
    681 
    682 	/* Only use S/G if there is a transfer */
    683 	if (xs->datalen) {
    684 		int error;
    685 
    686                 error = bus_dmamap_load(ahc->parent_dmat,
    687 					scb->dmamap, xs->data,
    688 					xs->datalen, NULL,
    689 					((xs->xs_control & XS_CTL_NOSLEEP) ?
    690 					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
    691 					BUS_DMA_STREAMING |
    692 					((xs->xs_control & XS_CTL_DATA_IN) ?
    693 					 BUS_DMA_READ : BUS_DMA_WRITE));
    694                 if (error) {
    695 #ifdef AHC_DEBUG
    696                         printf("%s: in ahc_setup_data(): bus_dmamap_load() "
    697 			       "= %d\n",
    698 			       ahc_name(ahc), error);
    699 #endif
    700                         xs->error = XS_RESOURCE_SHORTAGE;
    701                         scsipi_done(xs);
    702                         return;
    703                 }
    704                 ahc_execute_scb(scb,
    705 				scb->dmamap->dm_segs,
    706 				scb->dmamap->dm_nsegs);
    707 	} else {
    708 		ahc_execute_scb(scb, NULL, 0);
    709 	}
    710 }
    711 
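         /*
          * Mark an SCB as being used for timeout recovery: freeze the
          * channel(s) and stop the timeout callouts of all other
          * pending SCBs until recovery completes.
          */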
    712 static void
    713 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
    714 
    715 	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
    716 		struct scb *list_scb;
    717 
    718 		scb->flags |= SCB_RECOVERY_SCB;
    719 
    720 		/*
    721 		 * Take all queued, but not sent SCBs out of the equation.
    722 		 * Also ensure that no new CCBs are queued to us while we
    723 		 * try to fix this problem.
    724 		 */
    725 		scsipi_channel_freeze(&ahc->sc_channel, 1);
    726 		if (ahc->features & AHC_TWIN)
    727 			scsipi_channel_freeze(&ahc->sc_channel_b, 1);
    728 
    729 		/*
    730 		 * Go through all of our pending SCBs and remove
    731 		 * any scheduled timeouts for them.  We will reschedule
    732 		 * them after we've successfully fixed this problem.
    733 		 */
    734 		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
    735 			callout_stop(&list_scb->xs->xs_callout);
    736 		}
    737 	}
    738 }
    739 
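         /*
          * A command has timed out.  Recovery is attempted in stages:
          * defer to another active command's timeout, queue a bus
          * device reset to the target, or finally reset the bus.
          */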
    740 void
    741 ahc_timeout(void *arg)
    742 {
    743 	struct	scb *scb;
    744 	struct	ahc_softc *ahc;
    745 	long	s;
    746 	int	found;
    747 	u_int	last_phase;
    748 	int	target;
    749 	int	lun;
    750 	int	i;
    751 	char	channel;
    752 
    753 	scb = (struct scb *)arg;
    754 	ahc = (struct ahc_softc *)scb->ahc_softc;
    755 
    756 	ahc_lock(ahc, &s);
    757 
    758 	ahc_pause_and_flushwork(ahc);
    759 
    760 	if ((scb->flags & SCB_ACTIVE) == 0) {
    761 		/* Previous timeout took care of me already */
    762 		printf("%s: Timedout SCB already complete. "
    763 		       "Interrupts may not be functioning.\n", ahc_name(ahc));
    764 		ahc_unpause(ahc);
    765 		ahc_unlock(ahc, &s);
    766 		return;
    767 	}
    768 
    769 	target = SCB_GET_TARGET(ahc, scb);
    770 	channel = SCB_GET_CHANNEL(ahc, scb);
    771 	lun = SCB_GET_LUN(scb);
    772 
    773 	ahc_print_path(ahc, scb);
    774 	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
    775 	ahc_dump_card_state(ahc);
    776 	last_phase = ahc_inb(ahc, LASTPHASE);
    777 	if (scb->sg_count > 0) {
    778 		for (i = 0; i < scb->sg_count; i++) {
    779 			printf("sg[%d] - Addr 0x%x : Length %d\n",
    780 			       i,
    781 			       scb->sg_list[i].addr,
    782 			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
    783 		}
    784 	}
    785 	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
    786 		/*
    787 		 * Been down this road before.
    788 		 * Do a full bus reset.
    789 		 */
    790 bus_reset:
    791 		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
    792 		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
    793 		printf("%s: Issued Channel %c Bus Reset. "
    794 		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
    795 	} else {
    796 		/*
    797 		 * If we are a target, transition to bus free and report
    798 		 * the timeout.
    799 		 *
    800 		 * The target/initiator that is holding up the bus may not
    801 		 * be the same as the one that triggered this timeout
    802 		 * (different commands have different timeout lengths).
     803 		 * If the bus is idle and we are acting as the initiator
    804 		 * for this request, queue a BDR message to the timed out
    805 		 * target.  Otherwise, if the timed out transaction is
    806 		 * active:
    807 		 *   Initiator transaction:
    808 		 *	Stuff the message buffer with a BDR message and assert
    809 		 *	ATN in the hopes that the target will let go of the bus
    810 		 *	and go to the mesgout phase.  If this fails, we'll
    811 		 *	get another timeout 2 seconds later which will attempt
    812 		 *	a bus reset.
    813 		 *
    814 		 *   Target transaction:
    815 		 *	Transition to BUS FREE and report the error.
    816 		 *	It's good to be the target!
    817 		 */
    818 		u_int active_scb_index;
    819 		u_int saved_scbptr;
    820 
    821 		saved_scbptr = ahc_inb(ahc, SCBPTR);
    822 		active_scb_index = ahc_inb(ahc, SCB_TAG);
    823 
    824 		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
    825 		  && (active_scb_index < ahc->scb_data->numscbs)) {
    826 			struct scb *active_scb;
    827 
    828 			/*
    829 			 * If the active SCB is not us, assume that
    830 			 * the active SCB has a longer timeout than
    831 			 * the timedout SCB, and wait for the active
    832 			 * SCB to timeout.
    833 			 */
    834 			active_scb = ahc_lookup_scb(ahc, active_scb_index);
    835 			if (active_scb != scb) {
    836 				uint64_t newtimeout;
    837 
    838 				ahc_print_path(ahc, scb);
    839 				printf("Other SCB Timeout%s",
    840 			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
    841 				       ? " again\n" : "\n");
    842 				scb->flags |= SCB_OTHERTCL_TIMEOUT;
    843 				newtimeout = MAX(active_scb->xs->timeout,
    844 						 scb->xs->timeout);
    845 				callout_reset(&scb->xs->xs_callout,
    846 				    newtimeout > 1000000 ?
    847 				    (newtimeout / 1000) * hz :
    848 				    (newtimeout * hz) / 1000,
    849 				    ahc_timeout, scb);
    850 				ahc_unpause(ahc);
    851 				ahc_unlock(ahc, &s);
    852 				return;
    853 			}
    854 
    855 			/* It's us */
    856 			if ((scb->flags & SCB_TARGET_SCB) != 0) {
    857 
    858 				/*
    859 				 * Send back any queued up transactions
    860 				 * and properly record the error condition.
    861 				 */
    862 				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
    863 					       SCB_GET_CHANNEL(ahc, scb),
    864 					       SCB_GET_LUN(scb),
    865 					       scb->hscb->tag,
    866 					       ROLE_TARGET,
    867 					       CAM_CMD_TIMEOUT);
    868 
    869 				/* Will clear us from the bus */
    870 				ahc_restart(ahc);
    871 				ahc_unlock(ahc, &s);
    872 				return;
    873 			}
    874 
    875 			ahc_set_recoveryscb(ahc, active_scb);
    876 			ahc_outb(ahc, MSG_OUT, HOST_MSG);
    877 			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
    878 			ahc_print_path(ahc, active_scb);
    879 			printf("BDR message in message buffer\n");
    880 			active_scb->flags |= SCB_DEVICE_RESET;
    881 			callout_reset(&active_scb->xs->xs_callout,
    882 				      2 * hz, ahc_timeout, active_scb);
    883 			ahc_unpause(ahc);
    884 		} else {
    885 			int	 disconnected;
    886 
    887 			/* XXX Shouldn't panic.  Just punt instead? */
    888 			if ((scb->flags & SCB_TARGET_SCB) != 0)
    889 				panic("Timed-out target SCB but bus idle");
    890 
    891 			if (last_phase != P_BUSFREE
    892 			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
    893 				/* XXX What happened to the SCB? */
    894 				/* Hung target selection.  Goto busfree */
    895 				printf("%s: Hung target selection\n",
    896 				       ahc_name(ahc));
    897 				ahc_restart(ahc);
    898 				ahc_unlock(ahc, &s);
    899 				return;
    900 			}
    901 
    902 			if (ahc_search_qinfifo(ahc, target, channel, lun,
    903 					       scb->hscb->tag, ROLE_INITIATOR,
    904 					       /*status*/0, SEARCH_COUNT) > 0) {
    905 				disconnected = FALSE;
    906 			} else {
    907 				disconnected = TRUE;
    908 			}
    909 
    910 			if (disconnected) {
    911 
    912 				ahc_set_recoveryscb(ahc, scb);
    913 				/*
    914 				 * Actually re-queue this SCB in an attempt
    915 				 * to select the device before it reconnects.
    916 				 * In either case (selection or reselection),
    917 				 * we will now issue a target reset to the
    918 				 * timed-out device.
    919 				 *
    920 				 * Set the MK_MESSAGE control bit indicating
    921 				 * that we desire to send a message.  We
    922 				 * also set the disconnected flag since
    923 				 * in the paging case there is no guarantee
    924 				 * that our SCB control byte matches the
    925 				 * version on the card.  We don't want the
    926 				 * sequencer to abort the command thinking
    927 				 * an unsolicited reselection occurred.
    928 				 */
    929 				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
    930 				scb->flags |= SCB_DEVICE_RESET;
    931 
    932 				/*
    933 				 * Remove any cached copy of this SCB in the
    934 				 * disconnected list in preparation for the
    935 				 * queuing of our abort SCB.  We use the
    936 				 * same element in the SCB, SCB_NEXT, for
    937 				 * both the qinfifo and the disconnected list.
    938 				 */
    939 				ahc_search_disc_list(ahc, target, channel,
    940 						     lun, scb->hscb->tag,
    941 						     /*stop_on_first*/TRUE,
    942 						     /*remove*/TRUE,
    943 						     /*save_state*/FALSE);
    944 
    945 				/*
    946 				 * In the non-paging case, the sequencer will
    947 				 * never re-reference the in-core SCB.
    948 				 * To make sure we are notified during
     949 				 * reselection, set the MK_MESSAGE flag in
    950 				 * the card's copy of the SCB.
    951 				 */
    952 				if ((ahc->flags & AHC_PAGESCBS) == 0) {
    953 					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
    954 					ahc_outb(ahc, SCB_CONTROL,
    955 						 ahc_inb(ahc, SCB_CONTROL)
    956 						| MK_MESSAGE);
    957 				}
    958 
    959 				/*
    960 				 * Clear out any entries in the QINFIFO first
    961 				 * so we are the next SCB for this target
    962 				 * to run.
    963 				 */
    964 				ahc_search_qinfifo(ahc,
    965 						   SCB_GET_TARGET(ahc, scb),
    966 						   channel, SCB_GET_LUN(scb),
    967 						   SCB_LIST_NULL,
    968 						   ROLE_INITIATOR,
    969 						   CAM_REQUEUE_REQ,
    970 						   SEARCH_COMPLETE);
    971 				ahc_print_path(ahc, scb);
    972 				printf("Queuing a BDR SCB\n");
    973 				ahc_qinfifo_requeue_tail(ahc, scb);
    974 				ahc_outb(ahc, SCBPTR, saved_scbptr);
    975 				callout_reset(&scb->xs->xs_callout, 2 * hz,
    976 					      ahc_timeout, scb);
    977 				ahc_unpause(ahc);
    978 			} else {
     979 				/* Go "immediately" to the bus reset */
    980 				/* This shouldn't happen */
    981 				ahc_set_recoveryscb(ahc, scb);
    982 				ahc_print_path(ahc, scb);
    983 				printf("SCB %d: Immediate reset.  "
    984 					"Flags = 0x%x\n", scb->hscb->tag,
    985 					scb->flags);
    986 				goto bus_reset;
    987 			}
    988 		}
    989 	}
    990 	ahc_unlock(ahc, &s);
    991 }
    992 
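         /*
          * Core driver callback to enable or disable tagged queueing
          * for the given device.
          */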
    993 void
    994 ahc_platform_set_tags(struct ahc_softc *ahc,
    995 		      struct ahc_devinfo *devinfo, int enable)
    996 {
    997         struct ahc_tmode_tstate *tstate;
    998 
    999         ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
   1000                             devinfo->target, &tstate);
   1001 
   1002         if (enable)
   1003                 tstate->tagenable |= devinfo->target_mask;
   1004 	else
   1005 	  	tstate->tagenable &= ~devinfo->target_mask;
   1006 }
   1007 
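         /*
          * Allocate the platform (OSM) specific portion of the softc.
          */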
   1008 int
   1009 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
   1010 {
   1011 	if (sizeof(struct ahc_platform_data) == 0)
   1012 		return 0;
   1013 	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
   1014 				    M_NOWAIT);
   1015 	if (ahc->platform_data == NULL)
   1016 		return (ENOMEM);
   1017 	return (0);
   1018 }
   1019 
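         /*
          * Free the platform specific data allocated above.
          */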
   1020 void
   1021 ahc_platform_free(struct ahc_softc *ahc)
   1022 {
   1023 	if (sizeof(struct ahc_platform_data) == 0)
   1024 		return;
   1025 	free(ahc->platform_data, M_DEVBUF);
   1026 }
   1027 
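         /*
          * Compare two controllers for unit ordering; this port
          * imposes no platform-specific ordering.
          */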
   1028 int
   1029 ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
   1030 {
   1031 	return (0);
   1032 }
   1033 
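         /*
          * Detach the adapter: disable interrupts, detach any child
          * scsibus instances, remove the shutdown hook and release
          * the softc.
          */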
   1034 int
   1035 ahc_detach(struct device *self, int flags)
   1036 {
   1037 	int rv = 0;
   1038 
   1039 	struct ahc_softc *ahc = (struct ahc_softc*)self;
   1040 
   1041 	ahc_intr_enable(ahc, FALSE);
   1042 	if (ahc->sc_child != NULL)
   1043 		rv = config_detach(ahc->sc_child, flags);
   1044 	if (rv == 0 && ahc->sc_child_b != NULL)
   1045 		rv = config_detach(ahc->sc_child_b, flags);
   1046 
   1047 	shutdownhook_disestablish(ahc->shutdown_hook);
   1048 
   1049 	ahc_free(ahc);
   1050 
   1051 	return (rv);
   1052 }
   1053 
   1054 
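         /*
          * Report asynchronous events from the core driver (transfer
          * negotiation updates, bus resets) to the scsipi layer.
          */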
   1055 void
   1056 ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
   1057 	       ac_code code, void *opt_arg)
   1058 {
   1059 	struct ahc_tmode_tstate *tstate;
   1060 	struct ahc_initiator_tinfo *tinfo;
   1061 	struct ahc_devinfo devinfo;
   1062 	struct scsipi_channel *chan;
   1063 	struct scsipi_xfer_mode xm;
   1064 
   1065 	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
   1066 	switch (code) {
   1067 	case AC_TRANSFER_NEG:
   1068 		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
   1069 			    &tstate);
   1070 		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
   1071 		    channel, ROLE_UNKNOWN);
   1072 		/*
   1073 		 * Don't bother if negotiating. XXX?
   1074 		 */
   1075 		if (tinfo->curr.period != tinfo->goal.period
   1076 		    || tinfo->curr.width != tinfo->goal.width
   1077 		    || tinfo->curr.offset != tinfo->goal.offset
   1078 		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
   1079 			break;
   1080 		xm.xm_target = target;
   1081 		xm.xm_mode = 0;
   1082 		xm.xm_period = tinfo->curr.period;
   1083 		xm.xm_offset = tinfo->curr.offset;
   1084 		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
   1085 			xm.xm_mode |= PERIPH_CAP_WIDE16;
   1086 		if (tinfo->curr.period)
   1087 			xm.xm_mode |= PERIPH_CAP_SYNC;
   1088 		if (tstate->tagenable & devinfo.target_mask)
   1089 			xm.xm_mode |= PERIPH_CAP_TQING;
   1090 		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
   1091 			xm.xm_mode |= PERIPH_CAP_DT;
   1092 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
   1093 		break;
   1094 	case AC_BUS_RESET:
   1095 		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
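         		/* FALLTHROUGH */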
   1096 	case AC_SENT_BDR:
   1097 	default:
   1098 		break;
   1099 	}
   1100 }
   1101