      1 /*	$NetBSD: aic7xxx_osm.c,v 1.9 2003/06/19 20:11:14 bouyer Exp $	*/
      2 
      3 /*
      4  * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
      5  *
      6  * Copyright (c) 1994-2001 Justin T. Gibbs.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions, and the following disclaimer,
     14  *    without modification.
     15  * 2. The name of the author may not be used to endorse or promote products
     16  *    derived from this software without specific prior written permission.
     17  *
     18  * Alternatively, this software may be distributed under the terms of the
     19  * GNU Public License ("GPL").
     20  *
     21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     24  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
     25  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     31  * SUCH DAMAGE.
     32  *
     33  * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
     34  *
     35  * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
     36  */
     37 /*
     38  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
     39  */
     40 #include <dev/ic/aic7xxx_osm.h>
     41 #include <dev/ic/aic7xxx_inline.h>
     42 
     43 #ifndef AHC_TMODE_ENABLE
     44 #define AHC_TMODE_ENABLE 0
     45 #endif
     46 
     47 
     48 static void	ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
     49 static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
     50 static int	ahc_poll(struct ahc_softc *ahc, int wait);
     51 static void	ahc_setup_data(struct ahc_softc *ahc,
     52 			       struct scsipi_xfer *xs, struct scb *scb);
     53 static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
     54 static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
     55 			  struct proc *p);
     56 
     57 
     58 
     59 /*
     60  * Attach all the sub-devices we can find
     61  */
     62 int
     63 ahc_attach(struct ahc_softc *ahc)
     64 {
     65 	u_long 	s;
     66 	int i;
     67 	char ahc_info[256];
     68 
     69 	LIST_INIT(&ahc->pending_scbs);
     70 	for (i = 0; i < AHC_NUM_TARGETS; i++)
     71 		TAILQ_INIT(&ahc->untagged_queues[i]);
     72 
      73 	ahc_lock(ahc, &s);
     74 
     75 	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
     76 	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;
     77 
     78 	ahc->sc_adapter.adapt_openings = AHC_MAX_QUEUE;
     79 	ahc->sc_adapter.adapt_max_periph = 16;
     80 
     81 	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
     82 	ahc->sc_adapter.adapt_minphys = ahc_minphys;
     83 	ahc->sc_adapter.adapt_request = ahc_action;
     84 
     85 	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
      86 	ahc->sc_channel.chan_bustype = &scsi_bustype;
      87 	ahc->sc_channel.chan_channel = 0;
      88 	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
      89 	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
      90 	ahc->sc_channel.chan_id = ahc->our_id;
     91 
     92 	if (ahc->features & AHC_TWIN) {
     93 		ahc->sc_channel_b = ahc->sc_channel;
     94 		ahc->sc_channel_b.chan_id = ahc->our_id_b;
     95 		ahc->sc_channel_b.chan_channel = 1;
     96 	}
     97 
     98 	ahc_controller_info(ahc, ahc_info);
     99 	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
    100 
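         	/*
         	 * Attach the channels in the order dictated by the primary
         	 * channel flag so that the primary bus is probed first.
         	 */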
    101 	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
    102 		ahc->sc_child = config_found((void *)&ahc->sc_dev,
    103 		    &ahc->sc_channel, scsiprint);
    104 		if (ahc->features & AHC_TWIN)
    105 			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
    106 			    &ahc->sc_channel_b, scsiprint);
    107 	} else {
    108 		if (ahc->features & AHC_TWIN)
    109 			ahc->sc_child = config_found((void *)&ahc->sc_dev,
    110 			    &ahc->sc_channel_b, scsiprint);
    111 		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
    112 		    &ahc->sc_channel, scsiprint);
    113 	}
    114 
    115 	ahc_intr_enable(ahc, TRUE);
    116 
    117 	if (ahc->flags & AHC_RESET_BUS_A)
    118 		ahc_reset_channel(ahc, 'A', TRUE);
    119 	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
    120 		ahc_reset_channel(ahc, 'B', TRUE);
    121 
    122 	ahc_unlock(ahc, &s);
    123 	return (1);
    124 }
    125 
    126 /*
    127  * Catch an interrupt from the adapter
    128  */
    129 void
    130 ahc_platform_intr(void *arg)
    131 {
    132 	struct	ahc_softc *ahc;
    133 
    134 	ahc = (struct ahc_softc *)arg;
    135 	ahc_intr(ahc);
    136 }
    137 
    138 /*
     139  * We have an SCB which has been processed by the
     140  * adapter; now we look to see how the operation
    141  * went.
    142  */
    143 void
    144 ahc_done(struct ahc_softc *ahc, struct scb *scb)
    145 {
    146 	struct scsipi_xfer *xs;
    147   	struct scsipi_periph *periph;
    148 	u_long s;
    149 
    150 	xs = scb->xs;
    151 	periph = xs->xs_periph;
    152 	LIST_REMOVE(scb, pending_links);
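         	/*
         	 * If this command went through the per-target untagged queue,
         	 * take it off and start the next untagged request, if any.
         	 */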
    153 	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
    154 		struct scb_tailq *untagged_q;
    155 		int target_offset;
    156 
    157 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
    158 		untagged_q = &ahc->untagged_queues[target_offset];
    159 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
    160 		scb->flags &= ~SCB_UNTAGGEDQ;
    161 		ahc_run_untagged_queue(ahc, untagged_q);
    162 	}
    163 
    164 	callout_stop(&scb->xs->xs_callout);
    165 
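         	/* Sync and unload the data DMA map now that the transfer is done. */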
    166 	if (xs->datalen) {
    167 		int op;
    168 
    169 		if (xs->xs_control & XS_CTL_DATA_IN)
    170 			op = BUS_DMASYNC_POSTREAD;
    171 		else
    172 			op = BUS_DMASYNC_POSTWRITE;
    173 		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
    174 				scb->dmamap->dm_mapsize, op);
    175 		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
    176 	}
    177 
    178 	/*
     179 	 * If the recovery SCB completes, we are no longer
     180 	 * handling a timeout.
    181 	 */
    182 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
    183 		struct	scb *list_scb;
    184 
    185 		/*
    186 		 * We were able to complete the command successfully,
    187 		 * so reinstate the timeouts for all other pending
    188 		 * commands.
    189 		 */
    190 		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
    191 			struct scsipi_xfer *xs = list_scb->xs;
    192 
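         			/* Re-arm the callout; the timeout is in ms, the callout wants ticks. */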
    193 			if (!(xs->xs_control & XS_CTL_POLL)) {
    194 				callout_reset(&list_scb->xs->xs_callout,
    195 				    (list_scb->xs->timeout > 1000000) ?
    196 				    (list_scb->xs->timeout / 1000) * hz :
    197 				    (list_scb->xs->timeout * hz) / 1000,
    198 				    ahc_timeout, list_scb);
    199 			}
    200 		}
    201 
    202 		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
    203 		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
    204 			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
    205 		scsipi_printaddr(xs->xs_periph);
    206 		printf("%s: no longer in timeout, status = %x\n",
    207 		       ahc_name(ahc), xs->status);
    208 	}
    209 
     210 	/* Don't clobber any existing error state */
     211 	if (xs->error != XS_NOERROR) {
     212 		/* The error has already been recorded; leave it alone. */
    213 	} else if ((scb->flags & SCB_SENSE) != 0) {
    214 		/*
    215 		 * We performed autosense retrieval.
    216 		 *
    217 		 * Zero any sense not transferred by the
    218 		 * device.  The SCSI spec mandates that any
     219 		 * untransferred data should be assumed to be
    220 		 * zero.  Complete the 'bounce' of sense information
    221 		 * through buffers accessible via bus-space by
     222 		 * copying it into the client's scsipi_xfer.
    223 		 */
    224 		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
    225 		memcpy(&xs->sense.scsi_sense,
    226 		       ahc_get_sense_buf(ahc, scb),
    227 		       sizeof(xs->sense.scsi_sense));
    228 		xs->error = XS_SENSE;
    229 	}
    230 	if (scb->flags & SCB_FREEZE_QUEUE) {
    231 		scsipi_periph_thaw(periph, 1);
    232 		scb->flags &= ~SCB_FREEZE_QUEUE;
    233 	}
    234 
     235 	ahc_lock(ahc, &s);
     236 	ahc_free_scb(ahc, scb);
     237 	ahc_unlock(ahc, &s);
    238 
    239 	scsipi_done(xs);
    240 }
    241 
    242 static int
    243 ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
    244 	  struct proc *p)
    245 {
    246 	struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
    247 	int s, ret = ENOTTY;
    248 
    249 	switch (cmd) {
    250 	case SCBUSIORESET:
    251 		s = splbio();
    252 		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
    253 		    TRUE);
    254 		splx(s);
    255 		ret = 0;
    256 		break;
    257 	default:
    258 		break;
    259 	}
    260 
    261 	return ret;
    262 }
    263 
    264 static void
    265 ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    266 {
    267 	struct ahc_softc *ahc;
    268 	int s;
    269 	struct ahc_initiator_tinfo *tinfo;
    270 	struct ahc_tmode_tstate *tstate;
    271 
    272 	ahc  = (void *)chan->chan_adapter->adapt_dev;
    273 
    274 	switch (req) {
    275 
    276 	case ADAPTER_REQ_RUN_XFER:
    277 	  {
    278 		struct scsipi_xfer *xs;
    279 		struct scsipi_periph *periph;
     280 		struct scb *scb;
     281 		struct hardware_scb *hscb;
    282 		u_int target_id;
    283 		u_int our_id;
    284 		u_long s;
    285 
    286 		xs = arg;
    287 		periph = xs->xs_periph;
    288 
    289 		target_id = periph->periph_target;
     290 		our_id = ahc->our_id;
    291 
    292 		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));
    293 
    294 		/*
    295 		 * get an scb to use.
    296 		 */
    297 		ahc_lock(ahc, &s);
    298 		if ((scb = ahc_get_scb(ahc)) == NULL) {
    299 			xs->error = XS_RESOURCE_SHORTAGE;
    300 			ahc_unlock(ahc, &s);
    301 			scsipi_done(xs);
    302 			return;
    303 		}
    304 		ahc_unlock(ahc, &s);
    305 
    306 		hscb = scb->hscb;
    307 
    308 		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
    309 		scb->xs = xs;
    310 
    311 		/*
    312 		 * Put all the arguments for the xfer in the scb
    313 		 */
    314 		hscb->control = 0;
    315 		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
    316 		hscb->lun = periph->periph_lun;
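         		/*
         		 * A reset request carries no CDB; flag the SCB so that a
         		 * bus device reset message is sent to the target instead.
         		 */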
    317 		if (xs->xs_control & XS_CTL_RESET) {
    318 			hscb->cdb_len = 0;
    319 			scb->flags |= SCB_DEVICE_RESET;
    320 			hscb->control |= MK_MESSAGE;
    321 			ahc_execute_scb(scb, NULL, 0);
    322 		}
    323 
    324 		ahc_setup_data(ahc, xs, scb);
    325 
    326 		break;
    327 	  }
    328 	case ADAPTER_REQ_GROW_RESOURCES:
    329   		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
    330 		return;
    331 
    332 	case ADAPTER_REQ_SET_XFER_MODE:
    333 	    {
    334 		struct scsipi_xfer_mode *xm = arg;
    335 		struct ahc_devinfo devinfo;
    336 		int target_id, our_id, first;
    337 		u_int width;
    338 		char channel;
    339 
    340 		target_id = xm->xm_target;
    341 		our_id = chan->chan_id;
    342 		channel = (chan->chan_channel == 1) ? 'B' : 'A';
    343 		s = splbio();
    344 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
    345 		    &tstate);
    346 		ahc_compile_devinfo(&devinfo, our_id, target_id,
    347 		    0, channel, ROLE_INITIATOR);
    348 
    349 		/*
    350 		 * XXX since the period and offset are not provided here,
    351 		 * fake things by forcing a renegotiation using the user
    352 		 * settings if this is called for the first time (i.e.
    353 		 * during probe). Also, cap various values at the user
    354 		 * values, assuming that the user set it up that way.
    355 		 */
    356 		if (ahc->inited_target[target_id] == 0) {
    357 			tinfo->goal = tinfo->user;
    358 			tstate->tagenable |=
    359 			    (ahc->user_tagenable & devinfo.target_mask);
    360 			tstate->discenable |=
    361 			    (ahc->user_discenable & devinfo.target_mask);
    362 			ahc->inited_target[target_id] = 1;
    363 			first = 1;
    364 		} else
    365 			first = 0;
    366 
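         		/*
         		 * DT clocking is only defined on a wide bus, so treat a
         		 * DT-capable device as 16-bit wide.
         		 */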
    367 		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
    368 			width = MSG_EXT_WDTR_BUS_16_BIT;
    369 		else
    370 			width = MSG_EXT_WDTR_BUS_8_BIT;
    371 
    372 		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
    373 		if (width > tinfo->user.width)
    374 			width = tinfo->user.width;
    375 		tinfo->goal.width = width;
    376 
    377 		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
    378 			tinfo->goal.period = 0;
    379 			tinfo->goal.offset = 0;
    380 			tinfo->goal.ppr_options = 0;
    381 		}
    382 
    383 		if ((xm->xm_mode & PERIPH_CAP_DT) &&
    384 		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
    385 			tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
    386 		else
    387 			tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;
    388 
    389 		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
    390 		    (ahc->user_tagenable & devinfo.target_mask))
    391 			tstate->tagenable |= devinfo.target_mask;
    392 		else
    393 			tstate->tagenable &= ~devinfo.target_mask;
    394 
    395 		/*
    396 		 * If this is the first request, and no negotiation is
    397 		 * needed, just confirm the state to the scsipi layer,
    398 		 * so that it can print a message.
    399 		 */
    400 		if (!ahc_update_neg_request(ahc, &devinfo, tstate,
    401 		    tinfo, AHC_NEG_IF_NON_ASYNC) && first) {
    402 			xm->xm_mode = 0;
    403 			xm->xm_period = tinfo->curr.period;
    404 			xm->xm_offset = tinfo->curr.offset;
    405 			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
    406 				xm->xm_mode |= PERIPH_CAP_WIDE16;
    407 			if (tinfo->curr.period)
    408 				xm->xm_mode |= PERIPH_CAP_SYNC;
    409 			if (tstate->tagenable & devinfo.target_mask)
    410 				xm->xm_mode |= PERIPH_CAP_TQING;
    411 			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
    412 				xm->xm_mode |= PERIPH_CAP_DT;
    413 			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
    414 		}
    415 		splx(s);
    416 	    }
    417 	}
    418 
    419 	return;
    420 }
    421 
    422 static void
    423 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
    424 {
    425 	struct	scb *scb;
    426 	struct scsipi_xfer *xs;
    427 	struct	ahc_softc *ahc;
    428 	struct	ahc_initiator_tinfo *tinfo;
    429 	struct	ahc_tmode_tstate *tstate;
    430 
    431 	u_int	mask;
    432 	long	s;
    433 
    434 	scb = (struct scb *)arg;
    435 	xs = scb->xs;
    436 	xs->error = 0;
    437 	xs->status = 0;
    438 	xs->xs_status = 0;
    439 	ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
    440 
    441 	if (nsegments != 0) {
    442 		struct	  ahc_dma_seg *sg;
    443 		bus_dma_segment_t *end_seg;
    444 		int op;
    445 
    446 		end_seg = dm_segs + nsegments;
    447 
    448 		/* Copy the segments into our SG list */
    449 		sg = scb->sg_list;
    450 		while (dm_segs < end_seg) {
    451 			uint32_t len;
    452 
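         			/*
         			 * The low 32 address bits go in sg->addr; bits 32-38
         			 * of a 39-bit DMA address ride in the top byte of the
         			 * length word.
         			 */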
    453 			sg->addr = ahc_htole32(dm_segs->ds_addr);
    454 			len = dm_segs->ds_len
    455 			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
    456 			sg->len = ahc_htole32(len);
    457 			sg++;
    458 			dm_segs++;
    459 		}
    460 
    461 		/*
    462 		 * Note where to find the SG entries in bus space.
    463 		 * We also set the full residual flag which the
    464 		 * sequencer will clear as soon as a data transfer
    465 		 * occurs.
    466 		 */
    467 		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);
    468 
    469 		if (xs->xs_control & XS_CTL_DATA_IN)
    470 			op = BUS_DMASYNC_PREREAD;
    471 		else
    472 			op = BUS_DMASYNC_PREWRITE;
    473 
    474 		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
    475 				scb->dmamap->dm_mapsize, op);
    476 
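         		/* Flag the final element so the sequencer knows where the list ends. */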
    477 		sg--;
    478 		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
    479 
    480 		/* Copy the first SG into the "current" data pointer area */
    481 		scb->hscb->dataptr = scb->sg_list->addr;
    482 		scb->hscb->datacnt = scb->sg_list->len;
    483 	} else {
    484 		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
    485 		scb->hscb->dataptr = 0;
    486 		scb->hscb->datacnt = 0;
    487 	}
    488 
    489 	scb->sg_count = nsegments;
    490 
    491 	ahc_lock(ahc, &s);
    492 
    493 	/*
     494 	 * Last chance to check whether this SCB needs to
     495 	 * be aborted.
    496 	 */
    497 	if (xs->xs_status & XS_STS_DONE) {
    498 		if (nsegments != 0)
    499 			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
    500 		ahc_free_scb(ahc, scb);
    501 		ahc_unlock(ahc, &s);
    502 		scsipi_done(xs);
    503 		return;
    504 	}
    505 
    506 	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
    507 				    SCSIID_OUR_ID(scb->hscb->scsiid),
    508 				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
    509 				    &tstate);
    510 
    511 	mask = SCB_GET_TARGET_MASK(ahc, scb);
    512 	scb->hscb->scsirate = tinfo->scsirate;
    513 	scb->hscb->scsioffset = tinfo->curr.offset;
    514 
    515 	if ((tstate->ultraenb & mask) != 0)
    516 		scb->hscb->control |= ULTRAENB;
    517 
    518 	if ((tstate->discenable & mask) != 0)
    519 	    	scb->hscb->control |= DISCENB;
    520 
    521 	if (xs->xs_tag_type)
    522 		scb->hscb->control |= xs->xs_tag_type;
    523 
    524 	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
    525 	     && tinfo->goal.offset == 0
    526 	     && tinfo->goal.ppr_options == 0)) {
    527 		scb->flags |= SCB_NEGOTIATE;
    528 		scb->hscb->control |= MK_MESSAGE;
    529 	} else if ((tstate->auto_negotiate & mask) != 0) {
    530 		scb->flags |= SCB_AUTO_NEGOTIATE;
    531 		scb->hscb->control |= MK_MESSAGE;
    532 	}
    533 
    534 	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
    535 
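         	/*
         	 * Arm the command's timeout callout.  The timeout is given in
         	 * milliseconds; for very long timeouts, divide before multiplying
         	 * by hz so the tick calculation cannot overflow.
         	 */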
    536 	if (!(xs->xs_control & XS_CTL_POLL)) {
    537 		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
    538 			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
    539 			      ahc_timeout, scb);
    540 	}
    541 
    542 	/*
    543 	 * We only allow one untagged transaction
    544 	 * per target in the initiator role unless
    545 	 * we are storing a full busy target *lun*
    546 	 * table in SCB space.
    547 	 */
    548 	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
    549 	    && (ahc->flags & AHC_SCB_BTT) == 0) {
    550 		struct scb_tailq *untagged_q;
    551 		int target_offset;
    552 
    553 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
    554 		untagged_q = &(ahc->untagged_queues[target_offset]);
    555 		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
    556 		scb->flags |= SCB_UNTAGGEDQ;
    557 		if (TAILQ_FIRST(untagged_q) != scb) {
    558 			ahc_unlock(ahc, &s);
    559 			return;
    560 		}
    561 	}
    562 	scb->flags |= SCB_ACTIVE;
    563 
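         	/*
         	 * Immediate target-mode SCBs bypass the QINFIFO and are handed
         	 * to the sequencer directly through TARG_IMMEDIATE_SCB.
         	 */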
    564 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
    565 		/* Define a mapping from our tag to the SCB. */
    566 		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
    567 		ahc_pause(ahc);
    568 		if ((ahc->flags & AHC_PAGESCBS) == 0)
    569 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
    570 		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
    571 		ahc_unpause(ahc);
    572 	} else {
    573 		ahc_queue_scb(ahc, scb);
    574 	}
    575 
    576 	if (!(xs->xs_control & XS_CTL_POLL)) {
    577 		ahc_unlock(ahc, &s);
    578 		return;
    579 	}
    580 
    581 	/*
    582 	 * If we can't use interrupts, poll for completion
    583 	 */
    584 	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
    585 	do {
    586 		if (ahc_poll(ahc, xs->timeout)) {
    587 			if (!(xs->xs_control & XS_CTL_SILENT))
    588 				printf("cmd fail\n");
    589 			ahc_timeout(scb);
    590 			break;
    591 		}
    592 	} while (!(xs->xs_status & XS_STS_DONE));
    593 	ahc_unlock(ahc, &s);
    594 
    595 	return;
    596 }
    597 
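         /*
          * Busy-wait, in one-millisecond steps, for the controller to post an
          * interrupt condition, then run the normal interrupt handler.  'wait'
          * is the timeout in milliseconds.
          */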
    598 static int
    599 ahc_poll(struct ahc_softc *ahc, int wait)
    600 {
    601 	while (--wait) {
    602 		DELAY(1000);
    603 		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
    604 			break;
    605 	}
    606 
    607 	if (wait == 0) {
    608 		printf("%s: board is not responding\n", ahc_name(ahc));
    609 		return (EIO);
    610 	}
    611 
    612 	ahc_intr((void *)ahc);
    613 	return (0);
    614 }
    615 
    616 static void
    617 ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    618 	       struct scb *scb)
    619 {
    620 	struct hardware_scb *hscb;
    621 
    622 	hscb = scb->hscb;
    623 	xs->resid = xs->status = 0;
    624 
    625 	hscb->cdb_len = xs->cmdlen;
    626 	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
    627 		u_long s;
    628 
    629 		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
    630 		ahc_lock(ahc, &s);
    631 		ahc_free_scb(ahc, scb);
    632 		ahc_unlock(ahc, &s);
    633 		scsipi_done(xs);
    634 		return;
    635 	}
    636 
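         	/*
         	 * CDBs of up to 12 bytes fit directly in the hscb; longer CDBs
         	 * are kept in cdb32 and fetched by the sequencer via a pointer.
         	 */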
    637 	if (hscb->cdb_len > 12) {
    638 		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
    639 		scb->flags |= SCB_CDB32_PTR;
    640 	} else {
    641 		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
    642 	}
    643 
    644 	/* Only use S/G if there is a transfer */
    645 	if (xs->datalen) {
    646 		int error;
    647 
    648                 error = bus_dmamap_load(ahc->parent_dmat,
    649 					scb->dmamap, xs->data,
    650 					xs->datalen, NULL,
    651 					((xs->xs_control & XS_CTL_NOSLEEP) ?
    652 					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
    653 					BUS_DMA_STREAMING |
    654 					((xs->xs_control & XS_CTL_DATA_IN) ?
    655 					 BUS_DMA_READ : BUS_DMA_WRITE));
    656                 if (error) {
    657 #ifdef AHC_DEBUG
    658                         printf("%s: in ahc_setup_data(): bus_dmamap_load() "
    659 			       "= %d\n",
    660 			       ahc_name(ahc), error);
    661 #endif
    662                         xs->error = XS_RESOURCE_SHORTAGE;
    663                         scsipi_done(xs);
    664                         return;
    665                 }
    666                 ahc_execute_scb(scb,
    667 				scb->dmamap->dm_segs,
    668 				scb->dmamap->dm_nsegs);
    669 	} else {
    670 		ahc_execute_scb(scb, NULL, 0);
    671 	}
    672 }
    673 
    674 static void
     675 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
     676 {
    677 	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
    678 		struct scb *list_scb;
    679 
    680 		scb->flags |= SCB_RECOVERY_SCB;
    681 
    682 		/*
    683 		 * Take all queued, but not sent SCBs out of the equation.
    684 		 * Also ensure that no new CCBs are queued to us while we
    685 		 * try to fix this problem.
    686 		 */
    687 		scsipi_channel_freeze(&ahc->sc_channel, 1);
    688 		if (ahc->features & AHC_TWIN)
    689 			scsipi_channel_freeze(&ahc->sc_channel_b, 1);
    690 
    691 		/*
    692 		 * Go through all of our pending SCBs and remove
    693 		 * any scheduled timeouts for them.  We will reschedule
    694 		 * them after we've successfully fixed this problem.
    695 		 */
    696 		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
    697 			callout_stop(&list_scb->xs->xs_callout);
    698 		}
    699 	}
    700 }
    701 
    702 void
    703 ahc_timeout(void *arg)
    704 {
    705 	struct	scb *scb;
    706 	struct	ahc_softc *ahc;
    707 	long	s;
    708 	int	found;
    709 	u_int	last_phase;
    710 	int	target;
    711 	int	lun;
    712 	int	i;
    713 	char	channel;
    714 
    715 	scb = (struct scb *)arg;
    716 	ahc = (struct ahc_softc *)scb->ahc_softc;
    717 
    718 	ahc_lock(ahc, &s);
    719 
    720 	ahc_pause_and_flushwork(ahc);
    721 
    722 	if ((scb->flags & SCB_ACTIVE) == 0) {
    723 		/* Previous timeout took care of me already */
     724 		printf("%s: Timed-out SCB already complete. "
    725 		       "Interrupts may not be functioning.\n", ahc_name(ahc));
    726 		ahc_unpause(ahc);
    727 		ahc_unlock(ahc, &s);
    728 		return;
    729 	}
    730 
    731 	target = SCB_GET_TARGET(ahc, scb);
    732 	channel = SCB_GET_CHANNEL(ahc, scb);
    733 	lun = SCB_GET_LUN(scb);
    734 
    735 	ahc_print_path(ahc, scb);
    736 	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
    737 	ahc_dump_card_state(ahc);
    738 	last_phase = ahc_inb(ahc, LASTPHASE);
    739 	if (scb->sg_count > 0) {
    740 		for (i = 0; i < scb->sg_count; i++) {
    741 			printf("sg[%d] - Addr 0x%x : Length %d\n",
    742 			       i,
    743 			       scb->sg_list[i].addr,
    744 			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
    745 		}
    746 	}
    747 	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
    748 		/*
    749 		 * Been down this road before.
    750 		 * Do a full bus reset.
    751 		 */
    752 bus_reset:
    753 		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
    754 		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
    755 		printf("%s: Issued Channel %c Bus Reset. "
    756 		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
    757 	} else {
    758 		/*
    759 		 * If we are a target, transition to bus free and report
    760 		 * the timeout.
    761 		 *
    762 		 * The target/initiator that is holding up the bus may not
    763 		 * be the same as the one that triggered this timeout
    764 		 * (different commands have different timeout lengths).
     765 		 * If the bus is idle and we are acting as the initiator
    766 		 * for this request, queue a BDR message to the timed out
    767 		 * target.  Otherwise, if the timed out transaction is
    768 		 * active:
    769 		 *   Initiator transaction:
    770 		 *	Stuff the message buffer with a BDR message and assert
    771 		 *	ATN in the hopes that the target will let go of the bus
    772 		 *	and go to the mesgout phase.  If this fails, we'll
    773 		 *	get another timeout 2 seconds later which will attempt
    774 		 *	a bus reset.
    775 		 *
    776 		 *   Target transaction:
    777 		 *	Transition to BUS FREE and report the error.
    778 		 *	It's good to be the target!
    779 		 */
    780 		u_int active_scb_index;
    781 		u_int saved_scbptr;
    782 
    783 		saved_scbptr = ahc_inb(ahc, SCBPTR);
    784 		active_scb_index = ahc_inb(ahc, SCB_TAG);
    785 
    786 		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
    787 		  && (active_scb_index < ahc->scb_data->numscbs)) {
    788 			struct scb *active_scb;
    789 
    790 			/*
    791 			 * If the active SCB is not us, assume that
    792 			 * the active SCB has a longer timeout than
    793 			 * the timedout SCB, and wait for the active
    794 			 * SCB to timeout.
    795 			 */
    796 			active_scb = ahc_lookup_scb(ahc, active_scb_index);
    797 			if (active_scb != scb) {
    798 				uint64_t newtimeout;
    799 
    800 				ahc_print_path(ahc, scb);
    801 				printf("Other SCB Timeout%s",
    802 			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
    803 				       ? " again\n" : "\n");
    804 				scb->flags |= SCB_OTHERTCL_TIMEOUT;
    805 				newtimeout = MAX(active_scb->xs->timeout,
    806 						 scb->xs->timeout);
    807 				callout_reset(&scb->xs->xs_callout,
    808 				    newtimeout > 1000000 ?
    809 				    (newtimeout / 1000) * hz :
    810 				    (newtimeout * hz) / 1000,
    811 				    ahc_timeout, scb);
    812 				ahc_unpause(ahc);
    813 				ahc_unlock(ahc, &s);
    814 				return;
    815 			}
    816 
    817 			/* It's us */
    818 			if ((scb->flags & SCB_TARGET_SCB) != 0) {
    819 
    820 				/*
    821 				 * Send back any queued up transactions
    822 				 * and properly record the error condition.
    823 				 */
    824 				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
    825 					       SCB_GET_CHANNEL(ahc, scb),
    826 					       SCB_GET_LUN(scb),
    827 					       scb->hscb->tag,
    828 					       ROLE_TARGET,
    829 					       CAM_CMD_TIMEOUT);
    830 
    831 				/* Will clear us from the bus */
    832 				ahc_restart(ahc);
    833 				ahc_unlock(ahc, &s);
    834 				return;
    835 			}
    836 
    837 			ahc_set_recoveryscb(ahc, active_scb);
    838 			ahc_outb(ahc, MSG_OUT, HOST_MSG);
    839 			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
    840 			ahc_print_path(ahc, active_scb);
    841 			printf("BDR message in message buffer\n");
    842 			active_scb->flags |= SCB_DEVICE_RESET;
    843 			callout_reset(&active_scb->xs->xs_callout,
    844 				      2 * hz, ahc_timeout, active_scb);
    845 			ahc_unpause(ahc);
    846 		} else {
    847 			int	 disconnected;
    848 
    849 			/* XXX Shouldn't panic.  Just punt instead? */
    850 			if ((scb->flags & SCB_TARGET_SCB) != 0)
    851 				panic("Timed-out target SCB but bus idle");
    852 
    853 			if (last_phase != P_BUSFREE
    854 			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
    855 				/* XXX What happened to the SCB? */
    856 				/* Hung target selection.  Goto busfree */
    857 				printf("%s: Hung target selection\n",
    858 				       ahc_name(ahc));
    859 				ahc_restart(ahc);
    860 				ahc_unlock(ahc, &s);
    861 				return;
    862 			}
    863 
    864 			if (ahc_search_qinfifo(ahc, target, channel, lun,
    865 					       scb->hscb->tag, ROLE_INITIATOR,
    866 					       /*status*/0, SEARCH_COUNT) > 0) {
    867 				disconnected = FALSE;
    868 			} else {
    869 				disconnected = TRUE;
    870 			}
    871 
    872 			if (disconnected) {
    873 
    874 				ahc_set_recoveryscb(ahc, scb);
    875 				/*
    876 				 * Actually re-queue this SCB in an attempt
    877 				 * to select the device before it reconnects.
    878 				 * In either case (selection or reselection),
    879 				 * we will now issue a target reset to the
    880 				 * timed-out device.
    881 				 *
    882 				 * Set the MK_MESSAGE control bit indicating
    883 				 * that we desire to send a message.  We
    884 				 * also set the disconnected flag since
    885 				 * in the paging case there is no guarantee
    886 				 * that our SCB control byte matches the
    887 				 * version on the card.  We don't want the
    888 				 * sequencer to abort the command thinking
    889 				 * an unsolicited reselection occurred.
    890 				 */
    891 				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
    892 				scb->flags |= SCB_DEVICE_RESET;
    893 
    894 				/*
    895 				 * Remove any cached copy of this SCB in the
    896 				 * disconnected list in preparation for the
    897 				 * queuing of our abort SCB.  We use the
    898 				 * same element in the SCB, SCB_NEXT, for
    899 				 * both the qinfifo and the disconnected list.
    900 				 */
    901 				ahc_search_disc_list(ahc, target, channel,
    902 						     lun, scb->hscb->tag,
    903 						     /*stop_on_first*/TRUE,
    904 						     /*remove*/TRUE,
    905 						     /*save_state*/FALSE);
    906 
    907 				/*
    908 				 * In the non-paging case, the sequencer will
    909 				 * never re-reference the in-core SCB.
    910 				 * To make sure we are notified during
     911 				 * reselection, set the MK_MESSAGE flag in
    912 				 * the card's copy of the SCB.
    913 				 */
    914 				if ((ahc->flags & AHC_PAGESCBS) == 0) {
    915 					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
    916 					ahc_outb(ahc, SCB_CONTROL,
    917 						 ahc_inb(ahc, SCB_CONTROL)
    918 						| MK_MESSAGE);
    919 				}
    920 
    921 				/*
    922 				 * Clear out any entries in the QINFIFO first
    923 				 * so we are the next SCB for this target
    924 				 * to run.
    925 				 */
    926 				ahc_search_qinfifo(ahc,
    927 						   SCB_GET_TARGET(ahc, scb),
    928 						   channel, SCB_GET_LUN(scb),
    929 						   SCB_LIST_NULL,
    930 						   ROLE_INITIATOR,
    931 						   CAM_REQUEUE_REQ,
    932 						   SEARCH_COMPLETE);
    933 				ahc_print_path(ahc, scb);
    934 				printf("Queuing a BDR SCB\n");
    935 				ahc_qinfifo_requeue_tail(ahc, scb);
    936 				ahc_outb(ahc, SCBPTR, saved_scbptr);
    937 				callout_reset(&scb->xs->xs_callout, 2 * hz,
    938 					      ahc_timeout, scb);
    939 				ahc_unpause(ahc);
    940 			} else {
     941 				/* Go "immediately" to the bus reset */
    942 				/* This shouldn't happen */
    943 				ahc_set_recoveryscb(ahc, scb);
    944 				ahc_print_path(ahc, scb);
    945 				printf("SCB %d: Immediate reset.  "
    946 					"Flags = 0x%x\n", scb->hscb->tag,
    947 					scb->flags);
    948 				goto bus_reset;
    949 			}
    950 		}
    951 	}
    952 	ahc_unlock(ahc, &s);
    953 }
    954 
    955 void
    956 ahc_platform_set_tags(struct ahc_softc *ahc,
    957 		      struct ahc_devinfo *devinfo, int enable)
    958 {
    959 	struct ahc_initiator_tinfo *tinfo;
     960 	struct ahc_tmode_tstate *tstate;
     961 
     962 	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
     963 	    devinfo->target, &tstate);
     964 
     965 	if (enable)
     966 		tstate->tagenable |= devinfo->target_mask;
     967 	else
     968 		tstate->tagenable &= ~devinfo->target_mask;
    969 }
    970 
    971 int
    972 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
    973 {
    974 	if (sizeof(struct ahc_platform_data) == 0)
    975 		return 0;
    976 	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
    977 				    M_NOWAIT);
    978 	if (ahc->platform_data == NULL)
    979 		return (ENOMEM);
    980 	return (0);
    981 }
    982 
    983 void
    984 ahc_platform_free(struct ahc_softc *ahc)
    985 {
    986 	if (sizeof(struct ahc_platform_data) == 0)
    987 		return;
    988 	free(ahc->platform_data, M_DEVBUF);
    989 }
    990 
    991 int
    992 ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
    993 {
    994 	return (0);
    995 }
    996 
    997 int
    998 ahc_detach(struct device *self, int flags)
    999 {
   1000 	int rv = 0;
   1001 
   1002 	struct ahc_softc *ahc = (struct ahc_softc*)self;
   1003 
   1004 	ahc_intr_enable(ahc, FALSE);
   1005 	if (ahc->sc_child != NULL)
   1006 		rv = config_detach(ahc->sc_child, flags);
   1007 	if (rv == 0 && ahc->sc_child_b != NULL)
   1008 		rv = config_detach(ahc->sc_child_b, flags);
   1009 
   1010 	shutdownhook_disestablish(ahc->shutdown_hook);
   1011 
   1012 	ahc_free(ahc);
   1013 
   1014 	return (rv);
   1015 }
   1016 
   1017 
   1018 void
   1019 ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
   1020 	       ac_code code, void *opt_arg)
   1021 {
   1022 	struct ahc_tmode_tstate *tstate;
   1023 	struct ahc_initiator_tinfo *tinfo;
   1024 	struct ahc_devinfo devinfo;
   1025 	struct scsipi_channel *chan;
   1026 	struct scsipi_xfer_mode xm;
   1027 
   1028 	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
   1029 	switch (code) {
   1030 	case AC_TRANSFER_NEG:
   1031 		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
   1032 			    &tstate);
   1033 		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
   1034 		    channel, ROLE_UNKNOWN);
   1035 		/*
    1036 		 * Don't bother while a negotiation is still in progress. XXX?
   1037 		 */
   1038 		if (tinfo->curr.period != tinfo->goal.period
   1039 		    || tinfo->curr.width != tinfo->goal.width
   1040 		    || tinfo->curr.offset != tinfo->goal.offset
   1041 		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
   1042 			break;
   1043 		xm.xm_target = target;
   1044 		xm.xm_mode = 0;
   1045 		xm.xm_period = tinfo->curr.period;
   1046 		xm.xm_offset = tinfo->curr.offset;
   1047 		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
   1048 			xm.xm_mode |= PERIPH_CAP_WIDE16;
   1049 		if (tinfo->curr.period)
   1050 			xm.xm_mode |= PERIPH_CAP_SYNC;
   1051 		if (tstate->tagenable & devinfo.target_mask)
   1052 			xm.xm_mode |= PERIPH_CAP_TQING;
   1053 		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
   1054 			xm.xm_mode |= PERIPH_CAP_DT;
   1055 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
   1056 		break;
   1057 	case AC_BUS_RESET:
   1058 		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
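         		/* FALLTHROUGH */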
   1059 	case AC_SENT_BDR:
   1060 	default:
   1061 		break;
   1062 	}
   1063 }
   1064