Home | History | Annotate | Line # | Download | only in ic
aic7xxx_osm.c revision 1.1
      1 /*
      2  * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
      3  *
      4  * Copyright (c) 1994-2001 Justin T. Gibbs.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions, and the following disclaimer,
     12  *    without modification.
     13  * 2. The name of the author may not be used to endorse or promote products
     14  *    derived from this software without specific prior written permission.
     15  *
     16  * Alternatively, this software may be distributed under the terms of the
     17  * GNU Public License ("GPL").
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
     23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  * $Id: aic7xxx_osm.c,v 1.1 2003/04/19 19:33:30 fvdl Exp $
     32  *
     33  * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
     34  *
     35  * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
     36  */
     37 /*
     38  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
     39  */
     40 #include <dev/ic/aic7xxx_osm.h>
     41 #include <dev/ic/aic7xxx_inline.h>
     42 
     43 #ifndef AHC_TMODE_ENABLE
     44 #define AHC_TMODE_ENABLE 0
     45 #endif
     46 
     47 
     48 static void	ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
     49 static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
     50 static int	ahc_poll(struct ahc_softc *ahc, int wait);
     51 static void	ahc_setup_data(struct ahc_softc *ahc,
     52 			       struct scsipi_xfer *xs, struct scb *scb);
     53 static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
     54 static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
     55 			  struct proc *p);
     56 
     57 
     58 
     59 /*
     60  * Attach all the sub-devices we can find
     61  */
     62 int
     63 ahc_attach(struct ahc_softc *ahc)
     64 {
     65 	u_long 	s;
     66 	int i;
     67 	char ahc_info[256];
     68 
     69 	LIST_INIT(&ahc->pending_scbs);
     70 	for (i = 0; i < AHC_NUM_TARGETS; i++)
     71 		TAILQ_INIT(&ahc->untagged_queues[i]);
     72 
     73         ahc_lock(ahc, &s);
     74 
     75 	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
     76 	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;
     77 
     78 	ahc->sc_adapter.adapt_openings = AHC_MAX_QUEUE;
     79 	ahc->sc_adapter.adapt_max_periph = 16;
     80 
     81 	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
     82 	ahc->sc_adapter.adapt_minphys = ahc_minphys;
     83 	ahc->sc_adapter.adapt_request = ahc_action;
     84 
     85 	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
     86         ahc->sc_channel.chan_bustype = &scsi_bustype;
     87         ahc->sc_channel.chan_channel = 0;
     88         ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
     89         ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
     90         ahc->sc_channel.chan_id = ahc->our_id;
     91 
     92 	if (ahc->features & AHC_TWIN) {
     93 		ahc->sc_channel_b = ahc->sc_channel;
     94 		ahc->sc_channel_b.chan_id = ahc->our_id_b;
     95 		ahc->sc_channel_b.chan_channel = 1;
     96 	}
     97 
     98 	ahc_controller_info(ahc, ahc_info);
     99 	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
    100 
    101 	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
    102 		ahc->sc_child = config_found((void *)&ahc->sc_dev,
    103 		    &ahc->sc_channel, scsiprint);
    104 		if (ahc->features & AHC_TWIN)
    105 			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
    106 			    &ahc->sc_channel_b, scsiprint);
    107 	} else {
    108 		ahc->sc_child = config_found((void *)&ahc->sc_dev,
    109 		    &ahc->sc_channel_b, scsiprint);
    110 		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
    111 		    &ahc->sc_channel, scsiprint);
    112 	}
    113 
    114 	ahc_intr_enable(ahc, TRUE);
    115 
    116 	ahc_unlock(ahc, &s);
    117 	return (1);
    118 }
    119 
    120 /*
    121  * Catch an interrupt from the adapter
    122  */
    123 void
    124 ahc_platform_intr(void *arg)
    125 {
    126 	struct	ahc_softc *ahc;
    127 
    128 	ahc = (struct ahc_softc *)arg;
    129 	ahc_intr(ahc);
    130 }
    131 
    132 /*
    133  * We have an scb which has been processed by the
    134  * adaptor, now we look to see how the operation
    135  * went.
    136  */
    137 void
    138 ahc_done(struct ahc_softc *ahc, struct scb *scb)
    139 {
    140 	struct scsipi_xfer *xs;
    141   	struct scsipi_periph *periph;
    142 	u_long s;
    143 
    144 	xs = scb->xs;
    145 	periph = xs->xs_periph;
    146 	LIST_REMOVE(scb, pending_links);
    147 	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
    148 		struct scb_tailq *untagged_q;
    149 		int target_offset;
    150 
    151 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
    152 		untagged_q = &ahc->untagged_queues[target_offset];
    153 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
    154 		scb->flags &= ~SCB_UNTAGGEDQ;
    155 		ahc_run_untagged_queue(ahc, untagged_q);
    156 	}
    157 
    158 	callout_stop(&scb->xs->xs_callout);
    159 
    160 	if (xs->datalen) {
    161 		int op;
    162 
    163 		if (xs->xs_control & XS_CTL_DATA_IN)
    164 			op = BUS_DMASYNC_POSTREAD;
    165 		else
    166 			op = BUS_DMASYNC_POSTWRITE;
    167 		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
    168 				scb->dmamap->dm_mapsize, op);
    169 		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
    170 	}
    171 
    172 	/*
    173 	 * If the recovery SCB completes, we have to be
    174 	 * out of our timeout.
    175 	 */
    176 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
    177 		struct	scb *list_scb;
    178 
    179 		/*
    180 		 * We were able to complete the command successfully,
    181 		 * so reinstate the timeouts for all other pending
    182 		 * commands.
    183 		 */
    184 		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
    185 			struct scsipi_xfer *xs = list_scb->xs;
    186 
    187 			if (!(xs->xs_control & XS_CTL_POLL)) {
    188 				callout_reset(&list_scb->xs->xs_callout,
    189 				    (list_scb->xs->timeout > 1000000) ?
    190 				    (list_scb->xs->timeout / 1000) * hz :
    191 				    (list_scb->xs->timeout * hz) / 1000,
    192 				    ahc_timeout, list_scb);
    193 			}
    194 		}
    195 
    196 		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
    197 		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
    198 			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
    199 		scsipi_printaddr(xs->xs_periph);
    200 		printf("%s: no longer in timeout, status = %x\n",
    201 		       ahc_name(ahc), xs->status);
    202 	}
    203 
    204 	/* Don't clobber any existing error state */
    205 	if (xs->error != XS_NOERROR) {
    206 	  /* Don't clobber any existing error state */
    207 	} else if ((scb->flags & SCB_SENSE) != 0) {
    208 		/*
    209 		 * We performed autosense retrieval.
    210 		 *
    211 		 * Zero any sense not transferred by the
    212 		 * device.  The SCSI spec mandates that any
    213 		 * untransfered data should be assumed to be
    214 		 * zero.  Complete the 'bounce' of sense information
    215 		 * through buffers accessible via bus-space by
    216 		 * copying it into the clients csio.
    217 		 */
    218 		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
    219 		memcpy(&xs->sense.scsi_sense,
    220 		       ahc_get_sense_buf(ahc, scb),
    221 		       sizeof(xs->sense.scsi_sense));
    222 		xs->error = XS_SENSE;
    223 	}
    224 	if (scb->flags & SCB_FREEZE_QUEUE) {
    225 		scsipi_periph_thaw(periph, 1);
    226 		scb->flags &= ~SCB_FREEZE_QUEUE;
    227 	}
    228 
    229         ahc_lock(ahc, &s);
    230 	ahc_free_scb(ahc, scb);
    231         ahc_unlock(ahc, &s);
    232 
    233 	scsipi_done(xs);
    234 }
    235 
    236 static int
    237 ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
    238 	  struct proc *p)
    239 {
    240 	struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
    241 	int s, ret = ENOTTY;
    242 
    243 	switch (cmd) {
    244 	case SCBUSIORESET:
    245 		s = splbio();
    246 		ahc_reset_channel(ahc, ahc->channel, TRUE);
    247 		splx(s);
    248 		ret = 0;
    249 		break;
    250 	default:
    251 		break;
    252 	}
    253 
    254 	return ret;
    255 }
    256 
    257 static void
    258 ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
    259 {
    260 	struct ahc_softc *ahc;
    261 	int s;
    262 	struct ahc_initiator_tinfo *tinfo;
    263 	struct ahc_tmode_tstate *tstate;
    264 
    265 	ahc  = (void *)chan->chan_adapter->adapt_dev;
    266 
    267 	switch (req) {
    268 
    269 	case ADAPTER_REQ_RUN_XFER:
    270 	  {
    271 		struct scsipi_xfer *xs;
    272 		struct scsipi_periph *periph;
    273 	        struct scb *scb;
    274         	struct hardware_scb *hscb;
    275 		u_int target_id;
    276 		u_int our_id;
    277 		u_long s;
    278 
    279 		xs = arg;
    280 		periph = xs->xs_periph;
    281 
    282 		target_id = periph->periph_target;
    283                 our_id = ahc->our_id;
    284 
    285 		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));
    286 
    287 		/*
    288 		 * get an scb to use.
    289 		 */
    290 		ahc_lock(ahc, &s);
    291 		if ((scb = ahc_get_scb(ahc)) == NULL) {
    292 			xs->error = XS_RESOURCE_SHORTAGE;
    293 			ahc_unlock(ahc, &s);
    294 			scsipi_done(xs);
    295 			return;
    296 		}
    297 		ahc_unlock(ahc, &s);
    298 
    299 		hscb = scb->hscb;
    300 
    301 		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
    302 		scb->xs = xs;
    303 
    304 		/*
    305 		 * Put all the arguments for the xfer in the scb
    306 		 */
    307 		hscb->control = 0;
    308 		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
    309 		hscb->lun = periph->periph_lun;
    310 		if (xs->xs_control & XS_CTL_RESET) {
    311 			hscb->cdb_len = 0;
    312 			scb->flags |= SCB_DEVICE_RESET;
    313 			hscb->control |= MK_MESSAGE;
    314 			ahc_execute_scb(scb, NULL, 0);
    315 		}
    316 
    317 		ahc_setup_data(ahc, xs, scb);
    318 
    319 		break;
    320 	  }
    321 	case ADAPTER_REQ_GROW_RESOURCES:
    322   		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
    323 		return;
    324 
    325 	case ADAPTER_REQ_SET_XFER_MODE:
    326 	    {
    327 		struct scsipi_xfer_mode *xm = arg;
    328 		struct ahc_devinfo devinfo;
    329 		int target_id, our_id, first;
    330 		u_int width;
    331 		char channel;
    332 
    333 		target_id = xm->xm_target;
    334 		our_id = chan->chan_id;
    335 		channel = (chan->chan_channel == 1) ? 'B' : 'A';
    336 		s = splbio();
    337 		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
    338 		    &tstate);
    339 		ahc_compile_devinfo(&devinfo, our_id, target_id,
    340 		    0, channel, ROLE_INITIATOR);
    341 
    342 		/*
    343 		 * XXX since the period and offset are not provided here,
    344 		 * fake things by forcing a renegotiation using the user
    345 		 * settings if this is called for the first time (i.e.
    346 		 * during probe). Also, cap various values at the user
    347 		 * values, assuming that the user set it up that way.
    348 		 */
    349 		if (ahc->inited_target[target_id] == 0) {
    350 			tinfo->goal = tinfo->user;
    351 			tstate->tagenable |=
    352 			    (ahc->user_tagenable & devinfo.target_mask);
    353 			tstate->discenable |=
    354 			    (ahc->user_discenable & devinfo.target_mask);
    355 			ahc->inited_target[target_id] = 1;
    356 			first = 1;
    357 		} else
    358 			first = 0;
    359 
    360 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
    361 			width = MSG_EXT_WDTR_BUS_16_BIT;
    362 		else
    363 			width = MSG_EXT_WDTR_BUS_8_BIT;
    364 
    365 		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
    366 		if (width > tinfo->user.width)
    367 			width = tinfo->user.width;
    368 		tinfo->goal.width = width;
    369 
    370 		if (!(xm->xm_mode & PERIPH_CAP_SYNC)) {
    371 			tinfo->goal.period = 0;
    372 			tinfo->goal.offset = 0;
    373 			tinfo->goal.ppr_options = 0;
    374 		}
    375 
    376 		if ((xm->xm_mode & PERIPH_CAP_DT) &&
    377 		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
    378 			tinfo->goal.ppr_options |= MSG_EXT_PPR_DT_REQ;
    379 		else
    380 			tinfo->goal.ppr_options &= ~MSG_EXT_PPR_DT_REQ;
    381 
    382 		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
    383 		    (ahc->user_tagenable & devinfo.target_mask))
    384 			tstate->tagenable |= devinfo.target_mask;
    385 		else
    386 			tstate->tagenable &= ~devinfo.target_mask;
    387 
    388 		/*
    389 		 * If this is the first request, and no negotiation is
    390 		 * needed, just confirm the state to the scsipi layer,
    391 		 * so that it can print a message.
    392 		 */
    393 		if (!ahc_update_neg_request(ahc, &devinfo, tstate,
    394 		    tinfo, AHC_NEG_IF_NON_ASYNC) && first)
    395 			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
    396 		splx(s);
    397 	    }
    398 	}
    399 
    400 	return;
    401 }
    402 
    403 static void
    404 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
    405 {
    406 	struct	scb *scb;
    407 	struct scsipi_xfer *xs;
    408 	struct	ahc_softc *ahc;
    409 	struct	ahc_initiator_tinfo *tinfo;
    410 	struct	ahc_tmode_tstate *tstate;
    411 
    412 	u_int	mask;
    413 	long	s;
    414 
    415 	scb = (struct scb *)arg;
    416 	xs = scb->xs;
    417 	xs->error = 0;
    418 	xs->status = 0;
    419 	xs->xs_status = 0;
    420 	ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
    421 
    422 	if (nsegments != 0) {
    423 		struct	  ahc_dma_seg *sg;
    424 		bus_dma_segment_t *end_seg;
    425 		int op;
    426 
    427 		end_seg = dm_segs + nsegments;
    428 
    429 		/* Copy the segments into our SG list */
    430 		sg = scb->sg_list;
    431 		while (dm_segs < end_seg) {
    432 			uint32_t len;
    433 
    434 			sg->addr = ahc_htole32(dm_segs->ds_addr);
    435 			len = dm_segs->ds_len
    436 			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
    437 			sg->len = ahc_htole32(len);
    438 			sg++;
    439 			dm_segs++;
    440 		}
    441 
    442 		/*
    443 		 * Note where to find the SG entries in bus space.
    444 		 * We also set the full residual flag which the
    445 		 * sequencer will clear as soon as a data transfer
    446 		 * occurs.
    447 		 */
    448 		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);
    449 
    450 		if (xs->xs_control & XS_CTL_DATA_IN)
    451 			op = BUS_DMASYNC_PREREAD;
    452 		else
    453 			op = BUS_DMASYNC_PREWRITE;
    454 
    455 		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
    456 				scb->dmamap->dm_mapsize, op);
    457 
    458 		sg--;
    459 		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
    460 
    461 		/* Copy the first SG into the "current" data pointer area */
    462 		scb->hscb->dataptr = scb->sg_list->addr;
    463 		scb->hscb->datacnt = scb->sg_list->len;
    464 	} else {
    465 		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
    466 		scb->hscb->dataptr = 0;
    467 		scb->hscb->datacnt = 0;
    468 	}
    469 
    470 	scb->sg_count = nsegments;
    471 
    472 	ahc_lock(ahc, &s);
    473 
    474 	/*
    475 	 * Last time we need to check if this SCB needs to
    476 	 * be aborted.
    477 	 */
    478 	if (xs->xs_status & XS_STS_DONE) {
    479 		if (nsegments != 0)
    480 			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
    481 		ahc_free_scb(ahc, scb);
    482 		ahc_unlock(ahc, &s);
    483 		scsipi_done(xs);
    484 		return;
    485 	}
    486 
    487 	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
    488 				    SCSIID_OUR_ID(scb->hscb->scsiid),
    489 				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
    490 				    &tstate);
    491 
    492 	mask = SCB_GET_TARGET_MASK(ahc, scb);
    493 	scb->hscb->scsirate = tinfo->scsirate;
    494 	scb->hscb->scsioffset = tinfo->curr.offset;
    495 
    496 	if ((tstate->ultraenb & mask) != 0)
    497 		scb->hscb->control |= ULTRAENB;
    498 
    499 	if ((tstate->discenable & mask) != 0)
    500 	    	scb->hscb->control |= DISCENB;
    501 
    502 	if (xs->xs_tag_type)
    503 		scb->hscb->control |= xs->xs_tag_type;
    504 
    505 	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
    506 	     && tinfo->goal.offset == 0
    507 	     && tinfo->goal.ppr_options == 0)) {
    508 		scb->flags |= SCB_NEGOTIATE;
    509 		scb->hscb->control |= MK_MESSAGE;
    510 	} else if ((tstate->auto_negotiate & mask) != 0) {
    511 		scb->flags |= SCB_AUTO_NEGOTIATE;
    512 		scb->hscb->control |= MK_MESSAGE;
    513 	}
    514 
    515 	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
    516 
    517 	if (!(xs->xs_control & XS_CTL_POLL)) {
    518 		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
    519 			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
    520 			      ahc_timeout, scb);
    521 	}
    522 
    523 	/*
    524 	 * We only allow one untagged transaction
    525 	 * per target in the initiator role unless
    526 	 * we are storing a full busy target *lun*
    527 	 * table in SCB space.
    528 	 */
    529 	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
    530 	    && (ahc->flags & AHC_SCB_BTT) == 0) {
    531 		struct scb_tailq *untagged_q;
    532 		int target_offset;
    533 
    534 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
    535 		untagged_q = &(ahc->untagged_queues[target_offset]);
    536 		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
    537 		scb->flags |= SCB_UNTAGGEDQ;
    538 		if (TAILQ_FIRST(untagged_q) != scb) {
    539 			ahc_unlock(ahc, &s);
    540 			return;
    541 		}
    542 	}
    543 	scb->flags |= SCB_ACTIVE;
    544 
    545 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
    546 		/* Define a mapping from our tag to the SCB. */
    547 		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
    548 		ahc_pause(ahc);
    549 		if ((ahc->flags & AHC_PAGESCBS) == 0)
    550 			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
    551 		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
    552 		ahc_unpause(ahc);
    553 	} else {
    554 		ahc_queue_scb(ahc, scb);
    555 	}
    556 
    557 	if (!(xs->xs_control & XS_CTL_POLL)) {
    558 		ahc_unlock(ahc, &s);
    559 		return;
    560 	}
    561 
    562 	/*
    563 	 * If we can't use interrupts, poll for completion
    564 	 */
    565 	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
    566 	do {
    567 		if (ahc_poll(ahc, xs->timeout)) {
    568 			if (!(xs->xs_control & XS_CTL_SILENT))
    569 				printf("cmd fail\n");
    570 			ahc_timeout(scb);
    571 			break;
    572 		}
    573 	} while (!(xs->xs_status & XS_STS_DONE));
    574 	ahc_unlock(ahc, &s);
    575 
    576 	return;
    577 }
    578 
    579 static int
    580 ahc_poll(struct ahc_softc *ahc, int wait)
    581 {
    582 	while (--wait) {
    583 		DELAY(1000);
    584 		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
    585 			break;
    586 	}
    587 
    588 	if (wait == 0) {
    589 		printf("%s: board is not responding\n", ahc_name(ahc));
    590 		return (EIO);
    591 	}
    592 
    593 	ahc_intr((void *)ahc);
    594 	return (0);
    595 }
    596 
    597 static void
    598 ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
    599 	       struct scb *scb)
    600 {
    601 	struct hardware_scb *hscb;
    602 
    603 	hscb = scb->hscb;
    604 	xs->resid = xs->status = 0;
    605 
    606 	hscb->cdb_len = xs->cmdlen;
    607 	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
    608 		u_long s;
    609 
    610 		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
    611 		ahc_lock(ahc, &s);
    612 		ahc_free_scb(ahc, scb);
    613 		ahc_unlock(ahc, &s);
    614 		scsipi_done(xs);
    615 		return;
    616 	}
    617 
    618 	if (hscb->cdb_len > 12) {
    619 		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
    620 		scb->flags |= SCB_CDB32_PTR;
    621 	} else {
    622 		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
    623 	}
    624 
    625 	/* Only use S/G if there is a transfer */
    626 	if (xs->datalen) {
    627 		int error;
    628 
    629                 error = bus_dmamap_load(ahc->parent_dmat,
    630 					scb->dmamap, xs->data,
    631 					xs->datalen, NULL,
    632 					((xs->xs_control & XS_CTL_NOSLEEP) ?
    633 					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
    634 					BUS_DMA_STREAMING |
    635 					((xs->xs_control & XS_CTL_DATA_IN) ?
    636 					 BUS_DMA_READ : BUS_DMA_WRITE));
    637                 if (error) {
    638 #ifdef AHC_DEBUG
    639                         printf("%s: in ahc_setup_data(): bus_dmamap_load() "
    640 			       "= %d\n",
    641 			       ahc_name(ahc), error);
    642 #endif
    643                         xs->error = XS_RESOURCE_SHORTAGE;
    644                         scsipi_done(xs);
    645                         return;
    646                 }
    647                 ahc_execute_scb(scb,
    648 				scb->dmamap->dm_segs,
    649 				scb->dmamap->dm_nsegs);
    650 	} else {
    651 		ahc_execute_scb(scb, NULL, 0);
    652 	}
    653 }
    654 
    655 static void
    656 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
    657 
    658 	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
    659 		struct scb *list_scb;
    660 
    661 		scb->flags |= SCB_RECOVERY_SCB;
    662 
    663 		/*
    664 		 * Take all queued, but not sent SCBs out of the equation.
    665 		 * Also ensure that no new CCBs are queued to us while we
    666 		 * try to fix this problem.
    667 		 */
    668 		scsipi_channel_freeze(&ahc->sc_channel, 1);
    669 		if (ahc->features & AHC_TWIN)
    670 			scsipi_channel_freeze(&ahc->sc_channel_b, 1);
    671 
    672 		/*
    673 		 * Go through all of our pending SCBs and remove
    674 		 * any scheduled timeouts for them.  We will reschedule
    675 		 * them after we've successfully fixed this problem.
    676 		 */
    677 		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
    678 			callout_stop(&list_scb->xs->xs_callout);
    679 		}
    680 	}
    681 }
    682 
    683 void
    684 ahc_timeout(void *arg)
    685 {
    686 	struct	scb *scb;
    687 	struct	ahc_softc *ahc;
    688 	long	s;
    689 	int	found;
    690 	u_int	last_phase;
    691 	int	target;
    692 	int	lun;
    693 	int	i;
    694 	char	channel;
    695 
    696 	scb = (struct scb *)arg;
    697 	ahc = (struct ahc_softc *)scb->ahc_softc;
    698 
    699 	ahc_lock(ahc, &s);
    700 
    701 	ahc_pause_and_flushwork(ahc);
    702 
    703 	if ((scb->flags & SCB_ACTIVE) == 0) {
    704 		/* Previous timeout took care of me already */
    705 		printf("%s: Timedout SCB already complete. "
    706 		       "Interrupts may not be functioning.\n", ahc_name(ahc));
    707 		ahc_unpause(ahc);
    708 		ahc_unlock(ahc, &s);
    709 		return;
    710 	}
    711 
    712 	target = SCB_GET_TARGET(ahc, scb);
    713 	channel = SCB_GET_CHANNEL(ahc, scb);
    714 	lun = SCB_GET_LUN(scb);
    715 
    716 	ahc_print_path(ahc, scb);
    717 	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
    718 	ahc_dump_card_state(ahc);
    719 	last_phase = ahc_inb(ahc, LASTPHASE);
    720 	if (scb->sg_count > 0) {
    721 		for (i = 0; i < scb->sg_count; i++) {
    722 			printf("sg[%d] - Addr 0x%x : Length %d\n",
    723 			       i,
    724 			       scb->sg_list[i].addr,
    725 			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
    726 		}
    727 	}
    728 	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
    729 		/*
    730 		 * Been down this road before.
    731 		 * Do a full bus reset.
    732 		 */
    733 bus_reset:
    734 		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
    735 		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
    736 		printf("%s: Issued Channel %c Bus Reset. "
    737 		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
    738 	} else {
    739 		/*
    740 		 * If we are a target, transition to bus free and report
    741 		 * the timeout.
    742 		 *
    743 		 * The target/initiator that is holding up the bus may not
    744 		 * be the same as the one that triggered this timeout
    745 		 * (different commands have different timeout lengths).
    746 		 * If the bus is idle and we are actiing as the initiator
    747 		 * for this request, queue a BDR message to the timed out
    748 		 * target.  Otherwise, if the timed out transaction is
    749 		 * active:
    750 		 *   Initiator transaction:
    751 		 *	Stuff the message buffer with a BDR message and assert
    752 		 *	ATN in the hopes that the target will let go of the bus
    753 		 *	and go to the mesgout phase.  If this fails, we'll
    754 		 *	get another timeout 2 seconds later which will attempt
    755 		 *	a bus reset.
    756 		 *
    757 		 *   Target transaction:
    758 		 *	Transition to BUS FREE and report the error.
    759 		 *	It's good to be the target!
    760 		 */
    761 		u_int active_scb_index;
    762 		u_int saved_scbptr;
    763 
    764 		saved_scbptr = ahc_inb(ahc, SCBPTR);
    765 		active_scb_index = ahc_inb(ahc, SCB_TAG);
    766 
    767 		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
    768 		  && (active_scb_index < ahc->scb_data->numscbs)) {
    769 			struct scb *active_scb;
    770 
    771 			/*
    772 			 * If the active SCB is not us, assume that
    773 			 * the active SCB has a longer timeout than
    774 			 * the timedout SCB, and wait for the active
    775 			 * SCB to timeout.
    776 			 */
    777 			active_scb = ahc_lookup_scb(ahc, active_scb_index);
    778 			if (active_scb != scb) {
    779 				uint64_t newtimeout;
    780 
    781 				ahc_print_path(ahc, scb);
    782 				printf("Other SCB Timeout%s",
    783 			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
    784 				       ? " again\n" : "\n");
    785 				scb->flags |= SCB_OTHERTCL_TIMEOUT;
    786 				newtimeout = MAX(active_scb->xs->timeout,
    787 						 scb->xs->timeout);
    788 				callout_reset(&scb->xs->xs_callout,
    789 				    newtimeout > 1000000 ?
    790 				    (newtimeout / 1000) * hz :
    791 				    (newtimeout * hz) / 1000,
    792 				    ahc_timeout, scb);
    793 				ahc_unpause(ahc);
    794 				ahc_unlock(ahc, &s);
    795 				return;
    796 			}
    797 
    798 			/* It's us */
    799 			if ((scb->flags & SCB_TARGET_SCB) != 0) {
    800 
    801 				/*
    802 				 * Send back any queued up transactions
    803 				 * and properly record the error condition.
    804 				 */
    805 				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
    806 					       SCB_GET_CHANNEL(ahc, scb),
    807 					       SCB_GET_LUN(scb),
    808 					       scb->hscb->tag,
    809 					       ROLE_TARGET,
    810 					       CAM_CMD_TIMEOUT);
    811 
    812 				/* Will clear us from the bus */
    813 				ahc_restart(ahc);
    814 				ahc_unlock(ahc, &s);
    815 				return;
    816 			}
    817 
    818 			ahc_set_recoveryscb(ahc, active_scb);
    819 			ahc_outb(ahc, MSG_OUT, HOST_MSG);
    820 			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
    821 			ahc_print_path(ahc, active_scb);
    822 			printf("BDR message in message buffer\n");
    823 			active_scb->flags |= SCB_DEVICE_RESET;
    824 			callout_reset(&active_scb->xs->xs_callout,
    825 				      2 * hz, ahc_timeout, active_scb);
    826 			ahc_unpause(ahc);
    827 		} else {
    828 			int	 disconnected;
    829 
    830 			/* XXX Shouldn't panic.  Just punt instead? */
    831 			if ((scb->flags & SCB_TARGET_SCB) != 0)
    832 				panic("Timed-out target SCB but bus idle");
    833 
    834 			if (last_phase != P_BUSFREE
    835 			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
    836 				/* XXX What happened to the SCB? */
    837 				/* Hung target selection.  Goto busfree */
    838 				printf("%s: Hung target selection\n",
    839 				       ahc_name(ahc));
    840 				ahc_restart(ahc);
    841 				ahc_unlock(ahc, &s);
    842 				return;
    843 			}
    844 
    845 			if (ahc_search_qinfifo(ahc, target, channel, lun,
    846 					       scb->hscb->tag, ROLE_INITIATOR,
    847 					       /*status*/0, SEARCH_COUNT) > 0) {
    848 				disconnected = FALSE;
    849 			} else {
    850 				disconnected = TRUE;
    851 			}
    852 
    853 			if (disconnected) {
    854 
    855 				ahc_set_recoveryscb(ahc, scb);
    856 				/*
    857 				 * Actually re-queue this SCB in an attempt
    858 				 * to select the device before it reconnects.
    859 				 * In either case (selection or reselection),
    860 				 * we will now issue a target reset to the
    861 				 * timed-out device.
    862 				 *
    863 				 * Set the MK_MESSAGE control bit indicating
    864 				 * that we desire to send a message.  We
    865 				 * also set the disconnected flag since
    866 				 * in the paging case there is no guarantee
    867 				 * that our SCB control byte matches the
    868 				 * version on the card.  We don't want the
    869 				 * sequencer to abort the command thinking
    870 				 * an unsolicited reselection occurred.
    871 				 */
    872 				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
    873 				scb->flags |= SCB_DEVICE_RESET;
    874 
    875 				/*
    876 				 * Remove any cached copy of this SCB in the
    877 				 * disconnected list in preparation for the
    878 				 * queuing of our abort SCB.  We use the
    879 				 * same element in the SCB, SCB_NEXT, for
    880 				 * both the qinfifo and the disconnected list.
    881 				 */
    882 				ahc_search_disc_list(ahc, target, channel,
    883 						     lun, scb->hscb->tag,
    884 						     /*stop_on_first*/TRUE,
    885 						     /*remove*/TRUE,
    886 						     /*save_state*/FALSE);
    887 
    888 				/*
    889 				 * In the non-paging case, the sequencer will
    890 				 * never re-reference the in-core SCB.
    891 				 * To make sure we are notified during
    892 				 * reslection, set the MK_MESSAGE flag in
    893 				 * the card's copy of the SCB.
    894 				 */
    895 				if ((ahc->flags & AHC_PAGESCBS) == 0) {
    896 					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
    897 					ahc_outb(ahc, SCB_CONTROL,
    898 						 ahc_inb(ahc, SCB_CONTROL)
    899 						| MK_MESSAGE);
    900 				}
    901 
    902 				/*
    903 				 * Clear out any entries in the QINFIFO first
    904 				 * so we are the next SCB for this target
    905 				 * to run.
    906 				 */
    907 				ahc_search_qinfifo(ahc,
    908 						   SCB_GET_TARGET(ahc, scb),
    909 						   channel, SCB_GET_LUN(scb),
    910 						   SCB_LIST_NULL,
    911 						   ROLE_INITIATOR,
    912 						   CAM_REQUEUE_REQ,
    913 						   SEARCH_COMPLETE);
    914 				ahc_print_path(ahc, scb);
    915 				printf("Queuing a BDR SCB\n");
    916 				ahc_qinfifo_requeue_tail(ahc, scb);
    917 				ahc_outb(ahc, SCBPTR, saved_scbptr);
    918 				callout_reset(&scb->xs->xs_callout, 2 * hz,
    919 					      ahc_timeout, scb);
    920 				ahc_unpause(ahc);
    921 			} else {
				/* Go "immediately" to the bus reset */
    923 				/* This shouldn't happen */
    924 				ahc_set_recoveryscb(ahc, scb);
    925 				ahc_print_path(ahc, scb);
    926 				printf("SCB %d: Immediate reset.  "
    927 					"Flags = 0x%x\n", scb->hscb->tag,
    928 					scb->flags);
    929 				goto bus_reset;
    930 			}
    931 		}
    932 	}
    933 	ahc_unlock(ahc, &s);
    934 }
    935 
    936 void
    937 ahc_platform_set_tags(struct ahc_softc *ahc,
    938 		      struct ahc_devinfo *devinfo, int enable)
    939 {
    940 	struct ahc_initiator_tinfo *tinfo;
    941         struct ahc_tmode_tstate *tstate;
    942 
    943         tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
    944                                     devinfo->target, &tstate);
    945 
    946         if (enable)
    947                 tstate->tagenable |= devinfo->target_mask;
    948 	else
    949 	  	tstate->tagenable &= ~devinfo->target_mask;
    950 }
    951 
    952 int
    953 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
    954 {
    955 	if (sizeof(struct ahc_platform_data) == 0)
    956 		return 0;
    957 	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
    958 				    M_NOWAIT);
    959 	if (ahc->platform_data == NULL)
    960 		return (ENOMEM);
    961 	return (0);
    962 }
    963 
    964 void
    965 ahc_platform_free(struct ahc_softc *ahc)
    966 {
    967 	if (sizeof(struct ahc_platform_data) == 0)
    968 		return;
    969 	free(ahc->platform_data, M_DEVBUF);
    970 }
    971 
/*
 * Platform comparison hook for ordering softcs.  NetBSD imposes no
 * platform-specific ordering criteria, so all controllers compare
 * as equal.  Neither argument is examined.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	(void)lahc;
	(void)rahc;
	return 0;
}
    977 
    978 int
    979 ahc_detach(struct device *self, int flags)
    980 {
    981 	int rv = 0;
    982 
    983 	struct ahc_softc *ahc = (struct ahc_softc*)self;
    984 
    985 	ahc_intr_enable(ahc, FALSE);
    986 	if (ahc->sc_child != NULL)
    987 		rv = config_detach(ahc->sc_child, flags);
    988 	if (rv == 0 && ahc->sc_child_b != NULL)
    989 		rv = config_detach(ahc->sc_child_b, flags);
    990 
    991 	ahc_free(ahc);
    992 
    993 	shutdownhook_disestablish(ahc->shutdown_hook);
    994 
    995 	return (rv);
    996 }
    997 
    998 
    999 void
   1000 ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
   1001 	       ac_code code, void *opt_arg)
   1002 {
   1003 	struct ahc_tmode_tstate *tstate;
   1004 	struct ahc_initiator_tinfo *tinfo;
   1005 	struct ahc_devinfo devinfo;
   1006 	struct scsipi_channel *chan;
   1007 	struct scsipi_xfer_mode xm;
   1008 
   1009 	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
   1010 	switch (code) {
   1011 	case AC_TRANSFER_NEG:
   1012 		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
   1013 			    &tstate);
   1014 		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
   1015 		    channel, ROLE_UNKNOWN);
   1016 		/*
   1017 		 * Don't bother if negotiating. XXX?
   1018 		 */
   1019 		if (tinfo->curr.period != tinfo->goal.period
   1020 		    || tinfo->curr.width != tinfo->goal.width
   1021 		    || tinfo->curr.offset != tinfo->goal.offset
   1022 		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
   1023 			break;
   1024 		xm.xm_target = target;
   1025 		xm.xm_mode = 0;
   1026 		xm.xm_period = tinfo->curr.period;
   1027 		xm.xm_offset = tinfo->curr.offset;
   1028 		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
   1029 			xm.xm_mode |= PERIPH_CAP_WIDE16;
   1030 		if (tinfo->curr.period)
   1031 			xm.xm_mode |= PERIPH_CAP_SYNC;
   1032 		if (tstate->tagenable & devinfo.target_mask)
   1033 			xm.xm_mode |= PERIPH_CAP_TQING;
   1034 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
   1035 		break;
   1036 	case AC_BUS_RESET:
   1037 		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
   1038 	case AC_SENT_BDR:
   1039 	default:
   1040 		break;
   1041 	}
   1042 }
   1043