      1 /*	$NetBSD: siop_common.c,v 1.37 2005/02/27 00:27:02 perry Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2000, 2002 Manuel Bouyer.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. All advertising materials mentioning features or use of this software
     15  *    must display the following acknowledgement:
     16  *	This product includes software developed by Manuel Bouyer.
     17  * 4. The name of the author may not be used to endorse or promote products
     18  *    derived from this software without specific prior written permission.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  *
     31  */
     32 
     33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
     34 
     35 #include <sys/cdefs.h>
     36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.37 2005/02/27 00:27:02 perry Exp $");
     37 
     38 #include <sys/param.h>
     39 #include <sys/systm.h>
     40 #include <sys/device.h>
     41 #include <sys/malloc.h>
     42 #include <sys/buf.h>
     43 #include <sys/kernel.h>
     44 #include <sys/scsiio.h>
     45 
     46 #include <uvm/uvm_extern.h>
     47 
     48 #include <machine/endian.h>
     49 #include <machine/bus.h>
     50 
     51 #include <dev/scsipi/scsi_all.h>
     52 #include <dev/scsipi/scsi_message.h>
     53 #include <dev/scsipi/scsipi_all.h>
     54 
     55 #include <dev/scsipi/scsiconf.h>
     56 
     57 #include <dev/ic/siopreg.h>
     58 #include <dev/ic/siopvar_common.h>
     59 
     60 #include "opt_siop.h"
     61 
     62 #undef DEBUG
     63 #undef DEBUG_DR
     64 #undef DEBUG_NEG
     65 
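         /*
          * Common attach code: allocate and map DMA-safe memory for the
          * script when the chip has no on-board RAM, set up the scsipi
          * adapter and channel, and compute the sync period limits
          * supported by this chip and clock.
          */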
     66 int
     67 siop_common_attach(sc)
     68 	struct siop_common_softc *sc;
     69 {
     70 	int error, i;
     71 	bus_dma_segment_t seg;
     72 	int rseg;
     73 
     74 	/*
     75 	 * Allocate DMA-safe memory for the script and map it.
     76 	 */
     77 	if ((sc->features & SF_CHIP_RAM) == 0) {
     78 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
     79 		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
     80 		if (error) {
     81 			aprint_error(
     82 			    "%s: unable to allocate script DMA memory, "
     83 			    "error = %d\n", sc->sc_dev.dv_xname, error);
     84 			return error;
     85 		}
     86 		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
     87 		    (caddr_t *)&sc->sc_script,
     88 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
     89 		if (error) {
     90 			aprint_error("%s: unable to map script DMA memory, "
     91 			    "error = %d\n", sc->sc_dev.dv_xname, error);
     92 			return error;
     93 		}
     94 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
     95 		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
     96 		if (error) {
     97 			aprint_error("%s: unable to create script DMA map, "
     98 			    "error = %d\n", sc->sc_dev.dv_xname, error);
     99 			return error;
    100 		}
    101 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
    102 		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
    103 		if (error) {
    104 			aprint_error("%s: unable to load script DMA map, "
    105 			    "error = %d\n", sc->sc_dev.dv_xname, error);
    106 			return error;
    107 		}
    108 		sc->sc_scriptaddr =
    109 		    sc->sc_scriptdma->dm_segs[0].ds_addr;
    110 		sc->ram_size = PAGE_SIZE;
    111 	}
    112 
    113 	sc->sc_adapt.adapt_dev = &sc->sc_dev;
    114 	sc->sc_adapt.adapt_nchannels = 1;
    115 	sc->sc_adapt.adapt_openings = 0;
    116 	sc->sc_adapt.adapt_ioctl = siop_ioctl;
    117 	sc->sc_adapt.adapt_minphys = minphys;
    118 
    119 	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
    120 	sc->sc_chan.chan_adapter = &sc->sc_adapt;
    121 	sc->sc_chan.chan_bustype = &scsi_bustype;
    122 	sc->sc_chan.chan_channel = 0;
    123 	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
    124 	sc->sc_chan.chan_ntargets =
    125 	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
    126 	sc->sc_chan.chan_nluns = 8;
    127 	sc->sc_chan.chan_id =
    128 	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
    129 	if (sc->sc_chan.chan_id == 0 ||
    130 	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
    131 		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
    132 
    133 	for (i = 0; i < 16; i++)
    134 		sc->targets[i] = NULL;
    135 
    136 	/* find min/max sync period for this chip */
    137 	sc->st_maxsync = 0;
    138 	sc->dt_maxsync = 0;
    139 	sc->st_minsync = 255;
    140 	sc->dt_minsync = 255;
    141 	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
    142 		if (sc->clock_period != scf_period[i].clock)
    143 			continue;
    144 		if (sc->st_maxsync < scf_period[i].period)
    145 			sc->st_maxsync = scf_period[i].period;
    146 		if (sc->st_minsync > scf_period[i].period)
    147 			sc->st_minsync = scf_period[i].period;
    148 	}
    149 	if (sc->st_maxsync == 255 || sc->st_minsync == 0)
    150 		panic("siop: can't find my sync parameters");
    151 	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
    152 		if (sc->clock_period != dt_scf_period[i].clock)
    153 			continue;
    154 		if (sc->dt_maxsync < dt_scf_period[i].period)
    155 			sc->dt_maxsync = dt_scf_period[i].period;
    156 		if (sc->dt_minsync > dt_scf_period[i].period)
    157 			sc->dt_minsync = dt_scf_period[i].period;
    158 	}
    159 	if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
    160 		panic("siop: can't find my sync parameters");
    161 	return 0;
    162 }
    163 
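         /*
          * Reset the chip and reprogram the register defaults: interrupt
          * masks, selection timeout, host ID, clock doubler/quadrupler and
          * the chip-specific features, then clear the on-board RAM on chips
          * that have it and call the sc_reset() hook.
          */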
    164 void
    165 siop_common_reset(sc)
    166 	struct siop_common_softc *sc;
    167 {
    168 	u_int32_t stest3;
    169 
    170 	/* reset the chip */
    171 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
    172 	delay(1000);
    173 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
    174 
    175 	/* init registers */
    176 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
    177 	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
    178 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
    179 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
    180 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
    181 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
    182 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
    183 	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
    184 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
    185 	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
    186 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
    187 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
    188 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
    189 	    (0xb << STIME0_SEL_SHIFT));
    190 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
    191 	    sc->sc_chan.chan_id | SCID_RRE);
    192 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
    193 	    1 << sc->sc_chan.chan_id);
    194 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
    195 	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
    196 	if (sc->features & SF_CHIP_AAIP)
    197 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
    198 		    SIOP_AIPCNTL1, AIPCNTL1_DIS);
    199 
     200 	/* enable clock doubler or quadrupler if appropriate */
    201 	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
    202 		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
    203 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
    204 		    STEST1_DBLEN);
    205 		if (sc->features & SF_CHIP_QUAD) {
     206 			/* wait for the PLL to lock */
    207 			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
    208 			    SIOP_STEST4) & STEST4_LOCK) == 0)
    209 				delay(10);
    210 		} else {
    211 			/* data sheet says 20us - more won't hurt */
    212 			delay(100);
    213 		}
    214 		/* halt scsi clock, select doubler/quad, restart clock */
    215 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
    216 		    stest3 | STEST3_HSC);
    217 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
    218 		    STEST1_DBLEN | STEST1_DBLSEL);
    219 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
    220 	} else {
    221 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
    222 	}
    223 	if (sc->features & SF_CHIP_FIFO)
    224 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
    225 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
    226 		    CTEST5_DFS);
    227 	if (sc->features & SF_CHIP_LED0) {
    228 		/* Set GPIO0 as output if software LED control is required */
    229 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
    230 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
    231 	}
    232 	if (sc->features & SF_BUS_ULTRA3) {
    233 		/* reset SCNTL4 */
    234 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
    235 	}
    236 	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
    237 	    STEST4_MODE_MASK;
    238 
    239 	/*
     240 	 * initialise the RAM. Without this we may get SCSI gross errors on
    241 	 * the 1010
    242 	 */
    243 	if (sc->features & SF_CHIP_RAM)
    244 		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
    245 			0, 0, sc->ram_size / 4);
    246 	sc->sc_reset(sc);
    247 }
    248 
    249 /* prepare tables before sending a cmd */
    250 void
    251 siop_setuptables(siop_cmd)
    252 	struct siop_common_cmd *siop_cmd;
    253 {
    254 	int i;
    255 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    256 	struct scsipi_xfer *xs = siop_cmd->xs;
    257 	int target = xs->xs_periph->periph_target;
    258 	int lun = xs->xs_periph->periph_lun;
    259 	int msgoffset = 1;
    260 
    261 	siop_cmd->siop_tables->id = htole32(sc->targets[target]->id);
    262 	memset(siop_cmd->siop_tables->msg_out, 0,
    263 	    sizeof(siop_cmd->siop_tables->msg_out));
    264 	/* request sense doesn't disconnect */
    265 	if (xs->xs_control & XS_CTL_REQSENSE)
    266 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
    267 	else if ((sc->features & SF_CHIP_GEBUG) &&
    268 	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
    269 		/*
    270 		 * 1010 bug: it seems that the 1010 has problems with reselect
     271 		 * when not in wide mode (it generates false SCSI gross errors).
    272 		 * The FreeBSD sym driver has comments about it but their
    273 		 * workaround (disable SCSI gross error reporting) doesn't
    274 		 * work with my adapter. So disable disconnect when not
    275 		 * wide.
    276 		 */
    277 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
    278 	else
    279 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
    280 	if (xs->xs_tag_type != 0) {
    281 		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
    282 			scsipi_printaddr(xs->xs_periph);
    283 			printf(": tagged command type %d id %d\n",
    284 			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
    285 			panic("tagged command for non-tagging device");
    286 		}
    287 		siop_cmd->flags |= CMDFL_TAG;
    288 		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
    289 		/*
    290 		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
    291 		 * different one
    292 		 */
    293 		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
    294 		msgoffset = 3;
    295 	}
     296 	siop_cmd->siop_tables->t_msgout.count = htole32(msgoffset);
    297 	if (sc->targets[target]->status == TARST_ASYNC) {
    298 		if ((sc->targets[target]->flags & TARF_DT) &&
    299 			(sc->mode == STEST4_MODE_LVD)) {
    300 			sc->targets[target]->status = TARST_PPR_NEG;
     301 			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
    302 			    sc->maxoff);
    303 		} else if (sc->targets[target]->flags & TARF_WIDE) {
    304 			sc->targets[target]->status = TARST_WIDE_NEG;
    305 			siop_wdtr_msg(siop_cmd, msgoffset,
    306 			    MSG_EXT_WDTR_BUS_16_BIT);
    307 		} else if (sc->targets[target]->flags & TARF_SYNC) {
    308 			sc->targets[target]->status = TARST_SYNC_NEG;
    309 			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
     310 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
    311 		} else {
    312 			sc->targets[target]->status = TARST_OK;
    313 			siop_update_xfer_mode(sc, target);
    314 		}
    315 	}
    316 	siop_cmd->siop_tables->status =
    317 	    htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */
    318 
    319 	siop_cmd->siop_tables->cmd.count =
    320 	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
    321 	siop_cmd->siop_tables->cmd.addr =
    322 	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
    323 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
    324 		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
    325 			siop_cmd->siop_tables->data[i].count =
    326 			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
    327 			siop_cmd->siop_tables->data[i].addr =
    328 			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
    329 		}
    330 	}
    331 }
    332 
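         /*
          * Handle a WDTR message from the target, either in reply to our own
          * wide negotiation or target-initiated.
          */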
    333 int
    334 siop_wdtr_neg(siop_cmd)
    335 	struct siop_common_cmd *siop_cmd;
    336 {
    337 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    338 	struct siop_common_target *siop_target = siop_cmd->siop_target;
    339 	int target = siop_cmd->xs->xs_periph->periph_target;
    340 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
    341 
    342 	if (siop_target->status == TARST_WIDE_NEG) {
    343 		/* we initiated wide negotiation */
    344 		switch (tables->msg_in[3]) {
    345 		case MSG_EXT_WDTR_BUS_8_BIT:
    346 			siop_target->flags &= ~TARF_ISWIDE;
    347 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
    348 			break;
    349 		case MSG_EXT_WDTR_BUS_16_BIT:
    350 			if (siop_target->flags & TARF_WIDE) {
    351 				siop_target->flags |= TARF_ISWIDE;
    352 				sc->targets[target]->id |= (SCNTL3_EWS << 24);
    353 				break;
    354 			}
     355 		/* FALLTHROUGH */
    356 		default:
    357 			/*
     358 			 * we got more than we can handle; this shouldn't
    359 			 * happen. Reject, and stay async
    360 			 */
    361 			siop_target->flags &= ~TARF_ISWIDE;
    362 			siop_target->status = TARST_OK;
    363 			siop_target->offset = siop_target->period = 0;
    364 			siop_update_xfer_mode(sc, target);
    365 			printf("%s: rejecting invalid wide negotiation from "
    366 			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
    367 			    tables->msg_in[3]);
     368 			tables->t_msgout.count = htole32(1);
    369 			tables->msg_out[0] = MSG_MESSAGE_REJECT;
    370 			return SIOP_NEG_MSGOUT;
    371 		}
    372 		tables->id = htole32(sc->targets[target]->id);
    373 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
    374 		    SIOP_SCNTL3,
    375 		    (sc->targets[target]->id >> 24) & 0xff);
    376 		/* we now need to do sync */
    377 		if (siop_target->flags & TARF_SYNC) {
    378 			siop_target->status = TARST_SYNC_NEG;
    379 			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
    380 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
    381 			return SIOP_NEG_MSGOUT;
    382 		} else {
    383 			siop_target->status = TARST_OK;
    384 			siop_update_xfer_mode(sc, target);
    385 			return SIOP_NEG_ACK;
    386 		}
    387 	} else {
    388 		/* target initiated wide negotiation */
    389 		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
    390 		    && (siop_target->flags & TARF_WIDE)) {
    391 			siop_target->flags |= TARF_ISWIDE;
    392 			sc->targets[target]->id |= SCNTL3_EWS << 24;
    393 		} else {
    394 			siop_target->flags &= ~TARF_ISWIDE;
    395 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
    396 		}
    397 		tables->id = htole32(sc->targets[target]->id);
    398 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
    399 		    (sc->targets[target]->id >> 24) & 0xff);
    400 		/*
     401 		 * we just reset the wide parameters, so fall back to async,
     402 		 * but don't schedule a sync neg; the target should initiate it
    403 		 */
    404 		siop_target->status = TARST_OK;
    405 		siop_target->offset = siop_target->period = 0;
    406 		siop_update_xfer_mode(sc, target);
    407 		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
    408 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
    409 		return SIOP_NEG_MSGOUT;
    410 	}
    411 }
    412 
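         /*
          * Handle a PPR message from the target. Only a reply to our own
          * DT negotiation is accepted; anything else is rejected.
          */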
    413 int
    414 siop_ppr_neg(siop_cmd)
    415 	struct siop_common_cmd *siop_cmd;
    416 {
    417 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    418 	struct siop_common_target *siop_target = siop_cmd->siop_target;
    419 	int target = siop_cmd->xs->xs_periph->periph_target;
    420 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
    421 	int sync, offset, options, scf = 0;
    422 	int i;
    423 
    424 #ifdef DEBUG_NEG
     425 	printf("%s: answer on ppr negotiation:", sc->sc_dev.dv_xname);
    426 	for (i = 0; i < 8; i++)
    427 		printf(" 0x%x", tables->msg_in[i]);
    428 	printf("\n");
    429 #endif
    430 
    431 	if (siop_target->status == TARST_PPR_NEG) {
    432 		/* we initiated PPR negotiation */
    433 		sync = tables->msg_in[3];
    434 		offset = tables->msg_in[5];
    435 		options = tables->msg_in[7];
    436 		if (options != MSG_EXT_PPR_DT) {
     437 			/* shouldn't happen */
    438 			printf("%s: ppr negotiation for target %d: "
    439 			    "no DT option\n", sc->sc_dev.dv_xname, target);
    440 			siop_target->status = TARST_ASYNC;
    441 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
    442 			siop_target->offset = 0;
    443 			siop_target->period = 0;
    444 			goto reject;
    445 		}
    446 
    447 		if (offset > sc->maxoff || sync < sc->dt_minsync ||
    448 		    sync > sc->dt_maxsync) {
    449 			printf("%s: ppr negotiation for target %d: "
    450 			    "offset (%d) or sync (%d) out of range\n",
    451 			    sc->sc_dev.dv_xname, target, offset, sync);
    452 			/* should not happen */
    453 			siop_target->offset = 0;
    454 			siop_target->period = 0;
    455 			goto reject;
    456 		} else {
    457 			for (i = 0; i <
    458 			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
    459 			    i++) {
    460 				if (sc->clock_period != dt_scf_period[i].clock)
    461 					continue;
    462 				if (dt_scf_period[i].period == sync) {
     463 					/* ok, found it. we are now in sync. */
    464 					siop_target->offset = offset;
    465 					siop_target->period = sync;
    466 					scf = dt_scf_period[i].scf;
    467 					siop_target->flags |= TARF_ISDT;
    468 				}
    469 			}
    470 			if ((siop_target->flags & TARF_ISDT) == 0) {
    471 				printf("%s: ppr negotiation for target %d: "
    472 				    "sync (%d) incompatible with adapter\n",
    473 				    sc->sc_dev.dv_xname, target, sync);
    474 				/*
     475 				 * we didn't find it in our table: do async,
     476 				 * send a reject msg, start SDTR/WDTR neg
    477 				 */
    478 				siop_target->status = TARST_ASYNC;
    479 				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
    480 				siop_target->offset = 0;
    481 				siop_target->period = 0;
    482 				goto reject;
    483 			}
    484 		}
    485 		if (tables->msg_in[6] != 1) {
    486 			printf("%s: ppr negotiation for target %d: "
    487 			    "transfer width (%d) incompatible with dt\n",
    488 			    sc->sc_dev.dv_xname, target, tables->msg_in[6]);
    489 			/* DT mode can only be done with wide transfers */
    490 			siop_target->status = TARST_ASYNC;
    491 			goto reject;
    492 		}
    493 		siop_target->flags |= TARF_ISWIDE;
    494 		sc->targets[target]->id |= (SCNTL3_EWS << 24);
    495 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
    496 		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
    497 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
    498 		sc->targets[target]->id |=
    499 		    (siop_target->offset & SXFER_MO_MASK) << 8;
    500 		sc->targets[target]->id &= ~0xff;
    501 		sc->targets[target]->id |= SCNTL4_U3EN;
    502 		siop_target->status = TARST_OK;
    503 		siop_update_xfer_mode(sc, target);
    504 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
    505 		    (sc->targets[target]->id >> 24) & 0xff);
    506 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
    507 		    (sc->targets[target]->id >> 8) & 0xff);
    508 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
    509 		    sc->targets[target]->id & 0xff);
    510 		return SIOP_NEG_ACK;
    511 	} else {
    512 		/* target initiated PPR negotiation, shouldn't happen */
    513 		printf("%s: rejecting invalid PPR negotiation from "
    514 		    "target %d\n", sc->sc_dev.dv_xname, target);
    515 reject:
     516 		tables->t_msgout.count = htole32(1);
    517 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
    518 		return SIOP_NEG_MSGOUT;
    519 	}
    520 }
    521 
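         /*
          * Handle a SDTR message from the target, either in reply to our own
          * sync negotiation or target-initiated.
          */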
    522 int
    523 siop_sdtr_neg(siop_cmd)
    524 	struct siop_common_cmd *siop_cmd;
    525 {
    526 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    527 	struct siop_common_target *siop_target = siop_cmd->siop_target;
    528 	int target = siop_cmd->xs->xs_periph->periph_target;
    529 	int sync, maxoffset, offset, i;
    530 	int send_msgout = 0;
    531 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
    532 
    533 	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
    534 	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
    535 
    536 	sync = tables->msg_in[3];
    537 	offset = tables->msg_in[4];
    538 
    539 	if (siop_target->status == TARST_SYNC_NEG) {
    540 		/* we initiated sync negotiation */
    541 		siop_target->status = TARST_OK;
    542 #ifdef DEBUG
    543 		printf("sdtr: sync %d offset %d\n", sync, offset);
    544 #endif
    545 		if (offset > maxoffset || sync < sc->st_minsync ||
    546 			sync > sc->st_maxsync)
    547 			goto reject;
    548 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
    549 		    i++) {
    550 			if (sc->clock_period != scf_period[i].clock)
    551 				continue;
    552 			if (scf_period[i].period == sync) {
     553 				/* ok, found it. we are now in sync. */
    554 				siop_target->offset = offset;
    555 				siop_target->period = sync;
    556 				sc->targets[target]->id &=
    557 				    ~(SCNTL3_SCF_MASK << 24);
    558 				sc->targets[target]->id |= scf_period[i].scf
    559 				    << (24 + SCNTL3_SCF_SHIFT);
    560 				if (sync < 25 && /* Ultra */
    561 				    (sc->features & SF_BUS_ULTRA3) == 0)
    562 					sc->targets[target]->id |=
    563 					    SCNTL3_ULTRA << 24;
    564 				else
    565 					sc->targets[target]->id &=
    566 					    ~(SCNTL3_ULTRA << 24);
    567 				sc->targets[target]->id &=
    568 				    ~(SXFER_MO_MASK << 8);
    569 				sc->targets[target]->id |=
    570 				    (offset & SXFER_MO_MASK) << 8;
    571 				sc->targets[target]->id &= ~0xff; /* scntl4 */
    572 				goto end;
    573 			}
    574 		}
    575 		/*
    576 		 * we didn't find it in our table, do async and send reject
    577 		 * msg
    578 		 */
    579 reject:
    580 		send_msgout = 1;
     581 		tables->t_msgout.count = htole32(1);
    582 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
    583 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
    584 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
    585 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
    586 		sc->targets[target]->id &= ~0xff; /* scntl4 */
    587 		siop_target->offset = siop_target->period = 0;
    588 	} else { /* target initiated sync neg */
    589 #ifdef DEBUG
    590 		printf("sdtr (target): sync %d offset %d\n", sync, offset);
    591 #endif
    592 		if (offset == 0 || sync > sc->st_maxsync) { /* async */
    593 			goto async;
    594 		}
    595 		if (offset > maxoffset)
    596 			offset = maxoffset;
    597 		if (sync < sc->st_minsync)
    598 			sync = sc->st_minsync;
    599 		/* look for sync period */
    600 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
    601 		    i++) {
    602 			if (sc->clock_period != scf_period[i].clock)
    603 				continue;
    604 			if (scf_period[i].period == sync) {
     605 				/* ok, found it. we are now in sync. */
    606 				siop_target->offset = offset;
    607 				siop_target->period = sync;
    608 				sc->targets[target]->id &=
    609 				    ~(SCNTL3_SCF_MASK << 24);
    610 				sc->targets[target]->id |= scf_period[i].scf
    611 				    << (24 + SCNTL3_SCF_SHIFT);
    612 				if (sync < 25 && /* Ultra */
    613 				    (sc->features & SF_BUS_ULTRA3) == 0)
    614 					sc->targets[target]->id |=
    615 					    SCNTL3_ULTRA << 24;
    616 				else
    617 					sc->targets[target]->id &=
    618 					    ~(SCNTL3_ULTRA << 24);
    619 				sc->targets[target]->id &=
    620 				    ~(SXFER_MO_MASK << 8);
    621 				sc->targets[target]->id |=
    622 				    (offset & SXFER_MO_MASK) << 8;
    623 				sc->targets[target]->id &= ~0xff; /* scntl4 */
    624 				siop_sdtr_msg(siop_cmd, 0, sync, offset);
    625 				send_msgout = 1;
    626 				goto end;
    627 			}
    628 		}
    629 async:
    630 		siop_target->offset = siop_target->period = 0;
    631 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
    632 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
    633 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
    634 		sc->targets[target]->id &= ~0xff; /* scntl4 */
    635 		siop_sdtr_msg(siop_cmd, 0, 0, 0);
    636 		send_msgout = 1;
    637 	}
    638 end:
    639 	if (siop_target->status == TARST_OK)
    640 		siop_update_xfer_mode(sc, target);
    641 #ifdef DEBUG
    642 	printf("id now 0x%x\n", sc->targets[target]->id);
    643 #endif
    644 	tables->id = htole32(sc->targets[target]->id);
    645 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
    646 	    (sc->targets[target]->id >> 24) & 0xff);
    647 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
    648 	    (sc->targets[target]->id >> 8) & 0xff);
    649 	if (send_msgout) {
    650 		return SIOP_NEG_MSGOUT;
    651 	} else {
    652 		return SIOP_NEG_ACK;
    653 	}
    654 }
    655 
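         /* build a SDTR message in msg_out, starting at the given offset */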
    656 void
    657 siop_sdtr_msg(siop_cmd, offset, ssync, soff)
    658 	struct siop_common_cmd *siop_cmd;
    659 	int offset;
    660 	int ssync, soff;
    661 {
    662 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
    663 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
    664 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
    665 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
    666 	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
    667 	siop_cmd->siop_tables->t_msgout.count =
    668 	    htole32(offset + MSG_EXT_SDTR_LEN + 2);
    669 }
    670 
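         /* build a WDTR message in msg_out, starting at the given offset */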
    671 void
    672 siop_wdtr_msg(siop_cmd, offset, wide)
    673 	struct siop_common_cmd *siop_cmd;
     674 	int offset, wide;
    675 {
    676 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
    677 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
    678 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
    679 	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
    680 	siop_cmd->siop_tables->t_msgout.count =
    681 	    htole32(offset + MSG_EXT_WDTR_LEN + 2);
    682 }
    683 
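         /* build a PPR message in msg_out, starting at the given offset */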
    684 void
    685 siop_ppr_msg(siop_cmd, offset, ssync, soff)
    686 	struct siop_common_cmd *siop_cmd;
    687 	int offset;
    688 	int ssync, soff;
    689 {
    690 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
    691 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
    692 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
    693 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
    694 	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
    695 	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
    696 	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
    697 	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
    698 	siop_cmd->siop_tables->t_msgout.count =
    699 	    htole32(offset + MSG_EXT_PPR_LEN + 2);
    700 }
    701 
    702 void
    703 siop_minphys(bp)
    704 	struct buf *bp;
    705 {
    706 	minphys(bp);
    707 }
    708 
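         /* adapter ioctl; only SCBUSIORESET is supported */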
    709 int
    710 siop_ioctl(chan, cmd, arg, flag, p)
    711 	struct scsipi_channel *chan;
    712 	u_long cmd;
    713 	caddr_t arg;
    714 	int flag;
    715 	struct proc *p;
    716 {
    717 	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;
    718 
    719 	switch (cmd) {
    720 	case SCBUSIORESET:
    721 		/*
    722 		 * abort the script. This will trigger an interrupt, which will
    723 		 * trigger a bus reset.
    724 		 * We can't safely trigger the reset here as we can't access
    725 		 * the required register while the script is running.
    726 		 */
    727 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
    728 		return (0);
    729 	default:
    730 		return (ENOTTY);
    731 	}
    732 }
    733 
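         /*
          * Phase mismatch handler: compute how many bytes of the current
          * data table were not transferred (accounting for data still in
          * the chip FIFOs on data out) and record it as the residual.
          */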
    734 void
    735 siop_ma(siop_cmd)
    736 	struct siop_common_cmd *siop_cmd;
    737 {
    738 	int offset, dbc, sstat;
    739 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    740 	scr_table_t *table; /* table with partial xfer */
    741 
    742 	/*
    743 	 * compute how much of the current table didn't get handled when
    744 	 * a phase mismatch occurs
    745 	 */
    746 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
    747 	    == 0)
    748 	    return; /* no valid data transfer */
    749 
    750 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
    751 	if (offset >= SIOP_NSG) {
     752 		printf("%s: bad offset in siop_ma (%d)\n",
    753 		    sc->sc_dev.dv_xname, offset);
    754 		return;
    755 	}
    756 	table = &siop_cmd->siop_tables->data[offset];
    757 #ifdef DEBUG_DR
    758 	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
    759 	    table->count, table->addr);
    760 #endif
    761 	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
    762 	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
    763 		if (sc->features & SF_CHIP_DFBC) {
    764 			dbc +=
    765 			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
    766 		} else {
     767 			/* need to account for stale data in the FIFO */
    768 			int dfifo =
    769 			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
    770 			if (sc->features & SF_CHIP_FIFO) {
    771 				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
    772 				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
    773 				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
    774 			} else {
    775 				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
    776 			}
    777 		}
    778 		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
    779 		if (sstat & SSTAT0_OLF)
    780 			dbc++;
    781 		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
    782 			dbc++;
    783 		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
    784 			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
    785 			    SIOP_SSTAT2);
    786 			if (sstat & SSTAT2_OLF1)
    787 				dbc++;
    788 			if ((sstat & SSTAT2_ORF1) &&
    789 			    (sc->features & SF_CHIP_DFBC) == 0)
    790 				dbc++;
    791 		}
    792 		/* clear the FIFO */
    793 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
    794 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
    795 		    CTEST3_CLF);
    796 	}
    797 	siop_cmd->flags |= CMDFL_RESID;
    798 	siop_cmd->resid = dbc;
    799 }
    800 
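         /*
          * Save data pointers: account for the data already transferred and
          * move the remaining data tables to the beginning of the table
          * array.
          */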
    801 void
    802 siop_sdp(siop_cmd, offset)
    803 	struct siop_common_cmd *siop_cmd;
    804 	int offset;
    805 {
    806 	scr_table_t *table;
    807 
    808 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
    809 	    == 0)
    810 	    return; /* no data pointers to save */
    811 
    812 	/*
    813 	 * offset == SIOP_NSG may be a valid condition if we get a Save data
    814 	 * pointer when the xfer is done. Just ignore the Save data pointer
    815 	 * in this case
    816 	 */
    817 	if (offset == SIOP_NSG)
    818 		return;
    819 #ifdef DIAGNOSTIC
    820 	if (offset > SIOP_NSG) {
    821 		scsipi_printaddr(siop_cmd->xs->xs_periph);
    822 		printf(": offset %d > %d\n", offset, SIOP_NSG);
    823 		panic("siop_sdp: offset");
    824 	}
    825 #endif
    826 	/*
    827 	 * Save data pointer. We do this by adjusting the tables to point
     828 	 * at the beginning of the data not yet transferred.
     829 	 * offset points to the first table with untransferred data.
    830 	 */
    831 
    832 	/*
     833 	 * before doing that we decrease resid by the amount of data which
     834 	 * has been transferred.
    835 	 */
    836 	siop_update_resid(siop_cmd, offset);
    837 
    838 	/*
     839 	 * First let's see if we have a resid from a phase mismatch. If so,
     840 	 * we have to adjust the table at offset to remove transferred data.
    841 	 */
    842 	if (siop_cmd->flags & CMDFL_RESID) {
    843 		siop_cmd->flags &= ~CMDFL_RESID;
    844 		table = &siop_cmd->siop_tables->data[offset];
     845 		/* "cut" already transferred data from this table */
    846 		table->addr =
    847 		    htole32(le32toh(table->addr) +
    848 		    le32toh(table->count) - siop_cmd->resid);
    849 		table->count = htole32(siop_cmd->resid);
    850 	}
    851 
    852 	/*
     853 	 * now we can remove entries which have been transferred.
     854 	 * We just move the entries with data left to the beginning of the
    855 	 * tables
    856 	 */
    857 	memmove(&siop_cmd->siop_tables->data[0],
    858 	    &siop_cmd->siop_tables->data[offset],
    859 	    (SIOP_NSG - offset) * sizeof(scr_table_t));
    860 }
    861 
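         /*
          * Update xs->resid with the data transferred so far: the tables
          * below 'offset' completed in full, plus the partial table if
          * CMDFL_RESID is set.
          */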
    862 void
    863 siop_update_resid(siop_cmd, offset)
    864 	struct siop_common_cmd *siop_cmd;
    865 	int offset;
    866 {
    867 	scr_table_t *table;
    868 	int i;
    869 
    870 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
    871 	    == 0)
    872 	    return; /* no data to transfer */
    873 
    874 	/*
    875 	 * update resid. First account for the table entries which have
    876 	 * been fully completed.
    877 	 */
    878 	for (i = 0; i < offset; i++)
    879 		siop_cmd->xs->resid -=
    880 		    le32toh(siop_cmd->siop_tables->data[i].count);
    881 	/*
     882 	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
     883 	 * partial transfer. If not, offset points to the entry following
    884 	 * the last full transfer.
    885 	 */
    886 	if (siop_cmd->flags & CMDFL_RESID) {
    887 		table = &siop_cmd->siop_tables->data[offset];
    888 		siop_cmd->xs->resid -= le32toh(table->count) - siop_cmd->resid;
    889 	}
    890 }
    891 
    892 int
    893 siop_iwr(siop_cmd)
    894 	struct siop_common_cmd *siop_cmd;
    895 {
    896 	int offset;
    897 	scr_table_t *table; /* table with IWR */
    898 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    899 	/* handle ignore wide residue messages */
    900 
    901 	/* if target isn't wide, reject */
    902 	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
     903 		siop_cmd->siop_tables->t_msgout.count = htole32(1);
    904 		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
    905 		return SIOP_NEG_MSGOUT;
    906 	}
     907 	/* get index of the current entry in the data table */
    908 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
    909 	/*
    910 	 * if the current table did complete, we're now pointing at the
    911 	 * next one. Go back one if we didn't see a phase mismatch.
    912 	 */
    913 	if ((siop_cmd->flags & CMDFL_RESID) == 0)
    914 		offset--;
    915 	table = &siop_cmd->siop_tables->data[offset];
    916 
    917 	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
    918 		if (le32toh(table->count) & 1) {
    919 			/* we really got the number of bytes we expected */
    920 			return SIOP_NEG_ACK;
    921 		} else {
    922 			/*
    923 			 * now we really had a short xfer, by one byte.
     924 			 * handle it just as if we had a phase mismatch
    925 			 * (there is a resid of one for this table).
    926 			 * Update scratcha1 to reflect the fact that
    927 			 * this xfer isn't complete.
    928 			 */
     929 			siop_cmd->flags |= CMDFL_RESID;
     930 			siop_cmd->resid = 1;
     931 			bus_space_write_1(sc->sc_rt, sc->sc_rh,
     932 			    SIOP_SCRATCHA + 1, offset);
     933 			return SIOP_NEG_ACK;
    934 		}
    935 	} else {
    936 		/*
    937 		 * we already have a short xfer for this table; it's
     938 		 * just one byte less than we thought it was
    939 		 */
    940 		siop_cmd->resid--;
    941 		return SIOP_NEG_ACK;
    942 	}
    943 }
    944 
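         /* flush the chip's DMA FIFO; give up if CTEST3_CLF doesn't clear */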
    945 void
    946 siop_clearfifo(sc)
    947 	struct siop_common_softc *sc;
    948 {
    949 	int timeout = 0;
    950 	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
    951 
    952 #ifdef DEBUG_INTR
     953 	printf("DMA FIFO not empty!\n");
    954 #endif
    955 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
    956 	    ctest3 | CTEST3_CLF);
    957 	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
    958 	    CTEST3_CLF) != 0) {
    959 		delay(1);
    960 		if (++timeout > 1000) {
    961 			printf("clear fifo failed\n");
    962 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
    963 			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
    964 			    SIOP_CTEST3) & ~CTEST3_CLF);
    965 			return;
    966 		}
    967 	}
    968 }
    969 
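         /*
          * The bus mode (SE/LVD/HVD) changed: wait for DIFFSENSE to
          * stabilise and reprogram STEST2 accordingly. Returns 1 on
          * success, 0 on failure.
          */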
    970 int
    971 siop_modechange(sc)
    972 	struct siop_common_softc *sc;
    973 {
    974 	int retry;
    975 	int sist0, sist1, stest2;
    976 	for (retry = 0; retry < 5; retry++) {
    977 		/*
    978 		 * datasheet says to wait 100ms and re-read SIST1,
    979 		 * to check that DIFFSENSE is stable.
     980 		 * We may delay() 5 times for 100ms at interrupt time;
    981 		 * hopefully this will not happen often.
    982 		 */
    983 		delay(100000);
    984 		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
    985 		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
    986 		if (sist1 & SIEN1_SBMC)
    987 			continue; /* we got an irq again */
    988 		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
    989 		    STEST4_MODE_MASK;
    990 		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
    991 		switch(sc->mode) {
    992 		case STEST4_MODE_DIF:
    993 			printf("%s: switching to differential mode\n",
    994 			    sc->sc_dev.dv_xname);
    995 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
    996 			    stest2 | STEST2_DIF);
    997 			break;
    998 		case STEST4_MODE_SE:
    999 			printf("%s: switching to single-ended mode\n",
   1000 			    sc->sc_dev.dv_xname);
   1001 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
   1002 			    stest2 & ~STEST2_DIF);
   1003 			break;
   1004 		case STEST4_MODE_LVD:
   1005 			printf("%s: switching to LVD mode\n",
   1006 			    sc->sc_dev.dv_xname);
   1007 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
   1008 			    stest2 & ~STEST2_DIF);
   1009 			break;
   1010 		default:
   1011 			printf("%s: invalid SCSI mode 0x%x\n",
   1012 			    sc->sc_dev.dv_xname, sc->mode);
   1013 			return 0;
   1014 		}
   1015 		return 1;
   1016 	}
   1017 	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
   1018 	    sc->sc_dev.dv_xname);
   1019 	return 0;
   1020 }
   1021 
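         /* pulse the SCSI RST signal to reset the bus */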
   1022 void
   1023 siop_resetbus(sc)
   1024 	struct siop_common_softc *sc;
   1025 {
   1026 	int scntl1;
   1027 	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
   1028 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
   1029 	    scntl1 | SCNTL1_RST);
   1030 	/* minimum 25 us, more time won't hurt */
   1031 	delay(100);
   1032 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
   1033 }
   1034 
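         /* report the negotiated transfer parameters for a target to scsipi */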
   1035 void
   1036 siop_update_xfer_mode(sc, target)
   1037 	struct siop_common_softc *sc;
   1038 	int target;
   1039 {
   1040 	struct siop_common_target *siop_target = sc->targets[target];
   1041 	struct scsipi_xfer_mode xm;
   1042 
   1043 	xm.xm_target = target;
   1044 	xm.xm_mode = 0;
   1045 	xm.xm_period = 0;
   1046 	xm.xm_offset = 0;
   1047 
   1048 
   1049 	if (siop_target->flags & TARF_ISWIDE)
   1050 		xm.xm_mode |= PERIPH_CAP_WIDE16;
   1051 	if (siop_target->period) {
   1052 		xm.xm_period = siop_target->period;
   1053 		xm.xm_offset = siop_target->offset;
   1054 		xm.xm_mode |= PERIPH_CAP_SYNC;
   1055 	}
   1056 	if (siop_target->flags & TARF_TAG) {
   1057 	/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
   1058 		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
   1059 		    (sc->targets[target]->flags & TARF_ISWIDE))
   1060 			xm.xm_mode |= PERIPH_CAP_TQING;
   1061 	}
   1062 
   1063 	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
   1064 }
   1065