      1 /*	$NetBSD: siop_common.c,v 1.60 2024/02/08 19:44:08 andvar Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2000, 2002 Manuel Bouyer.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     25  *
     26  */
     27 
     28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
     29 
     30 #include <sys/cdefs.h>
     31 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.60 2024/02/08 19:44:08 andvar Exp $");
     32 
     33 #include <sys/param.h>
     34 #include <sys/systm.h>
     35 #include <sys/device.h>
     36 #include <sys/buf.h>
     37 #include <sys/kernel.h>
     38 #include <sys/scsiio.h>
     39 
     40 #include <machine/endian.h>
     41 #include <sys/bus.h>
     42 
     43 #include <dev/scsipi/scsi_all.h>
     44 #include <dev/scsipi/scsi_message.h>
     45 #include <dev/scsipi/scsipi_all.h>
     46 
     47 #include <dev/scsipi/scsiconf.h>
     48 
     49 #include <dev/ic/siopreg.h>
     50 #include <dev/ic/siopvar_common.h>
     51 
     52 #include "opt_siop.h"
     53 
     54 #undef DEBUG
     55 #undef DEBUG_DR
     56 #undef DEBUG_NEG
     57 
     58 int
     59 siop_common_attach(struct siop_common_softc *sc)
     60 {
     61 	int error, i;
     62 	bus_dma_segment_t seg;
     63 	int rseg;
     64 
     65 	/*
     66 	 * Allocate DMA-safe memory for the script and map it.
     67 	 */
     68 	if ((sc->features & SF_CHIP_RAM) == 0) {
     69 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
     70 		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
     71 		if (error) {
     72 			aprint_error_dev(sc->sc_dev,
     73 			    "unable to allocate script DMA memory, "
     74 			    "error = %d\n", error);
     75 			return error;
     76 		}
     77 		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
     78 		    (void **)&sc->sc_script,
     79 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
     80 		if (error) {
     81 			aprint_error_dev(sc->sc_dev,
     82 			    "unable to map script DMA memory, "
     83 			    "error = %d\n", error);
     84 			return error;
     85 		}
     86 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
     87 		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
     88 		if (error) {
     89 			aprint_error_dev(sc->sc_dev,
     90 			    "unable to create script DMA map, "
     91 			    "error = %d\n", error);
     92 			return error;
     93 		}
     94 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
     95 		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
     96 		if (error) {
     97 			aprint_error_dev(sc->sc_dev,
     98 			    "unable to load script DMA map, "
     99 			    "error = %d\n", error);
    100 			return error;
    101 		}
    102 		sc->sc_scriptaddr =
    103 		    sc->sc_scriptdma->dm_segs[0].ds_addr;
    104 		sc->ram_size = PAGE_SIZE;
    105 	}
    106 
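         	/*
         	 * Set up the scsipi adapter and its single channel.
         	 * adapt_openings starts at 0 and the channel is marked
         	 * SCSIPI_CHAN_CANGROW, so openings can be added later.
         	 */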
    107 	sc->sc_adapt.adapt_dev = sc->sc_dev;
    108 	sc->sc_adapt.adapt_nchannels = 1;
    109 	sc->sc_adapt.adapt_openings = 0;
    110 	sc->sc_adapt.adapt_ioctl = siop_ioctl;
    111 	sc->sc_adapt.adapt_minphys = minphys;
    112 
    113 	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
    114 	sc->sc_chan.chan_adapter = &sc->sc_adapt;
    115 	sc->sc_chan.chan_bustype = &scsi_bustype;
    116 	sc->sc_chan.chan_channel = 0;
    117 	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
    118 	sc->sc_chan.chan_ntargets =
    119 	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
    120 	sc->sc_chan.chan_nluns = 8;
    121 	sc->sc_chan.chan_id =
    122 	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
    123 	if (sc->sc_chan.chan_id == 0 ||
    124 	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
    125 		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
    126 
    127 	for (i = 0; i < 16; i++)
    128 		sc->targets[i] = NULL;
    129 
    130 	/* find min/max sync period for this chip */
    131 	sc->st_maxsync = 0;
    132 	sc->dt_maxsync = 0;
    133 	sc->st_minsync = 255;
    134 	sc->dt_minsync = 255;
    135 	for (i = 0; i < __arraycount(scf_period); i++) {
    136 		if (sc->clock_period != scf_period[i].clock)
    137 			continue;
    138 		if (sc->st_maxsync < scf_period[i].period)
    139 			sc->st_maxsync = scf_period[i].period;
    140 		if (sc->st_minsync > scf_period[i].period)
    141 			sc->st_minsync = scf_period[i].period;
    142 	}
     143 	if (sc->st_maxsync == 0 || sc->st_minsync == 255)
    144 		panic("siop: can't find my sync parameters");
    145 	for (i = 0; i < __arraycount(dt_scf_period); i++) {
    146 		if (sc->clock_period != dt_scf_period[i].clock)
    147 			continue;
    148 		if (sc->dt_maxsync < dt_scf_period[i].period)
    149 			sc->dt_maxsync = dt_scf_period[i].period;
    150 		if (sc->dt_minsync > dt_scf_period[i].period)
    151 			sc->dt_minsync = dt_scf_period[i].period;
    152 	}
     153 	if (sc->dt_maxsync == 0 || sc->dt_minsync == 255)
    154 		panic("siop: can't find my sync parameters");
    155 	return 0;
    156 }
    157 
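         /*
          * siop_common_reset: reset the chip and reprogram the registers we
          * rely on (arbitration, interrupt enables, selection timeout, host
          * ID, clock doubler/quadrupler), record the current bus mode from
          * STEST4 and clear the on-chip RAM before calling the chip-specific
          * reset hook.
          */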
    158 void
    159 siop_common_reset(struct siop_common_softc *sc)
    160 {
    161 	u_int32_t stest1, stest3;
    162 
    163 	/* reset the chip */
    164 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
    165 	delay(1000);
    166 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
    167 
    168 	/* init registers */
    169 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
    170 	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
    171 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
    172 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
    173 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
    174 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
    175 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
    176 	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
    177 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
    178 	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
    179 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
    180 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
    181 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
    182 	    (0xb << STIME0_SEL_SHIFT));
    183 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
    184 	    sc->sc_chan.chan_id | SCID_RRE);
    185 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
    186 	    1 << sc->sc_chan.chan_id);
    187 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
    188 	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
    189 	if (sc->features & SF_CHIP_AAIP)
    190 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
    191 		    SIOP_AIPCNTL1, AIPCNTL1_DIS);
    192 
     193 	/* enable clock doubler or quadrupler if appropriate */
    194 	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
    195 		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
    196 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
    197 		    STEST1_DBLEN);
    198 		if (sc->features & SF_CHIP_QUAD) {
     199 			/* wait for the PLL to lock */
    200 			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
    201 			    SIOP_STEST4) & STEST4_LOCK) == 0)
    202 				delay(10);
    203 		} else {
    204 			/* data sheet says 20us - more won't hurt */
    205 			delay(100);
    206 		}
    207 		/* halt scsi clock, select doubler/quad, restart clock */
    208 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
    209 		    stest3 | STEST3_HSC);
    210 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
    211 		    STEST1_DBLEN | STEST1_DBLSEL);
    212 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
    213 	} else {
    214 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
    215 	}
    216 
    217 	if (sc->features & SF_CHIP_USEPCIC) {
    218 		stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1);
    219 		stest1 |= STEST1_SCLK;
    220 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1);
    221 	}
    222 
    223 	if (sc->features & SF_CHIP_FIFO)
    224 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
    225 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
    226 		    CTEST5_DFS);
    227 	if (sc->features & SF_CHIP_LED0) {
    228 		/* Set GPIO0 as output if software LED control is required */
    229 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
    230 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
    231 	}
    232 	if (sc->features & SF_BUS_ULTRA3) {
    233 		/* reset SCNTL4 */
    234 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
    235 	}
    236 	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
    237 	    STEST4_MODE_MASK;
    238 
    239 	/*
     240 	 * Initialise the on-chip RAM. Without this we may get SCSI gross
     241 	 * errors on the 1010.
    242 	 */
    243 	if (sc->features & SF_CHIP_RAM)
    244 		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
    245 			0, 0, sc->ram_size / 4);
    246 	sc->sc_reset(sc);
    247 }
    248 
    249 /* prepare tables before sending a cmd */
    250 void
    251 siop_setuptables(struct siop_common_cmd *siop_cmd)
    252 {
    253 	int i;
    254 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    255 	struct scsipi_xfer *xs = siop_cmd->xs;
    256 	int target = xs->xs_periph->periph_target;
    257 	int lun = xs->xs_periph->periph_lun;
    258 	int msgoffset = 1;
    259 
    260 	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
    261 	memset(siop_cmd->siop_tables->msg_out, 0,
    262 	    sizeof(siop_cmd->siop_tables->msg_out));
    263 	/* request sense doesn't disconnect */
    264 	if (xs->xs_control & XS_CTL_REQSENSE)
    265 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
    266 	else if ((sc->features & SF_CHIP_GEBUG) &&
    267 	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
    268 		/*
    269 		 * 1010 bug: it seems that the 1010 has problems with reselect
     270 		 * when not in wide mode (it generates false SCSI gross errors).
    271 		 * The FreeBSD sym driver has comments about it but their
    272 		 * workaround (disable SCSI gross error reporting) doesn't
    273 		 * work with my adapter. So disable disconnect when not
    274 		 * wide.
    275 		 */
    276 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
    277 	else
    278 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
    279 	if (xs->xs_tag_type != 0) {
    280 		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
    281 			scsipi_printaddr(xs->xs_periph);
    282 			printf(": tagged command type %d id %d\n",
    283 			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
    284 			panic("tagged command for non-tagging device");
    285 		}
    286 		siop_cmd->flags |= CMDFL_TAG;
    287 		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
    288 		/*
     289 		 * use siop_cmd->tag, not xs->xs_tag_id; the caller may want
     290 		 * a different one
    291 		 */
    292 		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
    293 		msgoffset = 3;
    294 	}
    295 	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
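         	/*
         	 * If this target has not been negotiated with yet (TARST_ASYNC),
         	 * append the first negotiation message: PPR for DT-capable
         	 * targets on an LVD bus, otherwise WDTR, then SDTR, falling back
         	 * to plain asynchronous transfers.
         	 */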
    296 	if (sc->targets[target]->status == TARST_ASYNC) {
    297 		if ((sc->targets[target]->flags & TARF_DT) &&
    298 		    (sc->mode == STEST4_MODE_LVD)) {
    299 			sc->targets[target]->status = TARST_PPR_NEG;
    300 			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
    301 			    sc->maxoff);
    302 		} else if (sc->targets[target]->flags & TARF_WIDE) {
    303 			sc->targets[target]->status = TARST_WIDE_NEG;
    304 			siop_wdtr_msg(siop_cmd, msgoffset,
    305 			    MSG_EXT_WDTR_BUS_16_BIT);
    306 		} else if (sc->targets[target]->flags & TARF_SYNC) {
    307 			sc->targets[target]->status = TARST_SYNC_NEG;
    308 			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
    309 			(sc->maxoff > 31) ? 31 :  sc->maxoff);
    310 		} else {
    311 			sc->targets[target]->status = TARST_OK;
    312 			siop_update_xfer_mode(sc, target);
    313 		}
    314 	}
    315 	siop_cmd->siop_tables->status =
    316 	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */
    317 
    318 	siop_cmd->siop_tables->cmd.count =
    319 	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
    320 	siop_cmd->siop_tables->cmd.addr =
    321 	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
    322 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
    323 		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
    324 			siop_cmd->siop_tables->data[i].count =
    325 			    siop_htoc32(sc,
    326 				siop_cmd->dmamap_data->dm_segs[i].ds_len);
    327 			siop_cmd->siop_tables->data[i].addr =
    328 			    siop_htoc32(sc,
    329 				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
    330 		}
    331 	}
    332 }
    333 
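         /*
          * siop_wdtr_neg: handle a WDTR message, either the answer to a wide
          * negotiation we started or a target-initiated one. Update the
          * cached SCNTL3 bits for the target and return SIOP_NEG_MSGOUT when
          * a reply (MESSAGE REJECT, SDTR or WDTR) has been queued, or
          * SIOP_NEG_ACK when the message can simply be acknowledged.
          */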
    334 int
    335 siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
    336 {
    337 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    338 	struct siop_common_target *siop_target = siop_cmd->siop_target;
    339 	int target = siop_cmd->xs->xs_periph->periph_target;
    340 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
    341 
    342 	if (siop_target->status == TARST_WIDE_NEG) {
    343 		/* we initiated wide negotiation */
    344 		switch (tables->msg_in[3]) {
    345 		case MSG_EXT_WDTR_BUS_8_BIT:
    346 			siop_target->flags &= ~TARF_ISWIDE;
    347 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
    348 			break;
    349 		case MSG_EXT_WDTR_BUS_16_BIT:
    350 			if (siop_target->flags & TARF_WIDE) {
    351 				siop_target->flags |= TARF_ISWIDE;
    352 				sc->targets[target]->id |= (SCNTL3_EWS << 24);
    353 				break;
    354 			}
    355 		/* FALLTHROUGH */
    356 		default:
    357 			/*
     358 			 * we got a width we can't handle; this shouldn't
     359 			 * happen. Reject, and stay narrow/async
    360 			 */
    361 			siop_target->flags &= ~TARF_ISWIDE;
    362 			siop_target->status = TARST_OK;
    363 			siop_target->offset = siop_target->period = 0;
    364 			siop_update_xfer_mode(sc, target);
    365 			printf("%s: rejecting invalid wide negotiation from "
    366 			    "target %d (%d)\n", device_xname(sc->sc_dev),
    367 			    target,
    368 			    tables->msg_in[3]);
    369 			tables->t_msgout.count = siop_htoc32(sc, 1);
    370 			tables->msg_out[0] = MSG_MESSAGE_REJECT;
    371 			return SIOP_NEG_MSGOUT;
    372 		}
    373 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
    374 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
    375 		    SIOP_SCNTL3,
    376 		    (sc->targets[target]->id >> 24) & 0xff);
    377 		/* we now need to do sync */
    378 		if (siop_target->flags & TARF_SYNC) {
    379 			siop_target->status = TARST_SYNC_NEG;
    380 			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
    381 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
    382 			return SIOP_NEG_MSGOUT;
    383 		} else {
    384 			siop_target->status = TARST_OK;
    385 			siop_update_xfer_mode(sc, target);
    386 			return SIOP_NEG_ACK;
    387 		}
    388 	} else {
    389 		/* target initiated wide negotiation */
    390 		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
    391 		    && (siop_target->flags & TARF_WIDE)) {
    392 			siop_target->flags |= TARF_ISWIDE;
    393 			sc->targets[target]->id |= SCNTL3_EWS << 24;
    394 		} else {
    395 			siop_target->flags &= ~TARF_ISWIDE;
    396 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
    397 		}
    398 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
    399 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
    400 		    (sc->targets[target]->id >> 24) & 0xff);
    401 		/*
     402 		 * we reset the wide parameters, so fall back to async, but
     403 		 * don't schedule a sync neg; the target should initiate it
    404 		 */
    405 		siop_target->status = TARST_OK;
    406 		siop_target->offset = siop_target->period = 0;
    407 		siop_update_xfer_mode(sc, target);
    408 		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
    409 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
    410 		return SIOP_NEG_MSGOUT;
    411 	}
    412 }
    413 
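         /*
          * siop_ppr_neg: handle the answer to a PPR negotiation we initiated.
          * On success program SCNTL3/SXFER/SCNTL4 for wide DT transfers and
          * return SIOP_NEG_ACK; on any mismatch fall back to async and queue
          * a MESSAGE REJECT (a target-initiated PPR is rejected as well).
          */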
    414 int
    415 siop_ppr_neg(struct siop_common_cmd *siop_cmd)
    416 {
    417 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    418 	struct siop_common_target *siop_target = siop_cmd->siop_target;
    419 	int target = siop_cmd->xs->xs_periph->periph_target;
    420 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
    421 	int sync, offset, options, scf = 0;
    422 	int i;
    423 
    424 #ifdef DEBUG_NEG
    425 	printf("%s: answer on ppr negotiation:", device_xname(sc->sc_dev));
    426 	for (i = 0; i < 8; i++)
    427 		printf(" 0x%x", tables->msg_in[i]);
    428 	printf("\n");
    429 #endif
    430 
    431 	if (siop_target->status == TARST_PPR_NEG) {
    432 		/* we initiated PPR negotiation */
    433 		sync = tables->msg_in[3];
    434 		offset = tables->msg_in[5];
    435 		options = tables->msg_in[7];
    436 		if (options != MSG_EXT_PPR_DT) {
    437 			/* shouldn't happen */
    438 			printf("%s: ppr negotiation for target %d: "
    439 			    "no DT option\n", device_xname(sc->sc_dev), target);
    440 			siop_target->status = TARST_ASYNC;
    441 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
    442 			siop_target->offset = 0;
    443 			siop_target->period = 0;
    444 			goto reject;
    445 		}
    446 
    447 		if (offset > sc->maxoff || sync < sc->dt_minsync ||
    448 		    sync > sc->dt_maxsync) {
    449 			printf("%s: ppr negotiation for target %d: "
    450 			    "offset (%d) or sync (%d) out of range\n",
    451 			    device_xname(sc->sc_dev), target, offset, sync);
    452 			/* should not happen */
    453 			siop_target->offset = 0;
    454 			siop_target->period = 0;
    455 			goto reject;
    456 		} else {
    457 			for (i = 0; i < __arraycount(dt_scf_period); i++) {
    458 				if (sc->clock_period != dt_scf_period[i].clock)
    459 					continue;
    460 				if (dt_scf_period[i].period == sync) {
    461 					/* ok, found it. we now are sync. */
    462 					siop_target->offset = offset;
    463 					siop_target->period = sync;
    464 					scf = dt_scf_period[i].scf;
    465 					siop_target->flags |= TARF_ISDT;
    466 				}
    467 			}
    468 			if ((siop_target->flags & TARF_ISDT) == 0) {
    469 				printf("%s: ppr negotiation for target %d: "
    470 				    "sync (%d) incompatible with adapter\n",
    471 				    device_xname(sc->sc_dev), target, sync);
    472 				/*
     473 				 * we didn't find it in our table: go async,
     474 				 * send a reject msg and start SDTR/WDTR neg
    475 				 */
    476 				siop_target->status = TARST_ASYNC;
    477 				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
    478 				siop_target->offset = 0;
    479 				siop_target->period = 0;
    480 				goto reject;
    481 			}
    482 		}
    483 		if (tables->msg_in[6] != 1) {
    484 			printf("%s: ppr negotiation for target %d: "
    485 			    "transfer width (%d) incompatible with dt\n",
    486 			    device_xname(sc->sc_dev),
    487 			    target, tables->msg_in[6]);
    488 			/* DT mode can only be done with wide transfers */
    489 			siop_target->status = TARST_ASYNC;
    490 			goto reject;
    491 		}
    492 		siop_target->flags |= TARF_ISWIDE;
    493 		sc->targets[target]->id |= (SCNTL3_EWS << 24);
    494 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
    495 		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
    496 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
    497 		sc->targets[target]->id |=
    498 		    (siop_target->offset & SXFER_MO_MASK) << 8;
    499 		sc->targets[target]->id &= ~0xff;
    500 		sc->targets[target]->id |= SCNTL4_U3EN;
    501 		siop_target->status = TARST_OK;
    502 		siop_update_xfer_mode(sc, target);
    503 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
    504 		    (sc->targets[target]->id >> 24) & 0xff);
    505 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
    506 		    (sc->targets[target]->id >> 8) & 0xff);
    507 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
    508 		    sc->targets[target]->id & 0xff);
    509 		return SIOP_NEG_ACK;
    510 	} else {
    511 		/* target initiated PPR negotiation, shouldn't happen */
    512 		printf("%s: rejecting invalid PPR negotiation from "
    513 		    "target %d\n", device_xname(sc->sc_dev), target);
    514 reject:
    515 		tables->t_msgout.count = siop_htoc32(sc, 1);
    516 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
    517 		return SIOP_NEG_MSGOUT;
    518 	}
    519 }
    520 
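         /*
          * siop_sdtr_neg: handle an SDTR message, either the answer to a sync
          * negotiation we started or a target-initiated one. Look up the
          * matching clock divider in scf_period[], update the cached
          * SCNTL3/SXFER values for the target and return SIOP_NEG_MSGOUT if
          * a reply was queued, SIOP_NEG_ACK otherwise.
          */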
    521 int
    522 siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
    523 {
    524 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    525 	struct siop_common_target *siop_target = siop_cmd->siop_target;
    526 	int target = siop_cmd->xs->xs_periph->periph_target;
    527 	int sync, maxoffset, offset, i;
    528 	int send_msgout = 0;
    529 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
    530 
    531 	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
    532 	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
    533 
    534 	sync = tables->msg_in[3];
    535 	offset = tables->msg_in[4];
    536 
    537 	if (siop_target->status == TARST_SYNC_NEG) {
    538 		/* we initiated sync negotiation */
    539 		siop_target->status = TARST_OK;
    540 #ifdef DEBUG
    541 		printf("sdtr: sync %d offset %d\n", sync, offset);
    542 #endif
    543 		if (offset > maxoffset || sync < sc->st_minsync ||
    544 			sync > sc->st_maxsync)
    545 			goto reject;
    546 		for (i = 0; i < __arraycount(scf_period); i++) {
    547 			if (sc->clock_period != scf_period[i].clock)
    548 				continue;
    549 			if (scf_period[i].period == sync) {
    550 				/* ok, found it. we now are sync. */
    551 				siop_target->offset = offset;
    552 				siop_target->period = sync;
    553 				sc->targets[target]->id &=
    554 				    ~(SCNTL3_SCF_MASK << 24);
    555 				sc->targets[target]->id |= scf_period[i].scf
    556 				    << (24 + SCNTL3_SCF_SHIFT);
    557 				if (sync < 25 && /* Ultra */
    558 				    (sc->features & SF_BUS_ULTRA3) == 0)
    559 					sc->targets[target]->id |=
    560 					    SCNTL3_ULTRA << 24;
    561 				else
    562 					sc->targets[target]->id &=
    563 					    ~(SCNTL3_ULTRA << 24);
    564 				sc->targets[target]->id &=
    565 				    ~(SXFER_MO_MASK << 8);
    566 				sc->targets[target]->id |=
    567 				    (offset & SXFER_MO_MASK) << 8;
    568 				sc->targets[target]->id &= ~0xff; /* scntl4 */
    569 				goto end;
    570 			}
    571 		}
    572 		/*
    573 		 * we didn't find it in our table, do async and send reject
    574 		 * msg
    575 		 */
    576 reject:
    577 		send_msgout = 1;
    578 		tables->t_msgout.count = siop_htoc32(sc, 1);
    579 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
    580 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
    581 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
    582 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
    583 		sc->targets[target]->id &= ~0xff; /* scntl4 */
    584 		siop_target->offset = siop_target->period = 0;
    585 	} else { /* target initiated sync neg */
    586 #ifdef DEBUG
    587 		printf("sdtr (target): sync %d offset %d\n", sync, offset);
    588 #endif
    589 		if (offset == 0 || sync > sc->st_maxsync) { /* async */
    590 			goto async;
    591 		}
    592 		if (offset > maxoffset)
    593 			offset = maxoffset;
    594 		if (sync < sc->st_minsync)
    595 			sync = sc->st_minsync;
    596 		/* look for sync period */
    597 		for (i = 0; i < __arraycount(scf_period); i++) {
    598 			if (sc->clock_period != scf_period[i].clock)
    599 				continue;
    600 			if (scf_period[i].period == sync) {
    601 				/* ok, found it. we now are sync. */
    602 				siop_target->offset = offset;
    603 				siop_target->period = sync;
    604 				sc->targets[target]->id &=
    605 				    ~(SCNTL3_SCF_MASK << 24);
    606 				sc->targets[target]->id |= scf_period[i].scf
    607 				    << (24 + SCNTL3_SCF_SHIFT);
    608 				if (sync < 25 && /* Ultra */
    609 				    (sc->features & SF_BUS_ULTRA3) == 0)
    610 					sc->targets[target]->id |=
    611 					    SCNTL3_ULTRA << 24;
    612 				else
    613 					sc->targets[target]->id &=
    614 					    ~(SCNTL3_ULTRA << 24);
    615 				sc->targets[target]->id &=
    616 				    ~(SXFER_MO_MASK << 8);
    617 				sc->targets[target]->id |=
    618 				    (offset & SXFER_MO_MASK) << 8;
    619 				sc->targets[target]->id &= ~0xff; /* scntl4 */
    620 				siop_sdtr_msg(siop_cmd, 0, sync, offset);
    621 				send_msgout = 1;
    622 				goto end;
    623 			}
    624 		}
    625 async:
    626 		siop_target->offset = siop_target->period = 0;
    627 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
    628 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
    629 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
    630 		sc->targets[target]->id &= ~0xff; /* scntl4 */
    631 		siop_sdtr_msg(siop_cmd, 0, 0, 0);
    632 		send_msgout = 1;
    633 	}
    634 end:
    635 	if (siop_target->status == TARST_OK)
    636 		siop_update_xfer_mode(sc, target);
    637 #ifdef DEBUG
    638 	printf("id now 0x%x\n", sc->targets[target]->id);
    639 #endif
    640 	tables->id = siop_htoc32(sc, sc->targets[target]->id);
    641 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
    642 	    (sc->targets[target]->id >> 24) & 0xff);
    643 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
    644 	    (sc->targets[target]->id >> 8) & 0xff);
    645 	if (send_msgout) {
    646 		return SIOP_NEG_MSGOUT;
    647 	} else {
    648 		return SIOP_NEG_ACK;
    649 	}
    650 }
    651 
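         /*
          * The helpers below build extended SDTR/WDTR/PPR messages in msg_out
          * starting at the given offset, and set t_msgout.count accordingly.
          */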
    652 void
    653 siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
    654 {
    655 
    656 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
    657 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
    658 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
    659 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
    660 	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
    661 	siop_cmd->siop_tables->t_msgout.count =
    662 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
    663 }
    664 
    665 void
    666 siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
    667 {
    668 
    669 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
    670 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
    671 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
    672 	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
    673 	siop_cmd->siop_tables->t_msgout.count =
    674 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
    675 }
    676 
    677 void
    678 siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
    679 {
    680 
    681 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
    682 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
    683 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
    684 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
    685 	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
    686 	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
    687 	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
    688 	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
    689 	siop_cmd->siop_tables->t_msgout.count =
    690 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
    691 }
    692 
    693 void
    694 siop_minphys(struct buf *bp)
    695 {
    696 
    697 	minphys(bp);
    698 }
    699 
    700 int
    701 siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    702     int flag, struct proc *p)
    703 {
    704 	struct siop_common_softc *sc;
    705 
    706 	sc = device_private(chan->chan_adapter->adapt_dev);
    707 
    708 	switch (cmd) {
    709 	case SCBUSIORESET:
    710 		/*
    711 		 * abort the script. This will trigger an interrupt, which will
    712 		 * trigger a bus reset.
    713 		 * We can't safely trigger the reset here as we can't access
    714 		 * the required register while the script is running.
    715 		 */
    716 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
    717 		return (0);
    718 	default:
    719 		return (ENOTTY);
    720 	}
    721 }
    722 
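         /*
          * siop_ma: handle a phase mismatch during a data transfer: compute
          * how many bytes of the current table were not transferred (for
          * data-out, also accounting for data still in the DMA and SCSI
          * FIFOs) and record the result in siop_cmd->resid.
          */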
    723 void
    724 siop_ma(struct siop_common_cmd *siop_cmd)
    725 {
    726 	int offset, dbc, sstat;
    727 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    728 #ifdef DEBUG_DR
    729 	scr_table_t *table; /* table with partial xfer */
    730 #endif
    731 
    732 	/*
    733 	 * compute how much of the current table didn't get handled when
    734 	 * a phase mismatch occurs
    735 	 */
    736 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
    737 	    == 0)
    738 	    return; /* no valid data transfer */
    739 
    740 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
    741 	if (offset >= SIOP_NSG) {
     742 		aprint_error_dev(sc->sc_dev, "bad offset in siop_ma (%d)\n",
    743 		    offset);
    744 		return;
    745 	}
    746 #ifdef DEBUG_DR
    747 	table = &siop_cmd->siop_tables->data[offset];
    748 	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
    749 	    table->count, table->addr);
    750 #endif
    751 	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
    752 	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
    753 		if (sc->features & SF_CHIP_DFBC) {
    754 			dbc +=
    755 			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
    756 		} else {
     757 			/* need to account for stale data in the FIFO */
    758 			int dfifo =
    759 			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
    760 			if (sc->features & SF_CHIP_FIFO) {
    761 				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
    762 				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
    763 				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
    764 			} else {
    765 				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
    766 			}
    767 		}
    768 		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
    769 		if (sstat & SSTAT0_OLF)
    770 			dbc++;
    771 		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
    772 			dbc++;
    773 		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
    774 			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
    775 			    SIOP_SSTAT2);
    776 			if (sstat & SSTAT2_OLF1)
    777 				dbc++;
    778 			if ((sstat & SSTAT2_ORF1) &&
    779 			    (sc->features & SF_CHIP_DFBC) == 0)
    780 				dbc++;
    781 		}
    782 		/* clear the FIFO */
    783 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
    784 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
    785 		    CTEST3_CLF);
    786 	}
    787 	siop_cmd->flags |= CMDFL_RESID;
    788 	siop_cmd->resid = dbc;
    789 }
    790 
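         /*
          * siop_sdp: handle a save data pointer: update the residual count,
          * trim the partially-transferred table if a phase mismatch left a
          * resid, and move the remaining tables to the front of the array so
          * the transfer can be restarted from there.
          */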
    791 void
    792 siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
    793 {
    794 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    795 	scr_table_t *table;
    796 
    797 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
    798 	    == 0)
    799 	    return; /* no data pointers to save */
    800 
    801 	/*
    802 	 * offset == SIOP_NSG may be a valid condition if we get a Save data
    803 	 * pointer when the xfer is done. Just ignore the Save data pointer
    804 	 * in this case
    805 	 */
    806 	if (offset == SIOP_NSG)
    807 		return;
    808 #ifdef DIAGNOSTIC
    809 	if (offset > SIOP_NSG) {
    810 		scsipi_printaddr(siop_cmd->xs->xs_periph);
    811 		printf(": offset %d > %d\n", offset, SIOP_NSG);
    812 		panic("siop_sdp: offset");
    813 	}
    814 #endif
    815 	/*
    816 	 * Save data pointer. We do this by adjusting the tables to point
    817 	 * at the beginning of the data not yet transferred.
    818 	 * offset points to the first table with untransferred data.
    819 	 */
    820 
    821 	/*
     822 	 * before doing that, decrement resid by the amount of data which
     823 	 * has already been transferred.
    824 	 */
    825 	siop_update_resid(siop_cmd, offset);
    826 
    827 	/*
     828 	 * First let's see if we have a resid from a phase mismatch. If so,
     829 	 * we have to adjust the table at offset to remove transferred data.
    830 	 */
    831 	if (siop_cmd->flags & CMDFL_RESID) {
    832 		siop_cmd->flags &= ~CMDFL_RESID;
    833 		table = &siop_cmd->siop_tables->data[offset];
    834 		/* "cut" already transferred data from this table */
    835 		table->addr =
    836 		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
    837 		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
    838 		table->count = siop_htoc32(sc, siop_cmd->resid);
    839 	}
    840 
    841 	/*
    842 	 * now we can remove entries which have been transferred.
    843 	 * We just move the entries with data left at the beginning of the
    844 	 * tables
    845 	 */
    846 	memmove(&siop_cmd->siop_tables->data[0],
    847 	    &siop_cmd->siop_tables->data[offset],
    848 	    (SIOP_NSG - offset) * sizeof(scr_table_t));
    849 }
    850 
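         /*
          * siop_update_resid: subtract the data already transferred from
          * xs->resid: all tables before 'offset', plus the completed part of
          * the table at 'offset' when a phase mismatch left a partial count.
          */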
    851 void
    852 siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
    853 {
    854 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    855 	scr_table_t *table;
    856 	int i;
    857 
    858 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
    859 	    == 0)
    860 	    return; /* no data to transfer */
    861 
    862 	/*
    863 	 * update resid. First account for the table entries which have
    864 	 * been fully completed.
    865 	 */
    866 	for (i = 0; i < offset; i++)
    867 		siop_cmd->xs->resid -=
    868 		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
    869 	/*
     870 	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
     871 	 * partial transfer. If not, offset points to the entry following
    872 	 * the last full transfer.
    873 	 */
    874 	if (siop_cmd->flags & CMDFL_RESID) {
    875 		table = &siop_cmd->siop_tables->data[offset];
    876 		siop_cmd->xs->resid -=
    877 		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
    878 	}
    879 }
    880 
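         /*
          * siop_iwr: handle an ignore wide residue message. Reject it if the
          * target isn't wide; otherwise account for the one ignored byte,
          * creating or adjusting the residual for the current table as
          * needed, and acknowledge the message.
          */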
    881 int
    882 siop_iwr(struct siop_common_cmd *siop_cmd)
    883 {
    884 	int offset;
    885 	scr_table_t *table; /* table with IWR */
    886 	struct siop_common_softc *sc = siop_cmd->siop_sc;
    887 
    888 	/* handle ignore wide residue messages */
    889 
    890 	/* if target isn't wide, reject */
    891 	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
    892 		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
    893 		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
    894 		return SIOP_NEG_MSGOUT;
    895 	}
    896 	/* get index of current command in table */
    897 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
    898 	/*
    899 	 * if the current table did complete, we're now pointing at the
    900 	 * next one. Go back one if we didn't see a phase mismatch.
    901 	 */
    902 	if ((siop_cmd->flags & CMDFL_RESID) == 0)
    903 		offset--;
    904 	table = &siop_cmd->siop_tables->data[offset];
    905 
    906 	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
    907 		if (siop_ctoh32(sc, table->count) & 1) {
    908 			/* we really got the number of bytes we expected */
    909 			return SIOP_NEG_ACK;
    910 		} else {
    911 			/*
    912 			 * now we really had a short xfer, by one byte.
    913 			 * handle it just as if we had a phase mismatch
    914 			 * (there is a resid of one for this table).
    915 			 * Update scratcha1 to reflect the fact that
    916 			 * this xfer isn't complete.
    917 			 */
    918 			 siop_cmd->flags |= CMDFL_RESID;
    919 			 siop_cmd->resid = 1;
    920 			 bus_space_write_1(sc->sc_rt, sc->sc_rh,
    921 			     SIOP_SCRATCHA + 1, offset);
    922 			 return SIOP_NEG_ACK;
    923 		}
    924 	} else {
    925 		/*
    926 		 * we already have a short xfer for this table; it's
    927 		 * just one byte less than we though it was
     928 		 * just one byte less than we thought it was
    929 		siop_cmd->resid--;
    930 		return SIOP_NEG_ACK;
    931 	}
    932 }
    933 
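         /*
          * siop_clearfifo: flush the DMA FIFO via CTEST3_CLF and wait (with a
          * bounded busy-loop) for the chip to clear the bit again.
          */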
    934 void
    935 siop_clearfifo(struct siop_common_softc *sc)
    936 {
    937 	int timeout = 0;
    938 	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
    939 
    940 #ifdef DEBUG_INTR
     941 	printf("DMA FIFO not empty!\n");
    942 #endif
    943 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
    944 	    ctest3 | CTEST3_CLF);
    945 	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
    946 	    CTEST3_CLF) != 0) {
    947 		delay(1);
    948 		if (++timeout > 1000) {
    949 			printf("clear fifo failed\n");
    950 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
    951 			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
    952 			    SIOP_CTEST3) & ~CTEST3_CLF);
    953 			return;
    954 		}
    955 	}
    956 }
    957 
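         /*
          * siop_modechange: called when DIFFSENSE reports a bus mode change.
          * Wait for the reported mode to stabilise, then program STEST2 for
          * single-ended, LVD or differential operation. Returns 1 on success,
          * 0 if the mode is invalid or never stabilised.
          */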
    958 int
    959 siop_modechange(struct siop_common_softc *sc)
    960 {
    961 	int retry;
    962 	int sist1, stest2;
    963 
    964 	for (retry = 0; retry < 5; retry++) {
    965 		/*
    966 		 * datasheet says to wait 100ms and re-read SIST1,
    967 		 * to check that DIFFSENSE is stable.
     968 		 * We may delay() up to 5 times for 100ms each at interrupt time;
    969 		 * hopefully this will not happen often.
    970 		 */
    971 		delay(100000);
    972 		(void)bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
    973 		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
    974 		if (sist1 & SIEN1_SBMC)
    975 			continue; /* we got an irq again */
    976 		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
    977 		    STEST4_MODE_MASK;
    978 		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
    979 		switch(sc->mode) {
    980 		case STEST4_MODE_DIF:
    981 			printf("%s: switching to differential mode\n",
    982 			    device_xname(sc->sc_dev));
    983 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
    984 			    stest2 | STEST2_DIF);
    985 			break;
    986 		case STEST4_MODE_SE:
    987 			printf("%s: switching to single-ended mode\n",
    988 			    device_xname(sc->sc_dev));
    989 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
    990 			    stest2 & ~STEST2_DIF);
    991 			break;
    992 		case STEST4_MODE_LVD:
    993 			printf("%s: switching to LVD mode\n",
    994 			    device_xname(sc->sc_dev));
    995 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
    996 			    stest2 & ~STEST2_DIF);
    997 			break;
    998 		default:
    999 			aprint_error_dev(sc->sc_dev, "invalid SCSI mode 0x%x\n",
   1000 			    sc->mode);
   1001 			return 0;
   1002 		}
   1003 		return 1;
   1004 	}
   1005 	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
   1006 	    device_xname(sc->sc_dev));
   1007 	return 0;
   1008 }
   1009 
   1010 void
   1011 siop_resetbus(struct siop_common_softc *sc)
   1012 {
   1013 	int scntl1;
   1014 
   1015 	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
   1016 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
   1017 	    scntl1 | SCNTL1_RST);
   1018 	/* minimum 25 us, more time won't hurt */
   1019 	delay(100);
   1020 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
   1021 }
   1022 
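         /*
          * siop_update_xfer_mode: report the negotiated parameters (wide,
          * sync period/offset, tagged queuing) for a target to the scsipi
          * layer via an ASYNC_EVENT_XFER_MODE event.
          */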
   1023 void
   1024 siop_update_xfer_mode(struct siop_common_softc *sc, int target)
   1025 {
   1026 	struct siop_common_target *siop_target = sc->targets[target];
   1027 	struct scsipi_xfer_mode xm;
   1028 
   1029 	xm.xm_target = target;
   1030 	xm.xm_mode = 0;
   1031 	xm.xm_period = 0;
   1032 	xm.xm_offset = 0;
   1033 
   1034 	if (siop_target->flags & TARF_ISWIDE)
   1035 		xm.xm_mode |= PERIPH_CAP_WIDE16;
   1036 	if (siop_target->period) {
   1037 		xm.xm_period = siop_target->period;
   1038 		xm.xm_offset = siop_target->offset;
   1039 		xm.xm_mode |= PERIPH_CAP_SYNC;
   1040 	}
   1041 	if (siop_target->flags & TARF_TAG) {
   1042 	/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
   1043 		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
   1044 		    (sc->targets[target]->flags & TARF_ISWIDE))
   1045 			xm.xm_mode |= PERIPH_CAP_TQING;
   1046 	}
   1047 
   1048 	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
   1049 }
   1050