siop_common.c revision 1.10 1 /* $NetBSD: siop_common.c,v 1.10 2000/10/23 14:56:17 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/buf.h>
40 #include <sys/kernel.h>
41 #include <sys/scsiio.h>
42
43 #include <machine/endian.h>
44 #include <machine/bus.h>
45
46 #include <dev/scsipi/scsi_all.h>
47 #include <dev/scsipi/scsi_message.h>
48 #include <dev/scsipi/scsipi_all.h>
49
50 #include <dev/scsipi/scsiconf.h>
51
52 #include <dev/ic/siopreg.h>
53 #include <dev/ic/siopvar.h>
54 #include <dev/ic/siopvar_common.h>
55
56 #undef DEBUG
57 #undef DEBUG_DR
58
/*
 * siop_common_reset: software-reset the 53c8xx chip, program the
 * operating registers to a known state (arbitration, clock divisor,
 * interrupt enables, selection timeout, our SCSI id), optionally
 * engage the SCLK doubler/quadrupler, and finally call the
 * bus-front-end specific reset hook (sc->sc_reset).
 */
void
siop_common_reset(sc)
	struct siop_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	/* enable all DMA interrupts */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	/* enable SCSI interrupts except those handled by the script */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	/* selection timeout */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	/* our SCSI id; also respond to (re)selections on it */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_link.scsipi_scsi.adapter_target | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_link.scsipi_scsi.adapter_target);
	/* enable script prefetch only if the chip supports it */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	/* use the large (536-byte) DMA FIFO when the chip has one */
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);

	sc->sc_reset(sc);
}
122
123 /* prepare tables before sending a cmd */
124 void
125 siop_setuptables(siop_cmd)
126 struct siop_cmd *siop_cmd;
127 {
128 int i;
129 struct siop_softc *sc = siop_cmd->siop_sc;
130 struct scsipi_xfer *xs = siop_cmd->xs;
131 int target = xs->sc_link->scsipi_scsi.target;
132 int lun = xs->sc_link->scsipi_scsi.lun;
133
134 siop_cmd->siop_tables.id = htole32(sc->targets[target]->id);
135 memset(siop_cmd->siop_tables.msg_out, 0, 8);
136 siop_cmd->siop_tables.msg_out[0] = MSG_IDENTIFY(lun, 1);
137 siop_cmd->siop_tables.t_msgout.count= htole32(1);
138 if (sc->targets[target]->status == TARST_ASYNC) {
139 if (sc->targets[target]->flags & TARF_WIDE) {
140 sc->targets[target]->status = TARST_WIDE_NEG;
141 siop_wdtr_msg(siop_cmd, 1, MSG_EXT_WDTR_BUS_16_BIT);
142 } else if (sc->targets[target]->flags & TARF_SYNC) {
143 sc->targets[target]->status = TARST_SYNC_NEG;
144 siop_sdtr_msg(siop_cmd, 1, sc->minsync, sc->maxoff);
145 } else {
146 sc->targets[target]->status = TARST_OK;
147 }
148 } else if (sc->targets[target]->status == TARST_OK &&
149 (sc->targets[target]->flags & TARF_TAG) &&
150 (siop_cmd->status == CMDST_SENSE) == 0) {
151 siop_cmd->flags |= CMDFL_TAG;
152 }
153 siop_cmd->siop_tables.status = htole32(0xff); /* set invalid status */
154
155 siop_cmd->siop_tables.cmd.count =
156 htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
157 siop_cmd->siop_tables.cmd.addr =
158 htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
159 if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ||
160 siop_cmd->status == CMDST_SENSE) {
161 for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
162 siop_cmd->siop_tables.data[i].count =
163 htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
164 siop_cmd->siop_tables.data[i].addr =
165 htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
166 }
167 }
168 siop_table_sync(siop_cmd, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
169 }
170
/*
 * siop_wdtr_neg: handle a wide data transfer request (WDTR) extended
 * message in msg_in, either as the answer to a negotiation we started
 * (status == TARST_WIDE_NEG) or as a target-initiated negotiation.
 * Returns SIOP_NEG_MSGOUT when a reply message has been built in the
 * msg_out table, SIOP_NEG_ACK when the message only needs an ACK.
 */
int
siop_wdtr_neg(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	struct siop_softc *sc = siop_cmd->siop_sc;
	struct siop_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->scsipi_scsi.target;
	struct siop_xfer_common *tables = &siop_cmd->siop_xfer->tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			printf("%s: target %d using 8bit transfers\n",
			    sc->sc_dev.dv_xname, target);
			siop_target->flags &= ~TARF_ISWIDE;
			/* SCNTL3 value lives in the top byte of ->id */
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				printf("%s: target %d using 16bit transfers\n",
				    sc->sc_dev.dv_xname, target);
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
		/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count= htole32(1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		/* commit the new wide setting to the table and to SCNTL3 */
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->minsync, sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			printf("%s: target %d using 16bit transfers\n",
			    sc->sc_dev.dv_xname, target);
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			printf("%s: target %d using 8bit transfers\n",
			    sc->sc_dev.dv_xname, target);
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		/* reply with the mode we actually selected */
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}
252
/*
 * siop_sdtr_neg: handle a synchronous data transfer request (SDTR)
 * extended message in msg_in (period in msg_in[3], offset in
 * msg_in[4]), either as the answer to a negotiation we started
 * (status == TARST_SYNC_NEG) or as a target-initiated negotiation.
 * Updates the SCNTL3/SXFER fields cached in the target's id word and
 * programs the chip accordingly.  Returns SIOP_NEG_MSGOUT when a reply
 * message has been built, SIOP_NEG_ACK otherwise.
 */
int
siop_sdtr_neg(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	struct siop_softc *sc = siop_cmd->siop_sc;
	struct siop_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->scsipi_scsi.target;
	int sync, offset, i;
	int send_msgout = 0;
	struct siop_xfer_common *tables = &siop_cmd->siop_xfer->tables;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		/* the target's answer must be within our supported range */
		if (offset > sc->maxoff || sync < sc->minsync ||
		    sync > sc->maxsync)
			goto reject;
		/* look up the SCF divisor matching this period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				printf("%s: target %d now synchronous at "
				    "%sMhz, offset %d\n", sc->sc_dev.dv_xname,
				    target, scf_period[i].rate, offset);
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25) /* Ultra */
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				/* SXFER offset lives in bits 8-15 of ->id */
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count= htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		printf("%s: target %d asynchronous\n", sc->sc_dev.dv_xname,
		    target);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->maxsync) { /* async */
			goto async;
		}
		/* clamp the request to what we support */
		if (offset > sc->maxoff)
			offset = sc->maxoff;
		if (sync < sc->minsync)
			sync = sc->minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				printf("%s: target %d now synchronous at "
				    "%sMhz, offset %d\n", sc->sc_dev.dv_xname,
				    target, scf_period[i].rate, offset);
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25) /* Ultra */
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				/* answer with the (clamped) agreement */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		/* no match: stay async and tell the target so */
		printf("%s: target %d asynchronous\n",
		    sc->sc_dev.dv_xname, target);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	/* program the chip with the new SCNTL3/SXFER values */
	tables->id = htole32(sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}
379
380 void
381 siop_sdtr_msg(siop_cmd, offset, ssync, soff)
382 struct siop_cmd *siop_cmd;
383 int offset;
384 int ssync, soff;
385 {
386 siop_cmd->siop_tables.msg_out[offset + 0] = MSG_EXTENDED;
387 siop_cmd->siop_tables.msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
388 siop_cmd->siop_tables.msg_out[offset + 2] = MSG_EXT_SDTR;
389 siop_cmd->siop_tables.msg_out[offset + 3] = ssync;
390 siop_cmd->siop_tables.msg_out[offset + 4] = soff;
391 siop_cmd->siop_tables.t_msgout.count =
392 htole32(offset + MSG_EXT_SDTR_LEN + 2);
393 }
394
395 void
396 siop_wdtr_msg(siop_cmd, offset, wide)
397 struct siop_cmd *siop_cmd;
398 int offset;
399 {
400 siop_cmd->siop_tables.msg_out[offset + 0] = MSG_EXTENDED;
401 siop_cmd->siop_tables.msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
402 siop_cmd->siop_tables.msg_out[offset + 2] = MSG_EXT_WDTR;
403 siop_cmd->siop_tables.msg_out[offset + 3] = wide;
404 siop_cmd->siop_tables.t_msgout.count =
405 htole32(offset + MSG_EXT_WDTR_LEN + 2);
406 }
407
/*
 * siop_minphys: adapter minphys hook; this driver imposes no extra
 * transfer-size limit, so just apply the system default.
 */
void
siop_minphys(bp)
	struct buf *bp;
{
	minphys(bp);
}
414
/*
 * siop_ioctl: adapter ioctl entry point.
 * SCBUSACCEL enables tagged queuing / wide / sync negotiation for a
 * target (per-target flags are set here; the actual negotiation is
 * scheduled by siop_setuptables on the next command).  SCBUSIORESET
 * pulses the SCSI reset line.  Returns 0 on success, ENOTTY for
 * unknown commands.
 */
int
siop_ioctl(link, cmd, arg, flag, p)
	struct scsipi_link *link;
	u_long cmd;
	caddr_t arg;
	int flag;
	struct proc *p;
{
	struct siop_softc *sc = link->adapter_softc;
	u_int8_t scntl1;
	int s;

	switch (cmd) {
	case SCBUSACCEL:
	{
		struct scbusaccel_args *sp = (struct scbusaccel_args *)arg;
		s = splbio();
		/* per-target settings are only changed via lun 0 */
		if (sp->sa_lun == 0) {
			if (sp->sa_flags & SC_ACCEL_TAGS) {
				sc->targets[sp->sa_target]->flags |= TARF_TAG;
				printf("%s: target %d using tagged queuing\n",
				    sc->sc_dev.dv_xname, sp->sa_target);
			}
			if ((sp->sa_flags & SC_ACCEL_WIDE) &&
			    (sc->features & SF_BUS_WIDE))
				sc->targets[sp->sa_target]->flags |= TARF_WIDE;
			if (sp->sa_flags & SC_ACCEL_SYNC)
				sc->targets[sp->sa_target]->flags |= TARF_SYNC;
			/* TARST_ASYNC makes the next command negotiate */
			if ((sp->sa_flags & (SC_ACCEL_SYNC | SC_ACCEL_WIDE)) ||
			    sc->targets[sp->sa_target]->status == TARST_PROBING)
				sc->targets[sp->sa_target]->status =
				    TARST_ASYNC;
		}

		/* allocate a lun sw entry for this device */
		siop_add_dev(sc, sp->sa_target, sp->sa_lun);
		/*
		 * if we do tagged queueing, inform upper layer
		 * we can have SIOP_NTAG concurrent commands
		 */
		if (sc->targets[sp->sa_target]->flags & TARF_TAG)
			link->openings = SIOP_NTAG;
		splx(s);
		return 0;
	}
	case SCBUSIORESET:
		s = splbio();
		scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
		    scntl1 | SCNTL1_RST);
		/* minimum 25 us, more time won't hurt */
		delay(100);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}
474
/*
 * siop_sdp: save data pointers after a disconnect.  The script left
 * the index of the active data segment in SCRATCHA1; patch that
 * segment's table entry so count/addr describe only the not-yet-
 * transferred bytes.  For data-out we also have to add back bytes
 * that were prefetched into the DMA and SCSI FIFOs but never made it
 * onto the bus.  Handle async only for now.
 */
void
siop_sdp(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	/* save data pointer. Handle async only for now */
	int offset, dbc, sstat;
	struct siop_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table to patch */

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		printf("%s: bad offset in siop_sdp (%d)\n",
		    sc->sc_dev.dv_xname, offset);
		return;
	}
	table = &siop_cmd->siop_xfer->tables.data[offset];
#ifdef DEBUG_DR
	printf("sdp: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	/* DBC holds the 24-bit residual byte count of the transfer */
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		/* need to account stale data in FIFO */
		int dfifo = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
		if (sc->features & SF_CHIP_FIFO) {
			/* large FIFO: 10-bit counter, high bits in CTEST5 */
			dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
			dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
		} else {
			/* small FIFO: 7-bit counter */
			dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
		}
		/* bytes still latched in the SCSI output registers */
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if (sstat & SSTAT0_ORF)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			/* wide bus: check the second byte lane too */
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if (sstat & SSTAT2_ORF1)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	/* advance addr past the transferred bytes; count is what's left */
	table->addr =
	    htole32(le32toh(table->addr) + le32toh(table->count) - dbc);
	table->count = htole32(dbc);
#ifdef DEBUG_DR
	printf("now count=%d addr=0x%x\n", table->count, table->addr);
#endif
}
534
535 void
536 siop_clearfifo(sc)
537 struct siop_softc *sc;
538 {
539 int timeout = 0;
540 int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
541
542 #ifdef DEBUG_INTR
543 printf("DMA fifo not empty !\n");
544 #endif
545 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
546 ctest3 | CTEST3_CLF);
547 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
548 CTEST3_CLF) != 0) {
549 delay(1);
550 if (++timeout > 1000) {
551 printf("clear fifo failed\n");
552 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
553 bus_space_read_1(sc->sc_rt, sc->sc_rh,
554 SIOP_CTEST3) & ~CTEST3_CLF);
555 return;
556 }
557 }
558 }
559
/*
 * siop_modechange: handle a SCSI bus mode change (SBMC) interrupt:
 * read the new mode reported by DIFFSENSE in STEST4 and reprogram
 * STEST2/STEST0 to match.  Returns 1 on success, 0 if the mode is
 * invalid or DIFFSENSE never stabilised.
 */
int
siop_modechange(sc)
	struct siop_softc *sc;
{
	int retry;
	int sist0, sist1, stest2, stest4;
	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		/* reading SIST0/SIST1 also acks pending SCSI interrupts */
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		stest4 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(stest4) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, stest4);
			return 0;
		}
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST0,
		    stest4 >> 2);
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}
613
614 void
615 siop_resetbus(sc)
616 struct siop_softc *sc;
617 {
618 int scntl1;
619 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
620 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
621 scntl1 | SCNTL1_RST);
622 /* minimum 25 us, more time won't hurt */
623 delay(100);
624 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
625 }
626