/* $NetBSD: siop_common.c,v 1.3.2.4 2002/01/29 22:41:17 he Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/buf.h>
40 #include <sys/kernel.h>
41 #include <sys/scsiio.h>
42
43 #include <machine/endian.h>
44 #include <machine/bus.h>
45
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/vm_kern.h>
49
50 #include <dev/scsipi/scsi_all.h>
51 #include <dev/scsipi/scsi_message.h>
52 #include <dev/scsipi/scsipi_all.h>
53
54 #include <dev/scsipi/scsiconf.h>
55
56 #include <dev/ic/siopreg.h>
57 #include <dev/ic/siopvar.h>
58 #include <dev/ic/siopvar_common.h>
59
60 #undef DEBUG
61 #undef DEBUG_DR
62
/*
 * siop_common_reset: soft-reset the 53c7/8xx core and reprogram all
 * operating registers to the driver's defaults, then invoke the
 * bus-front-end's sc_reset hook for chip-specific work.
 */
void
siop_common_reset(sc)
	struct siop_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	/* enable all DMA interrupts ... */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	/* ... and all SCSI interrupts except those handled by the script */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	/* selection timeout */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	/* our own SCSI id, and respond to (re)selections for it */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_link.scsipi_scsi.adapter_target | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_link.scsipi_scsi.adapter_target);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	/* enlarge the DMA FIFO on chips that support it */
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);

	/* let the bus attachment finish its chip-specific reset work */
	sc->sc_reset(sc);
}
126
/*
 * prepare tables before sending a cmd: fill in the identify (and
 * possibly negotiation) message, the invalid-status sentinel, and the
 * command/data DMA pointers in the per-command script tables.
 * Caller must already have loaded dmamap_cmd and dmamap_data.
 */
void
siop_setuptables(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	int i;
	struct siop_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->sc_link->scsipi_scsi.target;
	int lun = xs->sc_link->scsipi_scsi.lun;

	siop_cmd->siop_tables.id = htole32(sc->targets[target]->id);
	memset(siop_cmd->siop_tables.msg_out, 0, 8);
	/* no disconnect privilege while running a REQUEST SENSE */
	if (siop_cmd->status != CMDST_SENSE)
		siop_cmd->siop_tables.msg_out[0] = MSG_IDENTIFY(lun, 1);
	else
		siop_cmd->siop_tables.msg_out[0] = MSG_IDENTIFY(lun, 0);
	siop_cmd->siop_tables.t_msgout.count= htole32(1);
	/* first command for this target: start wide/sync negotiation */
	if (sc->targets[target]->status == TARST_ASYNC) {
		if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, 1, MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 1, sc->minsync, sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
		}
	} else if (sc->targets[target]->status == TARST_OK &&
	    (sc->targets[target]->flags & TARF_TAG) &&
	    siop_cmd->status != CMDST_SENSE) {
		/* tagged queuing, but never for a REQUEST SENSE */
		siop_cmd->flags |= CMDFL_TAG;
	}
	siop_cmd->siop_tables.status =
	    htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */

	/* DMA pointer for the command bytes (single segment) */
	siop_cmd->siop_tables.cmd.count =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables.cmd.addr =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	/* scatter/gather entries for the data phase, if any */
	if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ||
	    siop_cmd->status == CMDST_SENSE) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables.data[i].count =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables.data[i].addr =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
	/* make the tables visible to the chip before the script runs */
	siop_table_sync(siop_cmd, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
178
/*
 * Handle a received WDTR (wide data transfer request) message.
 * Updates the TARF_ISWIDE flag and the SCNTL3 bits cached in the
 * high byte of the per-target id word, and reprograms SCNTL3.
 * Returns SIOP_NEG_MSGOUT when a reply message has been built in
 * msg_out (caller must send it), SIOP_NEG_ACK when the negotiation
 * is complete and the message should just be acknowledged.
 */
int
siop_wdtr_neg(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	struct siop_softc *sc = siop_cmd->siop_sc;
	struct siop_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->scsipi_scsi.target;
	struct siop_xfer_common *tables = &siop_cmd->siop_xfer->tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {	/* requested bus width */
		case MSG_EXT_WDTR_BUS_8_BIT:
			printf("%s: target %d using 8bit transfers\n",
			    sc->sc_dev.dv_xname, target);
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				printf("%s: target %d using 16bit transfers\n",
				    sc->sc_dev.dv_xname, target);
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
			/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count= htole32(1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		/* propagate the SCNTL3 value cached in id bits 24-31 */
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->minsync, sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			printf("%s: target %d using 16bit transfers\n",
			    sc->sc_dev.dv_xname, target);
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			printf("%s: target %d using 8bit transfers\n",
			    sc->sc_dev.dv_xname, target);
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		/* reply with the width we actually selected */
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}
260
/*
 * Handle a received SDTR (synchronous data transfer request) message.
 * Looks up the requested period in scf_period[] for the current chip
 * clock, updates the SCNTL3 (bits 24-31) and SXFER (bits 8-15) values
 * cached in the per-target id word and reprograms both registers.
 * Returns SIOP_NEG_MSGOUT when a reply (SDTR or MESSAGE REJECT) has
 * been built in msg_out, SIOP_NEG_ACK otherwise.
 */
int
siop_sdtr_neg(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	struct siop_softc *sc = siop_cmd->siop_sc;
	struct siop_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->scsipi_scsi.target;
	int sync, offset, i;
	int send_msgout = 0;
	struct siop_xfer_common *tables = &siop_cmd->siop_xfer->tables;

	sync = tables->msg_in[3];	/* transfer period factor */
	offset = tables->msg_in[4];	/* REQ/ACK offset */

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		/* reject anything outside our supported range */
		if (offset > sc->maxoff || sync < sc->minsync ||
			sync > sc->maxsync)
			goto reject;
		/* find a clock divider entry matching the agreed period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				printf("%s: target %d now synchronous at "
				    "%sMhz, offset %d\n", sc->sc_dev.dv_xname,
				    target, scf_period[i].rate, offset);
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25) /* Ultra */
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count= htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		printf("%s: target %d asynchronous\n", sc->sc_dev.dv_xname,
		    target);
		/* clear all sync-related bits: back to async */
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->maxsync) { /* async */
			goto async;
		}
		/* clamp the target's request to what we can do */
		if (offset > sc->maxoff)
			offset = sc->maxoff;
		if (sync < sc->minsync)
			sync = sc->minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				printf("%s: target %d now synchronous at "
				    "%sMhz, offset %d\n", sc->sc_dev.dv_xname,
				    target, scf_period[i].rate, offset);
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25) /* Ultra */
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				/* answer with the negotiated parameters */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		/* no match: fall back to async and tell the target so */
		printf("%s: target %d asynchronous\n",
		    sc->sc_dev.dv_xname, target);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	/* push the new SCNTL3/SXFER values to the tables and the chip */
	tables->id = htole32(sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}
387
/*
 * Build an extended SDTR message in the command's msg_out buffer,
 * starting at byte 'offset', and update the outgoing message count.
 * ssync is the transfer period factor, soff the REQ/ACK offset.
 */
void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
	struct siop_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables.msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables.msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables.msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables.msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables.msg_out[offset + 4] = soff;
	/* total = offset + 2-byte extended-message header + SDTR payload */
	siop_cmd->siop_tables.t_msgout.count =
	    htole32(offset + MSG_EXT_SDTR_LEN + 2);
}
402
403 void
404 siop_wdtr_msg(siop_cmd, offset, wide)
405 struct siop_cmd *siop_cmd;
406 int offset;
407 {
408 siop_cmd->siop_tables.msg_out[offset + 0] = MSG_EXTENDED;
409 siop_cmd->siop_tables.msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
410 siop_cmd->siop_tables.msg_out[offset + 2] = MSG_EXT_WDTR;
411 siop_cmd->siop_tables.msg_out[offset + 3] = wide;
412 siop_cmd->siop_tables.t_msgout.count =
413 htole32(offset + MSG_EXT_WDTR_LEN + 2);
414 }
415
/*
 * Adapter minphys routine: the driver imposes no transfer size limit
 * of its own, so just apply the system default via minphys().
 */
void
siop_minphys(bp)
	struct buf *bp;
{
	minphys(bp);
}
422
/*
 * Adapter-control ioctl entry point.
 * SCBUSACCEL: record per-target capabilities (tagged queuing, wide,
 * sync) and allocate the per-lun software state; SCBUSIORESET: pulse
 * SCSI bus reset.  Returns 0 on success, ENOTTY for unknown commands.
 */
int
siop_ioctl(link, cmd, arg, flag, p)
	struct scsipi_link *link;
	u_long cmd;
	caddr_t arg;
	int flag;
	struct proc *p;
{
	struct siop_softc *sc = link->adapter_softc;
	u_int8_t scntl1;
	int s;

	switch (cmd) {
	case SCBUSACCEL:
	{
		struct scbusaccel_args *sp = (struct scbusaccel_args *)arg;
		s = splbio();
		/*
		 * per-target flags are only touched on the lun 0 call.
		 * NOTE(review): this dereferences sc->targets[sp->sa_target]
		 * before siop_add_dev() runs — assumes the target entry is
		 * already allocated; verify against callers.
		 */
		if (sp->sa_lun == 0) {
			if (sp->sa_flags & SC_ACCEL_TAGS) {
				sc->targets[sp->sa_target]->flags |= TARF_TAG;
				printf("%s: target %d using tagged queuing\n",
				    sc->sc_dev.dv_xname, sp->sa_target);
			}
			if ((sp->sa_flags & SC_ACCEL_WIDE) &&
			    (sc->features & SF_BUS_WIDE))
				sc->targets[sp->sa_target]->flags |= TARF_WIDE;
			if (sp->sa_flags & SC_ACCEL_SYNC)
				sc->targets[sp->sa_target]->flags |= TARF_SYNC;
			/* schedule a fresh negotiation if anything changed */
			if ((sp->sa_flags & (SC_ACCEL_SYNC | SC_ACCEL_WIDE)) ||
			    sc->targets[sp->sa_target]->status == TARST_PROBING)
				sc->targets[sp->sa_target]->status =
				    TARST_ASYNC;
		}

		/* allocate a lun sw entry for this device */
		siop_add_dev(sc, sp->sa_target, sp->sa_lun);
		/*
		 * if we can do tagged queuing, inform upper layer
		 * we can have SIOP_NTAG concurrent commands
		 */
		if (sc->targets[sp->sa_target]->flags & TARF_TAG)
			link->openings = SIOP_NTAG;
		splx(s);
		return 0;
	}
	case SCBUSIORESET:
		s = splbio();
		scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
		    scntl1 | SCNTL1_RST);
		/* minimum 25 us, more time won't hurt */
		delay(100);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
		splx(s);
		return (0);
	default:
		return (ENOTTY);
	}
}
482
/*
 * Save the data pointer after a phase change/disconnect: patch the
 * current scatter/gather entry so that a later restart resumes the
 * transfer exactly where the chip stopped.
 */
void
siop_sdp(siop_cmd)
	struct siop_cmd *siop_cmd;
{
	/* save data pointer. Handle async only for now */
	int offset, dbc, sstat;
	struct siop_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table to patch */

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */
	/*
	 * index of the S/G entry being transferred — presumably kept
	 * up to date in SCRATCHA byte 1 by the script; verify there.
	 */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		printf("%s: bad offset in siop_sdp (%d)\n",
		    sc->sc_dev.dv_xname, offset);
		return;
	}
	table = &siop_cmd->siop_xfer->tables.data[offset];
#ifdef DEBUG_DR
	printf("sdp: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	/* low 24 bits of DBC = residual byte count of the current move */
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		/*
		 * on data-out, bytes already fetched from memory may still
		 * sit in the DMA FIFO and SCSI output latches; count them
		 * as not yet transferred.
		 */
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account stale data in FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				/* large FIFO: 10-bit byte offset counter */
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		/* one more byte per full SCSI output latch/register */
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			/* wide bus: check the upper-byte latches too */
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	/* advance the entry past what was transferred; dbc bytes remain */
	table->addr =
	    htole32(le32toh(table->addr) + le32toh(table->count) - dbc);
	table->count = htole32(dbc);
#ifdef DEBUG_DR
	printf("now count=%d addr=0x%x\n", table->count, table->addr);
#endif
}
549
550 void
551 siop_clearfifo(sc)
552 struct siop_softc *sc;
553 {
554 int timeout = 0;
555 int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
556
557 #ifdef DEBUG_INTR
558 printf("DMA fifo not empty !\n");
559 #endif
560 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
561 ctest3 | CTEST3_CLF);
562 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
563 CTEST3_CLF) != 0) {
564 delay(1);
565 if (++timeout > 1000) {
566 printf("clear fifo failed\n");
567 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
568 bus_space_read_1(sc->sc_rt, sc->sc_rh,
569 SIOP_CTEST3) & ~CTEST3_CLF);
570 return;
571 }
572 }
573 }
574
/*
 * Handle an SBMC (SCSI bus mode change) condition: read the DIFFSENSE
 * level from STEST4 and switch the chip between single-ended,
 * high-voltage differential and LVD operation.
 * Returns 1 on success, 0 if the mode is invalid or DIFFSENSE never
 * stabilises.
 */
int
siop_modechange(sc)
	struct siop_softc *sc;
{
	int retry;
	int sist0, sist1, stest2, stest4;
	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		/* reading SIST0/SIST1 also acknowledges pending conditions */
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		stest4 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(stest4) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, stest4);
			return 0;
		}
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST0,
		    stest4 >> 2);
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}
628
629 void
630 siop_resetbus(sc)
631 struct siop_softc *sc;
632 {
633 int scntl1;
634 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
635 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
636 scntl1 | SCNTL1_RST);
637 /* minimum 25 us, more time won't hurt */
638 delay(100);
639 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
640 }
641