siop_common.c revision 1.23 1 /* $NetBSD: siop_common.c,v 1.23 2002/04/23 20:41:15 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2000, 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.23 2002/04/23 20:41:15 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44 #include <sys/scsiio.h>
45
46 #include <uvm/uvm_extern.h>
47
48 #include <machine/endian.h>
49 #include <machine/bus.h>
50
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_message.h>
53 #include <dev/scsipi/scsipi_all.h>
54
55 #include <dev/scsipi/scsiconf.h>
56
57 #include <dev/ic/siopreg.h>
58 #include <dev/ic/siopvar_common.h>
59
60 #include "opt_siop.h"
61
62 #undef DEBUG
63 #undef DEBUG_DR
64 #undef DEBUG_NEG
65
66 int
67 siop_common_attach(sc)
68 struct siop_common_softc *sc;
69 {
70 int error, i;
71 bus_dma_segment_t seg;
72 int rseg;
73
74 /*
75 * Allocate DMA-safe memory for the script and map it.
76 */
77 if ((sc->features & SF_CHIP_RAM) == 0) {
78 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
79 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
80 if (error) {
81 printf("%s: unable to allocate script DMA memory, "
82 "error = %d\n", sc->sc_dev.dv_xname, error);
83 return error;
84 }
85 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
86 (caddr_t *)&sc->sc_script,
87 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
88 if (error) {
89 printf("%s: unable to map script DMA memory, "
90 "error = %d\n", sc->sc_dev.dv_xname, error);
91 return error;
92 }
93 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
94 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
95 if (error) {
96 printf("%s: unable to create script DMA map, "
97 "error = %d\n", sc->sc_dev.dv_xname, error);
98 return error;
99 }
100 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
101 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
102 if (error) {
103 printf("%s: unable to load script DMA map, "
104 "error = %d\n", sc->sc_dev.dv_xname, error);
105 return error;
106 }
107 sc->sc_scriptaddr =
108 sc->sc_scriptdma->dm_segs[0].ds_addr;
109 sc->ram_size = PAGE_SIZE;
110 }
111
112 sc->sc_adapt.adapt_dev = &sc->sc_dev;
113 sc->sc_adapt.adapt_nchannels = 1;
114 sc->sc_adapt.adapt_openings = 0;
115 sc->sc_adapt.adapt_ioctl = siop_ioctl;
116 sc->sc_adapt.adapt_minphys = minphys;
117
118 memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
119 sc->sc_chan.chan_adapter = &sc->sc_adapt;
120 sc->sc_chan.chan_bustype = &scsi_bustype;
121 sc->sc_chan.chan_channel = 0;
122 sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
123 sc->sc_chan.chan_ntargets =
124 (sc->features & SF_BUS_WIDE) ? 16 : 8;
125 sc->sc_chan.chan_nluns = 8;
126 sc->sc_chan.chan_id =
127 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
128 if (sc->sc_chan.chan_id == 0 ||
129 sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
130 sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
131
132 for (i = 0; i < 16; i++)
133 sc->targets[i] = NULL;
134
135 /* find min/max sync period for this chip */
136 sc->st_maxsync = 0;
137 sc->dt_maxsync = 0;
138 sc->st_minsync = 255;
139 sc->dt_minsync = 255;
140 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
141 if (sc->clock_period != scf_period[i].clock)
142 continue;
143 if (sc->st_maxsync < scf_period[i].period)
144 sc->st_maxsync = scf_period[i].period;
145 if (sc->st_minsync > scf_period[i].period)
146 sc->st_minsync = scf_period[i].period;
147 }
148 if (sc->st_maxsync == 255 || sc->st_minsync == 0)
149 panic("siop: can't find my sync parameters\n");
150 for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
151 if (sc->clock_period != dt_scf_period[i].clock)
152 continue;
153 if (sc->dt_maxsync < dt_scf_period[i].period)
154 sc->dt_maxsync = dt_scf_period[i].period;
155 if (sc->dt_minsync > dt_scf_period[i].period)
156 sc->dt_minsync = dt_scf_period[i].period;
157 }
158 if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
159 panic("siop: can't find my sync parameters\n");
160 return 0;
161 }
162
/*
 * siop_common_reset: soft-reset the chip and reprogram the static
 * register set (arbitration, interrupt masks, selection timer, SCSI id,
 * clock doubler/quadrupler, prefetch/FIFO/LED options), then invoke the
 * bus-front-end reset hook sc->sc_reset.  The register write order below
 * follows the chip init sequence and should not be rearranged.
 */
void
siop_common_reset(sc)
	struct siop_common_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	/* enable all DMA interrupts */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	/* SCSI interrupts, except those handled by the script itself */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	/* selection timeout */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	/* our SCSI id, and respond to (re)selection on it */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	/* large-FIFO chips: switch the DMA FIFO to its big size */
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}

	/* bus-specific (siop/esiop front end) part of the reset */
	sc->sc_reset(sc);
}
235
236 /* prepare tables before sending a cmd */
237 void
238 siop_setuptables(siop_cmd)
239 struct siop_common_cmd *siop_cmd;
240 {
241 int i;
242 struct siop_common_softc *sc = siop_cmd->siop_sc;
243 struct scsipi_xfer *xs = siop_cmd->xs;
244 int target = xs->xs_periph->periph_target;
245 int lun = xs->xs_periph->periph_lun;
246 int msgoffset = 1;
247
248 siop_cmd->siop_tables->id = htole32(sc->targets[target]->id);
249 memset(siop_cmd->siop_tables->msg_out, 0,
250 sizeof(siop_cmd->siop_tables->msg_out));
251 /* request sense doesn't disconnect */
252 if (xs->xs_control & XS_CTL_REQSENSE)
253 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
254 else
255 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
256 siop_cmd->siop_tables->t_msgout.count= htole32(1);
257 if (xs->xs_tag_type != 0) {
258 if ((sc->targets[target]->flags & TARF_TAG) == 0) {
259 scsipi_printaddr(xs->xs_periph);
260 printf(": tagged command type %d id %d\n",
261 siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
262 panic("tagged command for non-tagging device\n");
263 }
264 siop_cmd->flags |= CMDFL_TAG;
265 siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
266 /*
267 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
268 * different one
269 */
270 siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
271 siop_cmd->siop_tables->t_msgout.count = htole32(3);
272 msgoffset = 3;
273 }
274 if (sc->targets[target]->status == TARST_ASYNC) {
275 if (sc->targets[target]->flags & TARF_DT) {
276 sc->targets[target]->status = TARST_PPR_NEG;
277 siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
278 sc->maxoff);
279 } else if (sc->targets[target]->flags & TARF_WIDE) {
280 sc->targets[target]->status = TARST_WIDE_NEG;
281 siop_wdtr_msg(siop_cmd, msgoffset,
282 MSG_EXT_WDTR_BUS_16_BIT);
283 } else if (sc->targets[target]->flags & TARF_SYNC) {
284 sc->targets[target]->status = TARST_SYNC_NEG;
285 siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
286 (sc->maxoff > 31) ? 31 : sc->maxoff);
287 } else {
288 sc->targets[target]->status = TARST_OK;
289 siop_update_xfer_mode(sc, target);
290 }
291 }
292 siop_cmd->siop_tables->status =
293 htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */
294
295 siop_cmd->siop_tables->cmd.count =
296 htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
297 siop_cmd->siop_tables->cmd.addr =
298 htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
299 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
300 for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
301 siop_cmd->siop_tables->data[i].count =
302 htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
303 siop_cmd->siop_tables->data[i].addr =
304 htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
305 }
306 }
307 }
308
/*
 * siop_wdtr_neg: handle an incoming WDTR (wide negotiation) extended
 * message.  Returns SIOP_NEG_ACK when the target's answer can simply be
 * ACKed, or SIOP_NEG_MSGOUT when a reply has been built in msg_out and
 * must be sent back to the target.
 */
int
siop_wdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {	/* answered bus width code */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* target wants to stay narrow */
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
		/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count= htole32(1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		/* update the script table and chip SCNTL3 with the result */
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}
388
389 int
390 siop_ppr_neg(siop_cmd)
391 struct siop_common_cmd *siop_cmd;
392 {
393 struct siop_common_softc *sc = siop_cmd->siop_sc;
394 struct siop_common_target *siop_target = siop_cmd->siop_target;
395 int target = siop_cmd->xs->xs_periph->periph_target;
396 struct siop_common_xfer *tables = siop_cmd->siop_tables;
397 int sync, offset, options, scf = 0;
398 int i;
399
400 #ifdef DEBUG_NEG
401 printf("%s: anserw on ppr negotiation:", sc->sc_dev.dv_xname);
402 for (i = 0; i < 8; i++)
403 printf(" 0x%x", tables->msg_in[i]);
404 printf("\n");
405 #endif
406
407 if (siop_target->status == TARST_PPR_NEG) {
408 /* we initiated PPR negotiation */
409 sync = tables->msg_in[3];
410 offset = tables->msg_in[5];
411 options = tables->msg_in[7];
412 if (options != MSG_EXT_PPR_DT) {
413 /* should't happen */
414 printf("%s: ppr negotiation for target %d: "
415 "no DT option\n", sc->sc_dev.dv_xname, target);
416 siop_target->status = TARST_ASYNC;
417 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
418 siop_target->offset = 0;
419 siop_target->period = 0;
420 goto reject;
421 }
422
423 if (offset > sc->maxoff || sync < sc->dt_minsync ||
424 sync > sc->dt_maxsync) {
425 printf("%s: ppr negotiation for target %d: "
426 "offset (%d) or sync (%d) out of range\n",
427 sc->sc_dev.dv_xname, target, offset, sync);
428 /* should not happen */
429 siop_target->offset = 0;
430 siop_target->period = 0;
431 goto reject;
432 } else {
433 for (i = 0; i <
434 sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
435 i++) {
436 if (sc->clock_period != dt_scf_period[i].clock)
437 continue;
438 if (dt_scf_period[i].period == sync) {
439 /* ok, found it. we now are sync. */
440 siop_target->offset = offset;
441 siop_target->period = sync;
442 scf = dt_scf_period[i].scf;
443 siop_target->flags |= TARF_ISDT;
444 }
445 }
446 if ((siop_target->flags & TARF_ISDT) == 0) {
447 printf("%s: ppr negotiation for target %d: "
448 "sync (%d) incompatible with adapter\n",
449 sc->sc_dev.dv_xname, target, sync);
450 /*
451 * we didn't find it in our table, do async
452 * send reject msg, start SDTR/WDTR neg
453 */
454 siop_target->status = TARST_ASYNC;
455 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
456 siop_target->offset = 0;
457 siop_target->period = 0;
458 goto reject;
459 }
460 }
461 if (tables->msg_in[6] != 1) {
462 printf("%s: ppr negotiation for target %d: "
463 "transfer width (%d) incompatible with dt\n",
464 sc->sc_dev.dv_xname, target, tables->msg_in[6]);
465 /* DT mode can only be done with wide transfers */
466 siop_target->status = TARST_ASYNC;
467 goto reject;
468 }
469 siop_target->flags |= TARF_ISWIDE;
470 sc->targets[target]->id |= (SCNTL3_EWS << 24);
471 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
472 sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
473 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
474 sc->targets[target]->id |=
475 (siop_target->offset & SXFER_MO_MASK) << 8;
476 sc->targets[target]->id &= ~0xff;
477 sc->targets[target]->id |= SCNTL4_U3EN;
478 siop_target->status = TARST_OK;
479 siop_update_xfer_mode(sc, target);
480 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
481 (sc->targets[target]->id >> 24) & 0xff);
482 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
483 (sc->targets[target]->id >> 8) & 0xff);
484 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
485 sc->targets[target]->id & 0xff);
486 return SIOP_NEG_ACK;
487 } else {
488 /* target initiated PPR negotiation, shouldn't happen */
489 printf("%s: rejecting invalid PPR negotiation from "
490 "target %d\n", sc->sc_dev.dv_xname, target);
491 reject:
492 tables->t_msgout.count= htole32(1);
493 tables->msg_out[0] = MSG_MESSAGE_REJECT;
494 return SIOP_NEG_MSGOUT;
495 }
496 }
497
/*
 * siop_sdtr_neg: handle an incoming SDTR (synchronous negotiation)
 * extended message, either answering our own SDTR or initiated by the
 * target.  Updates the per-target id word and the SCNTL3/SXFER
 * registers.  Returns SIOP_NEG_MSGOUT if a reply (SDTR counter-offer or
 * MESSAGE REJECT) was built in msg_out, SIOP_NEG_ACK otherwise.
 */
int
siop_sdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];	/* transfer period factor */
	offset = tables->msg_in[4];	/* REQ/ACK offset */

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
			sync > sc->st_maxsync)
			goto reject;
		/* look for the clock conversion factor for this period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count= htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		/* clamp the target's request to what we can do */
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				/* answer with our (possibly clamped) terms */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		/* no usable sync agreement: fall back to asynchronous */
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	/* propagate the new id word to the script table and the chip */
	tables->id = htole32(sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}
627
628 void
629 siop_sdtr_msg(siop_cmd, offset, ssync, soff)
630 struct siop_common_cmd *siop_cmd;
631 int offset;
632 int ssync, soff;
633 {
634 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
635 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
636 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
637 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
638 siop_cmd->siop_tables->msg_out[offset + 4] = soff;
639 siop_cmd->siop_tables->t_msgout.count =
640 htole32(offset + MSG_EXT_SDTR_LEN + 2);
641 }
642
643 void
644 siop_wdtr_msg(siop_cmd, offset, wide)
645 struct siop_common_cmd *siop_cmd;
646 int offset;
647 {
648 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
649 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
650 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
651 siop_cmd->siop_tables->msg_out[offset + 3] = wide;
652 siop_cmd->siop_tables->t_msgout.count =
653 htole32(offset + MSG_EXT_WDTR_LEN + 2);
654 }
655
656 void
657 siop_ppr_msg(siop_cmd, offset, ssync, soff)
658 struct siop_common_cmd *siop_cmd;
659 int offset;
660 int ssync, soff;
661 {
662 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
663 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
664 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
665 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
666 siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
667 siop_cmd->siop_tables->msg_out[offset + 5] = soff;
668 siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
669 siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
670 siop_cmd->siop_tables->t_msgout.count =
671 htole32(offset + MSG_EXT_PPR_LEN + 2);
672 }
673
/*
 * siop_minphys: adapter minphys hook; just apply the generic system
 * transfer-size clamp, the chip needs no additional restriction.
 */
void
siop_minphys(bp)
	struct buf *bp;
{
	minphys(bp);
}
680
681 int
682 siop_ioctl(chan, cmd, arg, flag, p)
683 struct scsipi_channel *chan;
684 u_long cmd;
685 caddr_t arg;
686 int flag;
687 struct proc *p;
688 {
689 struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;
690 u_int8_t scntl1;
691 int s;
692
693 switch (cmd) {
694 case SCBUSIORESET:
695 s = splbio();
696 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
697 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
698 scntl1 | SCNTL1_RST);
699 /* minimum 25 us, more time won't hurt */
700 delay(100);
701 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
702 splx(s);
703 return (0);
704 default:
705 return (ENOTTY);
706 }
707 }
708
/*
 * siop_sdp: save-data-pointer handling.  Recompute the residual count
 * of the scatter/gather entry the script was working on (compensating,
 * for writes, for data still sitting in the chip's DMA and SCSI FIFOs)
 * and patch the entry so the transfer can later be resumed there.
 * The FIFO-residue arithmetic follows the chip documentation; the exact
 * read order matters, do not reorder.
 */
void
siop_sdp(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	/* save data pointer. Handle async only for now */
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table to patch */

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */
	/* the script keeps the current S/G index in SCRATCHA1 */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		printf("%s: bad offset in siop_sdp (%d)\n",
		    sc->sc_dev.dv_xname, offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("sdp: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	/* DBC holds the (24-bit) count not yet transferred by the DMA */
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			/* chip reports the FIFO byte count directly */
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account stale data in FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				/* large FIFO: 10-bit count, high bits in CTEST5 */
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		/* bytes still latched in the SCSI output registers */
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			/* second byte lane on a wide bus */
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	/* advance addr past what was done, leave dbc bytes to transfer */
	table->addr =
	    htole32(le32toh(table->addr) + le32toh(table->count) - dbc);
	table->count = htole32(dbc);
#ifdef DEBUG_DR
	printf("now count=%d addr=0x%x\n", table->count, table->addr);
#endif
}
775
776 void
777 siop_clearfifo(sc)
778 struct siop_common_softc *sc;
779 {
780 int timeout = 0;
781 int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
782
783 #ifdef DEBUG_INTR
784 printf("DMA fifo not empty !\n");
785 #endif
786 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
787 ctest3 | CTEST3_CLF);
788 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
789 CTEST3_CLF) != 0) {
790 delay(1);
791 if (++timeout > 1000) {
792 printf("clear fifo failed\n");
793 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
794 bus_space_read_1(sc->sc_rt, sc->sc_rh,
795 SIOP_CTEST3) & ~CTEST3_CLF);
796 return;
797 }
798 }
799 }
800
/*
 * siop_modechange: handle a SCSI bus mode change (SBMC) interrupt.
 * Per the datasheet, wait 100ms and re-read SIST1 until DIFFSENSE is
 * stable, then reprogram STEST2/STEST0 for the mode now sensed on the
 * bus (single-ended, HVD or LVD).  Returns 1 on success, 0 if the mode
 * is invalid or DIFFSENSE never stabilises.
 */
int
siop_modechange(sc)
	struct siop_common_softc *sc;
{
	int retry;
	int sist0, sist1, stest2, stest4;
	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		/* reading SIST0/SIST1 also clears pending SCSI interrupts */
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		stest4 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(stest4) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, stest4);
			return 0;
		}
		/* latch the sensed mode into STEST0 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST0,
		    stest4 >> 2);
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}
854
855 void
856 siop_resetbus(sc)
857 struct siop_common_softc *sc;
858 {
859 int scntl1;
860 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
861 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
862 scntl1 | SCNTL1_RST);
863 /* minimum 25 us, more time won't hurt */
864 delay(100);
865 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
866 }
867
868 void
869 siop_update_xfer_mode(sc, target)
870 struct siop_common_softc *sc;
871 int target;
872 {
873 struct siop_common_target *siop_target = sc->targets[target];
874 struct scsipi_xfer_mode xm;
875
876 xm.xm_target = target;
877 xm.xm_mode = 0;
878 xm.xm_period = 0;
879 xm.xm_offset = 0;
880
881 if (siop_target->flags & TARF_ISWIDE)
882 xm.xm_mode |= PERIPH_CAP_WIDE16;
883 if (siop_target->period) {
884 xm.xm_period = siop_target->period;
885 xm.xm_offset = siop_target->offset;
886 xm.xm_mode |= PERIPH_CAP_SYNC;
887 }
888 if (siop_target->flags & TARF_TAG)
889 xm.xm_mode |= PERIPH_CAP_TQING;
890 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
891 }
892