/*	$NetBSD: siop_common.c,v 1.47 2009/03/14 15:36:17 dsl Exp $	*/

/*
 * Copyright (c) 2000, 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.47 2009/03/14 15:36:17 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>

#include "opt_siop.h"

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG
int
siop_common_attach(struct siop_common_softc *sc)
{
	int error, i;
	bus_dma_segment_t seg;
	int rseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	if ((sc->features & SF_CHIP_RAM) == 0) {
		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to allocate script DMA memory, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    (void **)&sc->sc_script,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			aprint_error_dev(&sc->sc_dev, "unable to map script DMA memory, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
		if (error) {
			aprint_error_dev(&sc->sc_dev, "unable to create script DMA map, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(&sc->sc_dev, "unable to load script DMA map, "
			    "error = %d\n", error);
			return error;
		}
		sc->sc_scriptaddr =
		    sc->sc_scriptdma->dm_segs[0].ds_addr;
		sc->ram_size = PAGE_SIZE;
	}

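	/*
	 * set up the scsipi adapter and channel; openings are left at 0
	 * here and added later as command descriptors are allocated
	 * (hence SCSIPI_CHAN_CANGROW).
	 */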
	sc->sc_adapt.adapt_dev = &sc->sc_dev;
	sc->sc_adapt.adapt_nchannels = 1;
	sc->sc_adapt.adapt_openings = 0;
	sc->sc_adapt.adapt_ioctl = siop_ioctl;
	sc->sc_adapt.adapt_minphys = minphys;

	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
	sc->sc_chan.chan_adapter = &sc->sc_adapt;
	sc->sc_chan.chan_bustype = &scsi_bustype;
	sc->sc_chan.chan_channel = 0;
	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
	sc->sc_chan.chan_ntargets =
	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
	sc->sc_chan.chan_nluns = 8;
	sc->sc_chan.chan_id =
	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
	if (sc->sc_chan.chan_id == 0 ||
	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;

	for (i = 0; i < 16; i++)
		sc->targets[i] = NULL;

	/* find min/max sync period for this chip */
	sc->st_maxsync = 0;
	sc->dt_maxsync = 0;
	sc->st_minsync = 255;
	sc->dt_minsync = 255;
	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
		if (sc->clock_period != scf_period[i].clock)
			continue;
		if (sc->st_maxsync < scf_period[i].period)
			sc->st_maxsync = scf_period[i].period;
		if (sc->st_minsync > scf_period[i].period)
			sc->st_minsync = scf_period[i].period;
	}
	if (sc->st_maxsync == 0 || sc->st_minsync == 255)
		panic("siop: can't find my sync parameters");
	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
		if (sc->clock_period != dt_scf_period[i].clock)
			continue;
		if (sc->dt_maxsync < dt_scf_period[i].period)
			sc->dt_maxsync = dt_scf_period[i].period;
		if (sc->dt_minsync > dt_scf_period[i].period)
			sc->dt_minsync = dt_scf_period[i].period;
	}
	if (sc->dt_maxsync == 0 || sc->dt_minsync == 255)
		panic("siop: can't find my sync parameters");
	return 0;
}

void
siop_common_reset(struct siop_common_softc *sc)
{
	u_int32_t stest1, stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
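	/* set the selection time-out (STIME0 SEL field) */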
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
	if (sc->features & SF_CHIP_AAIP)
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_AIPCNTL1, AIPCNTL1_DIS);
	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for the PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}

	if (sc->features & SF_CHIP_USEPCIC) {
		stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1);
		stest1 |= STEST1_SCLK;
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1);
	}

	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;

	/*
	 * initialise the RAM. Without this we may get scsi gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}

/* prepare tables before sending a cmd */
void
siop_setuptables(struct siop_common_cmd *siop_cmd)
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;

	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (it generates false SCSI gross
		 * errors). The FreeBSD sym driver has comments about it but
		 * their workaround (disable SCSI gross error reporting)
		 * doesn't work with my adapter. So disable disconnect when
		 * not wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
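	/*
	 * if we haven't negotiated with this target yet, append the
	 * appropriate negotiation message (PPR, WDTR or SDTR) after the
	 * identify/tag bytes set up above.
	 */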
	if (sc->targets[target]->status == TARST_ASYNC) {
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

	siop_cmd->siop_tables->cmd.count =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    siop_htoc32(sc,
			    siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    siop_htoc32(sc,
			    siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

int
siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

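	/* msg_in[3] is the transfer width exponent from the WDTR message */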
	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
		/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", device_xname(&sc->sc_dev), target,
			    tables->msg_in[3]);
			tables->t_msgout.count = siop_htoc32(sc, 1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_ppr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer on ppr negotiation:", device_xname(&sc->sc_dev));
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_DT) {
			/* shouldn't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", device_xname(&sc->sc_dev), target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    device_xname(&sc->sc_dev), target, offset, sync);
			/* should not happen */
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			for (i = 0; i <
			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
			    i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we are now in sync. */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    device_xname(&sc->sc_dev), target, sync);
				/*
				 * we didn't find it in our table, go async,
				 * send a reject msg and start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    device_xname(&sc->sc_dev), target, tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			goto reject;
		}
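		/*
		 * the per-target id word packs SCNTL3 in bits 31-24, SXFER
		 * in bits 15-8 and SCNTL4 in bits 7-0; update all three for
		 * DT transfers and copy them to the chip registers below.
		 */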
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", device_xname(&sc->sc_dev), target);
reject:
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we are now in sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we are now in sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = siop_htoc32(sc, sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

void
siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync,
    int soff)
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
}

void
siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
}

void
siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync,
    int soff)
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
}

void
siop_minphys(struct buf *bp)
{
	minphys(bp);
}

int
siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIORESET:
		/*
		 * abort the script. This will trigger an interrupt, which will
		 * trigger a bus reset.
		 * We can't safely trigger the reset here as we can't access
		 * the required register while the script is running.
		 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
		return (0);
	default:
		return (ENOTTY);
	}
}

void
siop_ma(struct siop_common_cmd *siop_cmd)
{
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table with partial xfer */

	/*
	 * compute how much of the current table didn't get handled when
	 * a phase mismatch occurs
	 */
	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no valid data transfer */

	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		aprint_error_dev(&sc->sc_dev, "bad offset in siop_ma (%d)\n",
		    offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
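	/* DBC holds the byte count remaining in the interrupted block move */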
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
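		/*
		 * on a data out, bytes may still sit in the chip's FIFOs;
		 * chips with a DFBC register report that count directly,
		 * otherwise it is derived from the DFIFO/DBC counters below.
		 */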
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account for stale data in the FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	siop_cmd->flags |= CMDFL_RESID;
	siop_cmd->resid = dbc;
}

void
siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		scsipi_printaddr(siop_cmd->xs->xs_periph);
		printf(": offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid by the amount of data which
	 * has already been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let's see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove the transferred
	 * data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
		table->count = siop_htoc32(sc, siop_cmd->resid);
	}

	/*
	 * now we can remove the entries which have been fully transferred.
	 * We just move the entries with data left to the beginning of the
	 * tables
	 */
	memmove(&siop_cmd->siop_tables->data[0],
	    &siop_cmd->siop_tables->data[offset],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}

void
siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
	}
}

int
siop_iwr(struct siop_common_cmd *siop_cmd)
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (siop_ctoh32(sc, table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}

void
siop_clearfifo(struct siop_common_softc *sc)
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA FIFO not empty!\n");
#endif
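	/*
	 * CTEST3_CLF requests a DMA FIFO flush; the chip clears the bit
	 * itself once the flush is done, which is what the loop below
	 * polls for.
	 */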
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

int
siop_modechange(struct siop_common_softc *sc)
{
	int retry;
	int sist0, sist1, stest2;

	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    device_xname(&sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    device_xname(&sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    device_xname(&sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			aprint_error_dev(&sc->sc_dev, "invalid SCSI mode 0x%x\n",
			    sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    device_xname(&sc->sc_dev));
	return 0;
}

void
siop_resetbus(struct siop_common_softc *sc)
{
	int scntl1;

	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

void
siop_update_xfer_mode(struct siop_common_softc *sc, int target)
{
	struct siop_common_target *siop_target = sc->targets[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (siop_target->flags & TARF_ISWIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;
	if (siop_target->period) {
		xm.xm_period = siop_target->period;
		xm.xm_offset = siop_target->offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}
	if (siop_target->flags & TARF_TAG) {
		/*
		 * 1010 workaround: can't do disconnect if not wide,
		 * so can't do tag
		 */
		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
		    (sc->targets[target]->flags & TARF_ISWIDE))
			xm.xm_mode |= PERIPH_CAP_TQING;
	}

	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
}