/*	$NetBSD: siop_common.c,v 1.28.4.5 2005/03/19 01:44:21 tron Exp $	*/

/*
 * Copyright (c) 2000, 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.28.4.5 2005/03/19 01:44:21 tron Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>

#include "opt_siop.h"

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG
int
siop_common_attach(sc)
	struct siop_common_softc *sc;
{
	int error, i;
	bus_dma_segment_t seg;
	int rseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	if ((sc->features & SF_CHIP_RAM) == 0) {
		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to allocate script DMA memory, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    (caddr_t *)&sc->sc_script,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			printf("%s: unable to map script DMA memory, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
		if (error) {
			printf("%s: unable to create script DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to load script DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		sc->sc_scriptaddr =
		    sc->sc_scriptdma->dm_segs[0].ds_addr;
		sc->ram_size = PAGE_SIZE;
	}
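
	/*
	 * Note (inferred, not verified here): with SF_CHIP_RAM the script
	 * runs from the chip's on-board RAM instead of host memory, so no
	 * DMA memory is allocated above; sc_scriptaddr, sc_ramt/sc_ramh and
	 * ram_size are expected to have been set up by the bus-specific
	 * attachment code before siop_common_attach() is called.
	 */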

	sc->sc_adapt.adapt_dev = &sc->sc_dev;
	sc->sc_adapt.adapt_nchannels = 1;
	sc->sc_adapt.adapt_openings = 0;
	sc->sc_adapt.adapt_ioctl = siop_ioctl;
	sc->sc_adapt.adapt_minphys = minphys;

	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
	sc->sc_chan.chan_adapter = &sc->sc_adapt;
	sc->sc_chan.chan_bustype = &scsi_bustype;
	sc->sc_chan.chan_channel = 0;
	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
	sc->sc_chan.chan_ntargets =
	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
	sc->sc_chan.chan_nluns = 8;
	sc->sc_chan.chan_id =
	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
	if (sc->sc_chan.chan_id == 0 ||
	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;

	for (i = 0; i < 16; i++)
		sc->targets[i] = NULL;

	/* find min/max sync period for this chip */
	sc->st_maxsync = 0;
	sc->dt_maxsync = 0;
	sc->st_minsync = 255;
	sc->dt_minsync = 255;
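	/*
	 * scf_period[] and dt_scf_period[] are scanned below as arrays of
	 * { clock, period, scf } entries (the field meaning is inferred from
	 * how they are used here and in the negotiation code): for the
	 * chip's SCSI clock period they give the achievable ST/DT sync
	 * periods and the matching SCNTL3 clock-conversion (SCF) divider.
	 */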
	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
		if (sc->clock_period != scf_period[i].clock)
			continue;
		if (sc->st_maxsync < scf_period[i].period)
			sc->st_maxsync = scf_period[i].period;
		if (sc->st_minsync > scf_period[i].period)
			sc->st_minsync = scf_period[i].period;
	}
	if (sc->st_maxsync == 255 || sc->st_minsync == 0)
		panic("siop: can't find my sync parameters\n");
	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
		if (sc->clock_period != dt_scf_period[i].clock)
			continue;
		if (sc->dt_maxsync < dt_scf_period[i].period)
			sc->dt_maxsync = dt_scf_period[i].period;
		if (sc->dt_minsync > dt_scf_period[i].period)
			sc->dt_minsync = dt_scf_period[i].period;
	}
	if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
		panic("siop: can't find my sync parameters\n");
	return 0;
}

void
siop_common_reset(sc)
	struct siop_common_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
	if (sc->features & SF_CHIP_AAIP)
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_AIPCNTL1, AIPCNTL1_DIS);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for the PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;
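	/*
	 * STEST4 reports the bus mode sensed by the chip (single-ended,
	 * LVD or HVD).  siop_setuptables() only starts a DT (PPR)
	 * negotiation when this says the bus is in LVD mode, and
	 * siop_modechange() refreshes the field when the chip flags a
	 * bus mode change.
	 */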

	/*
	 * initialise the RAM. Without this we may get SCSI gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}

/* prepare tables before sending a cmd */
void
siop_setuptables(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;

	siop_cmd->siop_tables->id = htole32(sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (it generates false SCSI gross
		 * errors).  The FreeBSD sym driver has comments about it, but
		 * their workaround (disable SCSI gross error reporting)
		 * doesn't work with my adapter.  So disable disconnect when
		 * not wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device\n");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag, not xs->xs_tag_id; the caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = htole32(msgoffset);
	if (sc->targets[target]->status == TARST_ASYNC) {
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
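	/*
	 * At this point msg_out[] holds the IDENTIFY byte, optionally
	 * followed by a two-byte queue tag message (tag type, tag number),
	 * and - for a target still in TARST_ASYNC state - one negotiation
	 * message (PPR, WDTR or SDTR) appended at msgoffset.  The replies
	 * then walk the target through TARST_PPR_NEG, TARST_WIDE_NEG or
	 * TARST_SYNC_NEG until it reaches TARST_OK.
	 */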
	siop_cmd->siop_tables->status =
	    htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */

	siop_cmd->siop_tables->cmd.count =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

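/*
 * The negotiation code below keeps the per-target transfer parameters
 * packed in sc->targets[target]->id, as the shifts and register writes
 * show: bits 31-24 hold the SCNTL3 value (wide enable, SCF divider,
 * ultra bit), bits 15-8 the SXFER value (sync offset), and bits 7-0 the
 * SCNTL4 value (Ultra3 enable).  The same word is stored little-endian
 * in the per-command table's id field for use by the SCRIPTS code.
 */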
int
siop_wdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
			/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than we can handle; shouldn't
			 * happen.  Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count = htole32(1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset the wide parameters, so fall back to async,
		 * but don't schedule a sync neg; the target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_ppr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer to PPR negotiation:", sc->sc_dev.dv_xname);
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_DT) {
			/* shouldn't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", sc->sc_dev.dv_xname, target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    sc->sc_dev.dv_xname, target, offset, sync);
			/* should not happen */
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			for (i = 0; i <
			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
			    i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we now are sync. */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    sc->sc_dev.dv_xname, target, sync);
				/*
				 * we didn't find it in our table; do async,
				 * send reject msg, start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    sc->sc_dev.dv_xname, target, tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			goto reject;
		}
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", sc->sc_dev.dv_xname, target);
reject:
		tables->t_msgout.count = htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_sdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = htole32(sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

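/*
 * The three helpers below build the extended negotiation messages in
 * msg_out[] starting at the given offset and set t_msgout.count
 * accordingly.  On the wire an extended message is MSG_EXTENDED, length,
 * code, arguments; an SDTR message, for example, is encoded as
 * 0x01 0x03 0x01 <period> <offset>.
 */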
void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    htole32(offset + MSG_EXT_SDTR_LEN + 2);
}

void
siop_wdtr_msg(siop_cmd, offset, wide)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int wide;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    htole32(offset + MSG_EXT_WDTR_LEN + 2);
}

void
siop_ppr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    htole32(offset + MSG_EXT_PPR_LEN + 2);
}

void
siop_minphys(bp)
	struct buf *bp;
{
	minphys(bp);
}

int
siop_ioctl(chan, cmd, arg, flag, p)
	struct scsipi_channel *chan;
	u_long cmd;
	caddr_t arg;
	int flag;
	struct proc *p;
{
	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;

	switch (cmd) {
	case SCBUSIORESET:
		/*
		 * abort the script. This will trigger an interrupt, which will
		 * trigger a bus reset.
		 * We can't safely trigger the reset here as we can't access
		 * the required register while the script is running.
		 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
		return (0);
	default:
		return (ENOTTY);
	}
}

void
siop_ma(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table with partial xfer */

	/*
	 * compute how much of the current table didn't get handled when
	 * a phase mismatch occurs
	 */
	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no valid data transfer */

	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		printf("%s: bad offset in siop_ma (%d)\n",
		    sc->sc_dev.dv_xname, offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
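	/*
	 * DBC now holds the number of bytes of the interrupted block move
	 * that the SCRIPTS processor has not yet transferred.  For data-out
	 * phases this undercounts the residue: bytes already fetched from
	 * memory may still sit in the DMA FIFO and in the SODL/SODR output
	 * latches without having reached the bus, so they are added back
	 * below before the FIFO is cleared.
	 */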
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account for stale data in the FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	siop_cmd->flags |= CMDFL_RESID;
	siop_cmd->resid = dbc;
}

void
siop_sdp(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	scr_table_t *table;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		scsipi_printaddr(siop_cmd->xs->xs_periph);
		printf(": offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid by the amount of data which
	 * has already been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let's see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove the transferred
	 * data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    htole32(le32toh(table->addr) +
		    le32toh(table->count) - siop_cmd->resid);
		table->count = htole32(siop_cmd->resid);
	}
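	/*
	 * For example, if the interrupted entry described 4096 bytes and
	 * siop_ma() computed a residue of 1024, the entry now starts 3072
	 * bytes further on and covers only the remaining 1024 bytes.
	 */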

	/*
	 * now we can remove the entries which have been transferred.
	 * We just move the entries with data left to the beginning of the
	 * tables
	 */
	memmove(&siop_cmd->siop_tables->data[0],
	    &siop_cmd->siop_tables->data[offset],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}

void
siop_update_resid(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    le32toh(siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -= le32toh(table->count) - siop_cmd->resid;
	}
}

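/*
 * Handle an IGNORE WIDE RESIDUE message.  A wide target sends this when
 * the last byte transferred on the 16-bit bus was not valid data (i.e.
 * the transfer length was odd), so the residue for the current table
 * entry has to be adjusted by one byte.
 */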
int
siop_iwr(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count = htole32(1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (le32toh(table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}

void
siop_clearfifo(sc)
	struct siop_common_softc *sc;
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA fifo not empty !\n");
#endif
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

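/*
 * Called after the chip reports a SCSI Bus Mode Change (SBMC) interrupt:
 * wait for DIFFSENSE to stabilise, then reprogram STEST2_DIF to match the
 * mode (SE, LVD or HVD) now reported by STEST4.
 */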
int
siop_modechange(sc)
	struct siop_common_softc *sc;
{
	int retry;
	int sist0, sist1, stest2;
	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}

void
siop_resetbus(sc)
	struct siop_common_softc *sc;
{
	int scntl1;
	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

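/*
 * Report the parameters negotiated for a target (wide, sync period and
 * offset, tagged queuing) to the scsipi layer through an
 * ASYNC_EVENT_XFER_MODE event.
 */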
void
siop_update_xfer_mode(sc, target)
	struct siop_common_softc *sc;
	int target;
{
	struct siop_common_target *siop_target = sc->targets[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (siop_target->flags & TARF_ISWIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;
	if (siop_target->period) {
		xm.xm_period = siop_target->period;
		xm.xm_offset = siop_target->offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}
	if (siop_target->flags & TARF_TAG) {
		/*
		 * 1010 workaround: can't do disconnect if not wide,
		 * so can't do tag
		 */
		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
		    (sc->targets[target]->flags & TARF_ISWIDE))
			xm.xm_mode |= PERIPH_CAP_TQING;
	}

	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
}