/* $NetBSD: siop.c,v 1.58 2002/04/23 17:33:27 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.58 2002/04/23 17:33:27 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
81
/* Number of scheduler slots (needs to match the script) */
83 #define SIOP_NSLOTS 40
84
85 void siop_reset __P((struct siop_softc *));
86 void siop_handle_reset __P((struct siop_softc *));
87 int siop_handle_qtag_reject __P((struct siop_cmd *));
88 void siop_scsicmd_end __P((struct siop_cmd *));
89 void siop_unqueue __P((struct siop_softc *, int, int));
90 static void siop_start __P((struct siop_softc *, struct siop_cmd *));
91 void siop_timeout __P((void *));
92 int siop_scsicmd __P((struct scsipi_xfer *));
93 void siop_scsipi_request __P((struct scsipi_channel *,
94 scsipi_adapter_req_t, void *));
95 void siop_dump_script __P((struct siop_softc *));
96 void siop_morecbd __P((struct siop_softc *));
97 struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
98 void siop_add_reselsw __P((struct siop_softc *, int));
99 void siop_update_scntl3 __P((struct siop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int siop_stat_intr = 0;
104 static int siop_stat_intr_shortxfer = 0;
105 static int siop_stat_intr_sdp = 0;
106 static int siop_stat_intr_done = 0;
107 static int siop_stat_intr_xferdisc = 0;
108 static int siop_stat_intr_lunresel = 0;
109 static int siop_stat_intr_qfull = 0;
110 void siop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
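/*
 * The SCRIPTS code lives either in on-chip RAM (SF_CHIP_RAM) or in a
 * DMA-able page of host memory; the helpers below hide that difference
 * for syncs, reads and writes of individual script words.
 */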
116 static __inline__ void siop_script_sync __P((struct siop_softc *, int));
117 static __inline__ void
118 siop_script_sync(sc, ops)
119 struct siop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
128 static __inline__ u_int32_t
129 siop_script_read(sc, offset)
130 struct siop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 siop_script_write(sc, offset, val)
145 struct siop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
156
157 void
158 siop_attach(sc)
159 struct siop_softc *sc;
160 {
161 if (siop_common_attach(&sc->sc_c) != 0)
162 return;
163
164 TAILQ_INIT(&sc->free_list);
165 TAILQ_INIT(&sc->cmds);
166 TAILQ_INIT(&sc->lunsw_list);
167 sc->sc_currschedslot = 0;
168 #ifdef SIOP_DEBUG
169 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
170 sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
171 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
172 #endif
173
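/* tag 0 is reserved for untagged commands, so at most SIOP_NTAG - 1 openings */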
174 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
175 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
176
177 /* Do a bus reset, so that devices fall back to narrow/async */
178 siop_resetbus(&sc->sc_c);
179 /*
180 * siop_reset() will reset the chip, thus clearing pending interrupts
181 */
182 siop_reset(sc);
183 #ifdef DUMP_SCRIPT
184 siop_dump_script(sc);
185 #endif
186
187 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
188 }
189
190 void
191 siop_reset(sc)
192 struct siop_softc *sc;
193 {
194 int i, j;
195 struct siop_lunsw *lunsw;
196
197 siop_common_reset(&sc->sc_c);
198
199 /* copy and patch the script */
200 if (sc->sc_c.features & SF_CHIP_RAM) {
201 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
202 siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
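/* patch the abs_msgin entries so they point at the msgin_space area of the script */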
203 for (j = 0; j <
204 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
205 j++) {
206 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
207 E_abs_msgin_Used[j] * 4,
208 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
209 }
210 if (sc->sc_c.features & SF_CHIP_LED0) {
211 bus_space_write_region_4(sc->sc_c.sc_ramt,
212 sc->sc_c.sc_ramh,
213 Ent_led_on1, siop_led_on,
214 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
215 bus_space_write_region_4(sc->sc_c.sc_ramt,
216 sc->sc_c.sc_ramh,
217 Ent_led_on2, siop_led_on,
218 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
219 bus_space_write_region_4(sc->sc_c.sc_ramt,
220 sc->sc_c.sc_ramh,
221 Ent_led_off, siop_led_off,
222 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
223 }
224 } else {
225 for (j = 0;
226 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
227 sc->sc_c.sc_script[j] = htole32(siop_script[j]);
228 }
229 for (j = 0; j <
230 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
231 j++) {
232 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
233 htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
234 }
235 if (sc->sc_c.features & SF_CHIP_LED0) {
236 for (j = 0; j < (sizeof(siop_led_on) /
237 sizeof(siop_led_on[0])); j++)
238 sc->sc_c.sc_script[
239 Ent_led_on1 / sizeof(siop_led_on[0]) + j
240 ] = htole32(siop_led_on[j]);
241 for (j = 0; j < (sizeof(siop_led_on) /
242 sizeof(siop_led_on[0])); j++)
243 sc->sc_c.sc_script[
244 Ent_led_on2 / sizeof(siop_led_on[0]) + j
245 ] = htole32(siop_led_on[j]);
246 for (j = 0; j < (sizeof(siop_led_off) /
247 sizeof(siop_led_off[0])); j++)
248 sc->sc_c.sc_script[
249 Ent_led_off / sizeof(siop_led_off[0]) + j
250 ] = htole32(siop_led_off[j]);
251 }
252 }
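/*
 * The area between script_free_lo and script_free_hi (in 32-bit words) is
 * handed out later: lun switch entries are allocated from the bottom
 * (script_free_lo) and tag switch entries from the top (script_free_hi).
 */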
253 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
254 sc->script_free_hi = sc->sc_c.ram_size / 4;
255
256 /* free used and unused lun switches */
257 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
258 #ifdef SIOP_DEBUG
259 printf("%s: free lunsw at offset %d\n",
260 sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
261 #endif
262 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
263 free(lunsw, M_DEVBUF);
264 }
265 TAILQ_INIT(&sc->lunsw_list);
266 /* restore reselect switch */
267 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
268 struct siop_target *target;
269 if (sc->sc_c.targets[i] == NULL)
270 continue;
271 #ifdef SIOP_DEBUG
272 printf("%s: restore sw for target %d\n",
273 sc->sc_c.sc_dev.dv_xname, i);
274 #endif
275 target = (struct siop_target *)sc->sc_c.targets[i];
276 free(target->lunsw, M_DEVBUF);
277 target->lunsw = siop_get_lunsw(sc);
278 if (target->lunsw == NULL) {
279 printf("%s: can't alloc lunsw for target %d\n",
280 sc->sc_c.sc_dev.dv_xname, i);
281 break;
282 }
283 siop_add_reselsw(sc, i);
284 }
285
286 /* start script */
287 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
288 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
289 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
290 }
291 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
292 sc->sc_c.sc_scriptaddr + Ent_reselect);
293 }
294
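/*
 * CALL_SCRIPT(ent): (re)start the script processor at the given entry point;
 * the "#if 0" variant also logs the DSA and the entry address.
 */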
295 #if 0
296 #define CALL_SCRIPT(ent) do {\
297 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
298 siop_cmd->cmd_c.dsa, \
299 sc->sc_c.sc_scriptaddr + ent); \
300 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
301 } while (0)
302 #else
303 #define CALL_SCRIPT(ent) do {\
304 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
305 } while (0)
306 #endif
307
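/*
 * Interrupt handler: find the siop_cmd the chip was working on (via the DSA
 * register), then sort out DMA interrupts (DSTAT), SCSI interrupts (SIST0/1)
 * and script-generated interrupts (DSPS codes).
 */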
308 int
309 siop_intr(v)
310 void *v;
311 {
312 struct siop_softc *sc = v;
313 struct siop_target *siop_target;
314 struct siop_cmd *siop_cmd;
315 struct siop_lun *siop_lun;
316 struct scsipi_xfer *xs;
317 int istat, sist, sstat1, dstat;
318 u_int32_t irqcode;
319 int need_reset = 0;
320 int offset, target, lun, tag;
321 bus_addr_t dsa;
322 struct siop_cbd *cbdp;
323 int freetarget = 0;
324 int restart = 0;
325
326 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
327 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
328 return 0;
329 INCSTAT(siop_stat_intr);
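/* INTF ("interrupt on the fly"): acknowledge it by writing the bit back to ISTAT */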
330 if (istat & ISTAT_INTF) {
331 printf("INTRF\n");
332 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
333 SIOP_ISTAT, ISTAT_INTF);
334 }
335 /* use DSA to find the current siop_cmd */
336 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
337 for (cbdp = TAILQ_FIRST(&sc->cmds); cbdp != NULL;
338 cbdp = TAILQ_NEXT(cbdp, next)) {
339 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
340 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
341 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
342 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
343 siop_table_sync(siop_cmd,
344 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
345 break;
346 }
347 }
348 if (cbdp == NULL) {
349 siop_cmd = NULL;
350 }
351 if (siop_cmd) {
352 xs = siop_cmd->cmd_c.xs;
353 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
354 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
355 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
356 tag = siop_cmd->cmd_c.tag;
357 siop_lun = siop_target->siop_lun[lun];
358 #ifdef DIAGNOSTIC
359 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
360 printf("siop_cmd (lun %d) for DSA 0x%x "
361 "not active (%d)\n", lun, (u_int)dsa,
362 siop_cmd->cmd_c.status);
363 xs = NULL;
364 siop_target = NULL;
365 target = -1;
366 lun = -1;
367 tag = -1;
368 siop_lun = NULL;
369 siop_cmd = NULL;
370 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
371 printf("siop_cmd (lun %d tag %d) not in siop_lun "
372 "active (%p != %p)\n", lun, tag, siop_cmd,
373 siop_lun->siop_tag[tag].active);
374 }
375 #endif
376 } else {
377 xs = NULL;
378 siop_target = NULL;
379 target = -1;
380 lun = -1;
381 tag = -1;
382 siop_lun = NULL;
383 }
384 if (istat & ISTAT_DIP) {
385 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
386 SIOP_DSTAT);
387 if (dstat & DSTAT_SSI) {
printf("single step dsp 0x%08x dsa 0x%08x\n",
389 (int)(bus_space_read_4(sc->sc_c.sc_rt,
390 sc->sc_c.sc_rh, SIOP_DSP) -
391 sc->sc_c.sc_scriptaddr),
392 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
393 SIOP_DSA));
394 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
395 (istat & ISTAT_SIP) == 0) {
396 bus_space_write_1(sc->sc_c.sc_rt,
397 sc->sc_c.sc_rh, SIOP_DCNTL,
398 bus_space_read_1(sc->sc_c.sc_rt,
399 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
400 }
401 return 1;
402 }
403 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
404 printf("DMA IRQ:");
405 if (dstat & DSTAT_IID)
406 printf(" Illegal instruction");
407 if (dstat & DSTAT_ABRT)
408 printf(" abort");
409 if (dstat & DSTAT_BF)
410 printf(" bus fault");
411 if (dstat & DSTAT_MDPE)
412 printf(" parity");
413 if (dstat & DSTAT_DFE)
414 printf(" dma fifo empty");
415 printf(", DSP=0x%x DSA=0x%x: ",
416 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
417 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
418 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
419 if (siop_cmd)
420 printf("last msg_in=0x%x status=0x%x\n",
421 siop_cmd->cmd_tables->msg_in[0],
422 le32toh(siop_cmd->cmd_tables->status));
423 else
424 printf("%s: current DSA invalid\n",
425 sc->sc_c.sc_dev.dv_xname);
426 need_reset = 1;
427 }
428 }
429 if (istat & ISTAT_SIP) {
430 if (istat & ISTAT_DIP)
431 delay(10);
/*
 * Can't read SIST0 and SIST1 independently without inserting a delay
 * between the reads; a single 16-bit read gets both, so the SIST1 bits
 * end up in the high byte (hence the "<< 8" tests below).
 */
436 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
437 SIOP_SIST0);
438 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
439 SIOP_SSTAT1);
440 #ifdef SIOP_DEBUG_INTR
441 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
442 "DSA=0x%x DSP=0x%lx\n", sist,
443 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 SIOP_SSTAT1),
445 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
446 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
447 SIOP_DSP) -
448 sc->sc_c.sc_scriptaddr));
449 #endif
450 if (sist & SIST0_RST) {
451 siop_handle_reset(sc);
452 /* no table to flush here */
453 return 1;
454 }
455 if (sist & SIST0_SGE) {
456 if (siop_cmd)
457 scsipi_printaddr(xs->xs_periph);
458 else
459 printf("%s:", sc->sc_c.sc_dev.dv_xname);
460 printf("scsi gross error\n");
461 goto reset;
462 }
463 if ((sist & SIST0_MA) && need_reset == 0) {
464 if (siop_cmd) {
465 int scratcha0;
466 dstat = bus_space_read_1(sc->sc_c.sc_rt,
467 sc->sc_c.sc_rh, SIOP_DSTAT);
468 /*
469 * first restore DSA, in case we were in a S/G
470 * operation.
471 */
472 bus_space_write_4(sc->sc_c.sc_rt,
473 sc->sc_c.sc_rh,
474 SIOP_DSA, siop_cmd->cmd_c.dsa);
475 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
476 sc->sc_c.sc_rh, SIOP_SCRATCHA);
477 switch (sstat1 & SSTAT1_PHASE_MASK) {
478 case SSTAT1_PHASE_STATUS:
/*
 * The previous phase may be aborted for any reason (for example,
 * the target has less data to transfer than requested). Just go
 * to status; the command should terminate.
 */
485 INCSTAT(siop_stat_intr_shortxfer);
486 if ((dstat & DSTAT_DFE) == 0)
487 siop_clearfifo(&sc->sc_c);
488 /* no table to flush here */
489 CALL_SCRIPT(Ent_status);
490 return 1;
491 case SSTAT1_PHASE_MSGIN:
/*
 * The target may be ready to disconnect.
 * Save data pointers just in case.
 */
496 INCSTAT(siop_stat_intr_xferdisc);
497 if (scratcha0 & A_flag_data)
498 siop_sdp(&siop_cmd->cmd_c);
499 else if ((dstat & DSTAT_DFE) == 0)
500 siop_clearfifo(&sc->sc_c);
501 bus_space_write_1(sc->sc_c.sc_rt,
502 sc->sc_c.sc_rh, SIOP_SCRATCHA,
503 scratcha0 & ~A_flag_data);
504 siop_table_sync(siop_cmd,
505 BUS_DMASYNC_PREREAD |
506 BUS_DMASYNC_PREWRITE);
507 CALL_SCRIPT(Ent_msgin);
508 return 1;
509 }
510 printf("%s: unexpected phase mismatch %d\n",
511 sc->sc_c.sc_dev.dv_xname,
512 sstat1 & SSTAT1_PHASE_MASK);
513 } else {
514 printf("%s: phase mismatch without command\n",
515 sc->sc_c.sc_dev.dv_xname);
516 }
517 need_reset = 1;
518 }
519 if (sist & SIST0_PAR) {
520 /* parity error, reset */
521 if (siop_cmd)
522 scsipi_printaddr(xs->xs_periph);
523 else
524 printf("%s:", sc->sc_c.sc_dev.dv_xname);
525 printf("parity error\n");
526 goto reset;
527 }
528 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
529 /* selection time out, assume there's no device here */
530 if (siop_cmd) {
531 siop_cmd->cmd_c.status = CMDST_DONE;
532 xs->error = XS_SELTIMEOUT;
533 freetarget = 1;
534 goto end;
535 } else {
536 printf("%s: selection timeout without "
537 "command\n", sc->sc_c.sc_dev.dv_xname);
538 need_reset = 1;
539 }
540 }
541 if (sist & SIST0_UDC) {
542 /*
543 * unexpected disconnect. Usually the target signals
544 * a fatal condition this way. Attempt to get sense.
545 */
546 if (siop_cmd) {
547 siop_cmd->cmd_tables->status =
548 htole32(SCSI_CHECK);
549 goto end;
550 }
551 printf("%s: unexpected disconnect without "
552 "command\n", sc->sc_c.sc_dev.dv_xname);
553 goto reset;
554 }
555 if (sist & (SIST1_SBMC << 8)) {
556 /* SCSI bus mode change */
557 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
558 goto reset;
559 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
560 /*
561 * we have a script interrupt, it will
562 * restart the script.
563 */
564 goto scintr;
565 }
/*
 * else we have to restart it ourselves, at the
 * interrupted instruction.
 */
570 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
571 SIOP_DSP,
572 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
573 SIOP_DSP) - 8);
574 return 1;
575 }
/* Else it's an unhandled exception (for now). */
577 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
578 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
579 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
580 SIOP_SSTAT1),
581 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
582 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
583 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
584 if (siop_cmd) {
585 siop_cmd->cmd_c.status = CMDST_DONE;
586 xs->error = XS_SELTIMEOUT;
587 goto end;
588 }
589 need_reset = 1;
590 }
591 if (need_reset) {
592 reset:
593 /* fatal error, reset the bus */
594 siop_resetbus(&sc->sc_c);
595 /* no table to flush here */
596 return 1;
597 }
598
599 scintr:
600 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
601 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
602 SIOP_DSPS);
603 #ifdef SIOP_DEBUG_INTR
604 printf("script interrupt 0x%x\n", irqcode);
605 #endif
/*
 * A missing or inactive command is only valid for a reselect
 * interrupt (IRQ codes with bit 7 set).
 */
610 if ((irqcode & 0x80) == 0) {
611 if (siop_cmd == NULL) {
612 printf(
613 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
614 sc->sc_c.sc_dev.dv_xname, irqcode);
615 goto reset;
616 }
617 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
618 printf("%s: command with invalid status "
619 "(IRQ code 0x%x current status %d) !\n",
620 sc->sc_c.sc_dev.dv_xname,
621 irqcode, siop_cmd->cmd_c.status);
622 xs = NULL;
623 }
624 }
625 switch(irqcode) {
626 case A_int_err:
627 printf("error, DSP=0x%x\n",
628 (int)(bus_space_read_4(sc->sc_c.sc_rt,
629 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
630 if (xs) {
631 xs->error = XS_SELTIMEOUT;
632 goto end;
633 } else {
634 goto reset;
635 }
636 case A_int_reseltarg:
637 printf("%s: reselect with invalid target\n",
638 sc->sc_c.sc_dev.dv_xname);
639 goto reset;
640 case A_int_resellun:
641 INCSTAT(siop_stat_intr_lunresel);
642 target = bus_space_read_1(sc->sc_c.sc_rt,
643 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
644 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
645 SIOP_SCRATCHA + 1);
646 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
647 SIOP_SCRATCHA + 2);
648 siop_target =
649 (struct siop_target *)sc->sc_c.targets[target];
650 if (siop_target == NULL) {
651 printf("%s: reselect with invalid target %d\n",
652 sc->sc_c.sc_dev.dv_xname, target);
653 goto reset;
654 }
655 siop_lun = siop_target->siop_lun[lun];
656 if (siop_lun == NULL) {
657 printf("%s: target %d reselect with invalid "
658 "lun %d\n", sc->sc_c.sc_dev.dv_xname,
659 target, lun);
660 goto reset;
661 }
662 if (siop_lun->siop_tag[tag].active == NULL) {
663 printf("%s: target %d lun %d tag %d reselect "
664 "without command\n",
665 sc->sc_c.sc_dev.dv_xname,
666 target, lun, tag);
667 goto reset;
668 }
669 siop_cmd = siop_lun->siop_tag[tag].active;
670 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
671 SIOP_DSP, siop_cmd->cmd_c.dsa +
672 sizeof(struct siop_common_xfer) +
673 Ent_ldsa_reload_dsa);
674 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
675 return 1;
676 case A_int_reseltag:
677 printf("%s: reselect with invalid tag\n",
678 sc->sc_c.sc_dev.dv_xname);
679 goto reset;
680 case A_int_msgin:
681 {
682 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
683 sc->sc_c.sc_rh, SIOP_SFBR);
684 if (msgin == MSG_MESSAGE_REJECT) {
685 int msg, extmsg;
686 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
/*
 * message was part of an identify +
 * something else. Identify shouldn't
 * have been rejected.
 */
692 msg =
693 siop_cmd->cmd_tables->msg_out[1];
694 extmsg =
695 siop_cmd->cmd_tables->msg_out[3];
696 } else {
697 msg = siop_cmd->cmd_tables->msg_out[0];
698 extmsg =
699 siop_cmd->cmd_tables->msg_out[2];
700 }
701 if (msg == MSG_MESSAGE_REJECT) {
702 /* MSG_REJECT for a MSG_REJECT !*/
703 if (xs)
704 scsipi_printaddr(xs->xs_periph);
705 else
706 printf("%s: ",
707 sc->sc_c.sc_dev.dv_xname);
708 printf("our reject message was "
709 "rejected\n");
710 goto reset;
711 }
712 if (msg == MSG_EXTENDED &&
713 extmsg == MSG_EXT_WDTR) {
714 /* WDTR rejected, initiate sync */
715 if ((siop_target->target_c.flags &
716 TARF_SYNC) == 0) {
717 siop_target->target_c.status =
718 TARST_OK;
719 siop_update_xfer_mode(&sc->sc_c,
720 target);
721 /* no table to flush here */
722 CALL_SCRIPT(Ent_msgin_ack);
723 return 1;
724 }
725 siop_target->target_c.status =
726 TARST_SYNC_NEG;
727 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
728 sc->sc_c.st_minsync,
729 sc->sc_c.maxoff);
730 siop_table_sync(siop_cmd,
731 BUS_DMASYNC_PREREAD |
732 BUS_DMASYNC_PREWRITE);
733 CALL_SCRIPT(Ent_send_msgout);
734 return 1;
735 } else if (msg == MSG_EXTENDED &&
736 extmsg == MSG_EXT_SDTR) {
737 /* sync rejected */
738 siop_target->target_c.offset = 0;
739 siop_target->target_c.period = 0;
740 siop_target->target_c.status = TARST_OK;
741 siop_update_xfer_mode(&sc->sc_c,
742 target);
743 /* no table to flush here */
744 CALL_SCRIPT(Ent_msgin_ack);
745 return 1;
746 } else if (msg == MSG_SIMPLE_Q_TAG ||
747 msg == MSG_HEAD_OF_Q_TAG ||
748 msg == MSG_ORDERED_Q_TAG) {
749 if (siop_handle_qtag_reject(
750 siop_cmd) == -1)
751 goto reset;
752 CALL_SCRIPT(Ent_msgin_ack);
753 return 1;
754 }
755 if (xs)
756 scsipi_printaddr(xs->xs_periph);
757 else
758 printf("%s: ",
759 sc->sc_c.sc_dev.dv_xname);
760 if (msg == MSG_EXTENDED) {
761 printf("scsi message reject, extended "
762 "message sent was 0x%x\n", extmsg);
763 } else {
764 printf("scsi message reject, message "
765 "sent was 0x%x\n", msg);
766 }
767 /* no table to flush here */
768 CALL_SCRIPT(Ent_msgin_ack);
769 return 1;
770 }
771 if (xs)
772 scsipi_printaddr(xs->xs_periph);
773 else
774 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
775 printf("unhandled message 0x%x\n",
776 siop_cmd->cmd_tables->msg_in[0]);
777 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
778 siop_cmd->cmd_tables->t_msgout.count= htole32(1);
779 siop_table_sync(siop_cmd,
780 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
781 CALL_SCRIPT(Ent_send_msgout);
782 return 1;
783 }
784 case A_int_extmsgin:
785 #ifdef SIOP_DEBUG_INTR
786 printf("extended message: msg 0x%x len %d\n",
787 siop_cmd->cmd_tables->msg_in[2],
788 siop_cmd->cmd_tables->msg_in[1]);
789 #endif
790 if (siop_cmd->cmd_tables->msg_in[1] >
791 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
792 printf("%s: extended message too big (%d)\n",
793 sc->sc_c.sc_dev.dv_xname,
794 siop_cmd->cmd_tables->msg_in[1]);
795 siop_cmd->cmd_tables->t_extmsgdata.count =
796 htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
797 siop_table_sync(siop_cmd,
798 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
799 CALL_SCRIPT(Ent_get_extmsgdata);
800 return 1;
801 case A_int_extmsgdata:
802 #ifdef SIOP_DEBUG_INTR
803 {
804 int i;
805 printf("extended message: 0x%x, data:",
806 siop_cmd->cmd_tables->msg_in[2]);
807 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
808 i++)
809 printf(" 0x%x",
810 siop_cmd->cmd_tables->msg_in[i]);
811 printf("\n");
812 }
813 #endif
814 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
815 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
816 case SIOP_NEG_MSGOUT:
817 siop_update_scntl3(sc,
818 siop_cmd->cmd_c.siop_target);
819 siop_table_sync(siop_cmd,
820 BUS_DMASYNC_PREREAD |
821 BUS_DMASYNC_PREWRITE);
822 CALL_SCRIPT(Ent_send_msgout);
823 return(1);
824 case SIOP_NEG_ACK:
825 siop_update_scntl3(sc,
826 siop_cmd->cmd_c.siop_target);
827 CALL_SCRIPT(Ent_msgin_ack);
828 return(1);
829 default:
830 panic("invalid retval from "
831 "siop_wdtr_neg()");
832 }
833 return(1);
834 }
835 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
836 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
837 case SIOP_NEG_MSGOUT:
838 siop_update_scntl3(sc,
839 siop_cmd->cmd_c.siop_target);
840 siop_table_sync(siop_cmd,
841 BUS_DMASYNC_PREREAD |
842 BUS_DMASYNC_PREWRITE);
843 CALL_SCRIPT(Ent_send_msgout);
844 return(1);
845 case SIOP_NEG_ACK:
846 siop_update_scntl3(sc,
847 siop_cmd->cmd_c.siop_target);
848 CALL_SCRIPT(Ent_msgin_ack);
849 return(1);
850 default:
panic("invalid retval from "
    "siop_sdtr_neg()");
853 }
854 return(1);
855 }
856 /* send a message reject */
857 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
858 siop_cmd->cmd_tables->t_msgout.count = htole32(1);
859 siop_table_sync(siop_cmd,
860 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
861 CALL_SCRIPT(Ent_send_msgout);
862 return 1;
863 case A_int_disc:
864 INCSTAT(siop_stat_intr_sdp);
865 offset = bus_space_read_1(sc->sc_c.sc_rt,
866 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
867 #ifdef SIOP_DEBUG_DR
868 printf("disconnect offset %d\n", offset);
869 #endif
870 if (offset > SIOP_NSG) {
871 printf("%s: bad offset for disconnect (%d)\n",
872 sc->sc_c.sc_dev.dv_xname, offset);
873 goto reset;
874 }
/*
 * offset == SIOP_NSG may be a valid condition if
 * we get a save-data-pointer when the xfer is done.
 * Don't call memmove in this case.
 */
880 if (offset < SIOP_NSG) {
881 memmove(&siop_cmd->cmd_tables->data[0],
882 &siop_cmd->cmd_tables->data[offset],
883 (SIOP_NSG - offset) * sizeof(scr_table_t));
884 siop_table_sync(siop_cmd,
885 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
886 }
887 CALL_SCRIPT(Ent_script_sched);
888 return 1;
889 case A_int_resfail:
890 printf("reselect failed\n");
891 CALL_SCRIPT(Ent_script_sched);
892 return 1;
893 case A_int_done:
894 if (xs == NULL) {
895 printf("%s: done without command, DSA=0x%lx\n",
896 sc->sc_c.sc_dev.dv_xname,
897 (u_long)siop_cmd->cmd_c.dsa);
898 siop_cmd->cmd_c.status = CMDST_FREE;
899 CALL_SCRIPT(Ent_script_sched);
900 return 1;
901 }
902 #ifdef SIOP_DEBUG_INTR
903 printf("done, DSA=0x%lx target id 0x%x last msg "
904 "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
905 le32toh(siop_cmd->cmd_tables->id),
906 siop_cmd->cmd_tables->msg_in[0],
907 le32toh(siop_cmd->cmd_tables->status));
908 #endif
909 INCSTAT(siop_stat_intr_done);
910 siop_cmd->cmd_c.status = CMDST_DONE;
911 goto end;
912 default:
913 printf("unknown irqcode %x\n", irqcode);
914 if (xs) {
915 xs->error = XS_SELTIMEOUT;
916 goto end;
917 }
918 goto reset;
919 }
920 return 1;
921 }
/* We just shouldn't get here */
923 panic("siop_intr: I shouldn't be there !");
924 return 1;
925 end:
/*
 * Restart the script now if the command completed properly.
 * Otherwise only restart it after siop_scsicmd_end(), which may
 * need to clean up the queue first.
 */
931 xs->status = le32toh(siop_cmd->cmd_tables->status);
932 if (xs->status == SCSI_OK)
933 CALL_SCRIPT(Ent_script_sched);
934 else
935 restart = 1;
936 siop_lun->siop_tag[tag].active = NULL;
937 siop_scsicmd_end(siop_cmd);
938 if (freetarget && siop_target->target_c.status == TARST_PROBING)
939 siop_del_dev(sc, target, lun);
940 if (restart)
941 CALL_SCRIPT(Ent_script_sched);
942 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
943 /* a command terminated, so we have free slots now */
944 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
945 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
946 }
947
948 return 1;
949 }
950
951 void
952 siop_scsicmd_end(siop_cmd)
953 struct siop_cmd *siop_cmd;
954 {
955 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
956 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
957
958 switch(xs->status) {
959 case SCSI_OK:
960 xs->error = XS_NOERROR;
961 break;
962 case SCSI_BUSY:
963 xs->error = XS_BUSY;
964 break;
965 case SCSI_CHECK:
966 xs->error = XS_BUSY;
967 /* remove commands in the queue and scheduler */
968 siop_unqueue(sc, xs->xs_periph->periph_target,
969 xs->xs_periph->periph_lun);
970 break;
971 case SCSI_QUEUE_FULL:
972 INCSTAT(siop_stat_intr_qfull);
973 #ifdef SIOP_DEBUG
974 printf("%s:%d:%d: queue full (tag %d)\n",
975 sc->sc_c.sc_dev.dv_xname,
976 xs->xs_periph->periph_target,
977 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
978 #endif
979 xs->error = XS_BUSY;
980 break;
981 case SCSI_SIOP_NOCHECK:
982 /*
983 * don't check status, xs->error is already valid
984 */
985 break;
986 case SCSI_SIOP_NOSTATUS:
987 /*
988 * the status byte was not updated, cmd was
989 * aborted
990 */
991 xs->error = XS_SELTIMEOUT;
992 break;
993 default:
994 xs->error = XS_DRIVER_STUFFUP;
995 }
996 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
997 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
998 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
999 (xs->xs_control & XS_CTL_DATA_IN) ?
1000 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1001 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1002 }
1003 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1004 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1005 siop_cmd->cmd_c.status = CMDST_FREE;
1006 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1007 xs->resid = 0;
1008 scsipi_done (xs);
1009 }
1010
1011 void
1012 siop_unqueue(sc, target, lun)
1013 struct siop_softc *sc;
1014 int target;
1015 int lun;
1016 {
1017 int slot, tag;
1018 struct siop_cmd *siop_cmd;
1019 struct siop_lun *siop_lun =
1020 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1021
1022 /* first make sure to read valid data */
1023 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1024
1025 for (tag = 1; tag < SIOP_NTAG; tag++) {
1026 /* look for commands in the scheduler, not yet started */
1027 if (siop_lun->siop_tag[tag].active == NULL)
1028 continue;
1029 siop_cmd = siop_lun->siop_tag[tag].active;
1030 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1031 if (siop_script_read(sc,
1032 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1033 siop_cmd->cmd_c.dsa +
1034 sizeof(struct siop_common_xfer) +
1035 Ent_ldsa_select)
1036 break;
1037 }
1038 if (slot > sc->sc_currschedslot)
1039 continue; /* didn't find it */
1040 if (siop_script_read(sc,
1041 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1042 continue; /* already started */
1043 /* clear the slot */
1044 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1045 0x80000000);
1046 /* ask to requeue */
1047 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1048 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1049 siop_lun->siop_tag[tag].active = NULL;
1050 siop_scsicmd_end(siop_cmd);
1051 }
1052 /* update sc_currschedslot */
1053 sc->sc_currschedslot = 0;
1054 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1055 if (siop_script_read(sc,
1056 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1057 sc->sc_currschedslot = slot;
1058 }
1059 }
1060
/*
 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
 */
1065 int
1066 siop_handle_qtag_reject(siop_cmd)
1067 struct siop_cmd *siop_cmd;
1068 {
1069 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1070 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1071 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1072 int tag = siop_cmd->cmd_tables->msg_out[2];
1073 struct siop_lun *siop_lun =
1074 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1075
1076 #ifdef SIOP_DEBUG
1077 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1078 sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
1079 siop_cmd->cmd_c.status);
1080 #endif
1081
1082 if (siop_lun->siop_tag[0].active != NULL) {
1083 printf("%s: untagged command already running for target %d "
1084 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1085 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1086 return -1;
1087 }
1088 /* clear tag slot */
1089 siop_lun->siop_tag[tag].active = NULL;
1090 /* add command to non-tagged slot */
1091 siop_lun->siop_tag[0].active = siop_cmd;
1092 siop_cmd->cmd_c.tag = 0;
1093 /* adjust reselect script if there is one */
1094 if (siop_lun->siop_tag[0].reseloff > 0) {
1095 siop_script_write(sc,
1096 siop_lun->siop_tag[0].reseloff + 1,
1097 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1098 Ent_ldsa_reload_dsa);
1099 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1100 }
1101 return 0;
1102 }
1103
/*
 * handle a bus reset: reset the chip, unqueue all active commands, free all
 * target structs and report the loss to the upper layer.
 * As the upper layer may requeue immediately, we have to first store
 * all active commands in a temporary queue.
 */
1110 void
1111 siop_handle_reset(sc)
1112 struct siop_softc *sc;
1113 {
1114 struct siop_cmd *siop_cmd;
1115 struct siop_lun *siop_lun;
1116 int target, lun, tag;
1117 /*
1118 * scsi bus reset. reset the chip and restart
1119 * the queue. Need to clean up all active commands
1120 */
1121 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1122 /* stop, reset and restart the chip */
1123 siop_reset(sc);
1124 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1125 /* chip has been reset, all slots are free now */
1126 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1127 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1128 }
/*
 * Process all commands: first the commands being executed
 */
1132 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1133 target++) {
1134 if (sc->sc_c.targets[target] == NULL)
1135 continue;
1136 for (lun = 0; lun < 8; lun++) {
1137 struct siop_target *siop_target =
1138 (struct siop_target *)sc->sc_c.targets[target];
1139 siop_lun = siop_target->siop_lun[lun];
1140 if (siop_lun == NULL)
1141 continue;
1142 for (tag = 0; tag <
1143 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1144 SIOP_NTAG : 1);
1145 tag++) {
1146 siop_cmd = siop_lun->siop_tag[tag].active;
1147 if (siop_cmd == NULL)
1148 continue;
1149 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1150 printf("command with tag id %d reset\n", tag);
1151 siop_cmd->cmd_c.xs->error =
1152 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1153 XS_TIMEOUT : XS_RESET;
1154 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1155 siop_lun->siop_tag[tag].active = NULL;
1156 siop_cmd->cmd_c.status = CMDST_DONE;
1157 siop_scsicmd_end(siop_cmd);
1158 }
1159 }
1160 sc->sc_c.targets[target]->status = TARST_ASYNC;
1161 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1162 sc->sc_c.targets[target]->period =
1163 sc->sc_c.targets[target]->offset = 0;
1164 siop_update_xfer_mode(&sc->sc_c, target);
1165 }
1166
1167 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1168 }
1169
1170 void
1171 siop_scsipi_request(chan, req, arg)
1172 struct scsipi_channel *chan;
1173 scsipi_adapter_req_t req;
1174 void *arg;
1175 {
1176 struct scsipi_xfer *xs;
1177 struct scsipi_periph *periph;
1178 struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1179 struct siop_cmd *siop_cmd;
1180 struct siop_target *siop_target;
1181 int s, error, i;
1182 int target;
1183 int lun;
1184
1185 switch (req) {
1186 case ADAPTER_REQ_RUN_XFER:
1187 xs = arg;
1188 periph = xs->xs_periph;
1189 target = periph->periph_target;
1190 lun = periph->periph_lun;
1191
1192 s = splbio();
1193 #ifdef SIOP_DEBUG_SCHED
1194 printf("starting cmd for %d:%d\n", target, lun);
1195 #endif
1196 siop_cmd = TAILQ_FIRST(&sc->free_list);
1197 if (siop_cmd == NULL) {
1198 xs->error = XS_RESOURCE_SHORTAGE;
1199 scsipi_done(xs);
1200 splx(s);
1201 return;
1202 }
1203 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1204 #ifdef DIAGNOSTIC
1205 if (siop_cmd->cmd_c.status != CMDST_FREE)
1206 panic("siop_scsicmd: new cmd not free");
1207 #endif
1208 siop_target = (struct siop_target*)sc->sc_c.targets[target];
1209 if (siop_target == NULL) {
1210 #ifdef SIOP_DEBUG
1211 printf("%s: alloc siop_target for target %d\n",
1212 sc->sc_c.sc_dev.dv_xname, target);
1213 #endif
1214 sc->sc_c.targets[target] =
1215 malloc(sizeof(struct siop_target),
1216 M_DEVBUF, M_NOWAIT);
1217 if (sc->sc_c.targets[target] == NULL) {
1218 printf("%s: can't malloc memory for "
1219 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1220 target);
1221 xs->error = XS_RESOURCE_SHORTAGE;
1222 scsipi_done(xs);
1223 splx(s);
1224 return;
1225 }
1226 siop_target =
1227 (struct siop_target*)sc->sc_c.targets[target];
1228 siop_target->target_c.status = TARST_PROBING;
1229 siop_target->target_c.flags = 0;
1230 siop_target->target_c.id =
1231 sc->sc_c.clock_div << 24; /* scntl3 */
1232 siop_target->target_c.id |= target << 16; /* id */
1233 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1234
1235 /* get a lun switch script */
1236 siop_target->lunsw = siop_get_lunsw(sc);
1237 if (siop_target->lunsw == NULL) {
1238 printf("%s: can't alloc lunsw for target %d\n",
1239 sc->sc_c.sc_dev.dv_xname, target);
1240 xs->error = XS_RESOURCE_SHORTAGE;
1241 scsipi_done(xs);
1242 splx(s);
1243 return;
1244 }
1245 for (i=0; i < 8; i++)
1246 siop_target->siop_lun[i] = NULL;
1247 siop_add_reselsw(sc, target);
1248 }
1249 if (siop_target->siop_lun[lun] == NULL) {
1250 siop_target->siop_lun[lun] =
1251 malloc(sizeof(struct siop_lun), M_DEVBUF,
1252 M_NOWAIT|M_ZERO);
1253 if (siop_target->siop_lun[lun] == NULL) {
1254 printf("%s: can't alloc siop_lun for "
1255 "target %d lun %d\n",
1256 sc->sc_c.sc_dev.dv_xname, target, lun);
1257 xs->error = XS_RESOURCE_SHORTAGE;
1258 scsipi_done(xs);
1259 splx(s);
1260 return;
1261 }
1262 }
1263 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1264 siop_cmd->cmd_c.xs = xs;
1265 siop_cmd->cmd_c.flags = 0;
1266 siop_cmd->cmd_c.status = CMDST_READY;
1267
1268 /* load the DMA maps */
1269 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1270 siop_cmd->cmd_c.dmamap_cmd,
1271 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1272 if (error) {
1273 printf("%s: unable to load cmd DMA map: %d\n",
1274 sc->sc_c.sc_dev.dv_xname, error);
1275 xs->error = XS_DRIVER_STUFFUP;
1276 scsipi_done(xs);
1277 splx(s);
1278 return;
1279 }
1280 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1281 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1282 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1283 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1284 ((xs->xs_control & XS_CTL_DATA_IN) ?
1285 BUS_DMA_READ : BUS_DMA_WRITE));
1286 if (error) {
printf("%s: unable to load data DMA map: %d\n",
1288 sc->sc_c.sc_dev.dv_xname, error);
1289 xs->error = XS_DRIVER_STUFFUP;
1290 scsipi_done(xs);
1291 bus_dmamap_unload(sc->sc_c.sc_dmat,
1292 siop_cmd->cmd_c.dmamap_cmd);
1293 splx(s);
1294 return;
1295 }
1296 bus_dmamap_sync(sc->sc_c.sc_dmat,
1297 siop_cmd->cmd_c.dmamap_data, 0,
1298 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1299 (xs->xs_control & XS_CTL_DATA_IN) ?
1300 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1301 }
1302 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1303 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1304 BUS_DMASYNC_PREWRITE);
1305
1306 if (xs->xs_tag_type) {
1307 /* use tag_id + 1, tag 0 is reserved for untagged cmds*/
1308 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1309 } else {
1310 siop_cmd->cmd_c.tag = 0;
1311 }
1312 siop_setuptables(&siop_cmd->cmd_c);
1313 siop_table_sync(siop_cmd,
1314 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1315 siop_start(sc, siop_cmd);
1316 if (xs->xs_control & XS_CTL_POLL) {
1317 /* poll for command completion */
1318 while ((xs->xs_status & XS_STS_DONE) == 0) {
1319 delay(1000);
1320 siop_intr(sc);
1321 }
1322 }
1323 splx(s);
1324 return;
1325
1326 case ADAPTER_REQ_GROW_RESOURCES:
1327 #ifdef SIOP_DEBUG
1328 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1329 sc->sc_c.sc_adapt.adapt_openings);
1330 #endif
1331 siop_morecbd(sc);
1332 return;
1333
1334 case ADAPTER_REQ_SET_XFER_MODE:
1335 {
1336 struct scsipi_xfer_mode *xm = arg;
1337 if (sc->sc_c.targets[xm->xm_target] == NULL)
1338 return;
1339 s = splbio();
1340 if (xm->xm_mode & PERIPH_CAP_TQING)
1341 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1342 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1343 (sc->sc_c.features & SF_BUS_WIDE))
1344 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1345 if (xm->xm_mode & PERIPH_CAP_SYNC)
1346 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1347 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1348 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1349 sc->sc_c.targets[xm->xm_target]->status =
1350 TARST_ASYNC;
1351
1352 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1353 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1354 /* allocate a lun sw entry for this device */
1355 siop_add_dev(sc, xm->xm_target, lun);
1356 }
1357
1358 splx(s);
1359 }
1360 }
1361 }
1362
1363 static void
1364 siop_start(sc, siop_cmd)
1365 struct siop_softc *sc;
1366 struct siop_cmd *siop_cmd;
1367 {
1368 struct siop_lun *siop_lun;
1369 struct siop_xfer *siop_xfer;
1370 u_int32_t dsa;
1371 int timeout;
1372 int target, lun, slot;
1373
1374 /*
1375 * first make sure to read valid data
1376 */
1377 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1378
1379 /*
* The queue management here is a bit tricky: the script always looks
* at the slots from first to last, so if we always used the first
* free slot, commands could stay at the tail of the queue ~forever.
1383 * The algorithm used here is to restart from the head when we know
1384 * that the queue is empty, and only add commands after the last one.
1385 * When we're at the end of the queue wait for the script to clear it.
1386 * The best thing to do here would be to implement a circular queue,
1387 * but using only 53c720 features this can be "interesting".
1388 * A mid-way solution could be to implement 2 queues and swap orders.
1389 */
1390 slot = sc->sc_currschedslot;
1391 /*
1392 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1393 * free. As this is the last used slot, all previous slots are free,
1394 * we can restart from 0.
1395 */
1396 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1397 0x80000000) {
1398 slot = sc->sc_currschedslot = 0;
1399 } else {
1400 slot++;
1401 }
1402 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1403 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1404 siop_lun =
1405 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1406 /* if non-tagged command active, panic: this shouldn't happen */
1407 if (siop_lun->siop_tag[0].active != NULL) {
1408 panic("siop_start: tagged cmd while untagged running");
1409 }
1410 #ifdef DIAGNOSTIC
1411 /* sanity check the tag if needed */
1412 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1413 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1414 panic("siop_start: tag not free");
1415 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1416 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1417 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1418 panic("siop_start: invalid tag id");
1419 }
1420 }
1421 #endif
1422 /*
1423 * find a free scheduler slot and load it.
1424 */
1425 for (; slot < SIOP_NSLOTS; slot++) {
/*
 * If the cmd word is 0x80000000 the slot is free
 */
1429 if (siop_script_read(sc,
1430 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1431 0x80000000)
1432 break;
1433 }
1434 if (slot == SIOP_NSLOTS) {
1435 /*
1436 * no more free slot, no need to continue. freeze the queue
1437 * and requeue this command.
1438 */
1439 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1440 sc->sc_flags |= SCF_CHAN_NOSLOT;
1441 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1442 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1443 siop_scsicmd_end(siop_cmd);
1444 return;
1445 }
1446 #ifdef SIOP_DEBUG_SCHED
1447 printf("using slot %d for DSA 0x%lx\n", slot,
1448 (u_long)siop_cmd->cmd_c.dsa);
1449 #endif
1450 /* mark command as active */
1451 if (siop_cmd->cmd_c.status == CMDST_READY)
1452 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1453 else
1454 panic("siop_start: bad status");
1455 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1456 /* patch scripts with DSA addr */
1457 dsa = siop_cmd->cmd_c.dsa;
1458 /* first reselect switch, if we have an entry */
1459 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1460 siop_script_write(sc,
1461 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1462 dsa + sizeof(struct siop_common_xfer) +
1463 Ent_ldsa_reload_dsa);
1464 /* CMD script: MOVE MEMORY addr */
1465 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1466 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1467 htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1468 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1469 /* scheduler slot: JUMP ldsa_select */
1470 siop_script_write(sc,
1471 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1472 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1473 /* handle timeout */
1474 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
/* start expire timer */
1476 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1477 if (timeout == 0)
1478 timeout = 1;
1479 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1480 timeout, siop_timeout, siop_cmd);
1481 }
/*
 * Change the JUMP cmd from "IF FALSE" (0x80000000, free slot) to
 * "IF TRUE" (0x80080000) so that this slot will be handled
 */
1485 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1486 0x80080000);
1487 sc->sc_currschedslot = slot;
1488
1489 /* make sure SCRIPT processor will read valid data */
1490 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1491 /* Signal script it has some work to do */
1492 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1493 SIOP_ISTAT, ISTAT_SIGP);
1494 /* and wait for IRQ */
1495 return;
1496 }
1497
1498 void
1499 siop_timeout(v)
1500 void *v;
1501 {
1502 struct siop_cmd *siop_cmd = v;
1503 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1504 int s;
1505
1506 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1507 printf("command timeout\n");
1508
1509 s = splbio();
1510 /* reset the scsi bus */
1511 siop_resetbus(&sc->sc_c);
1512
1513 /* deactivate callout */
1514 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
/*
 * mark the command as having timed out and just return;
 * the bus reset will generate an interrupt,
 * which will be handled in siop_intr()
 */
1521 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1522 splx(s);
1523 return;
1524
1525 }
1526
1527 void
1528 siop_dump_script(sc)
1529 struct siop_softc *sc;
1530 {
1531 int i;
1532 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1533 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1534 le32toh(sc->sc_c.sc_script[i]),
1535 le32toh(sc->sc_c.sc_script[i+1]));
1536 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1537 0xc0000000) {
1538 i++;
1539 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1540 }
1541 printf("\n");
1542 }
1543 }
1544
1545 void
1546 siop_morecbd(sc)
1547 struct siop_softc *sc;
1548 {
1549 int error, i, j, s;
1550 bus_dma_segment_t seg;
1551 int rseg;
1552 struct siop_cbd *newcbd;
1553 struct siop_xfer *xfer;
1554 bus_addr_t dsa;
1555 u_int32_t *scr;
1556
1557 /* allocate a new list head */
1558 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1559 if (newcbd == NULL) {
1560 printf("%s: can't allocate memory for command descriptors "
1561 "head\n", sc->sc_c.sc_dev.dv_xname);
1562 return;
1563 }
1564
1565 /* allocate cmd list */
1566 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1567 M_DEVBUF, M_NOWAIT|M_ZERO);
1568 if (newcbd->cmds == NULL) {
1569 printf("%s: can't allocate memory for command descriptors\n",
1570 sc->sc_c.sc_dev.dv_xname);
1571 goto bad3;
1572 }
1573 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1574 1, &rseg, BUS_DMA_NOWAIT);
1575 if (error) {
1576 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1577 sc->sc_c.sc_dev.dv_xname, error);
1578 goto bad2;
1579 }
1580 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1581 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1582 if (error) {
1583 printf("%s: unable to map cbd DMA memory, error = %d\n",
1584 sc->sc_c.sc_dev.dv_xname, error);
1585 goto bad2;
1586 }
1587 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1588 BUS_DMA_NOWAIT, &newcbd->xferdma);
1589 if (error) {
1590 printf("%s: unable to create cbd DMA map, error = %d\n",
1591 sc->sc_c.sc_dev.dv_xname, error);
1592 goto bad1;
1593 }
1594 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1595 PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1596 if (error) {
1597 printf("%s: unable to load cbd DMA map, error = %d\n",
1598 sc->sc_c.sc_dev.dv_xname, error);
1599 goto bad0;
1600 }
1601 #ifdef DEBUG
printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1603 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1604 #endif
1605 for (i = 0; i < SIOP_NCMDPB; i++) {
1606 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1607 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1608 &newcbd->cmds[i].cmd_c.dmamap_data);
1609 if (error) {
1610 printf("%s: unable to create data DMA map for cbd: "
1611 "error %d\n",
1612 sc->sc_c.sc_dev.dv_xname, error);
1613 goto bad0;
1614 }
1615 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1616 sizeof(struct scsipi_generic), 1,
1617 sizeof(struct scsipi_generic), 0,
1618 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1619 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1620 if (error) {
printf("%s: unable to create cmd DMA map for cbd: error %d\n",
1622 sc->sc_c.sc_dev.dv_xname, error);
1623 goto bad0;
1624 }
1625 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1626 newcbd->cmds[i].siop_cbdp = newcbd;
1627 xfer = &newcbd->xfers[i];
1628 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1629 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1630 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1631 i * sizeof(struct siop_xfer);
1632 newcbd->cmds[i].cmd_c.dsa = dsa;
1633 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
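/*
 * Initialize the parts of the tables that never change: the message-out,
 * message-in, extended-message and status buffers all live inside the
 * siop_common_xfer itself, addressed relative to the DSA.
 */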
1634 xfer->siop_tables.t_msgout.count= htole32(1);
1635 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1636 xfer->siop_tables.t_msgin.count= htole32(1);
1637 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1638 offsetof(struct siop_common_xfer, msg_in));
1639 xfer->siop_tables.t_extmsgin.count= htole32(2);
1640 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1641 offsetof(struct siop_common_xfer, msg_in) + 1);
1642 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1643 offsetof(struct siop_common_xfer, msg_in) + 3);
1644 xfer->siop_tables.t_status.count= htole32(1);
1645 xfer->siop_tables.t_status.addr = htole32(dsa +
1646 offsetof(struct siop_common_xfer, status));
1647 /* The select/reselect script */
1648 scr = &xfer->resel[0];
1649 for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1650 scr[j] = htole32(load_dsa[j]);
1651 /*
1652 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1653 * octet, reg offset is the third.
1654 */
1655 scr[Ent_rdsa0 / 4] =
1656 htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
1657 scr[Ent_rdsa1 / 4] =
1658 htole32(0x78110000 | ( dsa & 0x0000ff00 ));
1659 scr[Ent_rdsa2 / 4] =
1660 htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
1661 scr[Ent_rdsa3 / 4] =
1662 htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
1663 scr[E_ldsa_abs_reselected_Used[0]] =
1664 htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
1665 scr[E_ldsa_abs_reselect_Used[0]] =
1666 htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
1667 scr[E_ldsa_abs_selected_Used[0]] =
1668 htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
1669 scr[E_ldsa_abs_data_Used[0]] =
1670 htole32(dsa + sizeof(struct siop_common_xfer) +
1671 Ent_ldsa_data);
1672 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1673 scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1674 s = splbio();
1675 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1676 splx(s);
1677 #ifdef SIOP_DEBUG
1678 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1679 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1680 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1681 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1682 #endif
1683 }
1684 s = splbio();
1685 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1686 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1687 splx(s);
1688 return;
1689 bad0:
1690 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1691 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1692 bad1:
1693 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1694 bad2:
1695 free(newcbd->cmds, M_DEVBUF);
1696 bad3:
1697 free(newcbd, M_DEVBUF);
1698 return;
1699 }
1700
1701 struct siop_lunsw *
1702 siop_get_lunsw(sc)
1703 struct siop_softc *sc;
1704 {
1705 struct siop_lunsw *lunsw;
1706 int i;
1707
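/* make sure a new lun switch would still fit in the free script area */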
1708 if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1709 sc->script_free_hi)
1710 return NULL;
1711 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1712 if (lunsw != NULL) {
1713 #ifdef SIOP_DEBUG
1714 printf("siop_get_lunsw got lunsw at offset %d\n",
1715 lunsw->lunsw_off);
1716 #endif
1717 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1718 return lunsw;
1719 }
1720 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1721 if (lunsw == NULL)
1722 return NULL;
1723 #ifdef SIOP_DEBUG
1724 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1725 #endif
1726 if (sc->sc_c.features & SF_CHIP_RAM) {
1727 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1728 sc->script_free_lo * 4, lun_switch,
1729 sizeof(lun_switch) / sizeof(lun_switch[0]));
1730 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1731 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1732 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1733 } else {
1734 for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1735 i++)
1736 sc->sc_c.sc_script[sc->script_free_lo + i] =
1737 htole32(lun_switch[i]);
1738 sc->sc_c.sc_script[
1739 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1740 htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1741 }
1742 lunsw->lunsw_off = sc->script_free_lo;
1743 lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1744 sc->script_free_lo += lunsw->lunsw_size;
1745 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1746 return lunsw;
1747 }
1748
1749 void
1750 siop_add_reselsw(sc, target)
1751 struct siop_softc *sc;
1752 int target;
1753 {
1754 int i;
1755 struct siop_target *siop_target;
1756 struct siop_lun *siop_lun;
1757
1758 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1759 /*
1760 * add an entry to resel switch
1761 */
1762 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
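/* scan the 15 reselect switch entries for a free one (target field == 0xff) */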
1763 for (i = 0; i < 15; i++) {
1764 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1765 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1766 == 0xff) { /* it's free */
1767 #ifdef SIOP_DEBUG
1768 printf("siop: target %d slot %d offset %d\n",
1769 target, i, siop_target->reseloff);
1770 #endif
1771 /* JUMP abs_foo, IF target | 0x80; */
1772 siop_script_write(sc, siop_target->reseloff,
1773 0x800c0080 | target);
1774 siop_script_write(sc, siop_target->reseloff + 1,
1775 sc->sc_c.sc_scriptaddr +
1776 siop_target->lunsw->lunsw_off * 4 +
1777 Ent_lun_switch_entry);
1778 break;
1779 }
1780 }
1781 if (i == 15) /* no free slot, shouldn't happen */
1782 panic("siop: resel switch full");
1783
1784 sc->sc_ntargets++;
1785 for (i = 0; i < 8; i++) {
1786 siop_lun = siop_target->siop_lun[i];
1787 if (siop_lun == NULL)
1788 continue;
1789 if (siop_lun->reseloff > 0) {
1790 siop_lun->reseloff = 0;
1791 siop_add_dev(sc, target, i);
1792 }
1793 }
1794 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1795 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1796 }
1797
1798 void
1799 siop_update_scntl3(sc, _siop_target)
1800 struct siop_softc *sc;
1801 struct siop_common_target *_siop_target;
1802 {
1803 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1804 /* MOVE target->id >> 24 TO SCNTL3 */
1805 siop_script_write(sc,
1806 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1807 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1808 /* MOVE target->id >> 8 TO SXFER */
1809 siop_script_write(sc,
1810 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1811 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1812 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1813 }
1814
1815 void
1816 siop_add_dev(sc, target, lun)
1817 struct siop_softc *sc;
1818 int target;
1819 int lun;
1820 {
1821 struct siop_lunsw *lunsw;
1822 struct siop_target *siop_target =
1823 (struct siop_target *)sc->sc_c.targets[target];
1824 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1825 int i, ntargets;
1826
1827 if (siop_lun->reseloff > 0)
1828 return;
1829 lunsw = siop_target->lunsw;
1830 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1831 /*
1832 * can't extend this slot. Probably not worth trying to deal
1833 * with this case
1834 */
1835 #ifdef DEBUG
1836 printf("%s:%d:%d: can't allocate a lun sw slot\n",
1837 sc->sc_c.sc_dev.dv_xname, target, lun);
1838 #endif
1839 return;
1840 }
1841 /* count how many free targets we still have to probe */
1842 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1843
/*
 * we need 8 bytes for the additional lun sw entry, and
 * possibly sizeof(tag_switch) for the tag switch entry.
 * Keep enough free space for the targets that could still be
 * probed later.
 */
1850 if (sc->script_free_lo + 2 +
1851 (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1852 ((siop_target->target_c.flags & TARF_TAG) ?
1853 sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1854 sc->script_free_hi)) {
/*
 * not enough space, probably not worth dealing with it.
 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
 */
1859 #ifdef DEBUG
printf("%s:%d:%d: not enough memory for a lun sw slot\n",
1861 sc->sc_c.sc_dev.dv_xname, target, lun);
1862 #endif
1863 return;
1864 }
1865 #ifdef SIOP_DEBUG
1866 printf("%s:%d:%d: allocate lun sw entry\n",
1867 sc->sc_c.sc_dev.dv_xname, target, lun);
1868 #endif
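/*
 * Grow the lun switch by two words: a new "INT int_resellun" terminator goes
 * at script_free_lo, and the two words just before it (the old end of the
 * switch) become the "JUMP ..., IF lun" entry for this lun.
 */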
1869 /* INT int_resellun */
1870 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1871 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1872 /* Now the slot entry: JUMP abs_foo, IF lun */
1873 siop_script_write(sc, sc->script_free_lo - 2,
1874 0x800c0000 | lun);
1875 siop_script_write(sc, sc->script_free_lo - 1, 0);
1876 siop_lun->reseloff = sc->script_free_lo - 2;
1877 lunsw->lunsw_size += 2;
1878 sc->script_free_lo += 2;
1879 if (siop_target->target_c.flags & TARF_TAG) {
1880 /* we need a tag switch */
1881 sc->script_free_hi -=
1882 sizeof(tag_switch) / sizeof(tag_switch[0]);
1883 if (sc->sc_c.features & SF_CHIP_RAM) {
1884 bus_space_write_region_4(sc->sc_c.sc_ramt,
1885 sc->sc_c.sc_ramh,
1886 sc->script_free_hi * 4, tag_switch,
1887 sizeof(tag_switch) / sizeof(tag_switch[0]));
1888 } else {
1889 for(i = 0;
1890 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1891 i++) {
1892 sc->sc_c.sc_script[sc->script_free_hi + i] =
1893 htole32(tag_switch[i]);
1894 }
1895 }
1896 siop_script_write(sc,
1897 siop_lun->reseloff + 1,
1898 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1899 Ent_tag_switch_entry);
1900
1901 for (i = 0; i < SIOP_NTAG; i++) {
1902 siop_lun->siop_tag[i].reseloff =
1903 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1904 }
1905 } else {
1906 /* non-tag case; just work with the lun switch */
1907 siop_lun->siop_tag[0].reseloff =
1908 siop_target->siop_lun[lun]->reseloff;
1909 }
1910 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1911 }
1912
1913 void
1914 siop_del_dev(sc, target, lun)
1915 struct siop_softc *sc;
1916 int target;
1917 int lun;
1918 {
1919 int i;
1920 struct siop_target *siop_target;
1921 #ifdef SIOP_DEBUG
1922 printf("%s:%d:%d: free lun sw entry\n",
1923 sc->sc_c.sc_dev.dv_xname, target, lun);
1924 #endif
1925 if (sc->sc_c.targets[target] == NULL)
1926 return;
1927 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1928 free(siop_target->siop_lun[lun], M_DEVBUF);
1929 siop_target->siop_lun[lun] = NULL;
1930 /* XXX compact sw entry too ? */
1931 /* check if we can free the whole target */
1932 for (i = 0; i < 8; i++) {
1933 if (siop_target->siop_lun[i] != NULL)
1934 return;
1935 }
1936 #ifdef SIOP_DEBUG
1937 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
1938 sc->sc_c.sc_dev.dv_xname, target, lun,
1939 sc->sc_c.targets[target]->lunsw->lunsw_off);
1940 #endif
1941 /*
1942 * nothing here, free the target struct and resel
1943 * switch entry
1944 */
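/* 0x800c00ff: "JUMP ..., IF target" with target 0xff, treated as a free entry by siop_add_reselsw() */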
1945 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
1946 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1947 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
1948 free(sc->sc_c.targets[target], M_DEVBUF);
1949 sc->sc_c.targets[target] = NULL;
1950 sc->sc_ntargets--;
1951 }
1952
1953 #ifdef SIOP_STATS
1954 void
1955 siop_printstats()
1956 {
1957 printf("siop_stat_intr %d\n", siop_stat_intr);
1958 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
1959 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
1960 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
1961 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
1962 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
1963 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
1964 }
1965 #endif
1966