1 /* $NetBSD: siop.c,v 1.63 2002/07/18 11:59:08 wiz Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.63 2002/07/18 11:59:08 wiz Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
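/*
 * A "command block" is one PAGE_SIZE DMA-safe page carved into SIOP_NCMDPB
 * struct siop_xfer descriptors; blocks are allocated on demand in
 * siop_morecbd() below.
 */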
81
82 /* Number of scheduler slots (needs to match the script) */
83 #define SIOP_NSLOTS 40
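/*
 * Each scheduler slot is a two-word entry in the script's scheduler table
 * starting at Ent_script_sched_slot0; see siop_start() below for how slots
 * are claimed by the driver and released again.
 */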
84
85 void siop_reset __P((struct siop_softc *));
86 void siop_handle_reset __P((struct siop_softc *));
87 int siop_handle_qtag_reject __P((struct siop_cmd *));
88 void siop_scsicmd_end __P((struct siop_cmd *));
89 void siop_unqueue __P((struct siop_softc *, int, int));
90 static void siop_start __P((struct siop_softc *, struct siop_cmd *));
91 void siop_timeout __P((void *));
92 int siop_scsicmd __P((struct scsipi_xfer *));
93 void siop_scsipi_request __P((struct scsipi_channel *,
94 scsipi_adapter_req_t, void *));
95 void siop_dump_script __P((struct siop_softc *));
96 void siop_morecbd __P((struct siop_softc *));
97 struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
98 void siop_add_reselsw __P((struct siop_softc *, int));
99 void siop_update_scntl3 __P((struct siop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int siop_stat_intr = 0;
104 static int siop_stat_intr_shortxfer = 0;
105 static int siop_stat_intr_sdp = 0;
106 static int siop_stat_intr_done = 0;
107 static int siop_stat_intr_xferdisc = 0;
108 static int siop_stat_intr_lunresel = 0;
109 static int siop_stat_intr_qfull = 0;
110 void siop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
116 static __inline__ void siop_script_sync __P((struct siop_softc *, int));
117 static __inline__ void
118 siop_script_sync(sc, ops)
119 struct siop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
128 static __inline__ u_int32_t
129 siop_script_read(sc, offset)
130 struct siop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 siop_script_write(sc, offset, val)
145 struct siop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
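/*
 * The three helpers above hide where the script lives: with on-chip RAM
 * (SF_CHIP_RAM) it is accessed through bus_space on sc_ramt/sc_ramh,
 * otherwise it sits in host DMA memory at sc_script (stored little-endian,
 * hence the htole32()/le32toh()) and accesses must be bracketed by
 * bus_dmamap_sync() calls.
 */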
156
157 void
158 siop_attach(sc)
159 struct siop_softc *sc;
160 {
161 if (siop_common_attach(&sc->sc_c) != 0)
162 return;
163
164 TAILQ_INIT(&sc->free_list);
165 TAILQ_INIT(&sc->cmds);
166 TAILQ_INIT(&sc->lunsw_list);
167 sc->sc_currschedslot = 0;
168 #ifdef SIOP_DEBUG
169 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
170 sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
171 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
172 #endif
173
174 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
175 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
176
177 /* Do a bus reset, so that devices fall back to narrow/async */
178 siop_resetbus(&sc->sc_c);
179 /*
180 * siop_reset() will reset the chip, thus clearing pending interrupts
181 */
182 siop_reset(sc);
183 #ifdef DUMP_SCRIPT
184 siop_dump_script(sc);
185 #endif
186
187 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
188 }
189
190 void
191 siop_reset(sc)
192 struct siop_softc *sc;
193 {
194 int i, j;
195 struct siop_lunsw *lunsw;
196
197 siop_common_reset(&sc->sc_c);
198
199 /* copy and patch the script */
200 if (sc->sc_c.features & SF_CHIP_RAM) {
201 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
202 siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
203 for (j = 0; j <
204 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
205 j++) {
206 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
207 E_abs_msgin_Used[j] * 4,
208 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
209 }
210 if (sc->sc_c.features & SF_CHIP_LED0) {
211 bus_space_write_region_4(sc->sc_c.sc_ramt,
212 sc->sc_c.sc_ramh,
213 Ent_led_on1, siop_led_on,
214 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
215 bus_space_write_region_4(sc->sc_c.sc_ramt,
216 sc->sc_c.sc_ramh,
217 Ent_led_on2, siop_led_on,
218 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
219 bus_space_write_region_4(sc->sc_c.sc_ramt,
220 sc->sc_c.sc_ramh,
221 Ent_led_off, siop_led_off,
222 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
223 }
224 } else {
225 for (j = 0;
226 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
227 sc->sc_c.sc_script[j] = htole32(siop_script[j]);
228 }
229 for (j = 0; j <
230 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
231 j++) {
232 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
233 htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
234 }
235 if (sc->sc_c.features & SF_CHIP_LED0) {
236 for (j = 0; j < (sizeof(siop_led_on) /
237 sizeof(siop_led_on[0])); j++)
238 sc->sc_c.sc_script[
239 Ent_led_on1 / sizeof(siop_led_on[0]) + j
240 ] = htole32(siop_led_on[j]);
241 for (j = 0; j < (sizeof(siop_led_on) /
242 sizeof(siop_led_on[0])); j++)
243 sc->sc_c.sc_script[
244 Ent_led_on2 / sizeof(siop_led_on[0]) + j
245 ] = htole32(siop_led_on[j]);
246 for (j = 0; j < (sizeof(siop_led_off) /
247 sizeof(siop_led_off[0])); j++)
248 sc->sc_c.sc_script[
249 Ent_led_off / sizeof(siop_led_off[0]) + j
250 ] = htole32(siop_led_off[j]);
251 }
252 }
253 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
254 sc->script_free_hi = sc->sc_c.ram_size / 4;
255
256 /* free used and unused lun switches */
257 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
258 #ifdef SIOP_DEBUG
259 printf("%s: free lunsw at offset %d\n",
260 sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
261 #endif
262 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
263 free(lunsw, M_DEVBUF);
264 }
265 TAILQ_INIT(&sc->lunsw_list);
266 /* restore reselect switch */
267 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
268 struct siop_target *target;
269 if (sc->sc_c.targets[i] == NULL)
270 continue;
271 #ifdef SIOP_DEBUG
272 printf("%s: restore sw for target %d\n",
273 sc->sc_c.sc_dev.dv_xname, i);
274 #endif
275 target = (struct siop_target *)sc->sc_c.targets[i];
276 free(target->lunsw, M_DEVBUF);
277 target->lunsw = siop_get_lunsw(sc);
278 if (target->lunsw == NULL) {
279 printf("%s: can't alloc lunsw for target %d\n",
280 sc->sc_c.sc_dev.dv_xname, i);
281 break;
282 }
283 siop_add_reselsw(sc, i);
284 }
285
286 /* start script */
287 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
288 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
289 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
290 }
291 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
292 sc->sc_c.sc_scriptaddr + Ent_reselect);
293 }
294
295 #if 0
296 #define CALL_SCRIPT(ent) do {\
297 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
298 siop_cmd->cmd_c.dsa, \
299 sc->sc_c.sc_scriptaddr + ent); \
300 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
301 } while (0)
302 #else
303 #define CALL_SCRIPT(ent) do {\
304 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
305 } while (0)
306 #endif
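/*
 * CALL_SCRIPT(ent) writes the bus address of script entry 'ent' into the
 * DSP register, which makes the on-chip SCRIPTS processor start (or resume)
 * execution at that label.
 */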
307
308 int
309 siop_intr(v)
310 void *v;
311 {
312 struct siop_softc *sc = v;
313 struct siop_target *siop_target;
314 struct siop_cmd *siop_cmd;
315 struct siop_lun *siop_lun;
316 struct scsipi_xfer *xs;
317 int istat, sist, sstat1, dstat;
318 u_int32_t irqcode;
319 int need_reset = 0;
320 int offset, target, lun, tag;
321 bus_addr_t dsa;
322 struct siop_cbd *cbdp;
323 int freetarget = 0;
324 int restart = 0;
325
326 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
327 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
328 return 0;
329 INCSTAT(siop_stat_intr);
330 if (istat & ISTAT_INTF) {
331 printf("INTRF\n");
332 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
333 SIOP_ISTAT, ISTAT_INTF);
334 }
335 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
336 (ISTAT_DIP | ISTAT_ABRT)) {
337 /* clear abort */
338 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
339 SIOP_ISTAT, 0);
340 }
341 /* use DSA to find the current siop_cmd */
342 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
343 for (cbdp = TAILQ_FIRST(&sc->cmds); cbdp != NULL;
344 cbdp = TAILQ_NEXT(cbdp, next)) {
345 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
346 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
347 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
348 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
349 siop_table_sync(siop_cmd,
350 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
351 break;
352 }
353 }
354 if (cbdp == NULL) {
355 siop_cmd = NULL;
356 }
357 if (siop_cmd) {
358 xs = siop_cmd->cmd_c.xs;
359 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
360 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
361 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
362 tag = siop_cmd->cmd_c.tag;
363 siop_lun = siop_target->siop_lun[lun];
364 #ifdef DIAGNOSTIC
365 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
366 printf("siop_cmd (lun %d) for DSA 0x%x "
367 "not active (%d)\n", lun, (u_int)dsa,
368 siop_cmd->cmd_c.status);
369 xs = NULL;
370 siop_target = NULL;
371 target = -1;
372 lun = -1;
373 tag = -1;
374 siop_lun = NULL;
375 siop_cmd = NULL;
376 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
377 printf("siop_cmd (lun %d tag %d) not in siop_lun "
378 "active (%p != %p)\n", lun, tag, siop_cmd,
379 siop_lun->siop_tag[tag].active);
380 }
381 #endif
382 } else {
383 xs = NULL;
384 siop_target = NULL;
385 target = -1;
386 lun = -1;
387 tag = -1;
388 siop_lun = NULL;
389 }
390 if (istat & ISTAT_DIP) {
391 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
392 SIOP_DSTAT);
393 if (dstat & DSTAT_ABRT) {
394 /* was probably generated by a bus reset IOCTL */
395 if ((dstat & DSTAT_DFE) == 0)
396 siop_clearfifo(&sc->sc_c);
397 goto reset;
398 }
399 if (dstat & DSTAT_SSI) {
400 printf("single step dsp 0x%08x dsa 0x%08x\n",
401 (int)(bus_space_read_4(sc->sc_c.sc_rt,
402 sc->sc_c.sc_rh, SIOP_DSP) -
403 sc->sc_c.sc_scriptaddr),
404 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
405 SIOP_DSA));
406 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
407 (istat & ISTAT_SIP) == 0) {
408 bus_space_write_1(sc->sc_c.sc_rt,
409 sc->sc_c.sc_rh, SIOP_DCNTL,
410 bus_space_read_1(sc->sc_c.sc_rt,
411 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
412 }
413 return 1;
414 }
415
416 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
417 printf("DMA IRQ:");
418 if (dstat & DSTAT_IID)
419 printf(" Illegal instruction");
420 if (dstat & DSTAT_BF)
421 printf(" bus fault");
422 if (dstat & DSTAT_MDPE)
423 printf(" parity");
424 if (dstat & DSTAT_DFE)
425 printf(" dma fifo empty");
426 else
427 siop_clearfifo(&sc->sc_c);
428 printf(", DSP=0x%x DSA=0x%x: ",
429 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
430 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
431 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
432 if (siop_cmd)
433 printf("last msg_in=0x%x status=0x%x\n",
434 siop_cmd->cmd_tables->msg_in[0],
435 le32toh(siop_cmd->cmd_tables->status));
436 else
437 printf("%s: current DSA invalid\n",
438 sc->sc_c.sc_dev.dv_xname);
439 need_reset = 1;
440 }
441 }
442 if (istat & ISTAT_SIP) {
443 if (istat & ISTAT_DIP)
444 delay(10);
445 /*
446 * Can't read sist0 & sist1 independently, or we would have
447 * to insert a delay between the two reads.
448 */
449 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
450 SIOP_SIST0);
451 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
452 SIOP_SSTAT1);
453 #ifdef SIOP_DEBUG_INTR
454 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
455 "DSA=0x%x DSP=0x%lx\n", sist,
456 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
457 SIOP_SSTAT1),
458 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
459 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
460 SIOP_DSP) -
461 sc->sc_c.sc_scriptaddr));
462 #endif
463 if (sist & SIST0_RST) {
464 siop_handle_reset(sc);
465 /* no table to flush here */
466 return 1;
467 }
468 if (sist & SIST0_SGE) {
469 if (siop_cmd)
470 scsipi_printaddr(xs->xs_periph);
471 else
472 printf("%s:", sc->sc_c.sc_dev.dv_xname);
473 printf("scsi gross error\n");
474 goto reset;
475 }
476 if ((sist & SIST0_MA) && need_reset == 0) {
477 if (siop_cmd) {
478 int scratcha0;
479 dstat = bus_space_read_1(sc->sc_c.sc_rt,
480 sc->sc_c.sc_rh, SIOP_DSTAT);
481 /*
482 * first restore DSA, in case we were in a S/G
483 * operation.
484 */
485 bus_space_write_4(sc->sc_c.sc_rt,
486 sc->sc_c.sc_rh,
487 SIOP_DSA, siop_cmd->cmd_c.dsa);
488 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
489 sc->sc_c.sc_rh, SIOP_SCRATCHA);
490 switch (sstat1 & SSTAT1_PHASE_MASK) {
491 case SSTAT1_PHASE_STATUS:
492 /*
493 * previous phase may be aborted for any reason
494 * (for example, the target has less data to
495 * transfer than requested). Just go to status
496 * and the command should terminate.
497 */
498 INCSTAT(siop_stat_intr_shortxfer);
499 if ((dstat & DSTAT_DFE) == 0)
500 siop_clearfifo(&sc->sc_c);
501 /* no table to flush here */
502 CALL_SCRIPT(Ent_status);
503 return 1;
504 case SSTAT1_PHASE_MSGIN:
505 /*
506 * the target may be ready to disconnect.
507 * Save data pointers just in case.
508 */
509 INCSTAT(siop_stat_intr_xferdisc);
510 if (scratcha0 & A_flag_data)
511 siop_sdp(&siop_cmd->cmd_c);
512 else if ((dstat & DSTAT_DFE) == 0)
513 siop_clearfifo(&sc->sc_c);
514 bus_space_write_1(sc->sc_c.sc_rt,
515 sc->sc_c.sc_rh, SIOP_SCRATCHA,
516 scratcha0 & ~A_flag_data);
517 siop_table_sync(siop_cmd,
518 BUS_DMASYNC_PREREAD |
519 BUS_DMASYNC_PREWRITE);
520 CALL_SCRIPT(Ent_msgin);
521 return 1;
522 }
523 printf("%s: unexpected phase mismatch %d\n",
524 sc->sc_c.sc_dev.dv_xname,
525 sstat1 & SSTAT1_PHASE_MASK);
526 } else {
527 printf("%s: phase mismatch without command\n",
528 sc->sc_c.sc_dev.dv_xname);
529 }
530 need_reset = 1;
531 }
532 if (sist & SIST0_PAR) {
533 /* parity error, reset */
534 if (siop_cmd)
535 scsipi_printaddr(xs->xs_periph);
536 else
537 printf("%s:", sc->sc_c.sc_dev.dv_xname);
538 printf("parity error\n");
539 goto reset;
540 }
541 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
542 /* selection time out, assume there's no device here */
543 if (siop_cmd) {
544 siop_cmd->cmd_c.status = CMDST_DONE;
545 xs->error = XS_SELTIMEOUT;
546 freetarget = 1;
547 goto end;
548 } else {
549 printf("%s: selection timeout without "
550 "command\n", sc->sc_c.sc_dev.dv_xname);
551 need_reset = 1;
552 }
553 }
554 if (sist & SIST0_UDC) {
555 /*
556 * unexpected disconnect. Usually the target signals
557 * a fatal condition this way. Attempt to get sense.
558 */
559 if (siop_cmd) {
560 siop_cmd->cmd_tables->status =
561 htole32(SCSI_CHECK);
562 goto end;
563 }
564 printf("%s: unexpected disconnect without "
565 "command\n", sc->sc_c.sc_dev.dv_xname);
566 goto reset;
567 }
568 if (sist & (SIST1_SBMC << 8)) {
569 /* SCSI bus mode change */
570 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
571 goto reset;
572 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
573 /*
574 * we have a script interrupt, it will
575 * restart the script.
576 */
577 goto scintr;
578 }
579 /*
580 * else we have to restart it ourselves, at the
581 * interrupted instruction.
582 */
583 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
584 SIOP_DSP,
585 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586 SIOP_DSP) - 8);
587 return 1;
588 }
589 /* Else it's an unhandled exception (for now). */
590 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
591 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
592 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
593 SIOP_SSTAT1),
594 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
595 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
596 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
597 if (siop_cmd) {
598 siop_cmd->cmd_c.status = CMDST_DONE;
599 xs->error = XS_SELTIMEOUT;
600 goto end;
601 }
602 need_reset = 1;
603 }
604 if (need_reset) {
605 reset:
606 /* fatal error, reset the bus */
607 siop_resetbus(&sc->sc_c);
608 /* no table to flush here */
609 return 1;
610 }
611
612 scintr:
613 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
614 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
615 SIOP_DSPS);
616 #ifdef SIOP_DEBUG_INTR
617 printf("script interrupt 0x%x\n", irqcode);
618 #endif
619 /*
620 * no command, or an inactive command, is only valid for
621 * a reselect interrupt.
622 */
623 if ((irqcode & 0x80) == 0) {
624 if (siop_cmd == NULL) {
625 printf(
626 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
627 sc->sc_c.sc_dev.dv_xname, irqcode);
628 goto reset;
629 }
630 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
631 printf("%s: command with invalid status "
632 "(IRQ code 0x%x current status %d) !\n",
633 sc->sc_c.sc_dev.dv_xname,
634 irqcode, siop_cmd->cmd_c.status);
635 xs = NULL;
636 }
637 }
638 switch(irqcode) {
639 case A_int_err:
640 printf("error, DSP=0x%x\n",
641 (int)(bus_space_read_4(sc->sc_c.sc_rt,
642 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
643 if (xs) {
644 xs->error = XS_SELTIMEOUT;
645 goto end;
646 } else {
647 goto reset;
648 }
649 case A_int_reseltarg:
650 printf("%s: reselect with invalid target\n",
651 sc->sc_c.sc_dev.dv_xname);
652 goto reset;
653 case A_int_resellun:
654 INCSTAT(siop_stat_intr_lunresel);
655 target = bus_space_read_1(sc->sc_c.sc_rt,
656 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
657 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
658 SIOP_SCRATCHA + 1);
659 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
660 SIOP_SCRATCHA + 2);
661 siop_target =
662 (struct siop_target *)sc->sc_c.targets[target];
663 if (siop_target == NULL) {
664 printf("%s: reselect with invalid target %d\n",
665 sc->sc_c.sc_dev.dv_xname, target);
666 goto reset;
667 }
668 siop_lun = siop_target->siop_lun[lun];
669 if (siop_lun == NULL) {
670 printf("%s: target %d reselect with invalid "
671 "lun %d\n", sc->sc_c.sc_dev.dv_xname,
672 target, lun);
673 goto reset;
674 }
675 if (siop_lun->siop_tag[tag].active == NULL) {
676 printf("%s: target %d lun %d tag %d reselect "
677 "without command\n",
678 sc->sc_c.sc_dev.dv_xname,
679 target, lun, tag);
680 goto reset;
681 }
682 siop_cmd = siop_lun->siop_tag[tag].active;
683 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
684 SIOP_DSP, siop_cmd->cmd_c.dsa +
685 sizeof(struct siop_common_xfer) +
686 Ent_ldsa_reload_dsa);
687 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
688 return 1;
689 case A_int_reseltag:
690 printf("%s: reselect with invalid tag\n",
691 sc->sc_c.sc_dev.dv_xname);
692 goto reset;
693 case A_int_msgin:
694 {
695 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
696 sc->sc_c.sc_rh, SIOP_SFBR);
697 if (msgin == MSG_MESSAGE_REJECT) {
698 int msg, extmsg;
699 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
700 /*
701 * message was part of an identify +
702 * something else. Identify shouldn't
703 * have been rejected.
704 */
705 msg =
706 siop_cmd->cmd_tables->msg_out[1];
707 extmsg =
708 siop_cmd->cmd_tables->msg_out[3];
709 } else {
710 msg = siop_cmd->cmd_tables->msg_out[0];
711 extmsg =
712 siop_cmd->cmd_tables->msg_out[2];
713 }
714 if (msg == MSG_MESSAGE_REJECT) {
715 /* MSG_REJECT for a MSG_REJECT! */
716 if (xs)
717 scsipi_printaddr(xs->xs_periph);
718 else
719 printf("%s: ",
720 sc->sc_c.sc_dev.dv_xname);
721 printf("our reject message was "
722 "rejected\n");
723 goto reset;
724 }
725 if (msg == MSG_EXTENDED &&
726 extmsg == MSG_EXT_WDTR) {
727 /* WDTR rejected, initiate sync */
728 if ((siop_target->target_c.flags &
729 TARF_SYNC) == 0) {
730 siop_target->target_c.status =
731 TARST_OK;
732 siop_update_xfer_mode(&sc->sc_c,
733 target);
734 /* no table to flush here */
735 CALL_SCRIPT(Ent_msgin_ack);
736 return 1;
737 }
738 siop_target->target_c.status =
739 TARST_SYNC_NEG;
740 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
741 sc->sc_c.st_minsync,
742 sc->sc_c.maxoff);
743 siop_table_sync(siop_cmd,
744 BUS_DMASYNC_PREREAD |
745 BUS_DMASYNC_PREWRITE);
746 CALL_SCRIPT(Ent_send_msgout);
747 return 1;
748 } else if (msg == MSG_EXTENDED &&
749 extmsg == MSG_EXT_SDTR) {
750 /* sync rejected */
751 siop_target->target_c.offset = 0;
752 siop_target->target_c.period = 0;
753 siop_target->target_c.status = TARST_OK;
754 siop_update_xfer_mode(&sc->sc_c,
755 target);
756 /* no table to flush here */
757 CALL_SCRIPT(Ent_msgin_ack);
758 return 1;
759 } else if (msg == MSG_SIMPLE_Q_TAG ||
760 msg == MSG_HEAD_OF_Q_TAG ||
761 msg == MSG_ORDERED_Q_TAG) {
762 if (siop_handle_qtag_reject(
763 siop_cmd) == -1)
764 goto reset;
765 CALL_SCRIPT(Ent_msgin_ack);
766 return 1;
767 }
768 if (xs)
769 scsipi_printaddr(xs->xs_periph);
770 else
771 printf("%s: ",
772 sc->sc_c.sc_dev.dv_xname);
773 if (msg == MSG_EXTENDED) {
774 printf("scsi message reject, extended "
775 "message sent was 0x%x\n", extmsg);
776 } else {
777 printf("scsi message reject, message "
778 "sent was 0x%x\n", msg);
779 }
780 /* no table to flush here */
781 CALL_SCRIPT(Ent_msgin_ack);
782 return 1;
783 }
784 if (xs)
785 scsipi_printaddr(xs->xs_periph);
786 else
787 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
788 printf("unhandled message 0x%x\n",
789 siop_cmd->cmd_tables->msg_in[0]);
790 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
791 siop_cmd->cmd_tables->t_msgout.count= htole32(1);
792 siop_table_sync(siop_cmd,
793 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
794 CALL_SCRIPT(Ent_send_msgout);
795 return 1;
796 }
797 case A_int_extmsgin:
798 #ifdef SIOP_DEBUG_INTR
799 printf("extended message: msg 0x%x len %d\n",
800 siop_cmd->cmd_tables->msg_in[2],
801 siop_cmd->cmd_tables->msg_in[1]);
802 #endif
803 if (siop_cmd->cmd_tables->msg_in[1] >
804 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
805 printf("%s: extended message too big (%d)\n",
806 sc->sc_c.sc_dev.dv_xname,
807 siop_cmd->cmd_tables->msg_in[1]);
808 siop_cmd->cmd_tables->t_extmsgdata.count =
809 htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
810 siop_table_sync(siop_cmd,
811 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
812 CALL_SCRIPT(Ent_get_extmsgdata);
813 return 1;
814 case A_int_extmsgdata:
815 #ifdef SIOP_DEBUG_INTR
816 {
817 int i;
818 printf("extended message: 0x%x, data:",
819 siop_cmd->cmd_tables->msg_in[2]);
820 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
821 i++)
822 printf(" 0x%x",
823 siop_cmd->cmd_tables->msg_in[i]);
824 printf("\n");
825 }
826 #endif
827 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
828 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
829 case SIOP_NEG_MSGOUT:
830 siop_update_scntl3(sc,
831 siop_cmd->cmd_c.siop_target);
832 siop_table_sync(siop_cmd,
833 BUS_DMASYNC_PREREAD |
834 BUS_DMASYNC_PREWRITE);
835 CALL_SCRIPT(Ent_send_msgout);
836 return(1);
837 case SIOP_NEG_ACK:
838 siop_update_scntl3(sc,
839 siop_cmd->cmd_c.siop_target);
840 CALL_SCRIPT(Ent_msgin_ack);
841 return(1);
842 default:
843 panic("invalid retval from "
844 "siop_wdtr_neg()");
845 }
846 return(1);
847 }
848 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
849 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
850 case SIOP_NEG_MSGOUT:
851 siop_update_scntl3(sc,
852 siop_cmd->cmd_c.siop_target);
853 siop_table_sync(siop_cmd,
854 BUS_DMASYNC_PREREAD |
855 BUS_DMASYNC_PREWRITE);
856 CALL_SCRIPT(Ent_send_msgout);
857 return(1);
858 case SIOP_NEG_ACK:
859 siop_update_scntl3(sc,
860 siop_cmd->cmd_c.siop_target);
861 CALL_SCRIPT(Ent_msgin_ack);
862 return(1);
863 default:
864 panic("invalid retval from "
865 "siop_sdtr_neg()");
866 }
867 return(1);
868 }
869 /* send a message reject */
870 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
871 siop_cmd->cmd_tables->t_msgout.count = htole32(1);
872 siop_table_sync(siop_cmd,
873 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
874 CALL_SCRIPT(Ent_send_msgout);
875 return 1;
876 case A_int_disc:
877 INCSTAT(siop_stat_intr_sdp);
878 offset = bus_space_read_1(sc->sc_c.sc_rt,
879 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
880 #ifdef SIOP_DEBUG_DR
881 printf("disconnect offset %d\n", offset);
882 #endif
883 if (offset > SIOP_NSG) {
884 printf("%s: bad offset for disconnect (%d)\n",
885 sc->sc_c.sc_dev.dv_xname, offset);
886 goto reset;
887 }
888 /*
889 * offset == SIOP_NSG may be a valid condition if
890 * we get a save data pointer (sdp) when the xfer is done.
891 * Don't call memmove in this case.
892 */
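	/*
	 * For example, if the script consumed 2 S/G entries before the
	 * disconnect, entries 2..SIOP_NSG-1 are shifted down to the start
	 * of the table, so the data phase can resume at data[0] after
	 * reselection.
	 */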
893 if (offset < SIOP_NSG) {
894 memmove(&siop_cmd->cmd_tables->data[0],
895 &siop_cmd->cmd_tables->data[offset],
896 (SIOP_NSG - offset) * sizeof(scr_table_t));
897 siop_table_sync(siop_cmd,
898 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
899 }
900 CALL_SCRIPT(Ent_script_sched);
901 return 1;
902 case A_int_resfail:
903 printf("reselect failed\n");
904 CALL_SCRIPT(Ent_script_sched);
905 return 1;
906 case A_int_done:
907 if (xs == NULL) {
908 printf("%s: done without command, DSA=0x%lx\n",
909 sc->sc_c.sc_dev.dv_xname,
910 (u_long)siop_cmd->cmd_c.dsa);
911 siop_cmd->cmd_c.status = CMDST_FREE;
912 CALL_SCRIPT(Ent_script_sched);
913 return 1;
914 }
915 #ifdef SIOP_DEBUG_INTR
916 printf("done, DSA=0x%lx target id 0x%x last msg "
917 "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
918 le32toh(siop_cmd->cmd_tables->id),
919 siop_cmd->cmd_tables->msg_in[0],
920 le32toh(siop_cmd->cmd_tables->status));
921 #endif
922 INCSTAT(siop_stat_intr_done);
923 siop_cmd->cmd_c.status = CMDST_DONE;
924 goto end;
925 default:
926 printf("unknown irqcode %x\n", irqcode);
927 if (xs) {
928 xs->error = XS_SELTIMEOUT;
929 goto end;
930 }
931 goto reset;
932 }
933 return 1;
934 }
935 /* We just shouldn't get here */
936 panic("siop_intr: I shouldn't be here!");
937 return 1;
938 end:
939 /*
940 * restart the script now if the command completed properly.
941 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
942 * the queue.
943 */
944 xs->status = le32toh(siop_cmd->cmd_tables->status);
945 if (xs->status == SCSI_OK)
946 CALL_SCRIPT(Ent_script_sched);
947 else
948 restart = 1;
949 siop_lun->siop_tag[tag].active = NULL;
950 siop_scsicmd_end(siop_cmd);
951 if (freetarget && siop_target->target_c.status == TARST_PROBING)
952 siop_del_dev(sc, target, lun);
953 if (restart)
954 CALL_SCRIPT(Ent_script_sched);
955 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
956 /* a command terminated, so we have free slots now */
957 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
958 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
959 }
960
961 return 1;
962 }
963
964 void
965 siop_scsicmd_end(siop_cmd)
966 struct siop_cmd *siop_cmd;
967 {
968 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
969 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
970
971 switch(xs->status) {
972 case SCSI_OK:
973 xs->error = XS_NOERROR;
974 break;
975 case SCSI_BUSY:
976 xs->error = XS_BUSY;
977 break;
978 case SCSI_CHECK:
979 xs->error = XS_BUSY;
980 /* remove commands in the queue and scheduler */
981 siop_unqueue(sc, xs->xs_periph->periph_target,
982 xs->xs_periph->periph_lun);
983 break;
984 case SCSI_QUEUE_FULL:
985 INCSTAT(siop_stat_intr_qfull);
986 #ifdef SIOP_DEBUG
987 printf("%s:%d:%d: queue full (tag %d)\n",
988 sc->sc_c.sc_dev.dv_xname,
989 xs->xs_periph->periph_target,
990 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
991 #endif
992 xs->error = XS_BUSY;
993 break;
994 case SCSI_SIOP_NOCHECK:
995 /*
996 * don't check status, xs->error is already valid
997 */
998 break;
999 case SCSI_SIOP_NOSTATUS:
1000 /*
1001 * the status byte was not updated, cmd was
1002 * aborted
1003 */
1004 xs->error = XS_SELTIMEOUT;
1005 break;
1006 default:
1007 scsipi_printaddr(xs->xs_periph);
1008 printf("invalid status code %d\n", xs->status);
1009 xs->error = XS_DRIVER_STUFFUP;
1010 }
1011 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1012 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
1013 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1014 (xs->xs_control & XS_CTL_DATA_IN) ?
1015 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1016 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1017 }
1018 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1019 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1020 siop_cmd->cmd_c.status = CMDST_FREE;
1021 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1022 xs->resid = 0;
1023 scsipi_done (xs);
1024 }
1025
1026 void
1027 siop_unqueue(sc, target, lun)
1028 struct siop_softc *sc;
1029 int target;
1030 int lun;
1031 {
1032 int slot, tag;
1033 struct siop_cmd *siop_cmd;
1034 struct siop_lun *siop_lun =
1035 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1036
1037 /* first make sure to read valid data */
1038 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1039
1040 for (tag = 1; tag < SIOP_NTAG; tag++) {
1041 /* look for commands in the scheduler, not yet started */
1042 if (siop_lun->siop_tag[tag].active == NULL)
1043 continue;
1044 siop_cmd = siop_lun->siop_tag[tag].active;
1045 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1046 if (siop_script_read(sc,
1047 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1048 siop_cmd->cmd_c.dsa +
1049 sizeof(struct siop_common_xfer) +
1050 Ent_ldsa_select)
1051 break;
1052 }
1053 if (slot > sc->sc_currschedslot)
1054 continue; /* didn't find it */
1055 if (siop_script_read(sc,
1056 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1057 continue; /* already started */
1058 /* clear the slot */
1059 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1060 0x80000000);
1061 /* ask to requeue */
1062 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1063 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1064 siop_lun->siop_tag[tag].active = NULL;
1065 siop_scsicmd_end(siop_cmd);
1066 }
1067 /* update sc_currschedslot */
1068 sc->sc_currschedslot = 0;
1069 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1070 if (siop_script_read(sc,
1071 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1072 sc->sc_currschedslot = slot;
1073 }
1074 }
1075
1076 /*
1077 * handle a rejected queue tag message: the command will run untagged,
1078 * so we have to adjust the reselect script.
1079 */
1080 int
1081 siop_handle_qtag_reject(siop_cmd)
1082 struct siop_cmd *siop_cmd;
1083 {
1084 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1085 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1086 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1087 int tag = siop_cmd->cmd_tables->msg_out[2];
1088 struct siop_lun *siop_lun =
1089 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1090
1091 #ifdef SIOP_DEBUG
1092 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1093 sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
1094 siop_cmd->cmd_c.status);
1095 #endif
1096
1097 if (siop_lun->siop_tag[0].active != NULL) {
1098 printf("%s: untagged command already running for target %d "
1099 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1100 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1101 return -1;
1102 }
1103 /* clear tag slot */
1104 siop_lun->siop_tag[tag].active = NULL;
1105 /* add command to non-tagged slot */
1106 siop_lun->siop_tag[0].active = siop_cmd;
1107 siop_cmd->cmd_c.tag = 0;
1108 /* adjust reselect script if there is one */
1109 if (siop_lun->siop_tag[0].reseloff > 0) {
1110 siop_script_write(sc,
1111 siop_lun->siop_tag[0].reseloff + 1,
1112 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1113 Ent_ldsa_reload_dsa);
1114 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1115 }
1116 return 0;
1117 }
1118
1119 /*
1120 * handle a bus reset: reset the chip, unqueue all active commands,
1121 * free all target structs and report the loss to the upper layer.
1122 * As the upper layer may requeue immediately, we have to first store
1123 * all active commands in a temporary queue.
1124 */
1125 void
1126 siop_handle_reset(sc)
1127 struct siop_softc *sc;
1128 {
1129 struct siop_cmd *siop_cmd;
1130 struct siop_lun *siop_lun;
1131 int target, lun, tag;
1132 /*
1133 * scsi bus reset. reset the chip and restart
1134 * the queue. Need to clean up all active commands
1135 */
1136 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1137 /* stop, reset and restart the chip */
1138 siop_reset(sc);
1139 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1140 /* chip has been reset, all slots are free now */
1141 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1142 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1143 }
1144 /*
1145 * Process all commands: first the commands being executed
1146 */
1147 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1148 target++) {
1149 if (sc->sc_c.targets[target] == NULL)
1150 continue;
1151 for (lun = 0; lun < 8; lun++) {
1152 struct siop_target *siop_target =
1153 (struct siop_target *)sc->sc_c.targets[target];
1154 siop_lun = siop_target->siop_lun[lun];
1155 if (siop_lun == NULL)
1156 continue;
1157 for (tag = 0; tag <
1158 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1159 SIOP_NTAG : 1);
1160 tag++) {
1161 siop_cmd = siop_lun->siop_tag[tag].active;
1162 if (siop_cmd == NULL)
1163 continue;
1164 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1165 printf("command with tag id %d reset\n", tag);
1166 siop_cmd->cmd_c.xs->error =
1167 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1168 XS_TIMEOUT : XS_RESET;
1169 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1170 siop_lun->siop_tag[tag].active = NULL;
1171 siop_cmd->cmd_c.status = CMDST_DONE;
1172 siop_scsicmd_end(siop_cmd);
1173 }
1174 }
1175 sc->sc_c.targets[target]->status = TARST_ASYNC;
1176 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1177 sc->sc_c.targets[target]->period =
1178 sc->sc_c.targets[target]->offset = 0;
1179 siop_update_xfer_mode(&sc->sc_c, target);
1180 }
1181
1182 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1183 }
1184
1185 void
1186 siop_scsipi_request(chan, req, arg)
1187 struct scsipi_channel *chan;
1188 scsipi_adapter_req_t req;
1189 void *arg;
1190 {
1191 struct scsipi_xfer *xs;
1192 struct scsipi_periph *periph;
1193 struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1194 struct siop_cmd *siop_cmd;
1195 struct siop_target *siop_target;
1196 int s, error, i;
1197 int target;
1198 int lun;
1199
1200 switch (req) {
1201 case ADAPTER_REQ_RUN_XFER:
1202 xs = arg;
1203 periph = xs->xs_periph;
1204 target = periph->periph_target;
1205 lun = periph->periph_lun;
1206
1207 s = splbio();
1208 #ifdef SIOP_DEBUG_SCHED
1209 printf("starting cmd for %d:%d\n", target, lun);
1210 #endif
1211 siop_cmd = TAILQ_FIRST(&sc->free_list);
1212 if (siop_cmd == NULL) {
1213 xs->error = XS_RESOURCE_SHORTAGE;
1214 scsipi_done(xs);
1215 splx(s);
1216 return;
1217 }
1218 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1219 #ifdef DIAGNOSTIC
1220 if (siop_cmd->cmd_c.status != CMDST_FREE)
1221 panic("siop_scsicmd: new cmd not free");
1222 #endif
1223 siop_target = (struct siop_target*)sc->sc_c.targets[target];
1224 if (siop_target == NULL) {
1225 #ifdef SIOP_DEBUG
1226 printf("%s: alloc siop_target for target %d\n",
1227 sc->sc_c.sc_dev.dv_xname, target);
1228 #endif
1229 sc->sc_c.targets[target] =
1230 malloc(sizeof(struct siop_target),
1231 M_DEVBUF, M_NOWAIT);
1232 if (sc->sc_c.targets[target] == NULL) {
1233 printf("%s: can't malloc memory for "
1234 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1235 target);
1236 xs->error = XS_RESOURCE_SHORTAGE;
1237 scsipi_done(xs);
1238 splx(s);
1239 return;
1240 }
1241 siop_target =
1242 (struct siop_target*)sc->sc_c.targets[target];
1243 siop_target->target_c.status = TARST_PROBING;
1244 siop_target->target_c.flags = 0;
1245 siop_target->target_c.id =
1246 sc->sc_c.clock_div << 24; /* scntl3 */
1247 siop_target->target_c.id |= target << 16; /* id */
1248 /* siop_target->target_c.id |= 0x0 << 8; sxfer is 0 */
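			/*
			 * target_c.id packs the per-target register values:
			 * byte 3 is SCNTL3 (clock divider), byte 2 the SCSI
			 * ID, byte 1 SXFER; siop_update_scntl3() extracts
			 * bytes 3 and 1 when patching the lun switch.
			 */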
1249
1250 /* get a lun switch script */
1251 siop_target->lunsw = siop_get_lunsw(sc);
1252 if (siop_target->lunsw == NULL) {
1253 printf("%s: can't alloc lunsw for target %d\n",
1254 sc->sc_c.sc_dev.dv_xname, target);
1255 xs->error = XS_RESOURCE_SHORTAGE;
1256 scsipi_done(xs);
1257 splx(s);
1258 return;
1259 }
1260 for (i=0; i < 8; i++)
1261 siop_target->siop_lun[i] = NULL;
1262 siop_add_reselsw(sc, target);
1263 }
1264 if (siop_target->siop_lun[lun] == NULL) {
1265 siop_target->siop_lun[lun] =
1266 malloc(sizeof(struct siop_lun), M_DEVBUF,
1267 M_NOWAIT|M_ZERO);
1268 if (siop_target->siop_lun[lun] == NULL) {
1269 printf("%s: can't alloc siop_lun for "
1270 "target %d lun %d\n",
1271 sc->sc_c.sc_dev.dv_xname, target, lun);
1272 xs->error = XS_RESOURCE_SHORTAGE;
1273 scsipi_done(xs);
1274 splx(s);
1275 return;
1276 }
1277 }
1278 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1279 siop_cmd->cmd_c.xs = xs;
1280 siop_cmd->cmd_c.flags = 0;
1281 siop_cmd->cmd_c.status = CMDST_READY;
1282
1283 /* load the DMA maps */
1284 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1285 siop_cmd->cmd_c.dmamap_cmd,
1286 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1287 if (error) {
1288 printf("%s: unable to load cmd DMA map: %d\n",
1289 sc->sc_c.sc_dev.dv_xname, error);
1290 xs->error = XS_DRIVER_STUFFUP;
1291 scsipi_done(xs);
1292 splx(s);
1293 return;
1294 }
1295 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1296 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1297 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1298 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1299 ((xs->xs_control & XS_CTL_DATA_IN) ?
1300 BUS_DMA_READ : BUS_DMA_WRITE));
1301 if (error) {
1302 printf("%s: unable to load data DMA map: %d\n",
1303 sc->sc_c.sc_dev.dv_xname, error);
1304 xs->error = XS_DRIVER_STUFFUP;
1305 scsipi_done(xs);
1306 bus_dmamap_unload(sc->sc_c.sc_dmat,
1307 siop_cmd->cmd_c.dmamap_cmd);
1308 splx(s);
1309 return;
1310 }
1311 bus_dmamap_sync(sc->sc_c.sc_dmat,
1312 siop_cmd->cmd_c.dmamap_data, 0,
1313 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1314 (xs->xs_control & XS_CTL_DATA_IN) ?
1315 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1316 }
1317 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1318 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1319 BUS_DMASYNC_PREWRITE);
1320
1321 if (xs->xs_tag_type) {
1322 /* use tag_id + 1, tag 0 is reserved for untagged cmds */
1323 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1324 } else {
1325 siop_cmd->cmd_c.tag = 0;
1326 }
1327 siop_setuptables(&siop_cmd->cmd_c);
1328 siop_table_sync(siop_cmd,
1329 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1330 siop_start(sc, siop_cmd);
1331 if (xs->xs_control & XS_CTL_POLL) {
1332 /* poll for command completion */
1333 while ((xs->xs_status & XS_STS_DONE) == 0) {
1334 delay(1000);
1335 siop_intr(sc);
1336 }
1337 }
1338 splx(s);
1339 return;
1340
1341 case ADAPTER_REQ_GROW_RESOURCES:
1342 #ifdef SIOP_DEBUG
1343 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1344 sc->sc_c.sc_adapt.adapt_openings);
1345 #endif
1346 siop_morecbd(sc);
1347 return;
1348
1349 case ADAPTER_REQ_SET_XFER_MODE:
1350 {
1351 struct scsipi_xfer_mode *xm = arg;
1352 if (sc->sc_c.targets[xm->xm_target] == NULL)
1353 return;
1354 s = splbio();
1355 if (xm->xm_mode & PERIPH_CAP_TQING)
1356 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1357 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1358 (sc->sc_c.features & SF_BUS_WIDE))
1359 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1360 if (xm->xm_mode & PERIPH_CAP_SYNC)
1361 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1362 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1363 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1364 sc->sc_c.targets[xm->xm_target]->status =
1365 TARST_ASYNC;
1366
1367 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1368 if (scsipi_lookup_periph(chan,
1369 xm->xm_target, lun) != NULL) {
1370 /* allocate a lun sw entry for this device */
1371 siop_add_dev(sc, xm->xm_target, lun);
1372 }
1373 }
1374
1375 splx(s);
1376 }
1377 }
1378 }
1379
1380 static void
1381 siop_start(sc, siop_cmd)
1382 struct siop_softc *sc;
1383 struct siop_cmd *siop_cmd;
1384 {
1385 struct siop_lun *siop_lun;
1386 struct siop_xfer *siop_xfer;
1387 u_int32_t dsa;
1388 int timeout;
1389 int target, lun, slot;
1390
1391 /*
1392 * first make sure to read valid data
1393 */
1394 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1395
1396 /*
1397 * The queue management here is a bit tricky: the script always looks
1398 * at the slots from first to last, so if we always use the first
1399 * free slot, commands can stay at the tail of the queue ~forever.
1400 * The algorithm used here is to restart from the head when we know
1401 * that the queue is empty, and only add commands after the last one.
1402 * When we're at the end of the queue, wait for the script to clear it.
1403 * The best thing to do here would be to implement a circular queue,
1404 * but using only 53c720 features this can be "interesting".
1405 * A mid-way solution could be to implement 2 queues and swap orders.
1406 */
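	/*
	 * A slot is two script words: word 0 is the JUMP opcode (0x80000000
	 * while free - the jump is never taken - and 0x80080000 once a
	 * command is loaded), word 1 is the absolute address of the
	 * command's ldsa_select entry. The script clears word 0 back to
	 * 0x80000000 (via the MOVE MEMORY set up in siop_morecbd()) once
	 * the command has been selected.
	 */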
1407 slot = sc->sc_currschedslot;
1408 /*
1409 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1410 * free. As this is the last used slot, all previous slots are free
1411 * as well, so we can restart from 0.
1412 */
1413 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1414 0x80000000) {
1415 slot = sc->sc_currschedslot = 0;
1416 } else {
1417 slot++;
1418 }
1419 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1420 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1421 siop_lun =
1422 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1423 /* if non-tagged command active, panic: this shouldn't happen */
1424 if (siop_lun->siop_tag[0].active != NULL) {
1425 panic("siop_start: tagged cmd while untagged running");
1426 }
1427 #ifdef DIAGNOSTIC
1428 /* sanity check the tag if needed */
1429 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1430 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1431 panic("siop_start: tag not free");
1432 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1433 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1434 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1435 panic("siop_start: invalid tag id");
1436 }
1437 }
1438 #endif
1439 /*
1440 * find a free scheduler slot and load it.
1441 */
1442 for (; slot < SIOP_NSLOTS; slot++) {
1443 /*
1444 * If the cmd word is 0x80000000 the slot is free
1445 */
1446 if (siop_script_read(sc,
1447 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1448 0x80000000)
1449 break;
1450 }
1451 if (slot == SIOP_NSLOTS) {
1452 /*
1453 * no more free slots, no need to continue. Freeze the queue
1454 * and requeue this command.
1455 */
1456 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1457 sc->sc_flags |= SCF_CHAN_NOSLOT;
1458 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1459 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1460 siop_scsicmd_end(siop_cmd);
1461 return;
1462 }
1463 #ifdef SIOP_DEBUG_SCHED
1464 printf("using slot %d for DSA 0x%lx\n", slot,
1465 (u_long)siop_cmd->cmd_c.dsa);
1466 #endif
1467 /* mark command as active */
1468 if (siop_cmd->cmd_c.status == CMDST_READY)
1469 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1470 else
1471 panic("siop_start: bad status");
1472 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1473 /* patch scripts with DSA addr */
1474 dsa = siop_cmd->cmd_c.dsa;
1475 /* first reselect switch, if we have an entry */
1476 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1477 siop_script_write(sc,
1478 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1479 dsa + sizeof(struct siop_common_xfer) +
1480 Ent_ldsa_reload_dsa);
1481 /* CMD script: MOVE MEMORY addr */
1482 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1483 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1484 htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1485 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1486 /* scheduler slot: JUMP ldsa_select */
1487 siop_script_write(sc,
1488 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1489 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1490 /* handle timeout */
1491 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1492 /* start expire timer */
1493 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1494 if (timeout == 0)
1495 timeout = 1;
1496 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1497 timeout, siop_timeout, siop_cmd);
1498 }
1499 /*
1500 * Change JUMP cmd so that this slot will be handled
1501 */
1502 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1503 0x80080000);
1504 sc->sc_currschedslot = slot;
1505
1506 /* make sure SCRIPT processor will read valid data */
1507 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1508 /* Signal script it has some work to do */
1509 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1510 SIOP_ISTAT, ISTAT_SIGP);
1511 /* and wait for IRQ */
1512 return;
1513 }
1514
1515 void
1516 siop_timeout(v)
1517 void *v;
1518 {
1519 struct siop_cmd *siop_cmd = v;
1520 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1521 int s;
1522
1523 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1524 printf("command timeout\n");
1525
1526 s = splbio();
1527 /* reset the scsi bus */
1528 siop_resetbus(&sc->sc_c);
1529
1530 /* deactivate callout */
1531 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1533 /*
1534 * mark the command as being timed out and just return;
1535 * the bus reset will generate an interrupt,
1536 * it will be handled in siop_intr()
1537 */
1538 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1539 splx(s);
1540 return;
1541
1542 }
1543
1544 void
1545 siop_dump_script(sc)
1546 struct siop_softc *sc;
1547 {
1548 int i;
1549 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1550 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1551 le32toh(sc->sc_c.sc_script[i]),
1552 le32toh(sc->sc_c.sc_script[i+1]));
1553 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1554 0xc0000000) {
1555 i++;
1556 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1557 }
1558 printf("\n");
1559 }
1560 }
1561
1562 void
1563 siop_morecbd(sc)
1564 struct siop_softc *sc;
1565 {
1566 int error, i, j, s;
1567 bus_dma_segment_t seg;
1568 int rseg;
1569 struct siop_cbd *newcbd;
1570 struct siop_xfer *xfer;
1571 bus_addr_t dsa;
1572 u_int32_t *scr;
1573
1574 /* allocate a new list head */
1575 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1576 if (newcbd == NULL) {
1577 printf("%s: can't allocate memory for command descriptors "
1578 "head\n", sc->sc_c.sc_dev.dv_xname);
1579 return;
1580 }
1581
1582 /* allocate cmd list */
1583 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1584 M_DEVBUF, M_NOWAIT|M_ZERO);
1585 if (newcbd->cmds == NULL) {
1586 printf("%s: can't allocate memory for command descriptors\n",
1587 sc->sc_c.sc_dev.dv_xname);
1588 goto bad3;
1589 }
1590 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1591 1, &rseg, BUS_DMA_NOWAIT);
1592 if (error) {
1593 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1594 sc->sc_c.sc_dev.dv_xname, error);
1595 goto bad2;
1596 }
1597 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1598 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1599 if (error) {
1600 printf("%s: unable to map cbd DMA memory, error = %d\n",
1601 sc->sc_c.sc_dev.dv_xname, error);
1602 goto bad1;
1603 }
1604 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1605 BUS_DMA_NOWAIT, &newcbd->xferdma);
1606 if (error) {
1607 printf("%s: unable to create cbd DMA map, error = %d\n",
1608 sc->sc_c.sc_dev.dv_xname, error);
1609 goto bad1;
1610 }
1611 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1612 PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1613 if (error) {
1614 printf("%s: unable to load cbd DMA map, error = %d\n",
1615 sc->sc_c.sc_dev.dv_xname, error);
1616 goto bad0;
1617 }
1618 #ifdef DEBUG
1619 printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1620 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1621 #endif
1622 for (i = 0; i < SIOP_NCMDPB; i++) {
1623 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1624 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1625 &newcbd->cmds[i].cmd_c.dmamap_data);
1626 if (error) {
1627 printf("%s: unable to create data DMA map for cbd: "
1628 "error %d\n",
1629 sc->sc_c.sc_dev.dv_xname, error);
1630 goto bad0;
1631 }
1632 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1633 sizeof(struct scsipi_generic), 1,
1634 sizeof(struct scsipi_generic), 0,
1635 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1636 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1637 if (error) {
1638 printf("%s: unable to create cmd DMA map for cbd: error %d\n",
1639 sc->sc_c.sc_dev.dv_xname, error);
1640 goto bad0;
1641 }
1642 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1643 newcbd->cmds[i].siop_cbdp = newcbd;
1644 xfer = &newcbd->xfers[i];
1645 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1646 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1647 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1648 i * sizeof(struct siop_xfer);
1649 newcbd->cmds[i].cmd_c.dsa = dsa;
1650 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1651 xfer->siop_tables.t_msgout.count= htole32(1);
1652 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1653 xfer->siop_tables.t_msgin.count= htole32(1);
1654 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1655 offsetof(struct siop_common_xfer, msg_in));
1656 xfer->siop_tables.t_extmsgin.count= htole32(2);
1657 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1658 offsetof(struct siop_common_xfer, msg_in) + 1);
1659 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1660 offsetof(struct siop_common_xfer, msg_in) + 3);
1661 xfer->siop_tables.t_status.count= htole32(1);
1662 xfer->siop_tables.t_status.addr = htole32(dsa +
1663 offsetof(struct siop_common_xfer, status));
1664 /* The select/reselect script */
1665 scr = &xfer->resel[0];
1666 for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1667 scr[j] = htole32(load_dsa[j]);
1668 /*
1669 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1670 * octet, reg offset is the third.
1671 */
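		/*
		 * For example, with dsa == 0x12345678 the Ent_rdsa0 word
		 * becomes 0x78107800 (move 0x78 into register 0x10, the
		 * first DSA byte) and Ent_rdsa3 becomes 0x78131200.
		 */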
1672 scr[Ent_rdsa0 / 4] =
1673 htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
1674 scr[Ent_rdsa1 / 4] =
1675 htole32(0x78110000 | ( dsa & 0x0000ff00 ));
1676 scr[Ent_rdsa2 / 4] =
1677 htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
1678 scr[Ent_rdsa3 / 4] =
1679 htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
1680 scr[E_ldsa_abs_reselected_Used[0]] =
1681 htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
1682 scr[E_ldsa_abs_reselect_Used[0]] =
1683 htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
1684 scr[E_ldsa_abs_selected_Used[0]] =
1685 htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
1686 scr[E_ldsa_abs_data_Used[0]] =
1687 htole32(dsa + sizeof(struct siop_common_xfer) +
1688 Ent_ldsa_data);
1689 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1690 scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1691 s = splbio();
1692 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1693 splx(s);
1694 #ifdef SIOP_DEBUG
1695 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1696 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1697 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1698 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1699 #endif
1700 }
1701 s = splbio();
1702 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1703 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1704 splx(s);
1705 return;
1706 bad0:
1707 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1708 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1709 bad1:
1710 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1711 bad2:
1712 free(newcbd->cmds, M_DEVBUF);
1713 bad3:
1714 free(newcbd, M_DEVBUF);
1715 return;
1716 }
1717
1718 struct siop_lunsw *
1719 siop_get_lunsw(sc)
1720 struct siop_softc *sc;
1721 {
1722 struct siop_lunsw *lunsw;
1723 int i;
1724
1725 if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1726 sc->script_free_hi)
1727 return NULL;
1728 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1729 if (lunsw != NULL) {
1730 #ifdef SIOP_DEBUG
1731 printf("siop_get_lunsw got lunsw at offset %d\n",
1732 lunsw->lunsw_off);
1733 #endif
1734 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1735 return lunsw;
1736 }
1737 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1738 if (lunsw == NULL)
1739 return NULL;
1740 #ifdef SIOP_DEBUG
1741 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1742 #endif
1743 if (sc->sc_c.features & SF_CHIP_RAM) {
1744 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1745 sc->script_free_lo * 4, lun_switch,
1746 sizeof(lun_switch) / sizeof(lun_switch[0]));
1747 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1748 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1749 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1750 } else {
1751 for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1752 i++)
1753 sc->sc_c.sc_script[sc->script_free_lo + i] =
1754 htole32(lun_switch[i]);
1755 sc->sc_c.sc_script[
1756 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1757 htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1758 }
1759 lunsw->lunsw_off = sc->script_free_lo;
1760 lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1761 sc->script_free_lo += lunsw->lunsw_size;
1762 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1763 return lunsw;
1764 }
1765
1766 void
1767 siop_add_reselsw(sc, target)
1768 struct siop_softc *sc;
1769 int target;
1770 {
1771 int i;
1772 struct siop_target *siop_target;
1773 struct siop_lun *siop_lun;
1774
1775 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1776 /*
1777 * add an entry to resel switch
1778 */
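	/*
	 * The resel switch is a table of 15 two-word conditional JUMPs
	 * scanned by the script on reselection; an entry whose low byte is
	 * 0xff is free (siop_del_dev() writes 0x800c00ff to release one).
	 */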
1779 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1780 for (i = 0; i < 15; i++) {
1781 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1782 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1783 == 0xff) { /* it's free */
1784 #ifdef SIOP_DEBUG
1785 printf("siop: target %d slot %d offset %d\n",
1786 target, i, siop_target->reseloff);
1787 #endif
1788 /* JUMP abs_foo, IF target | 0x80; */
1789 siop_script_write(sc, siop_target->reseloff,
1790 0x800c0080 | target);
1791 siop_script_write(sc, siop_target->reseloff + 1,
1792 sc->sc_c.sc_scriptaddr +
1793 siop_target->lunsw->lunsw_off * 4 +
1794 Ent_lun_switch_entry);
1795 break;
1796 }
1797 }
1798 if (i == 15) /* no free slot, shouldn't happen */
1799 panic("siop: resel switch full");
1800
1801 sc->sc_ntargets++;
1802 for (i = 0; i < 8; i++) {
1803 siop_lun = siop_target->siop_lun[i];
1804 if (siop_lun == NULL)
1805 continue;
1806 if (siop_lun->reseloff > 0) {
1807 siop_lun->reseloff = 0;
1808 siop_add_dev(sc, target, i);
1809 }
1810 }
1811 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1812 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1813 }
1814
1815 void
1816 siop_update_scntl3(sc, _siop_target)
1817 struct siop_softc *sc;
1818 struct siop_common_target *_siop_target;
1819 {
1820 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1821 /* MOVE target->id >> 24 TO SCNTL3 */
1822 siop_script_write(sc,
1823 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1824 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1825 /* MOVE target->id >> 8 TO SXFER */
1826 siop_script_write(sc,
1827 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1828 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1829 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1830 }
1831
1832 void
1833 siop_add_dev(sc, target, lun)
1834 struct siop_softc *sc;
1835 int target;
1836 int lun;
1837 {
1838 struct siop_lunsw *lunsw;
1839 struct siop_target *siop_target =
1840 (struct siop_target *)sc->sc_c.targets[target];
1841 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1842 int i, ntargets;
1843
1844 if (siop_lun->reseloff > 0)
1845 return;
1846 lunsw = siop_target->lunsw;
1847 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1848 /*
1849 * can't extend this slot. Probably not worth trying to deal
1850 * with this case
1851 */
1852 #ifdef DEBUG
1853 printf("%s:%d:%d: can't allocate a lun sw slot\n",
1854 sc->sc_c.sc_dev.dv_xname, target, lun);
1855 #endif
1856 return;
1857 }
1858 /* count how many free targets we still have to probe */
1859 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1860
1861 /*
1862 * we need 8 bytes for the additional lun sw entry, and possibly
1863 * sizeof(tag_switch) for the tag switch entry.
1864 * Keep enough free space for the free targets that could be
1865 * probed later.
1866 */
1867 if (sc->script_free_lo + 2 +
1868 (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1869 ((siop_target->target_c.flags & TARF_TAG) ?
1870 sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1871 sc->script_free_hi)) {
1872 /*
1873 * not enough space, probably not worth dealing with it.
1874 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1875 */
1876 #ifdef DEBUG
1877 printf("%s:%d:%d: not enough memory for a lun sw slot\n",
1878 sc->sc_c.sc_dev.dv_xname, target, lun);
1879 #endif
1880 return;
1881 }
1882 #ifdef SIOP_DEBUG
1883 printf("%s:%d:%d: allocate lun sw entry\n",
1884 sc->sc_c.sc_dev.dv_xname, target, lun);
1885 #endif
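	/*
	 * Grow the lun switch in place: write a fresh "INT int_resellun"
	 * terminator two words further on, then turn the old terminator
	 * (at script_free_lo - 2) into a "JUMP abs_foo, IF lun" entry whose
	 * destination is patched later (by siop_start() for untagged
	 * commands, or by the tag switch setup below).
	 */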
1886 /* INT int_resellun */
1887 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1888 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1889 /* Now the slot entry: JUMP abs_foo, IF lun */
1890 siop_script_write(sc, sc->script_free_lo - 2,
1891 0x800c0000 | lun);
1892 siop_script_write(sc, sc->script_free_lo - 1, 0);
1893 siop_lun->reseloff = sc->script_free_lo - 2;
1894 lunsw->lunsw_size += 2;
1895 sc->script_free_lo += 2;
1896 if (siop_target->target_c.flags & TARF_TAG) {
1897 /* we need a tag switch */
1898 sc->script_free_hi -=
1899 sizeof(tag_switch) / sizeof(tag_switch[0]);
1900 if (sc->sc_c.features & SF_CHIP_RAM) {
1901 bus_space_write_region_4(sc->sc_c.sc_ramt,
1902 sc->sc_c.sc_ramh,
1903 sc->script_free_hi * 4, tag_switch,
1904 sizeof(tag_switch) / sizeof(tag_switch[0]));
1905 } else {
1906 for(i = 0;
1907 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1908 i++) {
1909 sc->sc_c.sc_script[sc->script_free_hi + i] =
1910 htole32(tag_switch[i]);
1911 }
1912 }
1913 siop_script_write(sc,
1914 siop_lun->reseloff + 1,
1915 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1916 Ent_tag_switch_entry);
1917
1918 for (i = 0; i < SIOP_NTAG; i++) {
1919 siop_lun->siop_tag[i].reseloff =
1920 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1921 }
1922 } else {
1923 /* non-tag case; just work with the lun switch */
1924 siop_lun->siop_tag[0].reseloff =
1925 siop_target->siop_lun[lun]->reseloff;
1926 }
1927 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1928 }
1929
1930 void
1931 siop_del_dev(sc, target, lun)
1932 struct siop_softc *sc;
1933 int target;
1934 int lun;
1935 {
1936 int i;
1937 struct siop_target *siop_target;
1938 #ifdef SIOP_DEBUG
1939 printf("%s:%d:%d: free lun sw entry\n",
1940 sc->sc_c.sc_dev.dv_xname, target, lun);
1941 #endif
1942 if (sc->sc_c.targets[target] == NULL)
1943 return;
1944 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1945 free(siop_target->siop_lun[lun], M_DEVBUF);
1946 siop_target->siop_lun[lun] = NULL;
1947 /* XXX compact sw entry too ? */
1948 /* check if we can free the whole target */
1949 for (i = 0; i < 8; i++) {
1950 if (siop_target->siop_lun[i] != NULL)
1951 return;
1952 }
1953 #ifdef SIOP_DEBUG
1954 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
1955 sc->sc_c.sc_dev.dv_xname, target, lun,
1956 sc->sc_c.targets[target]->lunsw->lunsw_off);
1957 #endif
1958 /*
1959 * nothing here, free the target struct and resel
1960 * switch entry
1961 */
1962 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
1963 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1964 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
1965 free(sc->sc_c.targets[target], M_DEVBUF);
1966 sc->sc_c.targets[target] = NULL;
1967 sc->sc_ntargets--;
1968 }
1969
1970 #ifdef SIOP_STATS
1971 void
1972 siop_printstats()
1973 {
1974 printf("siop_stat_intr %d\n", siop_stat_intr);
1975 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
1976 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
1977 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
1978 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
1979 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
1980 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
1981 }
1982 #endif
1983