     1 /*	$NetBSD: siop.c,v 1.52 2002/04/18 12:03:15 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.52 2002/04/18 12:03:15 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar.h>
60 #include <dev/ic/siopvar_common.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
81
     82 /* Number of scheduler slots (needs to match script) */
83 #define SIOP_NSLOTS 40
84
85 void siop_reset __P((struct siop_softc *));
86 void siop_handle_reset __P((struct siop_softc *));
87 int siop_handle_qtag_reject __P((struct siop_cmd *));
88 void siop_scsicmd_end __P((struct siop_cmd *));
89 void siop_unqueue __P((struct siop_softc *, int, int));
90 static void siop_start __P((struct siop_softc *, struct siop_cmd *));
91 void siop_timeout __P((void *));
92 int siop_scsicmd __P((struct scsipi_xfer *));
93 void siop_scsipi_request __P((struct scsipi_channel *,
94 scsipi_adapter_req_t, void *));
95 void siop_dump_script __P((struct siop_softc *));
96 void siop_morecbd __P((struct siop_softc *));
97 struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
98 void siop_add_reselsw __P((struct siop_softc *, int));
99 void siop_update_scntl3 __P((struct siop_softc *, struct siop_target *));
100
101 #ifdef SIOP_STATS
102 static int siop_stat_intr = 0;
103 static int siop_stat_intr_shortxfer = 0;
104 static int siop_stat_intr_sdp = 0;
105 static int siop_stat_intr_done = 0;
106 static int siop_stat_intr_xferdisc = 0;
107 static int siop_stat_intr_lunresel = 0;
108 static int siop_stat_intr_qfull = 0;
109 void siop_printstats __P((void));
110 #define INCSTAT(x) x++
111 #else
112 #define INCSTAT(x)
113 #endif
114
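/*
 * Helpers to access the script: it lives either in on-chip RAM
 * (SF_CHIP_RAM) or in the DMA-safe memory allocated in siop_attach(),
 * so reads, writes and cache syncs go through these wrappers.
 */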
115 static __inline__ void siop_script_sync __P((struct siop_softc *, int));
116 static __inline__ void
117 siop_script_sync(sc, ops)
118 struct siop_softc *sc;
119 int ops;
120 {
121 if ((sc->features & SF_CHIP_RAM) == 0)
122 bus_dmamap_sync(sc->sc_dmat, sc->sc_scriptdma, 0,
123 PAGE_SIZE, ops);
124 }
125
126 static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
127 static __inline__ u_int32_t
128 siop_script_read(sc, offset)
129 struct siop_softc *sc;
130 u_int offset;
131 {
132 if (sc->features & SF_CHIP_RAM) {
133 return bus_space_read_4(sc->sc_ramt, sc->sc_ramh, offset * 4);
134 } else {
135 return le32toh(sc->sc_script[offset]);
136 }
137 }
138
139 static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
140 u_int32_t));
141 static __inline__ void
142 siop_script_write(sc, offset, val)
143 struct siop_softc *sc;
144 u_int offset;
145 u_int32_t val;
146 {
147 if (sc->features & SF_CHIP_RAM) {
148 bus_space_write_4(sc->sc_ramt, sc->sc_ramh, offset * 4, val);
149 } else {
150 sc->sc_script[offset] = htole32(val);
151 }
152 }
153
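/*
 * Bus-independent attachment: allocate DMA-safe memory for the script
 * when the chip has no on-chip RAM, set up the scsipi adapter and
 * channel, reset the bus and the chip, then attach the SCSI bus.
 */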
154 void
155 siop_attach(sc)
156 struct siop_softc *sc;
157 {
158 int error, i;
159 bus_dma_segment_t seg;
160 int rseg;
161
162 /*
163 * Allocate DMA-safe memory for the script and map it.
164 */
165 if ((sc->features & SF_CHIP_RAM) == 0) {
166 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
167 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
168 if (error) {
169 printf("%s: unable to allocate script DMA memory, "
170 "error = %d\n", sc->sc_dev.dv_xname, error);
171 return;
172 }
173 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
174 (caddr_t *)&sc->sc_script, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
175 if (error) {
176 printf("%s: unable to map script DMA memory, "
177 "error = %d\n", sc->sc_dev.dv_xname, error);
178 return;
179 }
180 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
181 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
182 if (error) {
183 printf("%s: unable to create script DMA map, "
184 "error = %d\n", sc->sc_dev.dv_xname, error);
185 return;
186 }
187 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
188 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
189 if (error) {
190 printf("%s: unable to load script DMA map, "
191 "error = %d\n", sc->sc_dev.dv_xname, error);
192 return;
193 }
194 sc->sc_scriptaddr = sc->sc_scriptdma->dm_segs[0].ds_addr;
195 sc->ram_size = PAGE_SIZE;
196 }
197 TAILQ_INIT(&sc->free_list);
198 TAILQ_INIT(&sc->cmds);
199 TAILQ_INIT(&sc->lunsw_list);
200 sc->sc_currschedslot = 0;
201 #ifdef SIOP_DEBUG
202 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
203 sc->sc_dev.dv_xname, (int)sizeof(siop_script),
204 (u_int32_t)sc->sc_scriptaddr, sc->sc_script);
205 #endif
206
207 sc->sc_adapt.adapt_dev = &sc->sc_dev;
208 sc->sc_adapt.adapt_nchannels = 1;
209 sc->sc_adapt.adapt_openings = 0;
210 sc->sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
211 sc->sc_adapt.adapt_ioctl = siop_ioctl;
212 sc->sc_adapt.adapt_minphys = minphys;
213 sc->sc_adapt.adapt_request = siop_scsipi_request;
214
215 memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
216 sc->sc_chan.chan_adapter = &sc->sc_adapt;
217 sc->sc_chan.chan_bustype = &scsi_bustype;
218 sc->sc_chan.chan_channel = 0;
219 sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
220 sc->sc_chan.chan_ntargets = (sc->features & SF_BUS_WIDE) ? 16 : 8;
221 sc->sc_chan.chan_nluns = 8;
222 sc->sc_chan.chan_id = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
223 if (sc->sc_chan.chan_id == 0 ||
224 sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
225 sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
226
227 for (i = 0; i < 16; i++)
228 sc->targets[i] = NULL;
229
230 /* find min/max sync period for this chip */
231 sc->maxsync = 0;
232 sc->minsync = 255;
233 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
234 if (sc->clock_period != scf_period[i].clock)
235 continue;
236 if (sc->maxsync < scf_period[i].period)
237 sc->maxsync = scf_period[i].period;
238 if (sc->minsync > scf_period[i].period)
239 sc->minsync = scf_period[i].period;
240 }
    241 	if (sc->maxsync == 0 || sc->minsync == 255)
242 panic("siop: can't find my sync parameters\n");
243 /* Do a bus reset, so that devices fall back to narrow/async */
244 siop_resetbus(sc);
245 /*
246 * siop_reset() will reset the chip, thus clearing pending interrupts
247 */
248 siop_reset(sc);
249 #ifdef DUMP_SCRIPT
250 siop_dump_script(sc);
251 #endif
252
253 config_found((struct device*)sc, &sc->sc_chan, scsiprint);
254 }
255
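/*
 * Reset the chip, (re)copy and patch the script into on-chip RAM or host
 * memory, and rebuild the lun/reselect switches for the known targets.
 */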
256 void
257 siop_reset(sc)
258 struct siop_softc *sc;
259 {
260 int i, j;
261 struct siop_lunsw *lunsw;
262
263 siop_common_reset(sc);
264
265 /* copy and patch the script */
266 if (sc->features & SF_CHIP_RAM) {
267 bus_space_write_region_4(sc->sc_ramt, sc->sc_ramh, 0,
268 siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
269 for (j = 0; j <
270 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
271 j++) {
272 bus_space_write_4(sc->sc_ramt, sc->sc_ramh,
273 E_abs_msgin_Used[j] * 4,
274 sc->sc_scriptaddr + Ent_msgin_space);
275 }
276 #ifdef SIOP_SYMLED
277 bus_space_write_region_4(sc->sc_ramt, sc->sc_ramh, Ent_led_on1,
278 siop_led_on, sizeof(siop_led_on) / sizeof(siop_led_on[0]));
279 bus_space_write_region_4(sc->sc_ramt, sc->sc_ramh, Ent_led_on2,
280 siop_led_on, sizeof(siop_led_on) / sizeof(siop_led_on[0]));
281 bus_space_write_region_4(sc->sc_ramt, sc->sc_ramh, Ent_led_off,
282 siop_led_off,
283 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
284 #endif
285 } else {
286 for (j = 0;
287 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
288 sc->sc_script[j] = htole32(siop_script[j]);
289 }
290 for (j = 0; j <
291 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
292 j++) {
293 sc->sc_script[E_abs_msgin_Used[j]] =
294 htole32(sc->sc_scriptaddr + Ent_msgin_space);
295 }
296 #ifdef SIOP_SYMLED
297 for (j = 0;
298 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
299 sc->sc_script[
300 Ent_led_on1 / sizeof(siop_led_on[0]) + j
301 ] = htole32(siop_led_on[j]);
302 for (j = 0;
303 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
304 sc->sc_script[
305 Ent_led_on2 / sizeof(siop_led_on[0]) + j
306 ] = htole32(siop_led_on[j]);
307 for (j = 0;
308 j < (sizeof(siop_led_off) / sizeof(siop_led_off[0])); j++)
309 sc->sc_script[
310 Ent_led_off / sizeof(siop_led_off[0]) + j
311 ] = htole32(siop_led_off[j]);
312 #endif
313 }
314 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
315 sc->script_free_hi = sc->ram_size / 4;
316
317 /* free used and unused lun switches */
318 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
319 #ifdef SIOP_DEBUG
320 printf("%s: free lunsw at offset %d\n",
321 sc->sc_dev.dv_xname, lunsw->lunsw_off);
322 #endif
323 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
324 free(lunsw, M_DEVBUF);
325 }
326 TAILQ_INIT(&sc->lunsw_list);
327 /* restore reselect switch */
328 for (i = 0; i < sc->sc_chan.chan_ntargets; i++) {
329 if (sc->targets[i] == NULL)
330 continue;
331 #ifdef SIOP_DEBUG
332 printf("%s: restore sw for target %d\n",
333 sc->sc_dev.dv_xname, i);
334 #endif
335 free(sc->targets[i]->lunsw, M_DEVBUF);
336 sc->targets[i]->lunsw = siop_get_lunsw(sc);
337 if (sc->targets[i]->lunsw == NULL) {
338 printf("%s: can't alloc lunsw for target %d\n",
339 sc->sc_dev.dv_xname, i);
340 break;
341 }
342 siop_add_reselsw(sc, i);
343 }
344
345 /* start script */
346 if ((sc->features & SF_CHIP_RAM) == 0) {
347 bus_dmamap_sync(sc->sc_dmat, sc->sc_scriptdma, 0, PAGE_SIZE,
348 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
349 }
350 bus_space_write_4(sc->sc_rt, sc->sc_rh, SIOP_DSP,
351 sc->sc_scriptaddr + Ent_reselect);
352 }
353
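/*
 * CALL_SCRIPT(ent) (re)starts the script processor at the given entry
 * point by writing the script's bus address plus offset to DSP.
 */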
354 #if 0
355 #define CALL_SCRIPT(ent) do {\
356 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
357 siop_cmd->dsa, \
358 sc->sc_scriptaddr + ent); \
359 bus_space_write_4(sc->sc_rt, sc->sc_rh, SIOP_DSP, sc->sc_scriptaddr + ent); \
360 } while (0)
361 #else
362 #define CALL_SCRIPT(ent) do {\
363 bus_space_write_4(sc->sc_rt, sc->sc_rh, SIOP_DSP, sc->sc_scriptaddr + ent); \
364 } while (0)
365 #endif
366
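/*
 * Interrupt handler: find the siop_cmd the chip is working on from the
 * DSA register, then handle DMA errors, SCSI errors and script interrupts.
 */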
367 int
368 siop_intr(v)
369 void *v;
370 {
371 struct siop_softc *sc = v;
372 struct siop_target *siop_target;
373 struct siop_cmd *siop_cmd;
374 struct siop_lun *siop_lun;
375 struct scsipi_xfer *xs;
376 int istat, sist, sstat1, dstat;
377 u_int32_t irqcode;
378 int need_reset = 0;
379 int offset, target, lun, tag;
380 bus_addr_t dsa;
381 struct siop_cbd *cbdp;
382 int freetarget = 0;
383 int restart = 0;
384
385 istat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT);
386 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
387 return 0;
388 INCSTAT(siop_stat_intr);
389 if (istat & ISTAT_INTF) {
390 printf("INTRF\n");
391 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_INTF);
392 }
393 /* use DSA to find the current siop_cmd */
394 dsa = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSA);
395 for (cbdp = TAILQ_FIRST(&sc->cmds); cbdp != NULL;
396 cbdp = TAILQ_NEXT(cbdp, next)) {
397 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
398 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
399 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
400 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
401 siop_table_sync(siop_cmd,
402 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
403 break;
404 }
405 }
406 if (cbdp == NULL) {
407 siop_cmd = NULL;
408 }
409 if (siop_cmd) {
410 xs = siop_cmd->xs;
411 siop_target = siop_cmd->siop_target;
412 target = siop_cmd->xs->xs_periph->periph_target;
413 lun = siop_cmd->xs->xs_periph->periph_lun;
414 tag = siop_cmd->tag;
415 siop_lun = siop_target->siop_lun[lun];
416 #ifdef DIAGNOSTIC
417 if (siop_cmd->status != CMDST_ACTIVE) {
418 printf("siop_cmd (lun %d) for DSA 0x%x "
419 "not active (%d)\n", lun, (u_int)dsa,
420 siop_cmd->status);
421 xs = NULL;
422 siop_target = NULL;
423 target = -1;
424 lun = -1;
425 tag = -1;
426 siop_lun = NULL;
427 siop_cmd = NULL;
428 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
429 printf("siop_cmd (lun %d tag %d) not in siop_lun "
430 "active (%p != %p)\n", lun, tag, siop_cmd,
431 siop_lun->siop_tag[tag].active);
432 }
433 #endif
434 } else {
435 xs = NULL;
436 siop_target = NULL;
437 target = -1;
438 lun = -1;
439 tag = -1;
440 siop_lun = NULL;
441 }
442 if (istat & ISTAT_DIP) {
443 dstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DSTAT);
444 if (dstat & DSTAT_SSI) {
445 printf("single step dsp 0x%08x dsa 0x08%x\n",
446 (int)(bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSP) -
447 sc->sc_scriptaddr),
448 bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSA));
449 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
450 (istat & ISTAT_SIP) == 0) {
451 bus_space_write_1(sc->sc_rt, sc->sc_rh,
452 SIOP_DCNTL, bus_space_read_1(sc->sc_rt,
453 sc->sc_rh, SIOP_DCNTL) | DCNTL_STD);
454 }
455 return 1;
456 }
457 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
458 printf("DMA IRQ:");
459 if (dstat & DSTAT_IID)
460 printf(" Illegal instruction");
461 if (dstat & DSTAT_ABRT)
462 printf(" abort");
463 if (dstat & DSTAT_BF)
464 printf(" bus fault");
465 if (dstat & DSTAT_MDPE)
466 printf(" parity");
467 if (dstat & DSTAT_DFE)
468 printf(" dma fifo empty");
469 printf(", DSP=0x%x DSA=0x%x: ",
470 (int)(bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSP) -
471 sc->sc_scriptaddr),
472 bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSA));
473 if (siop_cmd)
474 printf("last msg_in=0x%x status=0x%x\n",
475 siop_cmd->siop_tables.msg_in[0],
476 le32toh(siop_cmd->siop_tables.status));
477 else
478 printf("%s: current DSA invalid\n",
479 sc->sc_dev.dv_xname);
480 need_reset = 1;
481 }
482 }
483 if (istat & ISTAT_SIP) {
484 if (istat & ISTAT_DIP)
485 delay(10);
486 /*
    487 		 * Can't read sist0 & sist1 independently without
    488 		 * inserting a delay between the two reads
489 */
490 sist = bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
491 sstat1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT1);
492 #ifdef SIOP_DEBUG_INTR
493 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
494 "DSA=0x%x DSP=0x%lx\n", sist,
495 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT1),
496 bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSA),
497 (u_long)(bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSP) -
498 sc->sc_scriptaddr));
499 #endif
500 if (sist & SIST0_RST) {
501 siop_handle_reset(sc);
502 /* no table to flush here */
503 return 1;
504 }
505 if (sist & SIST0_SGE) {
506 if (siop_cmd)
507 scsipi_printaddr(xs->xs_periph);
508 else
509 printf("%s:", sc->sc_dev.dv_xname);
510 printf("scsi gross error\n");
511 goto reset;
512 }
513 if ((sist & SIST0_MA) && need_reset == 0) {
514 if (siop_cmd) {
515 int scratcha0;
516 dstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
517 SIOP_DSTAT);
518 /*
519 * first restore DSA, in case we were in a S/G
520 * operation.
521 */
522 bus_space_write_4(sc->sc_rt, sc->sc_rh,
523 SIOP_DSA, siop_cmd->dsa);
524 scratcha0 = bus_space_read_1(sc->sc_rt,
525 sc->sc_rh, SIOP_SCRATCHA);
526 switch (sstat1 & SSTAT1_PHASE_MASK) {
527 case SSTAT1_PHASE_STATUS:
528 /*
    529 				 * the previous phase may be aborted for any reason
    530 				 * (for example, the target has less data to
    531 				 * transfer than requested). Just go to status
532 * and the command should terminate.
533 */
534 INCSTAT(siop_stat_intr_shortxfer);
535 if ((dstat & DSTAT_DFE) == 0)
536 siop_clearfifo(sc);
537 /* no table to flush here */
538 CALL_SCRIPT(Ent_status);
539 return 1;
540 case SSTAT1_PHASE_MSGIN:
541 /*
    542 				 * target may be ready to disconnect.
543 * Save data pointers just in case.
544 */
545 INCSTAT(siop_stat_intr_xferdisc);
546 if (scratcha0 & A_flag_data)
547 siop_sdp(siop_cmd);
548 else if ((dstat & DSTAT_DFE) == 0)
549 siop_clearfifo(sc);
550 bus_space_write_1(sc->sc_rt, sc->sc_rh,
551 SIOP_SCRATCHA,
552 scratcha0 & ~A_flag_data);
553 siop_table_sync(siop_cmd,
554 BUS_DMASYNC_PREREAD |
555 BUS_DMASYNC_PREWRITE);
556 CALL_SCRIPT(Ent_msgin);
557 return 1;
558 }
559 printf("%s: unexpected phase mismatch %d\n",
560 sc->sc_dev.dv_xname,
561 sstat1 & SSTAT1_PHASE_MASK);
562 } else {
563 printf("%s: phase mismatch without command\n",
564 sc->sc_dev.dv_xname);
565 }
566 need_reset = 1;
567 }
568 if (sist & SIST0_PAR) {
569 /* parity error, reset */
570 if (siop_cmd)
571 scsipi_printaddr(xs->xs_periph);
572 else
573 printf("%s:", sc->sc_dev.dv_xname);
574 printf("parity error\n");
575 goto reset;
576 }
577 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
578 /* selection time out, assume there's no device here */
579 if (siop_cmd) {
580 siop_cmd->status = CMDST_DONE;
581 xs->error = XS_SELTIMEOUT;
582 freetarget = 1;
583 goto end;
584 } else {
585 printf("%s: selection timeout without "
586 "command\n", sc->sc_dev.dv_xname);
587 need_reset = 1;
588 }
589 }
590 if (sist & SIST0_UDC) {
591 /*
592 * unexpected disconnect. Usually the target signals
593 * a fatal condition this way. Attempt to get sense.
594 */
595 if (siop_cmd) {
596 siop_cmd->siop_tables.status =
597 htole32(SCSI_CHECK);
598 goto end;
599 }
600 printf("%s: unexpected disconnect without "
601 "command\n", sc->sc_dev.dv_xname);
602 goto reset;
603 }
604 if (sist & (SIST1_SBMC << 8)) {
605 /* SCSI bus mode change */
606 if (siop_modechange(sc) == 0 || need_reset == 1)
607 goto reset;
608 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
609 /*
610 * we have a script interrupt, it will
611 * restart the script.
612 */
613 goto scintr;
614 }
615 /*
    616 			 * else we have to restart it ourselves, at the
    617 			 * interrupted instruction.
618 */
619 bus_space_write_4(sc->sc_rt, sc->sc_rh, SIOP_DSP,
620 bus_space_read_4(sc->sc_rt, sc->sc_rh,
621 SIOP_DSP) - 8);
622 return 1;
623 }
    624 		/* Else it's an unhandled exception (for now). */
625 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
626 "DSA=0x%x DSP=0x%x\n", sc->sc_dev.dv_xname, sist,
627 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT1),
628 bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSA),
629 (int)(bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DSP) -
630 sc->sc_scriptaddr));
631 if (siop_cmd) {
632 siop_cmd->status = CMDST_DONE;
633 xs->error = XS_SELTIMEOUT;
634 goto end;
635 }
636 need_reset = 1;
637 }
638 if (need_reset) {
639 reset:
640 /* fatal error, reset the bus */
641 siop_resetbus(sc);
642 /* no table to flush here */
643 return 1;
644 }
645
646 scintr:
647 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
648 irqcode = bus_space_read_4(sc->sc_rt, sc->sc_rh,
649 SIOP_DSPS);
650 #ifdef SIOP_DEBUG_INTR
651 printf("script interrupt 0x%x\n", irqcode);
652 #endif
653 /*
    654 		 * no command, or an inactive command, is only valid for a
    655 		 * reselect interrupt
656 */
657 if ((irqcode & 0x80) == 0) {
658 if (siop_cmd == NULL) {
659 printf(
660 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
661 sc->sc_dev.dv_xname, irqcode);
662 goto reset;
663 }
664 if (siop_cmd->status != CMDST_ACTIVE) {
665 printf("%s: command with invalid status "
666 "(IRQ code 0x%x current status %d) !\n",
667 sc->sc_dev.dv_xname,
668 irqcode, siop_cmd->status);
669 xs = NULL;
670 }
671 }
672 switch(irqcode) {
673 case A_int_err:
674 printf("error, DSP=0x%x\n",
675 (int)(bus_space_read_4(sc->sc_rt, sc->sc_rh,
676 SIOP_DSP) - sc->sc_scriptaddr));
677 if (xs) {
678 xs->error = XS_SELTIMEOUT;
679 goto end;
680 } else {
681 goto reset;
682 }
683 case A_int_reseltarg:
684 printf("%s: reselect with invalid target\n",
685 sc->sc_dev.dv_xname);
686 goto reset;
687 case A_int_resellun:
688 INCSTAT(siop_stat_intr_lunresel);
689 target = bus_space_read_1(sc->sc_rt, sc->sc_rh,
690 SIOP_SCRATCHA) & 0xf;
691 lun = bus_space_read_1(sc->sc_rt, sc->sc_rh,
692 SIOP_SCRATCHA + 1);
693 tag = bus_space_read_1(sc->sc_rt, sc->sc_rh,
694 SIOP_SCRATCHA + 2);
695 siop_target = sc->targets[target];
696 if (siop_target == NULL) {
697 printf("%s: reselect with invalid "
698 "target %d\n", sc->sc_dev.dv_xname, target);
699 goto reset;
700 }
701 siop_lun = siop_target->siop_lun[lun];
702 if (siop_lun == NULL) {
703 printf("%s: target %d reselect with invalid "
704 "lun %d\n", sc->sc_dev.dv_xname,
705 target, lun);
706 goto reset;
707 }
708 if (siop_lun->siop_tag[tag].active == NULL) {
709 printf("%s: target %d lun %d tag %d reselect "
710 "without command\n", sc->sc_dev.dv_xname,
711 target, lun, tag);
712 goto reset;
713 }
714 siop_cmd = siop_lun->siop_tag[tag].active;
715 bus_space_write_4(sc->sc_rt, sc->sc_rh, SIOP_DSP,
716 siop_cmd->dsa + sizeof(struct siop_xfer_common) +
717 Ent_ldsa_reload_dsa);
718 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
719 return 1;
720 case A_int_reseltag:
721 printf("%s: reselect with invalid tag\n",
722 sc->sc_dev.dv_xname);
723 goto reset;
724 case A_int_msgin:
725 {
726 int msgin = bus_space_read_1(sc->sc_rt, sc->sc_rh,
727 SIOP_SFBR);
728 if (msgin == MSG_MESSAGE_REJECT) {
729 int msg, extmsg;
730 if (siop_cmd->siop_tables.msg_out[0] & 0x80) {
731 /*
    732 					 * message was part of an identify +
    733 					 * something else. Identify shouldn't
    734 					 * have been rejected.
735 */
736 msg = siop_cmd->siop_tables.msg_out[1];
737 extmsg =
738 siop_cmd->siop_tables.msg_out[3];
739 } else {
740 msg = siop_cmd->siop_tables.msg_out[0];
741 extmsg =
742 siop_cmd->siop_tables.msg_out[2];
743 }
744 if (msg == MSG_MESSAGE_REJECT) {
745 /* MSG_REJECT for a MSG_REJECT !*/
746 if (xs)
747 scsipi_printaddr(xs->xs_periph);
748 else
749 printf("%s: ",
750 sc->sc_dev.dv_xname);
751 printf("our reject message was "
752 "rejected\n");
753 goto reset;
754 }
755 if (msg == MSG_EXTENDED &&
756 extmsg == MSG_EXT_WDTR) {
757 /* WDTR rejected, initiate sync */
758 if ((siop_target->flags & TARF_SYNC)
759 == 0) {
760 siop_target->status = TARST_OK;
761 siop_update_xfer_mode(sc,
762 target);
763 /* no table to flush here */
764 CALL_SCRIPT(Ent_msgin_ack);
765 return 1;
766 }
767 siop_target->status = TARST_SYNC_NEG;
768 siop_sdtr_msg(siop_cmd, 0,
769 sc->minsync, sc->maxoff);
770 siop_table_sync(siop_cmd,
771 BUS_DMASYNC_PREREAD |
772 BUS_DMASYNC_PREWRITE);
773 CALL_SCRIPT(Ent_send_msgout);
774 return 1;
775 } else if (msg == MSG_EXTENDED &&
776 extmsg == MSG_EXT_SDTR) {
777 /* sync rejected */
778 siop_target->offset = 0;
779 siop_target->period = 0;
780 siop_target->status = TARST_OK;
781 siop_update_xfer_mode(sc, target);
782 /* no table to flush here */
783 CALL_SCRIPT(Ent_msgin_ack);
784 return 1;
785 } else if (msg == MSG_SIMPLE_Q_TAG ||
786 msg == MSG_HEAD_OF_Q_TAG ||
787 msg == MSG_ORDERED_Q_TAG) {
788 if (siop_handle_qtag_reject(
789 siop_cmd) == -1)
790 goto reset;
791 CALL_SCRIPT(Ent_msgin_ack);
792 return 1;
793 }
794 if (xs)
795 scsipi_printaddr(xs->xs_periph);
796 else
797 printf("%s: ", sc->sc_dev.dv_xname);
798 if (msg == MSG_EXTENDED) {
799 printf("scsi message reject, extended "
800 "message sent was 0x%x\n", extmsg);
801 } else {
802 printf("scsi message reject, message "
803 "sent was 0x%x\n", msg);
804 }
805 /* no table to flush here */
806 CALL_SCRIPT(Ent_msgin_ack);
807 return 1;
808 }
809 if (xs)
810 scsipi_printaddr(xs->xs_periph);
811 else
812 printf("%s: ", sc->sc_dev.dv_xname);
813 printf("unhandled message 0x%x\n",
814 siop_cmd->siop_tables.msg_in[0]);
815 siop_cmd->siop_tables.msg_out[0] = MSG_MESSAGE_REJECT;
816 siop_cmd->siop_tables.t_msgout.count= htole32(1);
817 siop_table_sync(siop_cmd,
818 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
819 CALL_SCRIPT(Ent_send_msgout);
820 return 1;
821 }
822 case A_int_extmsgin:
823 #ifdef SIOP_DEBUG_INTR
824 printf("extended message: msg 0x%x len %d\n",
825 siop_cmd->siop_tables.msg_in[2],
826 siop_cmd->siop_tables.msg_in[1]);
827 #endif
828 if (siop_cmd->siop_tables.msg_in[1] > 6)
829 printf("%s: extended message too big (%d)\n",
830 sc->sc_dev.dv_xname,
831 siop_cmd->siop_tables.msg_in[1]);
832 siop_cmd->siop_tables.t_extmsgdata.count =
833 htole32(siop_cmd->siop_tables.msg_in[1] - 1);
834 siop_table_sync(siop_cmd,
835 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
836 CALL_SCRIPT(Ent_get_extmsgdata);
837 return 1;
838 case A_int_extmsgdata:
839 #ifdef SIOP_DEBUG_INTR
840 {
841 int i;
842 printf("extended message: 0x%x, data:",
843 siop_cmd->siop_tables.msg_in[2]);
844 for (i = 3; i < 2 + siop_cmd->siop_tables.msg_in[1];
845 i++)
846 printf(" 0x%x",
847 siop_cmd->siop_tables.msg_in[i]);
848 printf("\n");
849 }
850 #endif
851 if (siop_cmd->siop_tables.msg_in[2] == MSG_EXT_WDTR) {
852 switch (siop_wdtr_neg(siop_cmd)) {
853 case SIOP_NEG_MSGOUT:
854 siop_update_scntl3(sc,
855 siop_cmd->siop_target);
856 siop_table_sync(siop_cmd,
857 BUS_DMASYNC_PREREAD |
858 BUS_DMASYNC_PREWRITE);
859 CALL_SCRIPT(Ent_send_msgout);
860 return(1);
861 case SIOP_NEG_ACK:
862 siop_update_scntl3(sc,
863 siop_cmd->siop_target);
864 CALL_SCRIPT(Ent_msgin_ack);
865 return(1);
866 default:
867 panic("invalid retval from "
868 "siop_wdtr_neg()");
869 }
870 return(1);
871 }
872 if (siop_cmd->siop_tables.msg_in[2] == MSG_EXT_SDTR) {
873 switch (siop_sdtr_neg(siop_cmd)) {
874 case SIOP_NEG_MSGOUT:
875 siop_update_scntl3(sc,
876 siop_cmd->siop_target);
877 siop_table_sync(siop_cmd,
878 BUS_DMASYNC_PREREAD |
879 BUS_DMASYNC_PREWRITE);
880 CALL_SCRIPT(Ent_send_msgout);
881 return(1);
882 case SIOP_NEG_ACK:
883 siop_update_scntl3(sc,
884 siop_cmd->siop_target);
885 CALL_SCRIPT(Ent_msgin_ack);
886 return(1);
887 default:
888 panic("invalid retval from "
889 "siop_wdtr_neg()");
890 }
891 return(1);
892 }
893 /* send a message reject */
894 siop_cmd->siop_tables.msg_out[0] = MSG_MESSAGE_REJECT;
895 siop_cmd->siop_tables.t_msgout.count = htole32(1);
896 siop_table_sync(siop_cmd,
897 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
898 CALL_SCRIPT(Ent_send_msgout);
899 return 1;
900 case A_int_disc:
901 INCSTAT(siop_stat_intr_sdp);
902 offset = bus_space_read_1(sc->sc_rt, sc->sc_rh,
903 SIOP_SCRATCHA + 1);
904 #ifdef SIOP_DEBUG_DR
905 printf("disconnect offset %d\n", offset);
906 #endif
907 if (offset > SIOP_NSG) {
908 printf("%s: bad offset for disconnect (%d)\n",
909 sc->sc_dev.dv_xname, offset);
910 goto reset;
911 }
912 /*
    913 			 * offset == SIOP_NSG may be a valid condition if
    914 			 * we get an sdp when the xfer is done.
915 * Don't call memmove in this case.
916 */
917 if (offset < SIOP_NSG) {
918 memmove(&siop_cmd->siop_tables.data[0],
919 &siop_cmd->siop_tables.data[offset],
920 (SIOP_NSG - offset) * sizeof(scr_table_t));
921 siop_table_sync(siop_cmd,
922 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
923 }
924 CALL_SCRIPT(Ent_script_sched);
925 return 1;
926 case A_int_resfail:
927 printf("reselect failed\n");
928 CALL_SCRIPT(Ent_script_sched);
929 return 1;
930 case A_int_done:
931 if (xs == NULL) {
932 printf("%s: done without command, DSA=0x%lx\n",
933 sc->sc_dev.dv_xname, (u_long)siop_cmd->dsa);
934 siop_cmd->status = CMDST_FREE;
935 CALL_SCRIPT(Ent_script_sched);
936 return 1;
937 }
938 #ifdef SIOP_DEBUG_INTR
939 printf("done, DSA=0x%lx target id 0x%x last msg "
940 "in=0x%x status=0x%x\n", (u_long)siop_cmd->dsa,
941 le32toh(siop_cmd->siop_tables.id),
942 siop_cmd->siop_tables.msg_in[0],
943 le32toh(siop_cmd->siop_tables.status));
944 #endif
945 INCSTAT(siop_stat_intr_done);
946 siop_cmd->status = CMDST_DONE;
947 goto end;
948 default:
949 printf("unknown irqcode %x\n", irqcode);
950 if (xs) {
951 xs->error = XS_SELTIMEOUT;
952 goto end;
953 }
954 goto reset;
955 }
956 return 1;
957 }
    958 	/* We just shouldn't get here */
    959 	panic("siop_intr: I shouldn't be here !");
960 return 1;
961 end:
962 /*
    963 	 * restart the script now if the command completed properly.
    964 	 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
    965 	 * the queue
966 */
967 xs->status = le32toh(siop_cmd->siop_tables.status);
968 if (xs->status == SCSI_OK)
969 CALL_SCRIPT(Ent_script_sched);
970 else
971 restart = 1;
972 siop_lun->siop_tag[tag].active = NULL;
973 siop_scsicmd_end(siop_cmd);
974 if (freetarget && siop_target->status == TARST_PROBING)
975 siop_del_dev(sc, target, lun);
976 if (restart)
977 CALL_SCRIPT(Ent_script_sched);
978 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
979 /* a command terminated, so we have free slots now */
980 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
981 scsipi_channel_thaw(&sc->sc_chan, 1);
982 }
983
984 return 1;
985 }
986
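/*
 * Postprocess a terminated command: map the SCSI status to a scsipi
 * error, unload the DMA maps and hand the xfer back to the upper layer.
 */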
987 void
988 siop_scsicmd_end(siop_cmd)
989 struct siop_cmd *siop_cmd;
990 {
991 struct scsipi_xfer *xs = siop_cmd->xs;
992 struct siop_softc *sc = siop_cmd->siop_sc;
993
994 switch(xs->status) {
995 case SCSI_OK:
996 xs->error = XS_NOERROR;
997 break;
998 case SCSI_BUSY:
999 xs->error = XS_BUSY;
1000 break;
1001 case SCSI_CHECK:
1002 xs->error = XS_BUSY;
1003 /* remove commands in the queue and scheduler */
1004 siop_unqueue(sc, xs->xs_periph->periph_target,
1005 xs->xs_periph->periph_lun);
1006 break;
1007 case SCSI_QUEUE_FULL:
1008 INCSTAT(siop_stat_intr_qfull);
1009 #ifdef SIOP_DEBUG
1010 printf("%s:%d:%d: queue full (tag %d)\n", sc->sc_dev.dv_xname,
1011 xs->xs_periph->periph_target,
1012 xs->xs_periph->periph_lun, siop_cmd->tag);
1013 #endif
1014 xs->error = XS_BUSY;
1015 break;
1016 case SCSI_SIOP_NOCHECK:
1017 /*
1018 * don't check status, xs->error is already valid
1019 */
1020 break;
1021 case SCSI_SIOP_NOSTATUS:
1022 /*
1023 * the status byte was not updated, cmd was
1024 * aborted
1025 */
1026 xs->error = XS_SELTIMEOUT;
1027 break;
1028 default:
1029 xs->error = XS_DRIVER_STUFFUP;
1030 }
1031 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1032 bus_dmamap_sync(sc->sc_dmat, siop_cmd->dmamap_data, 0,
1033 siop_cmd->dmamap_data->dm_mapsize,
1034 (xs->xs_control & XS_CTL_DATA_IN) ?
1035 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1036 bus_dmamap_unload(sc->sc_dmat, siop_cmd->dmamap_data);
1037 }
1038 bus_dmamap_unload(sc->sc_dmat, siop_cmd->dmamap_cmd);
1039 callout_stop(&siop_cmd->xs->xs_callout);
1040 siop_cmd->status = CMDST_FREE;
1041 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1042 xs->resid = 0;
1043 scsipi_done (xs);
1044 }
1045
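/*
 * Remove commands for this target/lun which are queued in the scheduler
 * but not started yet, and requeue them to the upper layer. Used when a
 * command terminates with CHECK CONDITION.
 */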
1046 void
1047 siop_unqueue(sc, target, lun)
1048 struct siop_softc *sc;
1049 int target;
1050 int lun;
1051 {
1052 int slot, tag;
1053 struct siop_cmd *siop_cmd;
1054 struct siop_lun *siop_lun = sc->targets[target]->siop_lun[lun];
1055
1056 /* first make sure to read valid data */
1057 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1058
1059 for (tag = 1; tag < SIOP_NTAG; tag++) {
1060 /* look for commands in the scheduler, not yet started */
1061 if (siop_lun->siop_tag[tag].active == NULL)
1062 continue;
1063 siop_cmd = siop_lun->siop_tag[tag].active;
1064 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1065 if (siop_script_read(sc,
1066 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1067 siop_cmd->dsa + sizeof(struct siop_xfer_common) +
1068 Ent_ldsa_select)
1069 break;
1070 }
1071 if (slot > sc->sc_currschedslot)
1072 continue; /* didn't find it */
1073 if (siop_script_read(sc,
1074 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1075 continue; /* already started */
1076 /* clear the slot */
1077 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1078 0x80000000);
1079 /* ask to requeue */
1080 siop_cmd->xs->error = XS_REQUEUE;
1081 siop_cmd->xs->status = SCSI_SIOP_NOCHECK;
1082 siop_lun->siop_tag[tag].active = NULL;
1083 siop_scsicmd_end(siop_cmd);
1084 }
1085 /* update sc_currschedslot */
1086 sc->sc_currschedslot = 0;
1087 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1088 if (siop_script_read(sc,
1089 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1090 sc->sc_currschedslot = slot;
1091 }
1092 }
1093
1094 /*
   1095  * handle a rejected queue tag message: the command will run untagged;
   1096  * we have to adjust the reselect script.
1097 */
1098 int
1099 siop_handle_qtag_reject(siop_cmd)
1100 struct siop_cmd *siop_cmd;
1101 {
1102 struct siop_softc *sc = siop_cmd->siop_sc;
1103 int target = siop_cmd->xs->xs_periph->periph_target;
1104 int lun = siop_cmd->xs->xs_periph->periph_lun;
1105 int tag = siop_cmd->siop_tables.msg_out[2];
1106 struct siop_lun *siop_lun = sc->targets[target]->siop_lun[lun];
1107
1108 #ifdef SIOP_DEBUG
1109 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1110 sc->sc_dev.dv_xname, target, lun, tag, siop_cmd->tag,
1111 siop_cmd->status);
1112 #endif
1113
1114 if (siop_lun->siop_tag[0].active != NULL) {
1115 printf("%s: untagged command already running for target %d "
1116 "lun %d (status %d)\n", sc->sc_dev.dv_xname, target, lun,
1117 siop_lun->siop_tag[0].active->status);
1118 return -1;
1119 }
1120 /* clear tag slot */
1121 siop_lun->siop_tag[tag].active = NULL;
1122 /* add command to non-tagged slot */
1123 siop_lun->siop_tag[0].active = siop_cmd;
1124 siop_cmd->tag = 0;
1125 /* adjust reselect script if there is one */
1126 if (siop_lun->siop_tag[0].reseloff > 0) {
1127 siop_script_write(sc,
1128 siop_lun->siop_tag[0].reseloff + 1,
1129 siop_cmd->dsa + sizeof(struct siop_xfer_common) +
1130 Ent_ldsa_reload_dsa);
1131 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1132 }
1133 return 0;
1134 }
1135
1136 /*
   1137  * handle a bus reset: reset chip, unqueue all active commands, free all
   1138  * target structs and report the lossage to the upper layer.
   1139  * As the upper layer may requeue immediately we have to first store
   1140  * all active commands in a temporary queue.
1141 */
1142 void
1143 siop_handle_reset(sc)
1144 struct siop_softc *sc;
1145 {
1146 struct siop_cmd *siop_cmd;
1147 struct siop_lun *siop_lun;
1148 int target, lun, tag;
1149 /*
1150 * scsi bus reset. reset the chip and restart
1151 * the queue. Need to clean up all active commands
1152 */
1153 printf("%s: scsi bus reset\n", sc->sc_dev.dv_xname);
1154 /* stop, reset and restart the chip */
1155 siop_reset(sc);
1156 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1157 /* chip has been reset, all slots are free now */
1158 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1159 scsipi_channel_thaw(&sc->sc_chan, 1);
1160 }
1161 /*
   1162 	 * Process all commands: first commands being executed
1163 */
1164 for (target = 0; target < sc->sc_chan.chan_ntargets;
1165 target++) {
1166 if (sc->targets[target] == NULL)
1167 continue;
1168 for (lun = 0; lun < 8; lun++) {
1169 siop_lun = sc->targets[target]->siop_lun[lun];
1170 if (siop_lun == NULL)
1171 continue;
1172 for (tag = 0; tag <
1173 ((sc->targets[target]->flags & TARF_TAG) ?
1174 SIOP_NTAG : 1);
1175 tag++) {
1176 siop_cmd = siop_lun->siop_tag[tag].active;
1177 if (siop_cmd == NULL)
1178 continue;
1179 scsipi_printaddr(siop_cmd->xs->xs_periph);
1180 printf("command with tag id %d reset\n", tag);
1181 siop_cmd->xs->error =
1182 (siop_cmd->flags & CMDFL_TIMEOUT) ?
1183 XS_TIMEOUT : XS_RESET;
1184 siop_cmd->xs->status = SCSI_SIOP_NOCHECK;
1185 siop_lun->siop_tag[tag].active = NULL;
1186 siop_cmd->status = CMDST_DONE;
1187 siop_scsicmd_end(siop_cmd);
1188 }
1189 }
1190 sc->targets[target]->status = TARST_ASYNC;
1191 sc->targets[target]->flags &= ~TARF_ISWIDE;
1192 sc->targets[target]->period = sc->targets[target]->offset = 0;
1193 siop_update_xfer_mode(sc, target);
1194 }
1195
1196 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_RESET, NULL);
1197 }
1198
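/*
 * scsipi adapter request entry point: start a transfer, grow the command
 * pool, or note the transfer modes (wide/sync/tagged queuing) to
 * negotiate for a target.
 */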
1199 void
1200 siop_scsipi_request(chan, req, arg)
1201 struct scsipi_channel *chan;
1202 scsipi_adapter_req_t req;
1203 void *arg;
1204 {
1205 struct scsipi_xfer *xs;
1206 struct scsipi_periph *periph;
1207 struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1208 struct siop_cmd *siop_cmd;
1209 int s, error, i;
1210 int target;
1211 int lun;
1212
1213 switch (req) {
1214 case ADAPTER_REQ_RUN_XFER:
1215 xs = arg;
1216 periph = xs->xs_periph;
1217 target = periph->periph_target;
1218 lun = periph->periph_lun;
1219
1220 s = splbio();
1221 #ifdef SIOP_DEBUG_SCHED
1222 printf("starting cmd for %d:%d\n", target, lun);
1223 #endif
1224 siop_cmd = TAILQ_FIRST(&sc->free_list);
1225 if (siop_cmd == NULL) {
1226 xs->error = XS_RESOURCE_SHORTAGE;
1227 scsipi_done(xs);
1228 splx(s);
1229 return;
1230 }
1231 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1232 #ifdef DIAGNOSTIC
1233 if (siop_cmd->status != CMDST_FREE)
1234 panic("siop_scsicmd: new cmd not free");
1235 #endif
1236 if (sc->targets[target] == NULL) {
1237 #ifdef SIOP_DEBUG
1238 printf("%s: alloc siop_target for target %d\n",
1239 sc->sc_dev.dv_xname, target);
1240 #endif
1241 sc->targets[target] =
1242 malloc(sizeof(struct siop_target),
1243 M_DEVBUF, M_NOWAIT);
1244 if (sc->targets[target] == NULL) {
1245 printf("%s: can't malloc memory for "
1246 "target %d\n", sc->sc_dev.dv_xname, target);
1247 xs->error = XS_RESOURCE_SHORTAGE;
1248 scsipi_done(xs);
1249 splx(s);
1250 return;
1251 }
1252 sc->targets[target]->status = TARST_PROBING;
1253 sc->targets[target]->flags = 0;
1254 sc->targets[target]->id =
1255 sc->clock_div << 24; /* scntl3 */
1256 sc->targets[target]->id |= target << 16; /* id */
1257 /* sc->targets[target]->id |= 0x0 << 8; scxfer is 0 */
1258
1259 /* get a lun switch script */
1260 sc->targets[target]->lunsw = siop_get_lunsw(sc);
1261 if (sc->targets[target]->lunsw == NULL) {
1262 printf("%s: can't alloc lunsw for target %d\n",
1263 sc->sc_dev.dv_xname, target);
1264 xs->error = XS_RESOURCE_SHORTAGE;
1265 scsipi_done(xs);
1266 splx(s);
1267 return;
1268 }
1269 for (i=0; i < 8; i++)
1270 sc->targets[target]->siop_lun[i] = NULL;
1271 siop_add_reselsw(sc, target);
1272 }
1273 if (sc->targets[target]->siop_lun[lun] == NULL) {
1274 sc->targets[target]->siop_lun[lun] =
1275 malloc(sizeof(struct siop_lun), M_DEVBUF,
1276 M_NOWAIT|M_ZERO);
1277 if (sc->targets[target]->siop_lun[lun] == NULL) {
1278 printf("%s: can't alloc siop_lun for "
1279 "target %d lun %d\n",
1280 sc->sc_dev.dv_xname, target, lun);
1281 xs->error = XS_RESOURCE_SHORTAGE;
1282 scsipi_done(xs);
1283 splx(s);
1284 return;
1285 }
1286 }
1287 siop_cmd->siop_target = sc->targets[target];
1288 siop_cmd->xs = xs;
1289 siop_cmd->flags = 0;
1290 siop_cmd->status = CMDST_READY;
1291
1292 /* load the DMA maps */
1293 error = bus_dmamap_load(sc->sc_dmat, siop_cmd->dmamap_cmd,
1294 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1295 if (error) {
1296 printf("%s: unable to load cmd DMA map: %d\n",
1297 sc->sc_dev.dv_xname, error);
1298 xs->error = XS_DRIVER_STUFFUP;
1299 scsipi_done(xs);
1300 splx(s);
1301 return;
1302 }
1303 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1304 error = bus_dmamap_load(sc->sc_dmat,
1305 siop_cmd->dmamap_data, xs->data, xs->datalen,
1306 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1307 ((xs->xs_control & XS_CTL_DATA_IN) ?
1308 BUS_DMA_READ : BUS_DMA_WRITE));
1309 if (error) {
1310 printf("%s: unable to load cmd DMA map: %d",
1311 sc->sc_dev.dv_xname, error);
1312 xs->error = XS_DRIVER_STUFFUP;
1313 scsipi_done(xs);
1314 bus_dmamap_unload(sc->sc_dmat, siop_cmd->dmamap_cmd);
1315 splx(s);
1316 return;
1317 }
1318 bus_dmamap_sync(sc->sc_dmat, siop_cmd->dmamap_data, 0,
1319 siop_cmd->dmamap_data->dm_mapsize,
1320 (xs->xs_control & XS_CTL_DATA_IN) ?
1321 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1322 }
1323 bus_dmamap_sync(sc->sc_dmat, siop_cmd->dmamap_cmd, 0,
1324 siop_cmd->dmamap_cmd->dm_mapsize, BUS_DMASYNC_PREWRITE);
1325
1326 siop_setuptables(siop_cmd);
1327 siop_start(sc, siop_cmd);
1328 if (xs->xs_control & XS_CTL_POLL) {
1329 /* poll for command completion */
1330 while ((xs->xs_status & XS_STS_DONE) == 0) {
1331 delay(1000);
1332 siop_intr(sc);
1333 }
1334 }
1335 splx(s);
1336 return;
1337
1338 case ADAPTER_REQ_GROW_RESOURCES:
1339 #ifdef SIOP_DEBUG
1340 printf("%s grow resources (%d)\n", sc->sc_dev.dv_xname,
1341 sc->sc_adapt.adapt_openings);
1342 #endif
1343 siop_morecbd(sc);
1344 return;
1345
1346 case ADAPTER_REQ_SET_XFER_MODE:
1347 {
1348 struct scsipi_xfer_mode *xm = arg;
1349 if (sc->targets[xm->xm_target] == NULL)
1350 return;
1351 s = splbio();
1352 if (xm->xm_mode & PERIPH_CAP_TQING)
1353 sc->targets[xm->xm_target]->flags |= TARF_TAG;
1354 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1355 (sc->features & SF_BUS_WIDE))
1356 sc->targets[xm->xm_target]->flags |= TARF_WIDE;
1357 if (xm->xm_mode & PERIPH_CAP_SYNC)
1358 sc->targets[xm->xm_target]->flags |= TARF_SYNC;
1359 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1360 sc->targets[xm->xm_target]->status == TARST_PROBING)
1361 sc->targets[xm->xm_target]->status =
1362 TARST_ASYNC;
1363
1364 for (lun = 0; lun < sc->sc_chan.chan_nluns; lun++) {
1365 if (sc->sc_chan.chan_periphs[xm->xm_target][lun])
1366 /* allocate a lun sw entry for this device */
1367 siop_add_dev(sc, xm->xm_target, lun);
1368 }
1369
1370 splx(s);
1371 }
1372 }
1373 }
1374
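/*
 * Find a free scheduler slot for this command, patch the reselect and
 * per-command scripts with its DSA, and signal the script processor.
 */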
1375 static void
1376 siop_start(sc, siop_cmd)
1377 struct siop_softc *sc;
1378 struct siop_cmd *siop_cmd;
1379 {
1380 struct siop_lun *siop_lun;
1381 u_int32_t dsa;
1382 int timeout;
1383 int target, lun, slot;
1384
1385 /*
1386 * first make sure to read valid data
1387 */
1388 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1389
1390 /*
1391 * The queue management here is a bit tricky: the script always looks
1392 * at the slot from first to last, so if we always use the first
1393 * free slot commands can stay at the tail of the queue ~forever.
1394 * The algorithm used here is to restart from the head when we know
1395 * that the queue is empty, and only add commands after the last one.
1396 * When we're at the end of the queue wait for the script to clear it.
1397 * The best thing to do here would be to implement a circular queue,
1398 * but using only 53c720 features this can be "interesting".
1399 * A mid-way solution could be to implement 2 queues and swap orders.
1400 */
1401 slot = sc->sc_currschedslot;
1402 /*
1403 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1404 * free. As this is the last used slot, all previous slots are free,
1405 * we can restart from 0.
1406 */
1407 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1408 0x80000000) {
1409 slot = sc->sc_currschedslot = 0;
1410 } else {
1411 slot++;
1412 }
1413 target = siop_cmd->xs->xs_periph->periph_target;
1414 lun = siop_cmd->xs->xs_periph->periph_lun;
1415 siop_lun = sc->targets[target]->siop_lun[lun];
1416 /* if non-tagged command active, panic: this shouldn't happen */
1417 if (siop_lun->siop_tag[0].active != NULL) {
1418 panic("siop_start: tagged cmd while untagged running");
1419 }
1420 #ifdef DIAGNOSTIC
1421 /* sanity check the tag if needed */
1422 if (siop_cmd->flags & CMDFL_TAG) {
1423 if (siop_lun->siop_tag[siop_cmd->tag].active != NULL)
1424 panic("siop_start: tag not free");
1425 if (siop_cmd->tag >= SIOP_NTAG) {
1426 scsipi_printaddr(siop_cmd->xs->xs_periph);
1427 printf(": tag id %d\n", siop_cmd->tag);
1428 panic("siop_start: invalid tag id");
1429 }
1430 }
1431 #endif
1432 /*
1433 * find a free scheduler slot and load it.
1434 */
1435 for (; slot < SIOP_NSLOTS; slot++) {
1436 /*
   1437 		 * If cmd is 0x80000000 the slot is free
1438 */
1439 if (siop_script_read(sc,
1440 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1441 0x80000000)
1442 break;
1443 }
1444 if (slot == SIOP_NSLOTS) {
1445 /*
1446 * no more free slot, no need to continue. freeze the queue
1447 * and requeue this command.
1448 */
1449 scsipi_channel_freeze(&sc->sc_chan, 1);
1450 sc->sc_flags |= SCF_CHAN_NOSLOT;
1451 siop_cmd->xs->error = XS_REQUEUE;
1452 siop_cmd->xs->status = SCSI_SIOP_NOCHECK;
1453 siop_scsicmd_end(siop_cmd);
1454 return;
1455 }
1456 #ifdef SIOP_DEBUG_SCHED
1457 printf("using slot %d for DSA 0x%lx\n", slot,
1458 (u_long)siop_cmd->dsa);
1459 #endif
1460 /* mark command as active */
1461 if (siop_cmd->status == CMDST_READY)
1462 siop_cmd->status = CMDST_ACTIVE;
1463 else
1464 panic("siop_start: bad status");
1465 siop_lun->siop_tag[siop_cmd->tag].active = siop_cmd;
1466 /* patch scripts with DSA addr */
1467 dsa = siop_cmd->dsa;
1468 /* first reselect switch, if we have an entry */
1469 if (siop_lun->siop_tag[siop_cmd->tag].reseloff > 0)
1470 siop_script_write(sc,
1471 siop_lun->siop_tag[siop_cmd->tag].reseloff + 1,
1472 dsa + sizeof(struct siop_xfer_common) +
1473 Ent_ldsa_reload_dsa);
1474 /* CMD script: MOVE MEMORY addr */
1475 siop_cmd->siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1476 htole32(sc->sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1477 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1478 /* scheduler slot: JUMP ldsa_select */
1479 siop_script_write(sc,
1480 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1481 dsa + sizeof(struct siop_xfer_common) + Ent_ldsa_select);
1482 /* handle timeout */
1483 if ((siop_cmd->xs->xs_control & XS_CTL_POLL) == 0) {
   1484 		/* start expire timer */
1485 timeout = mstohz(siop_cmd->xs->timeout);
1486 if (timeout == 0)
1487 timeout = 1;
1488 callout_reset( &siop_cmd->xs->xs_callout,
1489 timeout, siop_timeout, siop_cmd);
1490 }
1491 /*
1492 * Change JUMP cmd so that this slot will be handled
1493 */
1494 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1495 0x80080000);
1496 sc->sc_currschedslot = slot;
1497
1498 /* make sure SCRIPT processor will read valid data */
1499 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1500 /* Signal script it has some work to do */
1501 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SIGP);
1502 /* and wait for IRQ */
1503 return;
1504 }
1505
1506 void
1507 siop_timeout(v)
1508 void *v;
1509 {
1510 struct siop_cmd *siop_cmd = v;
1511 struct siop_softc *sc = siop_cmd->siop_sc;
1512 int s;
1513
1514 scsipi_printaddr(siop_cmd->xs->xs_periph);
1515 printf("command timeout\n");
1516
1517 s = splbio();
1518 /* reset the scsi bus */
1519 siop_resetbus(sc);
1520
1521 /* deactivate callout */
1522 callout_stop(&siop_cmd->xs->xs_callout);
   1523 	/*
   1524 	 * mark command as being timed out and just return;
   1525 	 * the bus reset will generate an interrupt,
   1526 	 * it will be handled in siop_intr()
   1527 	 */
1529 siop_cmd->flags |= CMDFL_TIMEOUT;
1530 splx(s);
1531 return;
1532
1533 }
1534
1535 void
1536 siop_dump_script(sc)
1537 struct siop_softc *sc;
1538 {
1539 int i;
1540 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1541 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1542 le32toh(sc->sc_script[i]), le32toh(sc->sc_script[i+1]));
1543 if ((le32toh(sc->sc_script[i]) & 0xe0000000) == 0xc0000000) {
1544 i++;
1545 printf(" 0x%08x", le32toh(sc->sc_script[i+1]));
1546 }
1547 printf("\n");
1548 }
1549 }
1550
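/*
 * Allocate a new page of command descriptors and their DMA maps, build
 * the per-command copy of the load_dsa script, and add the new commands
 * to the free list.
 */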
1551 void
1552 siop_morecbd(sc)
1553 struct siop_softc *sc;
1554 {
1555 int error, i, j, s;
1556 bus_dma_segment_t seg;
1557 int rseg;
1558 struct siop_cbd *newcbd;
1559 bus_addr_t dsa;
1560 u_int32_t *scr;
1561
1562 /* allocate a new list head */
1563 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1564 if (newcbd == NULL) {
1565 printf("%s: can't allocate memory for command descriptors "
1566 "head\n", sc->sc_dev.dv_xname);
1567 return;
1568 }
1569
1570 /* allocate cmd list */
1571 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1572 M_DEVBUF, M_NOWAIT|M_ZERO);
1573 if (newcbd->cmds == NULL) {
1574 printf("%s: can't allocate memory for command descriptors\n",
1575 sc->sc_dev.dv_xname);
1576 goto bad3;
1577 }
1578 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1579 1, &rseg, BUS_DMA_NOWAIT);
1580 if (error) {
1581 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1582 sc->sc_dev.dv_xname, error);
1583 goto bad2;
1584 }
1585 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
1586 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1587 if (error) {
1588 printf("%s: unable to map cbd DMA memory, error = %d\n",
1589 sc->sc_dev.dv_xname, error);
1590 goto bad2;
1591 }
1592 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1593 BUS_DMA_NOWAIT, &newcbd->xferdma);
1594 if (error) {
1595 printf("%s: unable to create cbd DMA map, error = %d\n",
1596 sc->sc_dev.dv_xname, error);
1597 goto bad1;
1598 }
1599 error = bus_dmamap_load(sc->sc_dmat, newcbd->xferdma, newcbd->xfers,
1600 PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1601 if (error) {
1602 printf("%s: unable to load cbd DMA map, error = %d\n",
1603 sc->sc_dev.dv_xname, error);
1604 goto bad0;
1605 }
1606 #ifdef DEBUG
1607 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_dev.dv_xname,
1608 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1609 #endif
1610 for (i = 0; i < SIOP_NCMDPB; i++) {
1611 error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, SIOP_NSG,
1612 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1613 &newcbd->cmds[i].dmamap_data);
1614 if (error) {
1615 printf("%s: unable to create data DMA map for cbd: "
1616 "error %d\n",
1617 sc->sc_dev.dv_xname, error);
1618 goto bad0;
1619 }
1620 error = bus_dmamap_create(sc->sc_dmat,
1621 sizeof(struct scsipi_generic), 1,
1622 sizeof(struct scsipi_generic), 0,
1623 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1624 &newcbd->cmds[i].dmamap_cmd);
1625 if (error) {
1626 printf("%s: unable to create cmd DMA map for cbd %d\n",
1627 sc->sc_dev.dv_xname, error);
1628 goto bad0;
1629 }
1630 newcbd->cmds[i].siop_sc = sc;
1631 newcbd->cmds[i].siop_cbdp = newcbd;
1632 newcbd->cmds[i].siop_xfer = &newcbd->xfers[i];
1633 memset(newcbd->cmds[i].siop_xfer, 0,
1634 sizeof(struct siop_xfer));
1635 newcbd->cmds[i].dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1636 i * sizeof(struct siop_xfer);
1637 dsa = newcbd->cmds[i].dsa;
1638 newcbd->cmds[i].status = CMDST_FREE;
1639 newcbd->cmds[i].siop_tables.t_msgout.count= htole32(1);
1640 newcbd->cmds[i].siop_tables.t_msgout.addr = htole32(dsa);
1641 newcbd->cmds[i].siop_tables.t_msgin.count= htole32(1);
1642 newcbd->cmds[i].siop_tables.t_msgin.addr = htole32(dsa + 8);
1643 newcbd->cmds[i].siop_tables.t_extmsgin.count= htole32(2);
1644 newcbd->cmds[i].siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1645 newcbd->cmds[i].siop_tables.t_extmsgdata.addr =
1646 htole32(dsa + 11);
1647 newcbd->cmds[i].siop_tables.t_status.count= htole32(1);
1648 newcbd->cmds[i].siop_tables.t_status.addr = htole32(dsa + 16);
1649
1650 /* The select/reselect script */
1651 scr = &newcbd->cmds[i].siop_xfer->resel[0];
1652 for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1653 scr[j] = htole32(load_dsa[j]);
1654 /*
1655 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1656 * octet, reg offset is the third.
1657 */
1658 scr[Ent_rdsa0 / 4] =
1659 htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
1660 scr[Ent_rdsa1 / 4] =
1661 htole32(0x78110000 | ( dsa & 0x0000ff00 ));
1662 scr[Ent_rdsa2 / 4] =
1663 htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
1664 scr[Ent_rdsa3 / 4] =
1665 htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
1666 scr[E_ldsa_abs_reselected_Used[0]] =
1667 htole32(sc->sc_scriptaddr + Ent_reselected);
1668 scr[E_ldsa_abs_reselect_Used[0]] =
1669 htole32(sc->sc_scriptaddr + Ent_reselect);
1670 scr[E_ldsa_abs_selected_Used[0]] =
1671 htole32(sc->sc_scriptaddr + Ent_selected);
1672 scr[E_ldsa_abs_data_Used[0]] =
1673 htole32(dsa + sizeof(struct siop_xfer_common) +
1674 Ent_ldsa_data);
1675 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1676 scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1677 s = splbio();
1678 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1679 splx(s);
1680 #ifdef SIOP_DEBUG
1681 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1682 le32toh(newcbd->cmds[i].siop_tables.t_msgin.addr),
1683 le32toh(newcbd->cmds[i].siop_tables.t_msgout.addr),
1684 le32toh(newcbd->cmds[i].siop_tables.t_status.addr));
1685 #endif
1686 }
1687 s = splbio();
1688 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1689 sc->sc_adapt.adapt_openings += SIOP_NCMDPB;
1690 splx(s);
1691 return;
1692 bad0:
1693 bus_dmamap_unload(sc->sc_dmat, newcbd->xferdma);
1694 bus_dmamap_destroy(sc->sc_dmat, newcbd->xferdma);
1695 bad1:
1696 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1697 bad2:
1698 free(newcbd->cmds, M_DEVBUF);
1699 bad3:
1700 free(newcbd, M_DEVBUF);
1701 return;
1702 }
1703
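/*
 * Get a lun switch script fragment: reuse a free one from lunsw_list if
 * possible, otherwise copy a new one into the script free space.
 */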
1704 struct siop_lunsw *
1705 siop_get_lunsw(sc)
1706 struct siop_softc *sc;
1707 {
1708 struct siop_lunsw *lunsw;
1709 int i;
1710
1711 if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1712 sc->script_free_hi)
1713 return NULL;
1714 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1715 if (lunsw != NULL) {
1716 #ifdef SIOP_DEBUG
1717 printf("siop_get_lunsw got lunsw at offset %d\n",
1718 lunsw->lunsw_off);
1719 #endif
1720 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1721 return lunsw;
1722 }
1723 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1724 if (lunsw == NULL)
1725 return NULL;
1726 #ifdef SIOP_DEBUG
1727 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1728 #endif
1729 if (sc->features & SF_CHIP_RAM) {
1730 bus_space_write_region_4(sc->sc_ramt, sc->sc_ramh,
1731 sc->script_free_lo * 4, lun_switch,
1732 sizeof(lun_switch) / sizeof(lun_switch[0]));
1733 bus_space_write_4(sc->sc_ramt, sc->sc_ramh,
1734 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1735 sc->sc_scriptaddr + Ent_lunsw_return);
1736 } else {
1737 for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1738 i++)
1739 sc->sc_script[sc->script_free_lo + i] =
1740 htole32(lun_switch[i]);
1741 sc->sc_script[sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1742 htole32(sc->sc_scriptaddr + Ent_lunsw_return);
1743 }
1744 lunsw->lunsw_off = sc->script_free_lo;
1745 lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1746 sc->script_free_lo += lunsw->lunsw_size;
1747 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1748 return lunsw;
1749 }
1750
1751 void
1752 siop_add_reselsw(sc, target)
1753 struct siop_softc *sc;
1754 int target;
1755 {
1756 int i;
1757 struct siop_lun *siop_lun;
1758 /*
1759 * add an entry to resel switch
1760 */
1761 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1762 for (i = 0; i < 15; i++) {
1763 sc->targets[target]->reseloff = Ent_resel_targ0 / 4 + i * 2;
1764 if ((siop_script_read(sc, sc->targets[target]->reseloff) & 0xff)
1765 == 0xff) { /* it's free */
1766 #ifdef SIOP_DEBUG
1767 printf("siop: target %d slot %d offset %d\n",
1768 target, i, sc->targets[target]->reseloff);
1769 #endif
1770 /* JUMP abs_foo, IF target | 0x80; */
1771 siop_script_write(sc, sc->targets[target]->reseloff,
1772 0x800c0080 | target);
1773 siop_script_write(sc, sc->targets[target]->reseloff + 1,
1774 sc->sc_scriptaddr +
1775 sc->targets[target]->lunsw->lunsw_off * 4 +
1776 Ent_lun_switch_entry);
1777 break;
1778 }
1779 }
1780 if (i == 15) /* no free slot, shouldn't happen */
1781 panic("siop: resel switch full");
1782
1783 sc->sc_ntargets++;
1784 for (i = 0; i < 8; i++) {
1785 siop_lun = sc->targets[target]->siop_lun[i];
1786 if (siop_lun == NULL)
1787 continue;
1788 if (siop_lun->reseloff > 0) {
1789 siop_lun->reseloff = 0;
1790 siop_add_dev(sc, target, i);
1791 }
1792 }
1793 siop_update_scntl3(sc, sc->targets[target]);
1794 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1795 }
1796
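/*
 * Patch the target's lun switch so that a reselection reloads the
 * negotiated SCNTL3 and SXFER values for this target.
 */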
1797 void
1798 siop_update_scntl3(sc, siop_target)
1799 struct siop_softc *sc;
1800 struct siop_target *siop_target;
1801 {
1802 /* MOVE target->id >> 24 TO SCNTL3 */
1803 siop_script_write(sc,
1804 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1805 0x78030000 | ((siop_target->id >> 16) & 0x0000ff00));
1806 /* MOVE target->id >> 8 TO SXFER */
1807 siop_script_write(sc,
1808 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1809 0x78050000 | (siop_target->id & 0x0000ff00));
1810 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1811 }
1812
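/*
 * Add a lun entry to the target's lun switch, and a tag switch if the
 * target does tagged queuing, so that reselections for this lun can be
 * routed to the right command.
 */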
1813 void
1814 siop_add_dev(sc, target, lun)
1815 struct siop_softc *sc;
1816 int target;
1817 int lun;
1818 {
1819 struct siop_lunsw *lunsw;
1820 struct siop_lun *siop_lun = sc->targets[target]->siop_lun[lun];
1821 int i, ntargets;
1822
1823 if (siop_lun->reseloff > 0)
1824 return;
1825 lunsw = sc->targets[target]->lunsw;
1826 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1827 /*
1828 * can't extend this slot. Probably not worth trying to deal
1829 * with this case
1830 */
1831 #ifdef DEBUG
1832 printf("%s:%d:%d: can't allocate a lun sw slot\n",
1833 sc->sc_dev.dv_xname, target, lun);
1834 #endif
1835 return;
1836 }
1837 /* count how many free targets we still have to probe */
1838 ntargets = sc->sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1839
1840 /*
   1841 	 * we need 8 bytes for the additional lun sw entry, and
   1842 	 * possibly sizeof(tag_switch) for the tag switch entry.
   1843 	 * Keep enough free space for the free targets that could be
1844 * probed later.
1845 */
1846 if (sc->script_free_lo + 2 +
1847 (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1848 ((sc->targets[target]->flags & TARF_TAG) ?
1849 sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1850 sc->script_free_hi)) {
1851 /*
   1852 		 * not enough space, probably not worth dealing with it.
1853 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1854 */
1855 #ifdef DEBUG
1856 printf("%s:%d:%d: not enouth memory for a lun sw slot\n",
1857 sc->sc_dev.dv_xname, target, lun);
1858 #endif
1859 return;
1860 }
1861 #ifdef SIOP_DEBUG
1862 printf("%s:%d:%d: allocate lun sw entry\n",
1863 sc->sc_dev.dv_xname, target, lun);
1864 #endif
1865 /* INT int_resellun */
1866 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1867 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1868 /* Now the slot entry: JUMP abs_foo, IF lun */
1869 siop_script_write(sc, sc->script_free_lo - 2,
1870 0x800c0000 | lun);
1871 siop_script_write(sc, sc->script_free_lo - 1, 0);
1872 siop_lun->reseloff = sc->script_free_lo - 2;
1873 lunsw->lunsw_size += 2;
1874 sc->script_free_lo += 2;
1875 if (sc->targets[target]->flags & TARF_TAG) {
1876 /* we need a tag switch */
1877 sc->script_free_hi -=
1878 sizeof(tag_switch) / sizeof(tag_switch[0]);
1879 if (sc->features & SF_CHIP_RAM) {
1880 bus_space_write_region_4(sc->sc_ramt, sc->sc_ramh,
1881 sc->script_free_hi * 4, tag_switch,
1882 sizeof(tag_switch) / sizeof(tag_switch[0]));
1883 } else {
1884 for(i = 0;
1885 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1886 i++) {
1887 sc->sc_script[sc->script_free_hi + i] =
1888 htole32(tag_switch[i]);
1889 }
1890 }
1891 siop_script_write(sc,
1892 siop_lun->reseloff + 1,
1893 sc->sc_scriptaddr + sc->script_free_hi * 4 +
1894 Ent_tag_switch_entry);
1895
1896 for (i = 0; i < SIOP_NTAG; i++) {
1897 siop_lun->siop_tag[i].reseloff =
1898 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1899 }
1900 } else {
1901 /* non-tag case; just work with the lun switch */
1902 siop_lun->siop_tag[0].reseloff =
1903 sc->targets[target]->siop_lun[lun]->reseloff;
1904 }
1905 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1906 }
1907
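/*
 * Free the per-lun state; when the last lun of a target goes away also
 * free the target structure and its reselect switch entry.
 */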
1908 void
1909 siop_del_dev(sc, target, lun)
1910 struct siop_softc *sc;
1911 int target;
1912 int lun;
1913 {
1914 int i;
1915 #ifdef SIOP_DEBUG
1916 printf("%s:%d:%d: free lun sw entry\n",
1917 sc->sc_dev.dv_xname, target, lun);
1918 #endif
1919 if (sc->targets[target] == NULL)
1920 return;
1921 free(sc->targets[target]->siop_lun[lun], M_DEVBUF);
1922 sc->targets[target]->siop_lun[lun] = NULL;
1923 /* XXX compact sw entry too ? */
1924 /* check if we can free the whole target */
1925 for (i = 0; i < 8; i++) {
1926 if (sc->targets[target]->siop_lun[i] != NULL)
1927 return;
1928 }
1929 #ifdef SIOP_DEBUG
1930 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
1931 sc->sc_dev.dv_xname, target, lun,
1932 sc->targets[target]->lunsw->lunsw_off);
1933 #endif
1934 /*
1935 * nothing here, free the target struct and resel
1936 * switch entry
1937 */
1938 siop_script_write(sc, sc->targets[target]->reseloff, 0x800c00ff);
1939 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1940 TAILQ_INSERT_TAIL(&sc->lunsw_list, sc->targets[target]->lunsw, next);
1941 free(sc->targets[target], M_DEVBUF);
1942 sc->targets[target] = NULL;
1943 sc->sc_ntargets--;
1944 }
1945
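/*
 * Report the transfer mode currently in use with a target (wide, sync
 * period/offset, tagged queuing) to the scsipi layer.
 */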
1946 void
1947 siop_update_xfer_mode(sc, target)
1948 struct siop_softc *sc;
1949 int target;
1950 {
1951 struct siop_target *siop_target = sc->targets[target];
1952 struct scsipi_xfer_mode xm;
1953
1954 xm.xm_target = target;
1955 xm.xm_mode = 0;
1956 xm.xm_period = 0;
1957 xm.xm_offset = 0;
1958
1959 if (siop_target->flags & TARF_ISWIDE)
1960 xm.xm_mode |= PERIPH_CAP_WIDE16;
1961 if (siop_target->period) {
1962 xm.xm_period = siop_target->period;
1963 xm.xm_offset = siop_target->offset;
1964 xm.xm_mode |= PERIPH_CAP_SYNC;
1965 }
1966 if (siop_target->flags & TARF_TAG)
1967 xm.xm_mode |= PERIPH_CAP_TQING;
1968 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
1969 }
1970
1971 #ifdef SIOP_STATS
1972 void
1973 siop_printstats()
1974 {
1975 printf("siop_stat_intr %d\n", siop_stat_intr);
1976 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
1977 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
1978 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
1979 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
1980 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
1981 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
1982 }
1983 #endif
1984