1 /* $NetBSD: siop.c,v 1.57 2002/04/23 12:55:26 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.57 2002/04/23 12:55:26 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
81
82 /* Number of scheduler slots (needs to match the script) */
83 #define SIOP_NSLOTS 40
84
85 void siop_reset __P((struct siop_softc *));
86 void siop_handle_reset __P((struct siop_softc *));
87 int siop_handle_qtag_reject __P((struct siop_cmd *));
88 void siop_scsicmd_end __P((struct siop_cmd *));
89 void siop_unqueue __P((struct siop_softc *, int, int));
90 static void siop_start __P((struct siop_softc *, struct siop_cmd *));
91 void siop_timeout __P((void *));
92 int siop_scsicmd __P((struct scsipi_xfer *));
93 void siop_scsipi_request __P((struct scsipi_channel *,
94 scsipi_adapter_req_t, void *));
95 void siop_dump_script __P((struct siop_softc *));
96 void siop_morecbd __P((struct siop_softc *));
97 struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
98 void siop_add_reselsw __P((struct siop_softc *, int));
99 void siop_update_scntl3 __P((struct siop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int siop_stat_intr = 0;
104 static int siop_stat_intr_shortxfer = 0;
105 static int siop_stat_intr_sdp = 0;
106 static int siop_stat_intr_done = 0;
107 static int siop_stat_intr_xferdisc = 0;
108 static int siop_stat_intr_lunresel = 0;
109 static int siop_stat_intr_qfull = 0;
110 void siop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
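/*
 * Helpers to access the script: it runs either from on-chip RAM
 * (SF_CHIP_RAM) or from a host-memory DMA buffer; these hide the
 * difference and handle byte order and DMA syncs for the host copy.
 */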
116 static __inline__ void siop_script_sync __P((struct siop_softc *, int));
117 static __inline__ void
118 siop_script_sync(sc, ops)
119 struct siop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
128 static __inline__ u_int32_t
129 siop_script_read(sc, offset)
130 struct siop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 siop_script_write(sc, offset, val)
145 struct siop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
156
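/*
 * Bus-independent attachment: set up the script memory, the scsipi
 * adapter and channel, and the sync parameters for this chip, then
 * reset the bus and the chip and attach the SCSI bus.
 */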
157 void
158 siop_attach(sc)
159 struct siop_softc *sc;
160 {
161 int error, i;
162 bus_dma_segment_t seg;
163 int rseg;
164
165 /*
166 * Allocate DMA-safe memory for the script and map it.
167 */
168 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
169 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
170 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
171 if (error) {
172 printf("%s: unable to allocate script DMA memory, "
173 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
174 return;
175 }
176 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
177 (caddr_t *)&sc->sc_c.sc_script,
178 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
179 if (error) {
180 printf("%s: unable to map script DMA memory, "
181 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
182 return;
183 }
184 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
185 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
186 if (error) {
187 printf("%s: unable to create script DMA map, "
188 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
189 return;
190 }
191 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
192 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
193 if (error) {
194 printf("%s: unable to load script DMA map, "
195 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
196 return;
197 }
198 sc->sc_c.sc_scriptaddr =
199 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
200 sc->sc_c.ram_size = PAGE_SIZE;
201 }
202 TAILQ_INIT(&sc->free_list);
203 TAILQ_INIT(&sc->cmds);
204 TAILQ_INIT(&sc->lunsw_list);
205 sc->sc_currschedslot = 0;
206 #ifdef SIOP_DEBUG
207 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
208 sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
209 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
210 #endif
211
212 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
213 sc->sc_c.sc_adapt.adapt_nchannels = 1;
214 sc->sc_c.sc_adapt.adapt_openings = 0;
215 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
216 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
217 sc->sc_c.sc_adapt.adapt_minphys = minphys;
218 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
219
220 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
221 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
222 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
223 sc->sc_c.sc_chan.chan_channel = 0;
224 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
225 sc->sc_c.sc_chan.chan_ntargets =
226 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
227 sc->sc_c.sc_chan.chan_nluns = 8;
228 sc->sc_c.sc_chan.chan_id =
229 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
230 if (sc->sc_c.sc_chan.chan_id == 0 ||
231 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
232 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
233
234 for (i = 0; i < 16; i++)
235 sc->sc_c.targets[i] = NULL;
236
237 /* find min/max sync period for this chip */
238 sc->sc_c.maxsync = 0;
239 sc->sc_c.minsync = 255;
240 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
241 if (sc->sc_c.clock_period != scf_period[i].clock)
242 continue;
243 if (sc->sc_c.maxsync < scf_period[i].period)
244 sc->sc_c.maxsync = scf_period[i].period;
245 if (sc->sc_c.minsync > scf_period[i].period)
246 sc->sc_c.minsync = scf_period[i].period;
247 }
248 if (sc->sc_c.maxsync == 0 || sc->sc_c.minsync == 255)
249 panic("siop: can't find my sync parameters\n");
250 /* Do a bus reset, so that devices fall back to narrow/async */
251 siop_resetbus(&sc->sc_c);
252 /*
253 * siop_reset() will reset the chip, thus clearing pending interrupts
254 */
255 siop_reset(sc);
256 #ifdef DUMP_SCRIPT
257 siop_dump_script(sc);
258 #endif
259
260 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
261 }
262
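/*
 * Reset the chip, copy and patch the script (to on-chip RAM or to the
 * host-memory buffer), rebuild the lun and reselect switches for the
 * known targets and restart the script at the reselect entry.
 */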
263 void
264 siop_reset(sc)
265 struct siop_softc *sc;
266 {
267 int i, j;
268 struct siop_lunsw *lunsw;
269
270 siop_common_reset(&sc->sc_c);
271
272 /* copy and patch the script */
273 if (sc->sc_c.features & SF_CHIP_RAM) {
274 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
275 siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
276 for (j = 0; j <
277 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
278 j++) {
279 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
280 E_abs_msgin_Used[j] * 4,
281 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
282 }
283 if (sc->sc_c.features & SF_CHIP_LED0) {
284 bus_space_write_region_4(sc->sc_c.sc_ramt,
285 sc->sc_c.sc_ramh,
286 Ent_led_on1, siop_led_on,
287 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
288 bus_space_write_region_4(sc->sc_c.sc_ramt,
289 sc->sc_c.sc_ramh,
290 Ent_led_on2, siop_led_on,
291 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
292 bus_space_write_region_4(sc->sc_c.sc_ramt,
293 sc->sc_c.sc_ramh,
294 Ent_led_off, siop_led_off,
295 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
296 }
297 } else {
298 for (j = 0;
299 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
300 sc->sc_c.sc_script[j] = htole32(siop_script[j]);
301 }
302 for (j = 0; j <
303 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
304 j++) {
305 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
306 htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
307 }
308 if (sc->sc_c.features & SF_CHIP_LED0) {
309 for (j = 0; j < (sizeof(siop_led_on) /
310 sizeof(siop_led_on[0])); j++)
311 sc->sc_c.sc_script[
312 Ent_led_on1 / sizeof(siop_led_on[0]) + j
313 ] = htole32(siop_led_on[j]);
314 for (j = 0; j < (sizeof(siop_led_on) /
315 sizeof(siop_led_on[0])); j++)
316 sc->sc_c.sc_script[
317 Ent_led_on2 / sizeof(siop_led_on[0]) + j
318 ] = htole32(siop_led_on[j]);
319 for (j = 0; j < (sizeof(siop_led_off) /
320 sizeof(siop_led_off[0])); j++)
321 sc->sc_c.sc_script[
322 Ent_led_off / sizeof(siop_led_off[0]) + j
323 ] = htole32(siop_led_off[j]);
324 }
325 }
326 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
327 sc->script_free_hi = sc->sc_c.ram_size / 4;
328
329 /* free used and unused lun switches */
330 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
331 #ifdef SIOP_DEBUG
332 printf("%s: free lunsw at offset %d\n",
333 sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
334 #endif
335 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
336 free(lunsw, M_DEVBUF);
337 }
338 TAILQ_INIT(&sc->lunsw_list);
339 /* restore reselect switch */
340 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
341 struct siop_target *target;
342 if (sc->sc_c.targets[i] == NULL)
343 continue;
344 #ifdef SIOP_DEBUG
345 printf("%s: restore sw for target %d\n",
346 sc->sc_c.sc_dev.dv_xname, i);
347 #endif
348 target = (struct siop_target *)sc->sc_c.targets[i];
349 free(target->lunsw, M_DEVBUF);
350 target->lunsw = siop_get_lunsw(sc);
351 if (target->lunsw == NULL) {
352 printf("%s: can't alloc lunsw for target %d\n",
353 sc->sc_c.sc_dev.dv_xname, i);
354 break;
355 }
356 siop_add_reselsw(sc, i);
357 }
358
359 /* start script */
360 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
361 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
362 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
363 }
364 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
365 sc->sc_c.sc_scriptaddr + Ent_reselect);
366 }
367
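/*
 * CALL_SCRIPT(ent) starts the script processor at the given script
 * entry by writing its bus address to DSP; the disabled variant also
 * prints DSA/DSP for debugging.
 */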
368 #if 0
369 #define CALL_SCRIPT(ent) do {\
370 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
371 siop_cmd->cmd_c.dsa, \
372 sc->sc_c.sc_scriptaddr + ent); \
373 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
374 } while (0)
375 #else
376 #define CALL_SCRIPT(ent) do {\
377 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
378 } while (0)
379 #endif
380
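/*
 * Interrupt handler: find the command being processed from the DSA
 * register, then handle DMA interrupts, SCSI interrupts and script
 * interrupts in turn.
 */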
381 int
382 siop_intr(v)
383 void *v;
384 {
385 struct siop_softc *sc = v;
386 struct siop_target *siop_target;
387 struct siop_cmd *siop_cmd;
388 struct siop_lun *siop_lun;
389 struct scsipi_xfer *xs;
390 int istat, sist, sstat1, dstat;
391 u_int32_t irqcode;
392 int need_reset = 0;
393 int offset, target, lun, tag;
394 bus_addr_t dsa;
395 struct siop_cbd *cbdp;
396 int freetarget = 0;
397 int restart = 0;
398
399 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
400 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
401 return 0;
402 INCSTAT(siop_stat_intr);
403 if (istat & ISTAT_INTF) {
404 printf("INTRF\n");
405 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
406 SIOP_ISTAT, ISTAT_INTF);
407 }
408 /* use DSA to find the current siop_cmd */
409 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
410 for (cbdp = TAILQ_FIRST(&sc->cmds); cbdp != NULL;
411 cbdp = TAILQ_NEXT(cbdp, next)) {
412 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
413 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
414 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
415 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
416 siop_table_sync(siop_cmd,
417 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
418 break;
419 }
420 }
421 if (cbdp == NULL) {
422 siop_cmd = NULL;
423 }
424 if (siop_cmd) {
425 xs = siop_cmd->cmd_c.xs;
426 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
427 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
428 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
429 tag = siop_cmd->cmd_c.tag;
430 siop_lun = siop_target->siop_lun[lun];
431 #ifdef DIAGNOSTIC
432 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
433 printf("siop_cmd (lun %d) for DSA 0x%x "
434 "not active (%d)\n", lun, (u_int)dsa,
435 siop_cmd->cmd_c.status);
436 xs = NULL;
437 siop_target = NULL;
438 target = -1;
439 lun = -1;
440 tag = -1;
441 siop_lun = NULL;
442 siop_cmd = NULL;
443 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
444 printf("siop_cmd (lun %d tag %d) not in siop_lun "
445 "active (%p != %p)\n", lun, tag, siop_cmd,
446 siop_lun->siop_tag[tag].active);
447 }
448 #endif
449 } else {
450 xs = NULL;
451 siop_target = NULL;
452 target = -1;
453 lun = -1;
454 tag = -1;
455 siop_lun = NULL;
456 }
457 if (istat & ISTAT_DIP) {
458 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
459 SIOP_DSTAT);
460 if (dstat & DSTAT_SSI) {
461 printf("single step dsp 0x%08x dsa 0x08%x\n",
462 (int)(bus_space_read_4(sc->sc_c.sc_rt,
463 sc->sc_c.sc_rh, SIOP_DSP) -
464 sc->sc_c.sc_scriptaddr),
465 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
466 SIOP_DSA));
467 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
468 (istat & ISTAT_SIP) == 0) {
469 bus_space_write_1(sc->sc_c.sc_rt,
470 sc->sc_c.sc_rh, SIOP_DCNTL,
471 bus_space_read_1(sc->sc_c.sc_rt,
472 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
473 }
474 return 1;
475 }
476 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
477 printf("DMA IRQ:");
478 if (dstat & DSTAT_IID)
479 printf(" Illegal instruction");
480 if (dstat & DSTAT_ABRT)
481 printf(" abort");
482 if (dstat & DSTAT_BF)
483 printf(" bus fault");
484 if (dstat & DSTAT_MDPE)
485 printf(" parity");
486 if (dstat & DSTAT_DFE)
487 printf(" dma fifo empty");
488 printf(", DSP=0x%x DSA=0x%x: ",
489 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
490 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
491 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
492 if (siop_cmd)
493 printf("last msg_in=0x%x status=0x%x\n",
494 siop_cmd->cmd_tables->msg_in[0],
495 le32toh(siop_cmd->cmd_tables->status));
496 else
497 printf("%s: current DSA invalid\n",
498 sc->sc_c.sc_dev.dv_xname);
499 need_reset = 1;
500 }
501 }
502 if (istat & ISTAT_SIP) {
503 if (istat & ISTAT_DIP)
504 delay(10);
505 /*
506 * Can't read sist0 & sist1 independently, or we would
507 * have to insert a delay between the two reads.
508 */
509 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
510 SIOP_SIST0);
511 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
512 SIOP_SSTAT1);
513 #ifdef SIOP_DEBUG_INTR
514 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
515 "DSA=0x%x DSP=0x%lx\n", sist,
516 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
517 SIOP_SSTAT1),
518 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
519 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
520 SIOP_DSP) -
521 sc->sc_c.sc_scriptaddr));
522 #endif
523 if (sist & SIST0_RST) {
524 siop_handle_reset(sc);
525 /* no table to flush here */
526 return 1;
527 }
528 if (sist & SIST0_SGE) {
529 if (siop_cmd)
530 scsipi_printaddr(xs->xs_periph);
531 else
532 printf("%s:", sc->sc_c.sc_dev.dv_xname);
533 printf("scsi gross error\n");
534 goto reset;
535 }
536 if ((sist & SIST0_MA) && need_reset == 0) {
537 if (siop_cmd) {
538 int scratcha0;
539 dstat = bus_space_read_1(sc->sc_c.sc_rt,
540 sc->sc_c.sc_rh, SIOP_DSTAT);
541 /*
542 * first restore DSA, in case we were in a S/G
543 * operation.
544 */
545 bus_space_write_4(sc->sc_c.sc_rt,
546 sc->sc_c.sc_rh,
547 SIOP_DSA, siop_cmd->cmd_c.dsa);
548 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
549 sc->sc_c.sc_rh, SIOP_SCRATCHA);
550 switch (sstat1 & SSTAT1_PHASE_MASK) {
551 case SSTAT1_PHASE_STATUS:
552 /*
553 * The previous phase may be aborted for any reason
554 * (for example, the target has less data to
555 * transfer than requested). Just go to status
556 * and the command should terminate.
557 */
558 INCSTAT(siop_stat_intr_shortxfer);
559 if ((dstat & DSTAT_DFE) == 0)
560 siop_clearfifo(&sc->sc_c);
561 /* no table to flush here */
562 CALL_SCRIPT(Ent_status);
563 return 1;
564 case SSTAT1_PHASE_MSGIN:
565 /*
566 * The target may be about to disconnect;
567 * save the data pointers just in case.
568 */
569 INCSTAT(siop_stat_intr_xferdisc);
570 if (scratcha0 & A_flag_data)
571 siop_sdp(&siop_cmd->cmd_c);
572 else if ((dstat & DSTAT_DFE) == 0)
573 siop_clearfifo(&sc->sc_c);
574 bus_space_write_1(sc->sc_c.sc_rt,
575 sc->sc_c.sc_rh, SIOP_SCRATCHA,
576 scratcha0 & ~A_flag_data);
577 siop_table_sync(siop_cmd,
578 BUS_DMASYNC_PREREAD |
579 BUS_DMASYNC_PREWRITE);
580 CALL_SCRIPT(Ent_msgin);
581 return 1;
582 }
583 printf("%s: unexpected phase mismatch %d\n",
584 sc->sc_c.sc_dev.dv_xname,
585 sstat1 & SSTAT1_PHASE_MASK);
586 } else {
587 printf("%s: phase mismatch without command\n",
588 sc->sc_c.sc_dev.dv_xname);
589 }
590 need_reset = 1;
591 }
592 if (sist & SIST0_PAR) {
593 /* parity error, reset */
594 if (siop_cmd)
595 scsipi_printaddr(xs->xs_periph);
596 else
597 printf("%s:", sc->sc_c.sc_dev.dv_xname);
598 printf("parity error\n");
599 goto reset;
600 }
601 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
602 /* selection time out, assume there's no device here */
603 if (siop_cmd) {
604 siop_cmd->cmd_c.status = CMDST_DONE;
605 xs->error = XS_SELTIMEOUT;
606 freetarget = 1;
607 goto end;
608 } else {
609 printf("%s: selection timeout without "
610 "command\n", sc->sc_c.sc_dev.dv_xname);
611 need_reset = 1;
612 }
613 }
614 if (sist & SIST0_UDC) {
615 /*
616 * unexpected disconnect. Usually the target signals
617 * a fatal condition this way. Attempt to get sense.
618 */
619 if (siop_cmd) {
620 siop_cmd->cmd_tables->status =
621 htole32(SCSI_CHECK);
622 goto end;
623 }
624 printf("%s: unexpected disconnect without "
625 "command\n", sc->sc_c.sc_dev.dv_xname);
626 goto reset;
627 }
628 if (sist & (SIST1_SBMC << 8)) {
629 /* SCSI bus mode change */
630 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
631 goto reset;
632 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
633 /*
634 * we have a script interrupt, it will
635 * restart the script.
636 */
637 goto scintr;
638 }
639 /*
640 * else we have to restart it ourselves, at the
641 * interrupted instruction.
642 */
643 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
644 SIOP_DSP,
645 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
646 SIOP_DSP) - 8);
647 return 1;
648 }
649 /* Else it's an unhandled exception (for now). */
650 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
651 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
652 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
653 SIOP_SSTAT1),
654 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
655 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
656 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
657 if (siop_cmd) {
658 siop_cmd->cmd_c.status = CMDST_DONE;
659 xs->error = XS_SELTIMEOUT;
660 goto end;
661 }
662 need_reset = 1;
663 }
664 if (need_reset) {
665 reset:
666 /* fatal error, reset the bus */
667 siop_resetbus(&sc->sc_c);
668 /* no table to flush here */
669 return 1;
670 }
671
672 scintr:
673 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
674 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
675 SIOP_DSPS);
676 #ifdef SIOP_DEBUG_INTR
677 printf("script interrupt 0x%x\n", irqcode);
678 #endif
679 /*
680 * No command, or an inactive command, is only valid for a
681 * reselect interrupt.
682 */
683 if ((irqcode & 0x80) == 0) {
684 if (siop_cmd == NULL) {
685 printf(
686 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
687 sc->sc_c.sc_dev.dv_xname, irqcode);
688 goto reset;
689 }
690 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
691 printf("%s: command with invalid status "
692 "(IRQ code 0x%x current status %d) !\n",
693 sc->sc_c.sc_dev.dv_xname,
694 irqcode, siop_cmd->cmd_c.status);
695 xs = NULL;
696 }
697 }
698 switch(irqcode) {
699 case A_int_err:
700 printf("error, DSP=0x%x\n",
701 (int)(bus_space_read_4(sc->sc_c.sc_rt,
702 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
703 if (xs) {
704 xs->error = XS_SELTIMEOUT;
705 goto end;
706 } else {
707 goto reset;
708 }
709 case A_int_reseltarg:
710 printf("%s: reselect with invalid target\n",
711 sc->sc_c.sc_dev.dv_xname);
712 goto reset;
713 case A_int_resellun:
714 INCSTAT(siop_stat_intr_lunresel);
715 target = bus_space_read_1(sc->sc_c.sc_rt,
716 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
717 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
718 SIOP_SCRATCHA + 1);
719 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
720 SIOP_SCRATCHA + 2);
721 siop_target =
722 (struct siop_target *)sc->sc_c.targets[target];
723 if (siop_target == NULL) {
724 printf("%s: reselect with invalid target %d\n",
725 sc->sc_c.sc_dev.dv_xname, target);
726 goto reset;
727 }
728 siop_lun = siop_target->siop_lun[lun];
729 if (siop_lun == NULL) {
730 printf("%s: target %d reselect with invalid "
731 "lun %d\n", sc->sc_c.sc_dev.dv_xname,
732 target, lun);
733 goto reset;
734 }
735 if (siop_lun->siop_tag[tag].active == NULL) {
736 printf("%s: target %d lun %d tag %d reselect "
737 "without command\n",
738 sc->sc_c.sc_dev.dv_xname,
739 target, lun, tag);
740 goto reset;
741 }
742 siop_cmd = siop_lun->siop_tag[tag].active;
743 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
744 SIOP_DSP, siop_cmd->cmd_c.dsa +
745 sizeof(struct siop_common_xfer) +
746 Ent_ldsa_reload_dsa);
747 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
748 return 1;
749 case A_int_reseltag:
750 printf("%s: reselect with invalid tag\n",
751 sc->sc_c.sc_dev.dv_xname);
752 goto reset;
753 case A_int_msgin:
754 {
755 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
756 sc->sc_c.sc_rh, SIOP_SFBR);
757 if (msgin == MSG_MESSAGE_REJECT) {
758 int msg, extmsg;
759 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
760 /*
761 * message was part of an identify +
762 * something else. Identify shouldn't
763 * have been rejected.
764 */
765 msg =
766 siop_cmd->cmd_tables->msg_out[1];
767 extmsg =
768 siop_cmd->cmd_tables->msg_out[3];
769 } else {
770 msg = siop_cmd->cmd_tables->msg_out[0];
771 extmsg =
772 siop_cmd->cmd_tables->msg_out[2];
773 }
774 if (msg == MSG_MESSAGE_REJECT) {
775 /* MSG_REJECT for a MSG_REJECT !*/
776 if (xs)
777 scsipi_printaddr(xs->xs_periph);
778 else
779 printf("%s: ",
780 sc->sc_c.sc_dev.dv_xname);
781 printf("our reject message was "
782 "rejected\n");
783 goto reset;
784 }
785 if (msg == MSG_EXTENDED &&
786 extmsg == MSG_EXT_WDTR) {
787 /* WDTR rejected, initiate sync */
788 if ((siop_target->target_c.flags &
789 TARF_SYNC) == 0) {
790 siop_target->target_c.status =
791 TARST_OK;
792 siop_update_xfer_mode(&sc->sc_c,
793 target);
794 /* no table to flush here */
795 CALL_SCRIPT(Ent_msgin_ack);
796 return 1;
797 }
798 siop_target->target_c.status =
799 TARST_SYNC_NEG;
800 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
801 sc->sc_c.minsync, sc->sc_c.maxoff);
802 siop_table_sync(siop_cmd,
803 BUS_DMASYNC_PREREAD |
804 BUS_DMASYNC_PREWRITE);
805 CALL_SCRIPT(Ent_send_msgout);
806 return 1;
807 } else if (msg == MSG_EXTENDED &&
808 extmsg == MSG_EXT_SDTR) {
809 /* sync rejected */
810 siop_target->target_c.offset = 0;
811 siop_target->target_c.period = 0;
812 siop_target->target_c.status = TARST_OK;
813 siop_update_xfer_mode(&sc->sc_c,
814 target);
815 /* no table to flush here */
816 CALL_SCRIPT(Ent_msgin_ack);
817 return 1;
818 } else if (msg == MSG_SIMPLE_Q_TAG ||
819 msg == MSG_HEAD_OF_Q_TAG ||
820 msg == MSG_ORDERED_Q_TAG) {
821 if (siop_handle_qtag_reject(
822 siop_cmd) == -1)
823 goto reset;
824 CALL_SCRIPT(Ent_msgin_ack);
825 return 1;
826 }
827 if (xs)
828 scsipi_printaddr(xs->xs_periph);
829 else
830 printf("%s: ",
831 sc->sc_c.sc_dev.dv_xname);
832 if (msg == MSG_EXTENDED) {
833 printf("scsi message reject, extended "
834 "message sent was 0x%x\n", extmsg);
835 } else {
836 printf("scsi message reject, message "
837 "sent was 0x%x\n", msg);
838 }
839 /* no table to flush here */
840 CALL_SCRIPT(Ent_msgin_ack);
841 return 1;
842 }
843 if (xs)
844 scsipi_printaddr(xs->xs_periph);
845 else
846 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
847 printf("unhandled message 0x%x\n",
848 siop_cmd->cmd_tables->msg_in[0]);
849 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
850 siop_cmd->cmd_tables->t_msgout.count= htole32(1);
851 siop_table_sync(siop_cmd,
852 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
853 CALL_SCRIPT(Ent_send_msgout);
854 return 1;
855 }
856 case A_int_extmsgin:
857 #ifdef SIOP_DEBUG_INTR
858 printf("extended message: msg 0x%x len %d\n",
859 siop_cmd->cmd_tables->msg_in[2],
860 siop_cmd->cmd_tables->msg_in[1]);
861 #endif
862 if (siop_cmd->cmd_tables->msg_in[1] >
863 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
864 printf("%s: extended message too big (%d)\n",
865 sc->sc_c.sc_dev.dv_xname,
866 siop_cmd->cmd_tables->msg_in[1]);
867 siop_cmd->cmd_tables->t_extmsgdata.count =
868 htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
869 siop_table_sync(siop_cmd,
870 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
871 CALL_SCRIPT(Ent_get_extmsgdata);
872 return 1;
873 case A_int_extmsgdata:
874 #ifdef SIOP_DEBUG_INTR
875 {
876 int i;
877 printf("extended message: 0x%x, data:",
878 siop_cmd->cmd_tables->msg_in[2]);
879 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
880 i++)
881 printf(" 0x%x",
882 siop_cmd->cmd_tables->msg_in[i]);
883 printf("\n");
884 }
885 #endif
886 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
887 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
888 case SIOP_NEG_MSGOUT:
889 siop_update_scntl3(sc,
890 siop_cmd->cmd_c.siop_target);
891 siop_table_sync(siop_cmd,
892 BUS_DMASYNC_PREREAD |
893 BUS_DMASYNC_PREWRITE);
894 CALL_SCRIPT(Ent_send_msgout);
895 return(1);
896 case SIOP_NEG_ACK:
897 siop_update_scntl3(sc,
898 siop_cmd->cmd_c.siop_target);
899 CALL_SCRIPT(Ent_msgin_ack);
900 return(1);
901 default:
902 panic("invalid retval from "
903 "siop_wdtr_neg()");
904 }
905 return(1);
906 }
907 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
908 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
909 case SIOP_NEG_MSGOUT:
910 siop_update_scntl3(sc,
911 siop_cmd->cmd_c.siop_target);
912 siop_table_sync(siop_cmd,
913 BUS_DMASYNC_PREREAD |
914 BUS_DMASYNC_PREWRITE);
915 CALL_SCRIPT(Ent_send_msgout);
916 return(1);
917 case SIOP_NEG_ACK:
918 siop_update_scntl3(sc,
919 siop_cmd->cmd_c.siop_target);
920 CALL_SCRIPT(Ent_msgin_ack);
921 return(1);
922 default:
923 panic("invalid retval from "
924 "siop_wdtr_neg()");
925 }
926 return(1);
927 }
928 /* send a message reject */
929 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
930 siop_cmd->cmd_tables->t_msgout.count = htole32(1);
931 siop_table_sync(siop_cmd,
932 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
933 CALL_SCRIPT(Ent_send_msgout);
934 return 1;
935 case A_int_disc:
936 INCSTAT(siop_stat_intr_sdp);
937 offset = bus_space_read_1(sc->sc_c.sc_rt,
938 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
939 #ifdef SIOP_DEBUG_DR
940 printf("disconnect offset %d\n", offset);
941 #endif
942 if (offset > SIOP_NSG) {
943 printf("%s: bad offset for disconnect (%d)\n",
944 sc->sc_c.sc_dev.dv_xname, offset);
945 goto reset;
946 }
947 /*
948 * offset == SIOP_NSG may be a valid condition if
949 * we get an sdp when the xfer is done.
950 * Don't call memmove in this case.
951 */
952 if (offset < SIOP_NSG) {
953 memmove(&siop_cmd->cmd_tables->data[0],
954 &siop_cmd->cmd_tables->data[offset],
955 (SIOP_NSG - offset) * sizeof(scr_table_t));
956 siop_table_sync(siop_cmd,
957 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
958 }
959 CALL_SCRIPT(Ent_script_sched);
960 return 1;
961 case A_int_resfail:
962 printf("reselect failed\n");
963 CALL_SCRIPT(Ent_script_sched);
964 return 1;
965 case A_int_done:
966 if (xs == NULL) {
967 printf("%s: done without command, DSA=0x%lx\n",
968 sc->sc_c.sc_dev.dv_xname,
969 (u_long)siop_cmd->cmd_c.dsa);
970 siop_cmd->cmd_c.status = CMDST_FREE;
971 CALL_SCRIPT(Ent_script_sched);
972 return 1;
973 }
974 #ifdef SIOP_DEBUG_INTR
975 printf("done, DSA=0x%lx target id 0x%x last msg "
976 "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
977 le32toh(siop_cmd->cmd_tables->id),
978 siop_cmd->cmd_tables->msg_in[0],
979 le32toh(siop_cmd->cmd_tables->status));
980 #endif
981 INCSTAT(siop_stat_intr_done);
982 siop_cmd->cmd_c.status = CMDST_DONE;
983 goto end;
984 default:
985 printf("unknown irqcode %x\n", irqcode);
986 if (xs) {
987 xs->error = XS_SELTIMEOUT;
988 goto end;
989 }
990 goto reset;
991 }
992 return 1;
993 }
994 /* We just shouldn't get here */
995 panic("siop_intr: I shouldn't be there !");
996 return 1;
997 end:
998 /*
999 * Restart the script now if the command completed properly.
1000 * Otherwise wait for siop_scsicmd_end(): we may need to clean up
1001 * the queue.
1002 */
1003 xs->status = le32toh(siop_cmd->cmd_tables->status);
1004 if (xs->status == SCSI_OK)
1005 CALL_SCRIPT(Ent_script_sched);
1006 else
1007 restart = 1;
1008 siop_lun->siop_tag[tag].active = NULL;
1009 siop_scsicmd_end(siop_cmd);
1010 if (freetarget && siop_target->target_c.status == TARST_PROBING)
1011 siop_del_dev(sc, target, lun);
1012 if (restart)
1013 CALL_SCRIPT(Ent_script_sched);
1014 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1015 /* a command terminated, so we have free slots now */
1016 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1017 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1018 }
1019
1020 return 1;
1021 }
1022
1023 void
1024 siop_scsicmd_end(siop_cmd)
1025 struct siop_cmd *siop_cmd;
1026 {
1027 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1028 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1029
1030 switch(xs->status) {
1031 case SCSI_OK:
1032 xs->error = XS_NOERROR;
1033 break;
1034 case SCSI_BUSY:
1035 xs->error = XS_BUSY;
1036 break;
1037 case SCSI_CHECK:
1038 xs->error = XS_BUSY;
1039 /* remove commands in the queue and scheduler */
1040 siop_unqueue(sc, xs->xs_periph->periph_target,
1041 xs->xs_periph->periph_lun);
1042 break;
1043 case SCSI_QUEUE_FULL:
1044 INCSTAT(siop_stat_intr_qfull);
1045 #ifdef SIOP_DEBUG
1046 printf("%s:%d:%d: queue full (tag %d)\n",
1047 sc->sc_c.sc_dev.dv_xname,
1048 xs->xs_periph->periph_target,
1049 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1050 #endif
1051 xs->error = XS_BUSY;
1052 break;
1053 case SCSI_SIOP_NOCHECK:
1054 /*
1055 * don't check status, xs->error is already valid
1056 */
1057 break;
1058 case SCSI_SIOP_NOSTATUS:
1059 /*
1060 * the status byte was not updated, cmd was
1061 * aborted
1062 */
1063 xs->error = XS_SELTIMEOUT;
1064 break;
1065 default:
1066 xs->error = XS_DRIVER_STUFFUP;
1067 }
1068 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1069 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
1070 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1071 (xs->xs_control & XS_CTL_DATA_IN) ?
1072 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1073 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1074 }
1075 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1076 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1077 siop_cmd->cmd_c.status = CMDST_FREE;
1078 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1079 xs->resid = 0;
1080 scsipi_done (xs);
1081 }
1082
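/*
 * Remove the not-yet-started commands for this target/lun from the
 * scheduler, flag them for requeue, and recompute sc_currschedslot.
 */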
1083 void
1084 siop_unqueue(sc, target, lun)
1085 struct siop_softc *sc;
1086 int target;
1087 int lun;
1088 {
1089 int slot, tag;
1090 struct siop_cmd *siop_cmd;
1091 struct siop_lun *siop_lun =
1092 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1093
1094 /* first make sure to read valid data */
1095 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1096
1097 for (tag = 1; tag < SIOP_NTAG; tag++) {
1098 /* look for commands in the scheduler, not yet started */
1099 if (siop_lun->siop_tag[tag].active == NULL)
1100 continue;
1101 siop_cmd = siop_lun->siop_tag[tag].active;
1102 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1103 if (siop_script_read(sc,
1104 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1105 siop_cmd->cmd_c.dsa +
1106 sizeof(struct siop_common_xfer) +
1107 Ent_ldsa_select)
1108 break;
1109 }
1110 if (slot > sc->sc_currschedslot)
1111 continue; /* didn't find it */
1112 if (siop_script_read(sc,
1113 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1114 continue; /* already started */
1115 /* clear the slot */
1116 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1117 0x80000000);
1118 /* ask to requeue */
1119 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1120 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1121 siop_lun->siop_tag[tag].active = NULL;
1122 siop_scsicmd_end(siop_cmd);
1123 }
1124 /* update sc_currschedslot */
1125 sc->sc_currschedslot = 0;
1126 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1127 if (siop_script_read(sc,
1128 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1129 sc->sc_currschedslot = slot;
1130 }
1131 }
1132
1133 /*
1134 * handle a rejected queue tag message: the command will run untagged,
1135 * so we have to adjust the reselect script.
1136 */
1137 int
1138 siop_handle_qtag_reject(siop_cmd)
1139 struct siop_cmd *siop_cmd;
1140 {
1141 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1142 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1143 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1144 int tag = siop_cmd->cmd_tables->msg_out[2];
1145 struct siop_lun *siop_lun =
1146 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1147
1148 #ifdef SIOP_DEBUG
1149 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1150 sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
1151 siop_cmd->cmd_c.status);
1152 #endif
1153
1154 if (siop_lun->siop_tag[0].active != NULL) {
1155 printf("%s: untagged command already running for target %d "
1156 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1157 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1158 return -1;
1159 }
1160 /* clear tag slot */
1161 siop_lun->siop_tag[tag].active = NULL;
1162 /* add command to non-tagged slot */
1163 siop_lun->siop_tag[0].active = siop_cmd;
1164 siop_cmd->cmd_c.tag = 0;
1165 /* adjust reselect script if there is one */
1166 if (siop_lun->siop_tag[0].reseloff > 0) {
1167 siop_script_write(sc,
1168 siop_lun->siop_tag[0].reseloff + 1,
1169 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1170 Ent_ldsa_reload_dsa);
1171 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1172 }
1173 return 0;
1174 }
1175
1176 /*
1177 * handle a bus reset: reset chip, unqueue all active commands, free all
1178 * target structs and report the lossage to the upper layer.
1179 * As the upper layer may requeue immediately we have to first store
1180 * all active commands in a temporary queue.
1181 */
1182 void
1183 siop_handle_reset(sc)
1184 struct siop_softc *sc;
1185 {
1186 struct siop_cmd *siop_cmd;
1187 struct siop_lun *siop_lun;
1188 int target, lun, tag;
1189 /*
1190 * scsi bus reset. reset the chip and restart
1191 * the queue. Need to clean up all active commands
1192 */
1193 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1194 /* stop, reset and restart the chip */
1195 siop_reset(sc);
1196 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1197 /* chip has been reset, all slots are free now */
1198 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1199 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1200 }
1201 /*
1202 * Process all commands: first the commands being executed
1203 */
1204 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1205 target++) {
1206 if (sc->sc_c.targets[target] == NULL)
1207 continue;
1208 for (lun = 0; lun < 8; lun++) {
1209 struct siop_target *siop_target =
1210 (struct siop_target *)sc->sc_c.targets[target];
1211 siop_lun = siop_target->siop_lun[lun];
1212 if (siop_lun == NULL)
1213 continue;
1214 for (tag = 0; tag <
1215 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1216 SIOP_NTAG : 1);
1217 tag++) {
1218 siop_cmd = siop_lun->siop_tag[tag].active;
1219 if (siop_cmd == NULL)
1220 continue;
1221 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1222 printf("command with tag id %d reset\n", tag);
1223 siop_cmd->cmd_c.xs->error =
1224 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1225 XS_TIMEOUT : XS_RESET;
1226 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1227 siop_lun->siop_tag[tag].active = NULL;
1228 siop_cmd->cmd_c.status = CMDST_DONE;
1229 siop_scsicmd_end(siop_cmd);
1230 }
1231 }
1232 sc->sc_c.targets[target]->status = TARST_ASYNC;
1233 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1234 sc->sc_c.targets[target]->period =
1235 sc->sc_c.targets[target]->offset = 0;
1236 siop_update_xfer_mode(&sc->sc_c, target);
1237 }
1238
1239 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1240 }
1241
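/*
 * scsipi entry point: run transfers, grow the command descriptor pool,
 * and record transfer mode settings for a target.
 */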
1242 void
1243 siop_scsipi_request(chan, req, arg)
1244 struct scsipi_channel *chan;
1245 scsipi_adapter_req_t req;
1246 void *arg;
1247 {
1248 struct scsipi_xfer *xs;
1249 struct scsipi_periph *periph;
1250 struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1251 struct siop_cmd *siop_cmd;
1252 struct siop_target *siop_target;
1253 int s, error, i;
1254 int target;
1255 int lun;
1256
1257 switch (req) {
1258 case ADAPTER_REQ_RUN_XFER:
1259 xs = arg;
1260 periph = xs->xs_periph;
1261 target = periph->periph_target;
1262 lun = periph->periph_lun;
1263
1264 s = splbio();
1265 #ifdef SIOP_DEBUG_SCHED
1266 printf("starting cmd for %d:%d\n", target, lun);
1267 #endif
1268 siop_cmd = TAILQ_FIRST(&sc->free_list);
1269 if (siop_cmd == NULL) {
1270 xs->error = XS_RESOURCE_SHORTAGE;
1271 scsipi_done(xs);
1272 splx(s);
1273 return;
1274 }
1275 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1276 #ifdef DIAGNOSTIC
1277 if (siop_cmd->cmd_c.status != CMDST_FREE)
1278 panic("siop_scsicmd: new cmd not free");
1279 #endif
1280 siop_target = (struct siop_target*)sc->sc_c.targets[target];
1281 if (siop_target == NULL) {
1282 #ifdef SIOP_DEBUG
1283 printf("%s: alloc siop_target for target %d\n",
1284 sc->sc_c.sc_dev.dv_xname, target);
1285 #endif
1286 sc->sc_c.targets[target] =
1287 malloc(sizeof(struct siop_target),
1288 M_DEVBUF, M_NOWAIT);
1289 if (sc->sc_c.targets[target] == NULL) {
1290 printf("%s: can't malloc memory for "
1291 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1292 target);
1293 xs->error = XS_RESOURCE_SHORTAGE;
1294 scsipi_done(xs);
1295 splx(s);
1296 return;
1297 }
1298 siop_target =
1299 (struct siop_target*)sc->sc_c.targets[target];
1300 siop_target->target_c.status = TARST_PROBING;
1301 siop_target->target_c.flags = 0;
1302 siop_target->target_c.id =
1303 sc->sc_c.clock_div << 24; /* scntl3 */
1304 siop_target->target_c.id |= target << 16; /* id */
1305 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1306
1307 /* get a lun switch script */
1308 siop_target->lunsw = siop_get_lunsw(sc);
1309 if (siop_target->lunsw == NULL) {
1310 printf("%s: can't alloc lunsw for target %d\n",
1311 sc->sc_c.sc_dev.dv_xname, target);
1312 xs->error = XS_RESOURCE_SHORTAGE;
1313 scsipi_done(xs);
1314 splx(s);
1315 return;
1316 }
1317 for (i=0; i < 8; i++)
1318 siop_target->siop_lun[i] = NULL;
1319 siop_add_reselsw(sc, target);
1320 }
1321 if (siop_target->siop_lun[lun] == NULL) {
1322 siop_target->siop_lun[lun] =
1323 malloc(sizeof(struct siop_lun), M_DEVBUF,
1324 M_NOWAIT|M_ZERO);
1325 if (siop_target->siop_lun[lun] == NULL) {
1326 printf("%s: can't alloc siop_lun for "
1327 "target %d lun %d\n",
1328 sc->sc_c.sc_dev.dv_xname, target, lun);
1329 xs->error = XS_RESOURCE_SHORTAGE;
1330 scsipi_done(xs);
1331 splx(s);
1332 return;
1333 }
1334 }
1335 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1336 siop_cmd->cmd_c.xs = xs;
1337 siop_cmd->cmd_c.flags = 0;
1338 siop_cmd->cmd_c.status = CMDST_READY;
1339
1340 /* load the DMA maps */
1341 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1342 siop_cmd->cmd_c.dmamap_cmd,
1343 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1344 if (error) {
1345 printf("%s: unable to load cmd DMA map: %d\n",
1346 sc->sc_c.sc_dev.dv_xname, error);
1347 xs->error = XS_DRIVER_STUFFUP;
1348 scsipi_done(xs);
1349 splx(s);
1350 return;
1351 }
1352 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1353 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1354 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1355 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1356 ((xs->xs_control & XS_CTL_DATA_IN) ?
1357 BUS_DMA_READ : BUS_DMA_WRITE));
1358 if (error) {
1359 printf("%s: unable to load cmd DMA map: %d",
1360 sc->sc_c.sc_dev.dv_xname, error);
1361 xs->error = XS_DRIVER_STUFFUP;
1362 scsipi_done(xs);
1363 bus_dmamap_unload(sc->sc_c.sc_dmat,
1364 siop_cmd->cmd_c.dmamap_cmd);
1365 splx(s);
1366 return;
1367 }
1368 bus_dmamap_sync(sc->sc_c.sc_dmat,
1369 siop_cmd->cmd_c.dmamap_data, 0,
1370 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1371 (xs->xs_control & XS_CTL_DATA_IN) ?
1372 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1373 }
1374 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1375 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1376 BUS_DMASYNC_PREWRITE);
1377
1378 if (xs->xs_tag_type) {
1379 /* use tag_id + 1, tag 0 is reserved for untagged cmds */
1380 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1381 } else {
1382 siop_cmd->cmd_c.tag = 0;
1383 }
1384 siop_setuptables(&siop_cmd->cmd_c);
1385 siop_table_sync(siop_cmd,
1386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1387 siop_start(sc, siop_cmd);
1388 if (xs->xs_control & XS_CTL_POLL) {
1389 /* poll for command completion */
1390 while ((xs->xs_status & XS_STS_DONE) == 0) {
1391 delay(1000);
1392 siop_intr(sc);
1393 }
1394 }
1395 splx(s);
1396 return;
1397
1398 case ADAPTER_REQ_GROW_RESOURCES:
1399 #ifdef SIOP_DEBUG
1400 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1401 sc->sc_c.sc_adapt.adapt_openings);
1402 #endif
1403 siop_morecbd(sc);
1404 return;
1405
1406 case ADAPTER_REQ_SET_XFER_MODE:
1407 {
1408 struct scsipi_xfer_mode *xm = arg;
1409 if (sc->sc_c.targets[xm->xm_target] == NULL)
1410 return;
1411 s = splbio();
1412 if (xm->xm_mode & PERIPH_CAP_TQING)
1413 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1414 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1415 (sc->sc_c.features & SF_BUS_WIDE))
1416 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1417 if (xm->xm_mode & PERIPH_CAP_SYNC)
1418 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1419 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1420 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1421 sc->sc_c.targets[xm->xm_target]->status =
1422 TARST_ASYNC;
1423
1424 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1425 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1426 /* allocate a lun sw entry for this device */
1427 siop_add_dev(sc, xm->xm_target, lun);
1428 }
1429
1430 splx(s);
1431 }
1432 }
1433 }
1434
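/*
 * Put the command in a free scheduler slot: patch the reselect and
 * scheduler scripts with its DSA, arm the timeout and signal the
 * script processor (SIGP).
 */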
1435 static void
1436 siop_start(sc, siop_cmd)
1437 struct siop_softc *sc;
1438 struct siop_cmd *siop_cmd;
1439 {
1440 struct siop_lun *siop_lun;
1441 struct siop_xfer *siop_xfer;
1442 u_int32_t dsa;
1443 int timeout;
1444 int target, lun, slot;
1445
1446 /*
1447 * first make sure to read valid data
1448 */
1449 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1450
1451 /*
1452 * The queue management here is a bit tricky: the script always looks
1453 * at the slots from first to last, so if we always use the first
1454 * free slot, commands can stay at the tail of the queue ~forever.
1455 * The algorithm used here is to restart from the head when we know
1456 * that the queue is empty, and only add commands after the last one.
1457 * When we're at the end of the queue wait for the script to clear it.
1458 * The best thing to do here would be to implement a circular queue,
1459 * but using only 53c720 features this can be "interesting".
1460 * A mid-way solution could be to implement 2 queues and swap orders.
1461 */
1462 slot = sc->sc_currschedslot;
1463 /*
1464 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1465 * free. As this is the last used slot, all previous slots are free
1466 * as well, so we can restart from 0.
1467 */
1468 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1469 0x80000000) {
1470 slot = sc->sc_currschedslot = 0;
1471 } else {
1472 slot++;
1473 }
1474 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1475 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1476 siop_lun =
1477 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1478 /* if non-tagged command active, panic: this shouldn't happen */
1479 if (siop_lun->siop_tag[0].active != NULL) {
1480 panic("siop_start: tagged cmd while untagged running");
1481 }
1482 #ifdef DIAGNOSTIC
1483 /* sanity check the tag if needed */
1484 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1485 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1486 panic("siop_start: tag not free");
1487 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1488 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1489 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1490 panic("siop_start: invalid tag id");
1491 }
1492 }
1493 #endif
1494 /*
1495 * find a free scheduler slot and load it.
1496 */
1497 for (; slot < SIOP_NSLOTS; slot++) {
1498 /*
1499 * If the slot contains 0x80000000 (JUMP foo, IF FALSE) it is free.
1500 */
1501 if (siop_script_read(sc,
1502 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1503 0x80000000)
1504 break;
1505 }
1506 if (slot == SIOP_NSLOTS) {
1507 /*
1508 * no more free slots, no need to continue: freeze the channel
1509 * and requeue this command.
1510 */
1511 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1512 sc->sc_flags |= SCF_CHAN_NOSLOT;
1513 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1514 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1515 siop_scsicmd_end(siop_cmd);
1516 return;
1517 }
1518 #ifdef SIOP_DEBUG_SCHED
1519 printf("using slot %d for DSA 0x%lx\n", slot,
1520 (u_long)siop_cmd->cmd_c.dsa);
1521 #endif
1522 /* mark command as active */
1523 if (siop_cmd->cmd_c.status == CMDST_READY)
1524 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1525 else
1526 panic("siop_start: bad status");
1527 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1528 /* patch scripts with DSA addr */
1529 dsa = siop_cmd->cmd_c.dsa;
1530 /* first reselect switch, if we have an entry */
1531 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1532 siop_script_write(sc,
1533 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1534 dsa + sizeof(struct siop_common_xfer) +
1535 Ent_ldsa_reload_dsa);
1536 /* CMD script: MOVE MEMORY addr */
1537 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1538 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1539 htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1540 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1541 /* scheduler slot: JUMP ldsa_select */
1542 siop_script_write(sc,
1543 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1544 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1545 /* handle timeout */
1546 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1547 /* start the expire timer */
1548 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1549 if (timeout == 0)
1550 timeout = 1;
1551 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1552 timeout, siop_timeout, siop_cmd);
1553 }
1554 /*
1555 * Change JUMP cmd so that this slot will be handled
1556 */
1557 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1558 0x80080000);
1559 sc->sc_currschedslot = slot;
1560
1561 /* make sure SCRIPT processor will read valid data */
1562 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1563 /* Signal script it has some work to do */
1564 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1565 SIOP_ISTAT, ISTAT_SIGP);
1566 /* and wait for IRQ */
1567 return;
1568 }
1569
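/*
 * Command watchdog: reset the bus and flag the command as timed out;
 * the reset interrupt will complete it in siop_intr().
 */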
1570 void
1571 siop_timeout(v)
1572 void *v;
1573 {
1574 struct siop_cmd *siop_cmd = v;
1575 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1576 int s;
1577
1578 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1579 printf("command timeout\n");
1580
1581 s = splbio();
1582 /* reset the scsi bus */
1583 siop_resetbus(&sc->sc_c);
1584
1585 /* deactivate callout */
1586 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1588 /*
1589 * mark the command as having timed out and just return;
1590 * the bus reset will generate an interrupt,
1591 * which will be handled in siop_intr()
1592 */
1593 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1594 splx(s);
1595 return;
1596
1597 }
1598
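/* dump the host-memory copy of the script; memory moves use a third word */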
1599 void
1600 siop_dump_script(sc)
1601 struct siop_softc *sc;
1602 {
1603 int i;
1604 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1605 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1606 le32toh(sc->sc_c.sc_script[i]),
1607 le32toh(sc->sc_c.sc_script[i+1]));
1608 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1609 0xc0000000) {
1610 i++;
1611 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1612 }
1613 printf("\n");
1614 }
1615 }
1616
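/*
 * Allocate a new page of command descriptors, create their DMA maps,
 * initialize the per-command xfer tables and select/reselect scriptlets,
 * and put the new commands on the free list.
 */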
1617 void
1618 siop_morecbd(sc)
1619 struct siop_softc *sc;
1620 {
1621 int error, i, j, s;
1622 bus_dma_segment_t seg;
1623 int rseg;
1624 struct siop_cbd *newcbd;
1625 struct siop_xfer *xfer;
1626 bus_addr_t dsa;
1627 u_int32_t *scr;
1628
1629 /* allocate a new list head */
1630 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1631 if (newcbd == NULL) {
1632 printf("%s: can't allocate memory for command descriptors "
1633 "head\n", sc->sc_c.sc_dev.dv_xname);
1634 return;
1635 }
1636
1637 /* allocate cmd list */
1638 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1639 M_DEVBUF, M_NOWAIT|M_ZERO);
1640 if (newcbd->cmds == NULL) {
1641 printf("%s: can't allocate memory for command descriptors\n",
1642 sc->sc_c.sc_dev.dv_xname);
1643 goto bad3;
1644 }
1645 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1646 1, &rseg, BUS_DMA_NOWAIT);
1647 if (error) {
1648 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1649 sc->sc_c.sc_dev.dv_xname, error);
1650 goto bad2;
1651 }
1652 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1653 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1654 if (error) {
1655 printf("%s: unable to map cbd DMA memory, error = %d\n",
1656 sc->sc_c.sc_dev.dv_xname, error);
1657 goto bad2;
1658 }
1659 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1660 BUS_DMA_NOWAIT, &newcbd->xferdma);
1661 if (error) {
1662 printf("%s: unable to create cbd DMA map, error = %d\n",
1663 sc->sc_c.sc_dev.dv_xname, error);
1664 goto bad1;
1665 }
1666 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1667 PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1668 if (error) {
1669 printf("%s: unable to load cbd DMA map, error = %d\n",
1670 sc->sc_c.sc_dev.dv_xname, error);
1671 goto bad0;
1672 }
1673 #ifdef DEBUG
1674 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1675 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1676 #endif
1677 for (i = 0; i < SIOP_NCMDPB; i++) {
1678 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1679 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1680 &newcbd->cmds[i].cmd_c.dmamap_data);
1681 if (error) {
1682 printf("%s: unable to create data DMA map for cbd: "
1683 "error %d\n",
1684 sc->sc_c.sc_dev.dv_xname, error);
1685 goto bad0;
1686 }
1687 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1688 sizeof(struct scsipi_generic), 1,
1689 sizeof(struct scsipi_generic), 0,
1690 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1691 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1692 if (error) {
1693 printf("%s: unable to create cmd DMA map for cbd %d\n",
1694 sc->sc_c.sc_dev.dv_xname, error);
1695 goto bad0;
1696 }
1697 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1698 newcbd->cmds[i].siop_cbdp = newcbd;
1699 xfer = &newcbd->xfers[i];
1700 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1701 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1702 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1703 i * sizeof(struct siop_xfer);
1704 newcbd->cmds[i].cmd_c.dsa = dsa;
1705 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1706 xfer->siop_tables.t_msgout.count= htole32(1);
1707 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1708 xfer->siop_tables.t_msgin.count= htole32(1);
1709 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1710 offsetof(struct siop_common_xfer, msg_in));
1711 xfer->siop_tables.t_extmsgin.count= htole32(2);
1712 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1713 offsetof(struct siop_common_xfer, msg_in) + 1);
1714 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1715 offsetof(struct siop_common_xfer, msg_in) + 3);
1716 xfer->siop_tables.t_status.count= htole32(1);
1717 xfer->siop_tables.t_status.addr = htole32(dsa +
1718 offsetof(struct siop_common_xfer, status));
1719 /* The select/reselect script */
1720 scr = &xfer->resel[0];
1721 for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1722 scr[j] = htole32(load_dsa[j]);
1723 /*
1724 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1725 * octet, reg offset is the third.
1726 */
1727 scr[Ent_rdsa0 / 4] =
1728 htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
1729 scr[Ent_rdsa1 / 4] =
1730 htole32(0x78110000 | ( dsa & 0x0000ff00 ));
1731 scr[Ent_rdsa2 / 4] =
1732 htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
1733 scr[Ent_rdsa3 / 4] =
1734 htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
1735 scr[E_ldsa_abs_reselected_Used[0]] =
1736 htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
1737 scr[E_ldsa_abs_reselect_Used[0]] =
1738 htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
1739 scr[E_ldsa_abs_selected_Used[0]] =
1740 htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
1741 scr[E_ldsa_abs_data_Used[0]] =
1742 htole32(dsa + sizeof(struct siop_common_xfer) +
1743 Ent_ldsa_data);
1744 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1745 scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1746 s = splbio();
1747 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1748 splx(s);
1749 #ifdef SIOP_DEBUG
1750 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1751 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1752 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1753 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1754 #endif
1755 }
1756 s = splbio();
1757 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1758 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1759 splx(s);
1760 return;
1761 bad0:
1762 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1763 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1764 bad1:
1765 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1766 bad2:
1767 free(newcbd->cmds, M_DEVBUF);
1768 bad3:
1769 free(newcbd, M_DEVBUF);
1770 return;
1771 }
1772
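/*
 * Get a lun switch script fragment for a target: reuse one from the
 * free list if available, otherwise copy the lun_switch template into
 * the low end of the free script area.  Returns NULL when the script
 * area is exhausted.
 */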
1773 struct siop_lunsw *
1774 siop_get_lunsw(sc)
1775 struct siop_softc *sc;
1776 {
1777 struct siop_lunsw *lunsw;
1778 int i;
1779
1780 if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1781 sc->script_free_hi)
1782 return NULL;
1783 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1784 if (lunsw != NULL) {
1785 #ifdef SIOP_DEBUG
1786 printf("siop_get_lunsw got lunsw at offset %d\n",
1787 lunsw->lunsw_off);
1788 #endif
1789 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1790 return lunsw;
1791 }
1792 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1793 if (lunsw == NULL)
1794 return NULL;
1795 #ifdef SIOP_DEBUG
1796 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1797 #endif
1798 if (sc->sc_c.features & SF_CHIP_RAM) {
1799 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1800 sc->script_free_lo * 4, lun_switch,
1801 sizeof(lun_switch) / sizeof(lun_switch[0]));
1802 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1803 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1804 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1805 } else {
1806 for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1807 i++)
1808 sc->sc_c.sc_script[sc->script_free_lo + i] =
1809 htole32(lun_switch[i]);
1810 sc->sc_c.sc_script[
1811 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1812 htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1813 }
1814 lunsw->lunsw_off = sc->script_free_lo;
1815 lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1816 sc->script_free_lo += lunsw->lunsw_size;
1817 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1818 return lunsw;
1819 }
1820
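/*
 * Hook a target into the main script's reselect switch: find a free
 * slot (its target byte reads back as 0xff), make it jump to the
 * target's lun switch, then (re)add reselect entries for the luns
 * already known on this target.
 */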
1821 void
1822 siop_add_reselsw(sc, target)
1823 struct siop_softc *sc;
1824 int target;
1825 {
1826 int i;
1827 struct siop_target *siop_target;
1828 struct siop_lun *siop_lun;
1829
1830 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1831 /*
1832 * add an entry to resel switch
1833 */
1834 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1835 for (i = 0; i < 15; i++) {
1836 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1837 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1838 == 0xff) { /* it's free */
1839 #ifdef SIOP_DEBUG
1840 printf("siop: target %d slot %d offset %d\n",
1841 target, i, siop_target->reseloff);
1842 #endif
1843 /* JUMP abs_foo, IF target | 0x80; */
1844 siop_script_write(sc, siop_target->reseloff,
1845 0x800c0080 | target);
1846 siop_script_write(sc, siop_target->reseloff + 1,
1847 sc->sc_c.sc_scriptaddr +
1848 siop_target->lunsw->lunsw_off * 4 +
1849 Ent_lun_switch_entry);
1850 break;
1851 }
1852 }
1853 if (i == 15) /* no free slot, shouldn't happen */
1854 panic("siop: resel switch full");
1855
1856 sc->sc_ntargets++;
1857 for (i = 0; i < 8; i++) {
1858 siop_lun = siop_target->siop_lun[i];
1859 if (siop_lun == NULL)
1860 continue;
1861 if (siop_lun->reseloff > 0) {
1862 siop_lun->reseloff = 0;
1863 siop_add_dev(sc, target, i);
1864 }
1865 }
1866 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1867 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1868 }
1869
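/*
 * Update the two MOVE instructions in the target's lun switch that
 * load SCNTL3 and SXFER; the values are taken from target_c.id
 * (SCNTL3 from its top byte, SXFER from bits 15-8) and patched into
 * the data8 field of each MOVE.
 */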
1870 void
1871 siop_update_scntl3(sc, _siop_target)
1872 struct siop_softc *sc;
1873 struct siop_common_target *_siop_target;
1874 {
1875 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1876 /* MOVE target->id >> 24 TO SCNTL3 */
1877 siop_script_write(sc,
1878 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1879 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1880 /* MOVE target->id >> 8 TO SXFER */
1881 siop_script_write(sc,
1882 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1883 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1884 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1885 }
1886
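/*
 * Add a reselect entry for a lun to its target's lun switch and, for
 * tagged-queuing targets, allocate a tag switch as well.  Lun entries
 * grow upward from script_free_lo, tag switches downward from
 * script_free_hi; give up if the two areas would collide.
 */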
1887 void
1888 siop_add_dev(sc, target, lun)
1889 struct siop_softc *sc;
1890 int target;
1891 int lun;
1892 {
1893 struct siop_lunsw *lunsw;
1894 struct siop_target *siop_target =
1895 (struct siop_target *)sc->sc_c.targets[target];
1896 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1897 int i, ntargets;
1898
1899 if (siop_lun->reseloff > 0)
1900 return;
1901 lunsw = siop_target->lunsw;
1902 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1903 /*
1904 * can't extend this slot: the script area past it is already in
1905 * use. Probably not worth trying to deal with this case.
1906 */
1907 #ifdef DEBUG
1908 printf("%s:%d:%d: can't allocate a lun sw slot\n",
1909 sc->sc_c.sc_dev.dv_xname, target, lun);
1910 #endif
1911 return;
1912 }
1913 /* count how many free targets we still have to probe */
1914 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1915
1916 /*
1917 * we need 8 bytes for the additional lun switch entry, and
1918 * possibly sizeof(tag_switch) for the tag switch entry.
1919 * Keep enough free space for the targets that have not been
1920 * set up yet and could be probed later.
1921 */
1922 if (sc->script_free_lo + 2 +
1923 (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1924 ((siop_target->target_c.flags & TARF_TAG) ?
1925 sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1926 sc->script_free_hi)) {
1927 /*
1928 * not enough space; probably not worth dealing with it.
1929 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1930 */
1931 #ifdef DEBUG
1932 printf("%s:%d:%d: not enouth memory for a lun sw slot\n",
1933 sc->sc_c.sc_dev.dv_xname, target, lun);
1934 #endif
1935 return;
1936 }
1937 #ifdef SIOP_DEBUG
1938 printf("%s:%d:%d: allocate lun sw entry\n",
1939 sc->sc_c.sc_dev.dv_xname, target, lun);
1940 #endif
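/*
 * The lun switch fragment ends with an "INT int_resellun" terminator:
 * append a fresh terminator in the next two free script words, then
 * overwrite the old one (at script_free_lo - 2) with this lun's
 * "JUMP ..., IF lun" entry.
 */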
1941 /* INT int_resellun */
1942 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1943 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1944 /* Now the slot entry: JUMP abs_foo, IF lun */
1945 siop_script_write(sc, sc->script_free_lo - 2,
1946 0x800c0000 | lun);
1947 siop_script_write(sc, sc->script_free_lo - 1, 0);
1948 siop_lun->reseloff = sc->script_free_lo - 2;
1949 lunsw->lunsw_size += 2;
1950 sc->script_free_lo += 2;
1951 if (siop_target->target_c.flags & TARF_TAG) {
1952 /* we need a tag switch */
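/*
 * Tag switches are carved from the top of the free script area
 * (script_free_hi grows downward); each tag then gets its own
 * reselect offset within that copy.
 */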
1953 sc->script_free_hi -=
1954 sizeof(tag_switch) / sizeof(tag_switch[0]);
1955 if (sc->sc_c.features & SF_CHIP_RAM) {
1956 bus_space_write_region_4(sc->sc_c.sc_ramt,
1957 sc->sc_c.sc_ramh,
1958 sc->script_free_hi * 4, tag_switch,
1959 sizeof(tag_switch) / sizeof(tag_switch[0]));
1960 } else {
1961 for (i = 0;
1962 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1963 i++) {
1964 sc->sc_c.sc_script[sc->script_free_hi + i] =
1965 htole32(tag_switch[i]);
1966 }
1967 }
1968 siop_script_write(sc,
1969 siop_lun->reseloff + 1,
1970 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1971 Ent_tag_switch_entry);
1972
1973 for (i = 0; i < SIOP_NTAG; i++) {
1974 siop_lun->siop_tag[i].reseloff =
1975 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1976 }
1977 } else {
1978 /* non-tag case; just work with the lun switch */
1979 siop_lun->siop_tag[0].reseloff =
1980 siop_target->siop_lun[lun]->reseloff;
1981 }
1982 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1983 }
1984
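/*
 * Forget about a lun.  Once the last lun of a target is gone, release
 * the whole target: mark its reselect switch slot free again and put
 * its lun switch back on the free list for reuse.
 */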
1985 void
1986 siop_del_dev(sc, target, lun)
1987 struct siop_softc *sc;
1988 int target;
1989 int lun;
1990 {
1991 int i;
1992 struct siop_target *siop_target;
1993 #ifdef SIOP_DEBUG
1994 printf("%s:%d:%d: free lun sw entry\n",
1995 sc->sc_c.sc_dev.dv_xname, target, lun);
1996 #endif
1997 if (sc->sc_c.targets[target] == NULL)
1998 return;
1999 siop_target = (struct siop_target *)sc->sc_c.targets[target];
2000 free(siop_target->siop_lun[lun], M_DEVBUF);
2001 siop_target->siop_lun[lun] = NULL;
2002 /* XXX compact sw entry too ? */
2003 /* check if we can free the whole target */
2004 for (i = 0; i < 8; i++) {
2005 if (siop_target->siop_lun[i] != NULL)
2006 return;
2007 }
2008 #ifdef SIOP_DEBUG
2009 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
2010 sc->sc_c.sc_dev.dv_xname, target, lun,
2011 sc->sc_c.targets[target]->lunsw->lunsw_off);
2012 #endif
2013 /*
2014 * no luns left on this target: free the target struct and its
2015 * resel switch entry
2016 */
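/*
 * The "JUMP ..., IF 0xff" written below restores the free-slot marker
 * that siop_add_reselsw() looks for.
 */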
2017 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
2018 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2019 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2020 free(sc->sc_c.targets[target], M_DEVBUF);
2021 sc->sc_c.targets[target] = NULL;
2022 sc->sc_ntargets--;
2023 }
2024
2025 #ifdef SIOP_STATS
2026 void
2027 siop_printstats()
2028 {
2029 printf("siop_stat_intr %d\n", siop_stat_intr);
2030 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2031 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2032 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2033 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2034 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2035 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2036 }
2037 #endif
2038