/* $NetBSD: siop.c,v 1.53 2002/04/20 00:15:54 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.53 2002/04/20 00:15:54 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
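/*
 * Illustrative sizing only (the real value depends on sizeof(struct
 * siop_xfer), which embeds SIOP_NSG scatter/gather entries): with 4KB
 * pages and a siop_xfer of a few hundred bytes this works out to
 * roughly a dozen command descriptors per DMA-safe page.
 */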
81
/* Number of scheduler slots (needs to match script) */
83 #define SIOP_NSLOTS 40
84
85 void siop_reset __P((struct siop_softc *));
86 void siop_handle_reset __P((struct siop_softc *));
87 int siop_handle_qtag_reject __P((struct siop_cmd *));
88 void siop_scsicmd_end __P((struct siop_cmd *));
89 void siop_unqueue __P((struct siop_softc *, int, int));
90 static void siop_start __P((struct siop_softc *, struct siop_cmd *));
91 void siop_timeout __P((void *));
92 int siop_scsicmd __P((struct scsipi_xfer *));
93 void siop_scsipi_request __P((struct scsipi_channel *,
94 scsipi_adapter_req_t, void *));
95 void siop_dump_script __P((struct siop_softc *));
96 void siop_morecbd __P((struct siop_softc *));
97 struct siop_lunsw *siop_get_lunsw __P((struct siop_softc *));
98 void siop_add_reselsw __P((struct siop_softc *, int));
99 void siop_update_scntl3 __P((struct siop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int siop_stat_intr = 0;
104 static int siop_stat_intr_shortxfer = 0;
105 static int siop_stat_intr_sdp = 0;
106 static int siop_stat_intr_done = 0;
107 static int siop_stat_intr_xferdisc = 0;
108 static int siop_stat_intr_lunresel = 0;
109 static int siop_stat_intr_qfull = 0;
110 void siop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
116 static __inline__ void siop_script_sync __P((struct siop_softc *, int));
117 static __inline__ void
118 siop_script_sync(sc, ops)
119 struct siop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t siop_script_read __P((struct siop_softc *, u_int));
128 static __inline__ u_int32_t
129 siop_script_read(sc, offset)
130 struct siop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void siop_script_write __P((struct siop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 siop_script_write(sc, offset, val)
145 struct siop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
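/*
 * siop_script_read()/siop_script_write() hide whether the SCRIPT lives in
 * on-chip RAM (bus_space access) or in host DMA memory (with little-endian
 * conversion).  As a sketch of typical use, clearing a scheduler slot is
 * just:
 *	siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
 *	    0x80000000);
 * Callers touching host-memory scripts must still bracket accesses with
 * siop_script_sync() so the CPU and the SCRIPTS processor see valid data.
 */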
156
157 void
158 siop_attach(sc)
159 struct siop_softc *sc;
160 {
161 int error, i;
162 bus_dma_segment_t seg;
163 int rseg;
164
165 /*
166 * Allocate DMA-safe memory for the script and map it.
167 */
168 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
169 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
170 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
171 if (error) {
172 printf("%s: unable to allocate script DMA memory, "
173 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
174 return;
175 }
176 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
177 (caddr_t *)&sc->sc_c.sc_script,
178 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
179 if (error) {
180 printf("%s: unable to map script DMA memory, "
181 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
182 return;
183 }
184 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
185 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
186 if (error) {
187 printf("%s: unable to create script DMA map, "
188 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
189 return;
190 }
191 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
192 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
193 if (error) {
194 printf("%s: unable to load script DMA map, "
195 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
196 return;
197 }
198 sc->sc_c.sc_scriptaddr =
199 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
200 sc->sc_c.ram_size = PAGE_SIZE;
201 }
202 TAILQ_INIT(&sc->free_list);
203 TAILQ_INIT(&sc->cmds);
204 TAILQ_INIT(&sc->lunsw_list);
205 sc->sc_currschedslot = 0;
206 #ifdef SIOP_DEBUG
207 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
208 sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
209 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
210 #endif
211
212 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
213 sc->sc_c.sc_adapt.adapt_nchannels = 1;
214 sc->sc_c.sc_adapt.adapt_openings = 0;
215 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
216 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
217 sc->sc_c.sc_adapt.adapt_minphys = minphys;
218 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
219
220 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
221 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
222 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
223 sc->sc_c.sc_chan.chan_channel = 0;
224 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
225 sc->sc_c.sc_chan.chan_ntargets =
226 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
227 sc->sc_c.sc_chan.chan_nluns = 8;
228 sc->sc_c.sc_chan.chan_id =
229 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
230 if (sc->sc_c.sc_chan.chan_id == 0 ||
231 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
232 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
233
234 for (i = 0; i < 16; i++)
235 sc->sc_c.targets[i] = NULL;
236
237 /* find min/max sync period for this chip */
238 sc->sc_c.maxsync = 0;
239 sc->sc_c.minsync = 255;
240 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
241 if (sc->sc_c.clock_period != scf_period[i].clock)
242 continue;
243 if (sc->sc_c.maxsync < scf_period[i].period)
244 sc->sc_c.maxsync = scf_period[i].period;
245 if (sc->sc_c.minsync > scf_period[i].period)
246 sc->sc_c.minsync = scf_period[i].period;
247 }
	if (sc->sc_c.maxsync == 0 || sc->sc_c.minsync == 255)
249 panic("siop: can't find my sync parameters\n");
250 /* Do a bus reset, so that devices fall back to narrow/async */
251 siop_resetbus(&sc->sc_c);
252 /*
253 * siop_reset() will reset the chip, thus clearing pending interrupts
254 */
255 siop_reset(sc);
256 #ifdef DUMP_SCRIPT
257 siop_dump_script(sc);
258 #endif
259
260 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
261 }
262
263 void
264 siop_reset(sc)
265 struct siop_softc *sc;
266 {
267 int i, j;
268 struct siop_lunsw *lunsw;
269
270 siop_common_reset(&sc->sc_c);
271
272 /* copy and patch the script */
273 if (sc->sc_c.features & SF_CHIP_RAM) {
274 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
275 siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
276 for (j = 0; j <
277 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
278 j++) {
279 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
280 E_abs_msgin_Used[j] * 4,
281 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
282 }
283 #ifdef SIOP_SYMLED
284 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
285 Ent_led_on1, siop_led_on,
286 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
287 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
288 Ent_led_on2, siop_led_on,
289 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
290 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
291 Ent_led_off, siop_led_off,
292 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
293 #endif
294 } else {
295 for (j = 0;
296 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
297 sc->sc_c.sc_script[j] = htole32(siop_script[j]);
298 }
299 for (j = 0; j <
300 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
301 j++) {
302 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
303 htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
304 }
305 #ifdef SIOP_SYMLED
306 for (j = 0;
307 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
308 sc->sc_c.sc_script[
309 Ent_led_on1 / sizeof(siop_led_on[0]) + j
310 ] = htole32(siop_led_on[j]);
311 for (j = 0;
312 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
313 sc->sc_c.sc_script[
314 Ent_led_on2 / sizeof(siop_led_on[0]) + j
315 ] = htole32(siop_led_on[j]);
316 for (j = 0;
317 j < (sizeof(siop_led_off) / sizeof(siop_led_off[0])); j++)
318 sc->sc_c.sc_script[
319 Ent_led_off / sizeof(siop_led_off[0]) + j
320 ] = htole32(siop_led_off[j]);
321 #endif
322 }
323 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
324 sc->script_free_hi = sc->sc_c.ram_size / 4;
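	/*
	 * Everything between script_free_lo and script_free_hi (counted in
	 * 32-bit script words) is unused script space; siop_get_lunsw()
	 * carves lun switch fragments from the bottom of this area and
	 * fails once the two bounds would meet.
	 */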
325
326 /* free used and unused lun switches */
327 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
328 #ifdef SIOP_DEBUG
329 printf("%s: free lunsw at offset %d\n",
330 sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
331 #endif
332 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
333 free(lunsw, M_DEVBUF);
334 }
335 TAILQ_INIT(&sc->lunsw_list);
336 /* restore reselect switch */
337 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
338 struct siop_target *target;
339 if (sc->sc_c.targets[i] == NULL)
340 continue;
341 #ifdef SIOP_DEBUG
342 printf("%s: restore sw for target %d\n",
343 sc->sc_c.sc_dev.dv_xname, i);
344 #endif
345 target = (struct siop_target *)sc->sc_c.targets[i];
346 free(target->lunsw, M_DEVBUF);
347 target->lunsw = siop_get_lunsw(sc);
348 if (target->lunsw == NULL) {
349 printf("%s: can't alloc lunsw for target %d\n",
350 sc->sc_c.sc_dev.dv_xname, i);
351 break;
352 }
353 siop_add_reselsw(sc, i);
354 }
355
356 /* start script */
357 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
358 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
359 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
360 }
361 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
362 sc->sc_c.sc_scriptaddr + Ent_reselect);
363 }
364
365 #if 0
366 #define CALL_SCRIPT(ent) do {\
367 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
368 siop_cmd->cmd_c.dsa, \
369 sc->sc_c.sc_scriptaddr + ent); \
370 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
371 } while (0)
372 #else
373 #define CALL_SCRIPT(ent) do {\
374 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
375 } while (0)
376 #endif
377
378 int
379 siop_intr(v)
380 void *v;
381 {
382 struct siop_softc *sc = v;
383 struct siop_target *siop_target;
384 struct siop_cmd *siop_cmd;
385 struct siop_lun *siop_lun;
386 struct scsipi_xfer *xs;
387 int istat, sist, sstat1, dstat;
388 u_int32_t irqcode;
389 int need_reset = 0;
390 int offset, target, lun, tag;
391 bus_addr_t dsa;
392 struct siop_cbd *cbdp;
393 int freetarget = 0;
394 int restart = 0;
395
396 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
397 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
398 return 0;
399 INCSTAT(siop_stat_intr);
400 if (istat & ISTAT_INTF) {
401 printf("INTRF\n");
402 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
403 SIOP_ISTAT, ISTAT_INTF);
404 }
405 /* use DSA to find the current siop_cmd */
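	/*
	 * The DSA register points at the siop_xfer table of the command the
	 * SCRIPT was working on.  Each siop_cbd covers one PAGE_SIZE DMA
	 * segment of siop_xfer entries, so (dsa - segment base) divided by
	 * sizeof(struct siop_xfer) indexes the matching siop_cmd below.
	 */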
406 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
407 for (cbdp = TAILQ_FIRST(&sc->cmds); cbdp != NULL;
408 cbdp = TAILQ_NEXT(cbdp, next)) {
409 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
410 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
411 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
412 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
413 siop_table_sync(siop_cmd,
414 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
415 break;
416 }
417 }
418 if (cbdp == NULL) {
419 siop_cmd = NULL;
420 }
421 if (siop_cmd) {
422 xs = siop_cmd->cmd_c.xs;
423 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
424 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
425 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
426 tag = siop_cmd->cmd_c.tag;
427 siop_lun = siop_target->siop_lun[lun];
428 #ifdef DIAGNOSTIC
429 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
430 printf("siop_cmd (lun %d) for DSA 0x%x "
431 "not active (%d)\n", lun, (u_int)dsa,
432 siop_cmd->cmd_c.status);
433 xs = NULL;
434 siop_target = NULL;
435 target = -1;
436 lun = -1;
437 tag = -1;
438 siop_lun = NULL;
439 siop_cmd = NULL;
440 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
441 printf("siop_cmd (lun %d tag %d) not in siop_lun "
442 "active (%p != %p)\n", lun, tag, siop_cmd,
443 siop_lun->siop_tag[tag].active);
444 }
445 #endif
446 } else {
447 xs = NULL;
448 siop_target = NULL;
449 target = -1;
450 lun = -1;
451 tag = -1;
452 siop_lun = NULL;
453 }
454 if (istat & ISTAT_DIP) {
455 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
456 SIOP_DSTAT);
457 if (dstat & DSTAT_SSI) {
			printf("single step dsp 0x%08x dsa 0x%08x\n",
459 (int)(bus_space_read_4(sc->sc_c.sc_rt,
460 sc->sc_c.sc_rh, SIOP_DSP) -
461 sc->sc_c.sc_scriptaddr),
462 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
463 SIOP_DSA));
464 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
465 (istat & ISTAT_SIP) == 0) {
466 bus_space_write_1(sc->sc_c.sc_rt,
467 sc->sc_c.sc_rh, SIOP_DCNTL,
468 bus_space_read_1(sc->sc_c.sc_rt,
469 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
470 }
471 return 1;
472 }
473 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
474 printf("DMA IRQ:");
475 if (dstat & DSTAT_IID)
476 printf(" Illegal instruction");
477 if (dstat & DSTAT_ABRT)
478 printf(" abort");
479 if (dstat & DSTAT_BF)
480 printf(" bus fault");
481 if (dstat & DSTAT_MDPE)
482 printf(" parity");
483 if (dstat & DSTAT_DFE)
484 printf(" dma fifo empty");
485 printf(", DSP=0x%x DSA=0x%x: ",
486 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
487 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
488 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
489 if (siop_cmd)
490 printf("last msg_in=0x%x status=0x%x\n",
491 siop_cmd->cmd_tables->msg_in[0],
492 le32toh(siop_cmd->cmd_tables->status));
493 else
494 printf("%s: current DSA invalid\n",
495 sc->sc_c.sc_dev.dv_xname);
496 need_reset = 1;
497 }
498 }
499 if (istat & ISTAT_SIP) {
500 if (istat & ISTAT_DIP)
501 delay(10);
502 /*
		 * Can't read sist0 & sist1 independently, or we'd have to
		 * insert a delay between the two reads
505 */
506 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
507 SIOP_SIST0);
508 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
509 SIOP_SSTAT1);
510 #ifdef SIOP_DEBUG_INTR
511 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
512 "DSA=0x%x DSP=0x%lx\n", sist,
513 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
514 SIOP_SSTAT1),
515 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
516 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
517 SIOP_DSP) -
518 sc->sc_c.sc_scriptaddr));
519 #endif
520 if (sist & SIST0_RST) {
521 siop_handle_reset(sc);
522 /* no table to flush here */
523 return 1;
524 }
525 if (sist & SIST0_SGE) {
526 if (siop_cmd)
527 scsipi_printaddr(xs->xs_periph);
528 else
529 printf("%s:", sc->sc_c.sc_dev.dv_xname);
530 printf("scsi gross error\n");
531 goto reset;
532 }
533 if ((sist & SIST0_MA) && need_reset == 0) {
534 if (siop_cmd) {
535 int scratcha0;
536 dstat = bus_space_read_1(sc->sc_c.sc_rt,
537 sc->sc_c.sc_rh, SIOP_DSTAT);
538 /*
539 * first restore DSA, in case we were in a S/G
540 * operation.
541 */
542 bus_space_write_4(sc->sc_c.sc_rt,
543 sc->sc_c.sc_rh,
544 SIOP_DSA, siop_cmd->cmd_c.dsa);
545 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
546 sc->sc_c.sc_rh, SIOP_SCRATCHA);
547 switch (sstat1 & SSTAT1_PHASE_MASK) {
548 case SSTAT1_PHASE_STATUS:
549 /*
550 * previous phase may be aborted for any reason
				 * (for example, the target has less data to
552 * transfer than requested). Just go to status
553 * and the command should terminate.
554 */
555 INCSTAT(siop_stat_intr_shortxfer);
556 if ((dstat & DSTAT_DFE) == 0)
557 siop_clearfifo(&sc->sc_c);
558 /* no table to flush here */
559 CALL_SCRIPT(Ent_status);
560 return 1;
561 case SSTAT1_PHASE_MSGIN:
562 /*
563 * target may be ready to disconnect
564 * Save data pointers just in case.
565 */
566 INCSTAT(siop_stat_intr_xferdisc);
567 if (scratcha0 & A_flag_data)
568 siop_sdp(&siop_cmd->cmd_c);
569 else if ((dstat & DSTAT_DFE) == 0)
570 siop_clearfifo(&sc->sc_c);
571 bus_space_write_1(sc->sc_c.sc_rt,
572 sc->sc_c.sc_rh, SIOP_SCRATCHA,
573 scratcha0 & ~A_flag_data);
574 siop_table_sync(siop_cmd,
575 BUS_DMASYNC_PREREAD |
576 BUS_DMASYNC_PREWRITE);
577 CALL_SCRIPT(Ent_msgin);
578 return 1;
579 }
580 printf("%s: unexpected phase mismatch %d\n",
581 sc->sc_c.sc_dev.dv_xname,
582 sstat1 & SSTAT1_PHASE_MASK);
583 } else {
584 printf("%s: phase mismatch without command\n",
585 sc->sc_c.sc_dev.dv_xname);
586 }
587 need_reset = 1;
588 }
589 if (sist & SIST0_PAR) {
590 /* parity error, reset */
591 if (siop_cmd)
592 scsipi_printaddr(xs->xs_periph);
593 else
594 printf("%s:", sc->sc_c.sc_dev.dv_xname);
595 printf("parity error\n");
596 goto reset;
597 }
598 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
599 /* selection time out, assume there's no device here */
600 if (siop_cmd) {
601 siop_cmd->cmd_c.status = CMDST_DONE;
602 xs->error = XS_SELTIMEOUT;
603 freetarget = 1;
604 goto end;
605 } else {
606 printf("%s: selection timeout without "
607 "command\n", sc->sc_c.sc_dev.dv_xname);
608 need_reset = 1;
609 }
610 }
611 if (sist & SIST0_UDC) {
612 /*
613 * unexpected disconnect. Usually the target signals
614 * a fatal condition this way. Attempt to get sense.
615 */
616 if (siop_cmd) {
617 siop_cmd->cmd_tables->status =
618 htole32(SCSI_CHECK);
619 goto end;
620 }
621 printf("%s: unexpected disconnect without "
622 "command\n", sc->sc_c.sc_dev.dv_xname);
623 goto reset;
624 }
625 if (sist & (SIST1_SBMC << 8)) {
626 /* SCSI bus mode change */
627 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
628 goto reset;
629 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
630 /*
631 * we have a script interrupt, it will
632 * restart the script.
633 */
634 goto scintr;
635 }
636 /*
			 * else we have to restart it ourselves, at the
638 * interrupted instruction.
639 */
640 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
641 SIOP_DSP,
642 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
643 SIOP_DSP) - 8);
644 return 1;
645 }
		/* Else it's an unhandled exception (for now). */
647 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
648 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
649 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
650 SIOP_SSTAT1),
651 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
652 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
653 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
654 if (siop_cmd) {
655 siop_cmd->cmd_c.status = CMDST_DONE;
656 xs->error = XS_SELTIMEOUT;
657 goto end;
658 }
659 need_reset = 1;
660 }
661 if (need_reset) {
662 reset:
663 /* fatal error, reset the bus */
664 siop_resetbus(&sc->sc_c);
665 /* no table to flush here */
666 return 1;
667 }
668
669 scintr:
670 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
671 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
672 SIOP_DSPS);
673 #ifdef SIOP_DEBUG_INTR
674 printf("script interrupt 0x%x\n", irqcode);
675 #endif
676 /*
		 * A script interrupt with no command, or with an inactive
		 * command, is only valid for a reselect interrupt
679 */
680 if ((irqcode & 0x80) == 0) {
681 if (siop_cmd == NULL) {
682 printf(
683 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
684 sc->sc_c.sc_dev.dv_xname, irqcode);
685 goto reset;
686 }
687 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
688 printf("%s: command with invalid status "
689 "(IRQ code 0x%x current status %d) !\n",
690 sc->sc_c.sc_dev.dv_xname,
691 irqcode, siop_cmd->cmd_c.status);
692 xs = NULL;
693 }
694 }
695 switch(irqcode) {
696 case A_int_err:
697 printf("error, DSP=0x%x\n",
698 (int)(bus_space_read_4(sc->sc_c.sc_rt,
699 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
700 if (xs) {
701 xs->error = XS_SELTIMEOUT;
702 goto end;
703 } else {
704 goto reset;
705 }
706 case A_int_reseltarg:
707 printf("%s: reselect with invalid target\n",
708 sc->sc_c.sc_dev.dv_xname);
709 goto reset;
710 case A_int_resellun:
711 INCSTAT(siop_stat_intr_lunresel);
712 target = bus_space_read_1(sc->sc_c.sc_rt,
713 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
714 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
715 SIOP_SCRATCHA + 1);
716 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
717 SIOP_SCRATCHA + 2);
718 siop_target =
719 (struct siop_target *)sc->sc_c.targets[target];
720 if (siop_target == NULL) {
721 printf("%s: reselect with invalid target %d\n",
722 sc->sc_c.sc_dev.dv_xname, target);
723 goto reset;
724 }
725 siop_lun = siop_target->siop_lun[lun];
726 if (siop_lun == NULL) {
727 printf("%s: target %d reselect with invalid "
728 "lun %d\n", sc->sc_c.sc_dev.dv_xname,
729 target, lun);
730 goto reset;
731 }
732 if (siop_lun->siop_tag[tag].active == NULL) {
733 printf("%s: target %d lun %d tag %d reselect "
734 "without command\n",
735 sc->sc_c.sc_dev.dv_xname,
736 target, lun, tag);
737 goto reset;
738 }
739 siop_cmd = siop_lun->siop_tag[tag].active;
740 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
741 SIOP_DSP, siop_cmd->cmd_c.dsa +
742 sizeof(struct siop_common_xfer) +
743 Ent_ldsa_reload_dsa);
744 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
745 return 1;
746 case A_int_reseltag:
747 printf("%s: reselect with invalid tag\n",
748 sc->sc_c.sc_dev.dv_xname);
749 goto reset;
750 case A_int_msgin:
751 {
752 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
753 sc->sc_c.sc_rh, SIOP_SFBR);
754 if (msgin == MSG_MESSAGE_REJECT) {
755 int msg, extmsg;
756 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
757 /*
				 * message was part of an identify +
				 * something else.  Identify shouldn't
				 * have been rejected.
761 */
762 msg =
763 siop_cmd->cmd_tables->msg_out[1];
764 extmsg =
765 siop_cmd->cmd_tables->msg_out[3];
766 } else {
767 msg = siop_cmd->cmd_tables->msg_out[0];
768 extmsg =
769 siop_cmd->cmd_tables->msg_out[2];
770 }
771 if (msg == MSG_MESSAGE_REJECT) {
				/* MSG_REJECT for a MSG_REJECT! */
773 if (xs)
774 scsipi_printaddr(xs->xs_periph);
775 else
776 printf("%s: ",
777 sc->sc_c.sc_dev.dv_xname);
778 printf("our reject message was "
779 "rejected\n");
780 goto reset;
781 }
782 if (msg == MSG_EXTENDED &&
783 extmsg == MSG_EXT_WDTR) {
784 /* WDTR rejected, initiate sync */
785 if ((siop_target->target_c.flags &
786 TARF_SYNC) == 0) {
787 siop_target->target_c.status =
788 TARST_OK;
789 siop_update_xfer_mode(&sc->sc_c,
790 target);
791 /* no table to flush here */
792 CALL_SCRIPT(Ent_msgin_ack);
793 return 1;
794 }
795 siop_target->target_c.status =
796 TARST_SYNC_NEG;
797 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
798 sc->sc_c.minsync, sc->sc_c.maxoff);
799 siop_table_sync(siop_cmd,
800 BUS_DMASYNC_PREREAD |
801 BUS_DMASYNC_PREWRITE);
802 CALL_SCRIPT(Ent_send_msgout);
803 return 1;
804 } else if (msg == MSG_EXTENDED &&
805 extmsg == MSG_EXT_SDTR) {
806 /* sync rejected */
807 siop_target->target_c.offset = 0;
808 siop_target->target_c.period = 0;
809 siop_target->target_c.status = TARST_OK;
810 siop_update_xfer_mode(&sc->sc_c,
811 target);
812 /* no table to flush here */
813 CALL_SCRIPT(Ent_msgin_ack);
814 return 1;
815 } else if (msg == MSG_SIMPLE_Q_TAG ||
816 msg == MSG_HEAD_OF_Q_TAG ||
817 msg == MSG_ORDERED_Q_TAG) {
818 if (siop_handle_qtag_reject(
819 siop_cmd) == -1)
820 goto reset;
821 CALL_SCRIPT(Ent_msgin_ack);
822 return 1;
823 }
824 if (xs)
825 scsipi_printaddr(xs->xs_periph);
826 else
827 printf("%s: ",
828 sc->sc_c.sc_dev.dv_xname);
829 if (msg == MSG_EXTENDED) {
830 printf("scsi message reject, extended "
831 "message sent was 0x%x\n", extmsg);
832 } else {
833 printf("scsi message reject, message "
834 "sent was 0x%x\n", msg);
835 }
836 /* no table to flush here */
837 CALL_SCRIPT(Ent_msgin_ack);
838 return 1;
839 }
840 if (xs)
841 scsipi_printaddr(xs->xs_periph);
842 else
843 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
844 printf("unhandled message 0x%x\n",
845 siop_cmd->cmd_tables->msg_in[0]);
846 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
847 siop_cmd->cmd_tables->t_msgout.count= htole32(1);
848 siop_table_sync(siop_cmd,
849 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
850 CALL_SCRIPT(Ent_send_msgout);
851 return 1;
852 }
853 case A_int_extmsgin:
854 #ifdef SIOP_DEBUG_INTR
855 printf("extended message: msg 0x%x len %d\n",
856 siop_cmd->cmd_tables->msg_in[2],
857 siop_cmd->cmd_tables->msg_in[1]);
858 #endif
859 if (siop_cmd->cmd_tables->msg_in[1] > 6)
860 printf("%s: extended message too big (%d)\n",
861 sc->sc_c.sc_dev.dv_xname,
862 siop_cmd->cmd_tables->msg_in[1]);
863 siop_cmd->cmd_tables->t_extmsgdata.count =
864 htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
865 siop_table_sync(siop_cmd,
866 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
867 CALL_SCRIPT(Ent_get_extmsgdata);
868 return 1;
869 case A_int_extmsgdata:
870 #ifdef SIOP_DEBUG_INTR
871 {
872 int i;
873 printf("extended message: 0x%x, data:",
874 siop_cmd->cmd_tables->msg_in[2]);
875 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
876 i++)
877 printf(" 0x%x",
878 siop_cmd->cmd_tables->msg_in[i]);
879 printf("\n");
880 }
881 #endif
882 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
883 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
884 case SIOP_NEG_MSGOUT:
885 siop_update_scntl3(sc,
886 siop_cmd->cmd_c.siop_target);
887 siop_table_sync(siop_cmd,
888 BUS_DMASYNC_PREREAD |
889 BUS_DMASYNC_PREWRITE);
890 CALL_SCRIPT(Ent_send_msgout);
891 return(1);
892 case SIOP_NEG_ACK:
893 siop_update_scntl3(sc,
894 siop_cmd->cmd_c.siop_target);
895 CALL_SCRIPT(Ent_msgin_ack);
896 return(1);
897 default:
898 panic("invalid retval from "
899 "siop_wdtr_neg()");
900 }
901 return(1);
902 }
903 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
904 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
905 case SIOP_NEG_MSGOUT:
906 siop_update_scntl3(sc,
907 siop_cmd->cmd_c.siop_target);
908 siop_table_sync(siop_cmd,
909 BUS_DMASYNC_PREREAD |
910 BUS_DMASYNC_PREWRITE);
911 CALL_SCRIPT(Ent_send_msgout);
912 return(1);
913 case SIOP_NEG_ACK:
914 siop_update_scntl3(sc,
915 siop_cmd->cmd_c.siop_target);
916 CALL_SCRIPT(Ent_msgin_ack);
917 return(1);
918 default:
				panic("invalid retval from "
				    "siop_sdtr_neg()");
921 }
922 return(1);
923 }
924 /* send a message reject */
925 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
926 siop_cmd->cmd_tables->t_msgout.count = htole32(1);
927 siop_table_sync(siop_cmd,
928 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
929 CALL_SCRIPT(Ent_send_msgout);
930 return 1;
931 case A_int_disc:
932 INCSTAT(siop_stat_intr_sdp);
933 offset = bus_space_read_1(sc->sc_c.sc_rt,
934 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
935 #ifdef SIOP_DEBUG_DR
936 printf("disconnect offset %d\n", offset);
937 #endif
938 if (offset > SIOP_NSG) {
939 printf("%s: bad offset for disconnect (%d)\n",
940 sc->sc_c.sc_dev.dv_xname, offset);
941 goto reset;
942 }
943 /*
944 * offset == SIOP_NSG may be a valid condition if
945 * we get a sdp when the xfer is done.
946 * Don't call memmove in this case.
947 */
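			/*
			 * Illustration with hypothetical values: if
			 * SIOP_NSG were 16 and offset 3, entries 3..15
			 * would be shifted down to 0..12 so the table
			 * restarts at data[0] when the target reselects us.
			 */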
948 if (offset < SIOP_NSG) {
949 memmove(&siop_cmd->cmd_tables->data[0],
950 &siop_cmd->cmd_tables->data[offset],
951 (SIOP_NSG - offset) * sizeof(scr_table_t));
952 siop_table_sync(siop_cmd,
953 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
954 }
955 CALL_SCRIPT(Ent_script_sched);
956 return 1;
957 case A_int_resfail:
958 printf("reselect failed\n");
959 CALL_SCRIPT(Ent_script_sched);
960 return 1;
961 case A_int_done:
962 if (xs == NULL) {
963 printf("%s: done without command, DSA=0x%lx\n",
964 sc->sc_c.sc_dev.dv_xname,
965 (u_long)siop_cmd->cmd_c.dsa);
966 siop_cmd->cmd_c.status = CMDST_FREE;
967 CALL_SCRIPT(Ent_script_sched);
968 return 1;
969 }
970 #ifdef SIOP_DEBUG_INTR
971 printf("done, DSA=0x%lx target id 0x%x last msg "
972 "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
973 le32toh(siop_cmd->cmd_tables->id),
974 siop_cmd->cmd_tables->msg_in[0],
975 le32toh(siop_cmd->cmd_tables->status));
976 #endif
977 INCSTAT(siop_stat_intr_done);
978 siop_cmd->cmd_c.status = CMDST_DONE;
979 goto end;
980 default:
981 printf("unknown irqcode %x\n", irqcode);
982 if (xs) {
983 xs->error = XS_SELTIMEOUT;
984 goto end;
985 }
986 goto reset;
987 }
988 return 1;
989 }
	/* We just shouldn't get here */
	panic("siop_intr: I shouldn't be here!");
992 return 1;
993 end:
994 /*
995 * restart the script now if command completed properly
996 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
997 * queue
998 */
999 xs->status = le32toh(siop_cmd->cmd_tables->status);
1000 if (xs->status == SCSI_OK)
1001 CALL_SCRIPT(Ent_script_sched);
1002 else
1003 restart = 1;
1004 siop_lun->siop_tag[tag].active = NULL;
1005 siop_scsicmd_end(siop_cmd);
1006 if (freetarget && siop_target->target_c.status == TARST_PROBING)
1007 siop_del_dev(sc, target, lun);
1008 if (restart)
1009 CALL_SCRIPT(Ent_script_sched);
1010 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1011 /* a command terminated, so we have free slots now */
1012 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1013 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1014 }
1015
1016 return 1;
1017 }
1018
1019 void
1020 siop_scsicmd_end(siop_cmd)
1021 struct siop_cmd *siop_cmd;
1022 {
1023 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1024 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1025
1026 switch(xs->status) {
1027 case SCSI_OK:
1028 xs->error = XS_NOERROR;
1029 break;
1030 case SCSI_BUSY:
1031 xs->error = XS_BUSY;
1032 break;
1033 case SCSI_CHECK:
1034 xs->error = XS_BUSY;
1035 /* remove commands in the queue and scheduler */
1036 siop_unqueue(sc, xs->xs_periph->periph_target,
1037 xs->xs_periph->periph_lun);
1038 break;
1039 case SCSI_QUEUE_FULL:
1040 INCSTAT(siop_stat_intr_qfull);
1041 #ifdef SIOP_DEBUG
1042 printf("%s:%d:%d: queue full (tag %d)\n",
1043 sc->sc_c.sc_dev.dv_xname,
1044 xs->xs_periph->periph_target,
1045 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1046 #endif
1047 xs->error = XS_BUSY;
1048 break;
1049 case SCSI_SIOP_NOCHECK:
1050 /*
1051 * don't check status, xs->error is already valid
1052 */
1053 break;
1054 case SCSI_SIOP_NOSTATUS:
1055 /*
1056 * the status byte was not updated, cmd was
1057 * aborted
1058 */
1059 xs->error = XS_SELTIMEOUT;
1060 break;
1061 default:
1062 xs->error = XS_DRIVER_STUFFUP;
1063 }
1064 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1065 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
1066 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1067 (xs->xs_control & XS_CTL_DATA_IN) ?
1068 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1069 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1070 }
1071 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1072 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1073 siop_cmd->cmd_c.status = CMDST_FREE;
1074 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1075 xs->resid = 0;
1076 scsipi_done (xs);
1077 }
1078
1079 void
1080 siop_unqueue(sc, target, lun)
1081 struct siop_softc *sc;
1082 int target;
1083 int lun;
1084 {
1085 int slot, tag;
1086 struct siop_cmd *siop_cmd;
1087 struct siop_lun *siop_lun =
1088 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1089
1090 /* first make sure to read valid data */
1091 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1092
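	/*
	 * For every still-active tag, locate its scheduler slot by matching
	 * the slot's second word against the ldsa_select address that
	 * siop_start() stored there, and only requeue the command if the
	 * slot's JUMP word shows the SCRIPT has not picked it up yet.
	 */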
1093 for (tag = 1; tag < SIOP_NTAG; tag++) {
1094 /* look for commands in the scheduler, not yet started */
1095 if (siop_lun->siop_tag[tag].active == NULL)
1096 continue;
1097 siop_cmd = siop_lun->siop_tag[tag].active;
1098 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1099 if (siop_script_read(sc,
1100 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1101 siop_cmd->cmd_c.dsa +
1102 sizeof(struct siop_common_xfer) +
1103 Ent_ldsa_select)
1104 break;
1105 }
1106 if (slot > sc->sc_currschedslot)
1107 continue; /* didn't find it */
1108 if (siop_script_read(sc,
1109 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1110 continue; /* already started */
1111 /* clear the slot */
1112 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1113 0x80000000);
1114 /* ask to requeue */
1115 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1116 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1117 siop_lun->siop_tag[tag].active = NULL;
1118 siop_scsicmd_end(siop_cmd);
1119 }
1120 /* update sc_currschedslot */
1121 sc->sc_currschedslot = 0;
1122 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1123 if (siop_script_read(sc,
1124 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1125 sc->sc_currschedslot = slot;
1126 }
1127 }
1128
1129 /*
 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
1132 */
1133 int
1134 siop_handle_qtag_reject(siop_cmd)
1135 struct siop_cmd *siop_cmd;
1136 {
1137 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1138 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1139 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1140 int tag = siop_cmd->cmd_tables->msg_out[2];
1141 struct siop_lun *siop_lun =
1142 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1143
1144 #ifdef SIOP_DEBUG
1145 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1146 sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
1147 siop_cmd->cmd_c.status);
1148 #endif
1149
1150 if (siop_lun->siop_tag[0].active != NULL) {
1151 printf("%s: untagged command already running for target %d "
1152 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1153 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1154 return -1;
1155 }
1156 /* clear tag slot */
1157 siop_lun->siop_tag[tag].active = NULL;
1158 /* add command to non-tagged slot */
1159 siop_lun->siop_tag[0].active = siop_cmd;
1160 siop_cmd->cmd_c.tag = 0;
1161 /* adjust reselect script if there is one */
1162 if (siop_lun->siop_tag[0].reseloff > 0) {
1163 siop_script_write(sc,
1164 siop_lun->siop_tag[0].reseloff + 1,
1165 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1166 Ent_ldsa_reload_dsa);
1167 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1168 }
1169 return 0;
1170 }
1171
1172 /*
 * handle a bus reset: reset chip, unqueue all active commands, free all
 * target structs and report the loss to the upper layer.
 * As the upper layer may requeue immediately, we have to first store
 * all active commands in a temporary queue.
1177 */
1178 void
1179 siop_handle_reset(sc)
1180 struct siop_softc *sc;
1181 {
1182 struct siop_cmd *siop_cmd;
1183 struct siop_lun *siop_lun;
1184 int target, lun, tag;
1185 /*
1186 * scsi bus reset. reset the chip and restart
1187 * the queue. Need to clean up all active commands
1188 */
1189 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1190 /* stop, reset and restart the chip */
1191 siop_reset(sc);
1192 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1193 /* chip has been reset, all slots are free now */
1194 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1195 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1196 }
1197 /*
	 * Process all commands: first the commands being executed
1199 */
1200 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1201 target++) {
1202 if (sc->sc_c.targets[target] == NULL)
1203 continue;
1204 for (lun = 0; lun < 8; lun++) {
1205 struct siop_target *siop_target =
1206 (struct siop_target *)sc->sc_c.targets[target];
1207 siop_lun = siop_target->siop_lun[lun];
1208 if (siop_lun == NULL)
1209 continue;
1210 for (tag = 0; tag <
1211 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1212 SIOP_NTAG : 1);
1213 tag++) {
1214 siop_cmd = siop_lun->siop_tag[tag].active;
1215 if (siop_cmd == NULL)
1216 continue;
1217 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1218 printf("command with tag id %d reset\n", tag);
1219 siop_cmd->cmd_c.xs->error =
1220 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1221 XS_TIMEOUT : XS_RESET;
1222 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1223 siop_lun->siop_tag[tag].active = NULL;
1224 siop_cmd->cmd_c.status = CMDST_DONE;
1225 siop_scsicmd_end(siop_cmd);
1226 }
1227 }
1228 sc->sc_c.targets[target]->status = TARST_ASYNC;
1229 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1230 sc->sc_c.targets[target]->period =
1231 sc->sc_c.targets[target]->offset = 0;
1232 siop_update_xfer_mode(&sc->sc_c, target);
1233 }
1234
1235 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1236 }
1237
1238 void
1239 siop_scsipi_request(chan, req, arg)
1240 struct scsipi_channel *chan;
1241 scsipi_adapter_req_t req;
1242 void *arg;
1243 {
1244 struct scsipi_xfer *xs;
1245 struct scsipi_periph *periph;
1246 struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1247 struct siop_cmd *siop_cmd;
1248 struct siop_target *siop_target;
1249 int s, error, i;
1250 int target;
1251 int lun;
1252
1253 switch (req) {
1254 case ADAPTER_REQ_RUN_XFER:
1255 xs = arg;
1256 periph = xs->xs_periph;
1257 target = periph->periph_target;
1258 lun = periph->periph_lun;
1259
1260 s = splbio();
1261 #ifdef SIOP_DEBUG_SCHED
1262 printf("starting cmd for %d:%d\n", target, lun);
1263 #endif
1264 siop_cmd = TAILQ_FIRST(&sc->free_list);
1265 if (siop_cmd == NULL) {
1266 xs->error = XS_RESOURCE_SHORTAGE;
1267 scsipi_done(xs);
1268 splx(s);
1269 return;
1270 }
1271 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1272 #ifdef DIAGNOSTIC
1273 if (siop_cmd->cmd_c.status != CMDST_FREE)
1274 panic("siop_scsicmd: new cmd not free");
1275 #endif
1276 siop_target = (struct siop_target*)sc->sc_c.targets[target];
1277 if (siop_target == NULL) {
1278 #ifdef SIOP_DEBUG
1279 printf("%s: alloc siop_target for target %d\n",
1280 sc->sc_c.sc_dev.dv_xname, target);
1281 #endif
1282 sc->sc_c.targets[target] =
1283 malloc(sizeof(struct siop_target),
1284 M_DEVBUF, M_NOWAIT);
1285 if (sc->sc_c.targets[target] == NULL) {
1286 printf("%s: can't malloc memory for "
1287 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1288 target);
1289 xs->error = XS_RESOURCE_SHORTAGE;
1290 scsipi_done(xs);
1291 splx(s);
1292 return;
1293 }
1294 siop_target =
1295 (struct siop_target*)sc->sc_c.targets[target];
1296 siop_target->target_c.status = TARST_PROBING;
1297 siop_target->target_c.flags = 0;
1298 siop_target->target_c.id =
1299 sc->sc_c.clock_div << 24; /* scntl3 */
1300 siop_target->target_c.id |= target << 16; /* id */
1301 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
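			/*
			 * The id word is thus packed as bits 31-24 scntl3,
			 * 23-16 target id, 15-8 sxfer; the restore_scntl3
			 * patches in siop_update_scntl3() rely on this same
			 * layout.
			 */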
1302
1303 /* get a lun switch script */
1304 siop_target->lunsw = siop_get_lunsw(sc);
1305 if (siop_target->lunsw == NULL) {
1306 printf("%s: can't alloc lunsw for target %d\n",
1307 sc->sc_c.sc_dev.dv_xname, target);
1308 xs->error = XS_RESOURCE_SHORTAGE;
1309 scsipi_done(xs);
1310 splx(s);
1311 return;
1312 }
1313 for (i=0; i < 8; i++)
1314 siop_target->siop_lun[i] = NULL;
1315 siop_add_reselsw(sc, target);
1316 }
1317 if (siop_target->siop_lun[lun] == NULL) {
1318 siop_target->siop_lun[lun] =
1319 malloc(sizeof(struct siop_lun), M_DEVBUF,
1320 M_NOWAIT|M_ZERO);
1321 if (siop_target->siop_lun[lun] == NULL) {
1322 printf("%s: can't alloc siop_lun for "
1323 "target %d lun %d\n",
1324 sc->sc_c.sc_dev.dv_xname, target, lun);
1325 xs->error = XS_RESOURCE_SHORTAGE;
1326 scsipi_done(xs);
1327 splx(s);
1328 return;
1329 }
1330 }
1331 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1332 siop_cmd->cmd_c.xs = xs;
1333 siop_cmd->cmd_c.flags = 0;
1334 siop_cmd->cmd_c.status = CMDST_READY;
1335
1336 /* load the DMA maps */
1337 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1338 siop_cmd->cmd_c.dmamap_cmd,
1339 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1340 if (error) {
1341 printf("%s: unable to load cmd DMA map: %d\n",
1342 sc->sc_c.sc_dev.dv_xname, error);
1343 xs->error = XS_DRIVER_STUFFUP;
1344 scsipi_done(xs);
1345 splx(s);
1346 return;
1347 }
1348 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1349 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1350 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1351 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1352 ((xs->xs_control & XS_CTL_DATA_IN) ?
1353 BUS_DMA_READ : BUS_DMA_WRITE));
1354 if (error) {
				printf("%s: unable to load data DMA map: %d\n",
1356 sc->sc_c.sc_dev.dv_xname, error);
1357 xs->error = XS_DRIVER_STUFFUP;
1358 scsipi_done(xs);
1359 bus_dmamap_unload(sc->sc_c.sc_dmat,
1360 siop_cmd->cmd_c.dmamap_cmd);
1361 splx(s);
1362 return;
1363 }
1364 bus_dmamap_sync(sc->sc_c.sc_dmat,
1365 siop_cmd->cmd_c.dmamap_data, 0,
1366 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1367 (xs->xs_control & XS_CTL_DATA_IN) ?
1368 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1369 }
1370 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1371 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1372 BUS_DMASYNC_PREWRITE);
1373
1374 siop_setuptables(&siop_cmd->cmd_c);
1375 siop_table_sync(siop_cmd,
1376 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1377 siop_start(sc, siop_cmd);
1378 if (xs->xs_control & XS_CTL_POLL) {
1379 /* poll for command completion */
1380 while ((xs->xs_status & XS_STS_DONE) == 0) {
1381 delay(1000);
1382 siop_intr(sc);
1383 }
1384 }
1385 splx(s);
1386 return;
1387
1388 case ADAPTER_REQ_GROW_RESOURCES:
1389 #ifdef SIOP_DEBUG
1390 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1391 sc->sc_c.sc_adapt.adapt_openings);
1392 #endif
1393 siop_morecbd(sc);
1394 return;
1395
1396 case ADAPTER_REQ_SET_XFER_MODE:
1397 {
1398 struct scsipi_xfer_mode *xm = arg;
1399 if (sc->sc_c.targets[xm->xm_target] == NULL)
1400 return;
1401 s = splbio();
1402 if (xm->xm_mode & PERIPH_CAP_TQING)
1403 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1404 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1405 (sc->sc_c.features & SF_BUS_WIDE))
1406 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1407 if (xm->xm_mode & PERIPH_CAP_SYNC)
1408 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1409 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1410 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1411 sc->sc_c.targets[xm->xm_target]->status =
1412 TARST_ASYNC;
1413
1414 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1415 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1416 /* allocate a lun sw entry for this device */
1417 siop_add_dev(sc, xm->xm_target, lun);
1418 }
1419
1420 splx(s);
1421 }
1422 }
1423 }
1424
1425 static void
1426 siop_start(sc, siop_cmd)
1427 struct siop_softc *sc;
1428 struct siop_cmd *siop_cmd;
1429 {
1430 struct siop_lun *siop_lun;
1431 struct siop_xfer *siop_xfer;
1432 u_int32_t dsa;
1433 int timeout;
1434 int target, lun, slot;
1435
1436 /*
1437 * first make sure to read valid data
1438 */
1439 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1440
1441 /*
1442 * The queue management here is a bit tricky: the script always looks
1443 * at the slot from first to last, so if we always use the first
1444 * free slot commands can stay at the tail of the queue ~forever.
1445 * The algorithm used here is to restart from the head when we know
1446 * that the queue is empty, and only add commands after the last one.
1447 * When we're at the end of the queue wait for the script to clear it.
1448 * The best thing to do here would be to implement a circular queue,
1449 * but using only 53c720 features this can be "interesting".
1450 * A mid-way solution could be to implement 2 queues and swap orders.
1451 */
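	/*
	 * Scheduler slot layout as the code below assumes it: each slot is
	 * two script words at Ent_script_sched_slot0 + slot * 8.  Word 0 is
	 * the JUMP opcode (0x80000000 leaves the slot disabled/free,
	 * 0x80080000 arms it so the SCRIPT will take the jump) and word 1
	 * is the absolute address of the command's ldsa_select entry.
	 */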
1452 slot = sc->sc_currschedslot;
1453 /*
1454 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1455 * free. As this is the last used slot, all previous slots are free,
1456 * we can restart from 0.
1457 */
1458 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1459 0x80000000) {
1460 slot = sc->sc_currschedslot = 0;
1461 } else {
1462 slot++;
1463 }
1464 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1465 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1466 siop_lun =
1467 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1468 /* if non-tagged command active, panic: this shouldn't happen */
1469 if (siop_lun->siop_tag[0].active != NULL) {
1470 panic("siop_start: tagged cmd while untagged running");
1471 }
1472 #ifdef DIAGNOSTIC
1473 /* sanity check the tag if needed */
1474 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1475 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1476 panic("siop_start: tag not free");
1477 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1478 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1479 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1480 panic("siop_start: invalid tag id");
1481 }
1482 }
1483 #endif
1484 /*
1485 * find a free scheduler slot and load it.
1486 */
1487 for (; slot < SIOP_NSLOTS; slot++) {
1488 /*
		 * If the cmd word is 0x80000000 (JUMP foo, IF FALSE) the slot is free
1490 */
1491 if (siop_script_read(sc,
1492 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1493 0x80000000)
1494 break;
1495 }
1496 if (slot == SIOP_NSLOTS) {
1497 /*
1498 * no more free slot, no need to continue. freeze the queue
1499 * and requeue this command.
1500 */
1501 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1502 sc->sc_flags |= SCF_CHAN_NOSLOT;
1503 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1504 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1505 siop_scsicmd_end(siop_cmd);
1506 return;
1507 }
1508 #ifdef SIOP_DEBUG_SCHED
1509 printf("using slot %d for DSA 0x%lx\n", slot,
1510 (u_long)siop_cmd->cmd_c.dsa);
1511 #endif
1512 /* mark command as active */
1513 if (siop_cmd->cmd_c.status == CMDST_READY)
1514 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1515 else
1516 panic("siop_start: bad status");
1517 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1518 /* patch scripts with DSA addr */
1519 dsa = siop_cmd->cmd_c.dsa;
1520 /* first reselect switch, if we have an entry */
1521 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1522 siop_script_write(sc,
1523 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1524 dsa + sizeof(struct siop_common_xfer) +
1525 Ent_ldsa_reload_dsa);
1526 /* CMD script: MOVE MEMORY addr */
1527 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1528 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1529 htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1530 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1531 /* scheduler slot: JUMP ldsa_select */
1532 siop_script_write(sc,
1533 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1534 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1535 /* handle timeout */
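	/*
	 * Polled commands are spun on by siop_scsipi_request() itself, so no
	 * callout is armed for them.  Otherwise the scsipi per-xfer timeout
	 * (milliseconds) is converted to ticks; if it expires, siop_timeout()
	 * below resets the bus and flags the command with CMDFL_TIMEOUT for
	 * siop_intr() to complete.
	 */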
1536 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expiry timer */
1538 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1539 if (timeout == 0)
1540 timeout = 1;
1541 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1542 timeout, siop_timeout, siop_cmd);
1543 }
1544 /*
1545 * Change JUMP cmd so that this slot will be handled
1546 */
1547 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1548 0x80080000);
1549 sc->sc_currschedslot = slot;
1550
1551 /* make sure SCRIPT processor will read valid data */
1552 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1553 /* Signal script it has some work to do */
1554 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1555 SIOP_ISTAT, ISTAT_SIGP);
1556 /* and wait for IRQ */
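	/*
	 * ISTAT_SIGP is presumably noticed by the idle/reselect loop in the
	 * SCRIPT, which then rescans the scheduler slots; completion or any
	 * error comes back through siop_intr().
	 */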
1557 return;
1558 }
1559
1560 void
1561 siop_timeout(v)
1562 void *v;
1563 {
1564 struct siop_cmd *siop_cmd = v;
1565 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1566 int s;
1567
1568 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1569 printf("command timeout\n");
1570
1571 s = splbio();
1572 /* reset the scsi bus */
1573 siop_resetbus(&sc->sc_c);
1574
1575 /* deactivate callout */
1576 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
	/*
	 * mark the command as being timed out and just return;
	 * the bus reset will generate an interrupt,
	 * which will be handled in siop_intr()
	 */
1583 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1584 splx(s);
1585 return;
1586
1587 }
1588
1589 void
1590 siop_dump_script(sc)
1591 struct siop_softc *sc;
1592 {
1593 int i;
1594 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1595 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1596 le32toh(sc->sc_c.sc_script[i]),
1597 le32toh(sc->sc_c.sc_script[i+1]));
1598 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1599 0xc0000000) {
1600 i++;
1601 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1602 }
1603 printf("\n");
1604 }
1605 }
1606
1607 void
1608 siop_morecbd(sc)
1609 struct siop_softc *sc;
1610 {
1611 int error, i, j, s;
1612 bus_dma_segment_t seg;
1613 int rseg;
1614 struct siop_cbd *newcbd;
1615 struct siop_xfer *xfer;
1616 bus_addr_t dsa;
1617 u_int32_t *scr;
1618
1619 /* allocate a new list head */
1620 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1621 if (newcbd == NULL) {
1622 printf("%s: can't allocate memory for command descriptors "
1623 "head\n", sc->sc_c.sc_dev.dv_xname);
1624 return;
1625 }
1626
1627 /* allocate cmd list */
1628 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1629 M_DEVBUF, M_NOWAIT|M_ZERO);
1630 if (newcbd->cmds == NULL) {
1631 printf("%s: can't allocate memory for command descriptors\n",
1632 sc->sc_c.sc_dev.dv_xname);
1633 goto bad3;
1634 }
1635 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1636 1, &rseg, BUS_DMA_NOWAIT);
1637 if (error) {
1638 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1639 sc->sc_c.sc_dev.dv_xname, error);
1640 goto bad2;
1641 }
1642 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1643 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1644 if (error) {
1645 printf("%s: unable to map cbd DMA memory, error = %d\n",
1646 sc->sc_c.sc_dev.dv_xname, error);
1647 goto bad2;
1648 }
1649 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1650 BUS_DMA_NOWAIT, &newcbd->xferdma);
1651 if (error) {
1652 printf("%s: unable to create cbd DMA map, error = %d\n",
1653 sc->sc_c.sc_dev.dv_xname, error);
1654 goto bad1;
1655 }
1656 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1657 PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1658 if (error) {
1659 printf("%s: unable to load cbd DMA map, error = %d\n",
1660 sc->sc_c.sc_dev.dv_xname, error);
1661 goto bad0;
1662 }
1663 #ifdef DEBUG
	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1665 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1666 #endif
1667 for (i = 0; i < SIOP_NCMDPB; i++) {
1668 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1669 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1670 &newcbd->cmds[i].cmd_c.dmamap_data);
1671 if (error) {
1672 printf("%s: unable to create data DMA map for cbd: "
1673 "error %d\n",
1674 sc->sc_c.sc_dev.dv_xname, error);
1675 goto bad0;
1676 }
1677 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1678 sizeof(struct scsipi_generic), 1,
1679 sizeof(struct scsipi_generic), 0,
1680 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1681 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1682 if (error) {
			printf("%s: unable to create cmd DMA map for cbd: "
			    "error %d\n",
1684 sc->sc_c.sc_dev.dv_xname, error);
1685 goto bad0;
1686 }
1687 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1688 newcbd->cmds[i].siop_cbdp = newcbd;
1689 xfer = &newcbd->xfers[i];
1690 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1691 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1692 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1693 i * sizeof(struct siop_xfer);
1694 newcbd->cmds[i].cmd_c.dsa = dsa;
1695 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1696 xfer->siop_tables.t_msgout.count= htole32(1);
1697 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1698 xfer->siop_tables.t_msgin.count= htole32(1);
1699 xfer->siop_tables.t_msgin.addr = htole32(dsa + 8);
1700 xfer->siop_tables.t_extmsgin.count= htole32(2);
1701 xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1702 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 11);
1703 xfer->siop_tables.t_status.count= htole32(1);
1704 xfer->siop_tables.t_status.addr = htole32(dsa + 16);
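		/*
		 * The constants above are DSA-relative offsets into struct
		 * siop_common_xfer as this driver appears to lay it out:
		 * msg_out[] at +0, msg_in[] at +8 (so the extended-message
		 * length byte is at +9 and its data at +11) and the status
		 * byte at +16.  If that structure changes, these offsets
		 * must change with it.
		 */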
1705
1706 /* The select/reselect script */
1707 scr = &xfer->resel[0];
1708 for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1709 scr[j] = htole32(load_dsa[j]);
1710 /*
1711 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1712 * octet, reg offset is the third.
1713 */
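		/*
		 * Worked example with a hypothetical DSA of 0x12345678: the
		 * rdsa0 patch becomes 0x78107800, i.e. "move 0x78 into
		 * register 0x10" (the first DSA register byte); rdsa1..rdsa3
		 * load the remaining DSA bytes the same way.
		 */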
1714 scr[Ent_rdsa0 / 4] =
1715 htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
1716 scr[Ent_rdsa1 / 4] =
1717 htole32(0x78110000 | ( dsa & 0x0000ff00 ));
1718 scr[Ent_rdsa2 / 4] =
1719 htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
1720 scr[Ent_rdsa3 / 4] =
1721 htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
1722 scr[E_ldsa_abs_reselected_Used[0]] =
1723 htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
1724 scr[E_ldsa_abs_reselect_Used[0]] =
1725 htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
1726 scr[E_ldsa_abs_selected_Used[0]] =
1727 htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
1728 scr[E_ldsa_abs_data_Used[0]] =
1729 htole32(dsa + sizeof(struct siop_common_xfer) +
1730 Ent_ldsa_data);
1731 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1732 scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1733 s = splbio();
1734 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1735 splx(s);
1736 #ifdef SIOP_DEBUG
1737 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1738 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1739 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1740 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1741 #endif
1742 }
1743 s = splbio();
1744 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1745 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1746 splx(s);
1747 return;
1748 bad0:
1749 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1750 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1751 bad1:
1752 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1753 bad2:
1754 free(newcbd->cmds, M_DEVBUF);
1755 bad3:
1756 free(newcbd, M_DEVBUF);
1757 return;
1758 }
1759
1760 struct siop_lunsw *
1761 siop_get_lunsw(sc)
1762 struct siop_softc *sc;
1763 {
1764 struct siop_lunsw *lunsw;
1765 int i;
1766
1767 if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1768 sc->script_free_hi)
1769 return NULL;
1770 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1771 if (lunsw != NULL) {
1772 #ifdef SIOP_DEBUG
1773 printf("siop_get_lunsw got lunsw at offset %d\n",
1774 lunsw->lunsw_off);
1775 #endif
1776 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1777 return lunsw;
1778 }
1779 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1780 if (lunsw == NULL)
1781 return NULL;
1782 #ifdef SIOP_DEBUG
1783 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1784 #endif
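/*
 * Copy the lun_switch template into the script area: straight into
 * the on-chip RAM via bus_space when the chip has it, otherwise into
 * the host-memory script (which needs the htole32() swap).  Then
 * patch its abs_lunsw_return slot to point back at the main script's
 * lunsw_return entry.
 */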
1785 if (sc->sc_c.features & SF_CHIP_RAM) {
1786 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1787 sc->script_free_lo * 4, lun_switch,
1788 sizeof(lun_switch) / sizeof(lun_switch[0]));
1789 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1790 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1791 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1792 } else {
1793 for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1794 i++)
1795 sc->sc_c.sc_script[sc->script_free_lo + i] =
1796 htole32(lun_switch[i]);
1797 sc->sc_c.sc_script[
1798 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1799 htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1800 }
1801 lunsw->lunsw_off = sc->script_free_lo;
1802 lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1803 sc->script_free_lo += lunsw->lunsw_size;
1804 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1805 return lunsw;
1806 }
1807
1808 void
1809 siop_add_reselsw(sc, target)
1810 struct siop_softc *sc;
1811 int target;
1812 {
1813 int i;
1814 struct siop_target *siop_target;
1815 struct siop_lun *siop_lun;
1816
1817 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1818 /*
1819 * add an entry to resel switch
1820 */
1821 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
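/*
 * The resel switch has 15 two-word slots starting at Ent_resel_targ0.
 * A slot whose compare byte is 0xff is taken to be free: 0xff never
 * matches a real target ID, and siop_del_dev() restores that pattern
 * when a target goes away.
 */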
1822 for (i = 0; i < 15; i++) {
1823 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1824 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1825 == 0xff) { /* it's free */
1826 #ifdef SIOP_DEBUG
1827 printf("siop: target %d slot %d offset %d\n",
1828 target, i, siop_target->reseloff);
1829 #endif
1830 /* JUMP abs_foo, IF target | 0x80; */
1831 siop_script_write(sc, siop_target->reseloff,
1832 0x800c0080 | target);
1833 siop_script_write(sc, siop_target->reseloff + 1,
1834 sc->sc_c.sc_scriptaddr +
1835 siop_target->lunsw->lunsw_off * 4 +
1836 Ent_lun_switch_entry);
1837 break;
1838 }
1839 }
1840 if (i == 15) /* no free slot, shouldn't happen */
1841 panic("siop: resel switch full");
1842
1843 sc->sc_ntargets++;
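/*
 * Re-register the luns already known on this target: clear their
 * reselect offset and let siop_add_dev() rebuild the per-lun (and,
 * if tagged queuing is enabled, per-tag) switch entries.
 */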
1844 for (i = 0; i < 8; i++) {
1845 siop_lun = siop_target->siop_lun[i];
1846 if (siop_lun == NULL)
1847 continue;
1848 if (siop_lun->reseloff > 0) {
1849 siop_lun->reseloff = 0;
1850 siop_add_dev(sc, target, i);
1851 }
1852 }
1853 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1854 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1855 }
1856
1857 void
1858 siop_update_scntl3(sc, _siop_target)
1859 struct siop_softc *sc;
1860 struct siop_common_target *_siop_target;
1861 {
1862 struct siop_target *siop_target = (struct siop_target *)_siop_target;
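/*
 * target_c.id packs the per-target register values, with SCNTL3 in
 * the top byte and SXFER in bits 8-15 (as encoded by the shifts
 * below).  Patch the two MOVE-to-register instructions of this
 * target's lun switch so that a reselection restores the negotiated
 * transfer parameters.
 */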
1863 /* MOVE target->id >> 24 TO SCNTL3 */
1864 siop_script_write(sc,
1865 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1866 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1867 /* MOVE target->id >> 8 TO SXFER */
1868 siop_script_write(sc,
1869 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1870 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1871 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1872 }
1873
1874 void
1875 siop_add_dev(sc, target, lun)
1876 struct siop_softc *sc;
1877 int target;
1878 int lun;
1879 {
1880 struct siop_lunsw *lunsw;
1881 struct siop_target *siop_target =
1882 (struct siop_target *)sc->sc_c.targets[target];
1883 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1884 int i, ntargets;
1885
1886 if (siop_lun->reseloff > 0)
1887 return;
1888 lunsw = siop_target->lunsw;
1889 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1890 /*
1891 * can't extend this slot. Probably not worth trying to deal
1892 * with this case
1893 */
1894 #ifdef DEBUG
1895 printf("%s:%d:%d: can't allocate a lun sw slot\n",
1896 sc->sc_c.sc_dev.dv_xname, target, lun);
1897 #endif
1898 return;
1899 }
1900 /* count how many targets may still have to be set up */
1901 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1902
1903 /*
1904 * we need 8 bytes for the additional lun switch entry, and
1905 * possibly sizeof(tag_switch) more for the tag switch entry.
1906 * Keep enough free space for the targets that may still be
1907 * probed later.
1908 */
1909 if (sc->script_free_lo + 2 +
1910 (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1911 ((siop_target->target_c.flags & TARF_TAG) ?
1912 sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1913 sc->script_free_hi)) {
1914 /*
1915 * not enough space; probably not worth dealing with it.
1916 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1917 */
1918 #ifdef DEBUG
1919 printf("%s:%d:%d: not enough memory for a lun sw slot\n",
1920 sc->sc_c.sc_dev.dv_xname, target, lun);
1921 #endif
1922 return;
1923 }
1924 #ifdef SIOP_DEBUG
1925 printf("%s:%d:%d: allocate lun sw entry\n",
1926 sc->sc_c.sc_dev.dv_xname, target, lun);
1927 #endif
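/*
 * The lun switch ends with an INT int_resellun entry, which sits in
 * the two words just below script_free_lo.  Append a fresh INT
 * terminator at script_free_lo and turn the old one into the new
 * per-lun slot, so the switch grows by two script words.
 */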
1928 /* INT int_resellun */
1929 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1930 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1931 /* Now the slot entry: JUMP abs_foo, IF lun */
1932 siop_script_write(sc, sc->script_free_lo - 2,
1933 0x800c0000 | lun);
1934 siop_script_write(sc, sc->script_free_lo - 1, 0);
1935 siop_lun->reseloff = sc->script_free_lo - 2;
1936 lunsw->lunsw_size += 2;
1937 sc->script_free_lo += 2;
1938 if (siop_target->target_c.flags & TARF_TAG) {
1939 /* we need a tag switch */
1940 sc->script_free_hi -=
1941 sizeof(tag_switch) / sizeof(tag_switch[0]);
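/*
 * Carve the tag_switch template out of the top of the free script
 * area, copy it there (on-chip RAM or host-memory script) and make
 * this lun's slot jump to its tag_switch_entry.  Each of the
 * SIOP_NTAG tags then gets a two-word reselect slot starting at
 * Ent_resel_tag0.
 */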
1942 if (sc->sc_c.features & SF_CHIP_RAM) {
1943 bus_space_write_region_4(sc->sc_c.sc_ramt,
1944 sc->sc_c.sc_ramh,
1945 sc->script_free_hi * 4, tag_switch,
1946 sizeof(tag_switch) / sizeof(tag_switch[0]));
1947 } else {
1948 for (i = 0;
1949 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1950 i++) {
1951 sc->sc_c.sc_script[sc->script_free_hi + i] =
1952 htole32(tag_switch[i]);
1953 }
1954 }
1955 siop_script_write(sc,
1956 siop_lun->reseloff + 1,
1957 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1958 Ent_tag_switch_entry);
1959
1960 for (i = 0; i < SIOP_NTAG; i++) {
1961 siop_lun->siop_tag[i].reseloff =
1962 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1963 }
1964 } else {
1965 /* non-tag case; just work with the lun switch */
1966 siop_lun->siop_tag[0].reseloff =
1967 siop_target->siop_lun[lun]->reseloff;
1968 }
1969 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1970 }
1971
1972 void
1973 siop_del_dev(sc, target, lun)
1974 struct siop_softc *sc;
1975 int target;
1976 int lun;
1977 {
1978 int i;
1979 struct siop_target *siop_target;
1980 #ifdef SIOP_DEBUG
1981 printf("%s:%d:%d: free lun sw entry\n",
1982 sc->sc_c.sc_dev.dv_xname, target, lun);
1983 #endif
1984 if (sc->sc_c.targets[target] == NULL)
1985 return;
1986 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1987 free(siop_target->siop_lun[lun], M_DEVBUF);
1988 siop_target->siop_lun[lun] = NULL;
1989 /* XXX compact sw entry too ? */
1990 /* check if we can free the whole target */
1991 for (i = 0; i < 8; i++) {
1992 if (siop_target->siop_lun[i] != NULL)
1993 return;
1994 }
1995 #ifdef SIOP_DEBUG
1996 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
1997 sc->sc_c.sc_dev.dv_xname, target, lun,
1998 sc->sc_c.targets[target]->lunsw->lunsw_off);
1999 #endif
2000 /*
2001 * no more luns on this target: free the target struct and
2002 * release its resel switch entry
2003 */
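/* JUMP ..., IF 0xff: never matches, so siop_add_reselsw() sees the slot as free */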
2004 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
2005 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2006 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2007 free(sc->sc_c.targets[target], M_DEVBUF);
2008 sc->sc_c.targets[target] = NULL;
2009 sc->sc_ntargets--;
2010 }
2011
2012 #ifdef SIOP_STATS
2013 void
2014 siop_printstats()
2015 {
2016 printf("siop_stat_intr %d\n", siop_stat_intr);
2017 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2018 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2019 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2020 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2021 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2022 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2023 }
2024 #endif
2025