1 /* $NetBSD: siop.c,v 1.84 2007/09/30 11:59:42 martin Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.84 2007/09/30 11:59:42 martin Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 /*
68 #define SIOP_DEBUG
69 #define SIOP_DEBUG_DR
70 #define SIOP_DEBUG_INTR
71 #define SIOP_DEBUG_SCHED
72 #define DUMP_SCRIPT
73 */
74
75 #define SIOP_STATS
76
77 #ifndef SIOP_DEFAULT_TARGET
78 #define SIOP_DEFAULT_TARGET 7
79 #endif
80
81 /* number of cmd descriptors per block */
82 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
83
84 /* Number of scheduler slots (needs to match script) */
85 #define SIOP_NSLOTS 40
86
87 void siop_reset(struct siop_softc *);
88 void siop_handle_reset(struct siop_softc *);
89 int siop_handle_qtag_reject(struct siop_cmd *);
90 void siop_scsicmd_end(struct siop_cmd *);
91 void siop_unqueue(struct siop_softc *, int, int);
92 static void siop_start(struct siop_softc *, struct siop_cmd *);
93 void siop_timeout(void *);
94 int siop_scsicmd(struct scsipi_xfer *);
95 void siop_scsipi_request(struct scsipi_channel *,
96 scsipi_adapter_req_t, void *);
97 void siop_dump_script(struct siop_softc *);
98 void siop_morecbd(struct siop_softc *);
99 struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
100 void siop_add_reselsw(struct siop_softc *, int);
101 void siop_update_scntl3(struct siop_softc *,
102 struct siop_common_target *);
103
104 #ifdef SIOP_STATS
105 static int siop_stat_intr = 0;
106 static int siop_stat_intr_shortxfer = 0;
107 static int siop_stat_intr_sdp = 0;
108 static int siop_stat_intr_saveoffset = 0;
109 static int siop_stat_intr_done = 0;
110 static int siop_stat_intr_xferdisc = 0;
111 static int siop_stat_intr_lunresel = 0;
112 static int siop_stat_intr_qfull = 0;
113 void siop_printstats(void);
114 #define INCSTAT(x) x++
115 #else
116 #define INCSTAT(x)
117 #endif
118
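/*
 * The SCRIPTS code lives either in on-chip RAM (SF_CHIP_RAM) or in a
 * host-memory DMA buffer; the helpers below hide that difference and,
 * for the host-memory copy, keep the DMA map in sync.
 */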
119 static inline void siop_script_sync(struct siop_softc *, int);
120 static inline void
121 siop_script_sync(sc, ops)
122 struct siop_softc *sc;
123 int ops;
124 {
125 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
126 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
127 PAGE_SIZE, ops);
128 }
129
130 static inline u_int32_t siop_script_read(struct siop_softc *, u_int);
131 static inline u_int32_t
132 siop_script_read(sc, offset)
133 struct siop_softc *sc;
134 u_int offset;
135 {
136 if (sc->sc_c.features & SF_CHIP_RAM) {
137 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
138 offset * 4);
139 } else {
140 return le32toh(sc->sc_c.sc_script[offset]);
141 }
142 }
143
144 static inline void siop_script_write(struct siop_softc *, u_int,
145 u_int32_t);
146 static inline void
147 siop_script_write(sc, offset, val)
148 struct siop_softc *sc;
149 u_int offset;
150 u_int32_t val;
151 {
152 if (sc->sc_c.features & SF_CHIP_RAM) {
153 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
154 offset * 4, val);
155 } else {
156 sc->sc_c.sc_script[offset] = htole32(val);
157 }
158 }
159
160 void
161 siop_attach(sc)
162 struct siop_softc *sc;
163 {
164 if (siop_common_attach(&sc->sc_c) != 0)
165 return;
166
167 TAILQ_INIT(&sc->free_list);
168 TAILQ_INIT(&sc->cmds);
169 TAILQ_INIT(&sc->lunsw_list);
170 sc->sc_currschedslot = 0;
171 #ifdef SIOP_DEBUG
172 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
173 sc->sc_c.sc_dev.dv_xname, (int)sizeof(siop_script),
174 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
175 #endif
176
177 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
178 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
179
180 /* Do a bus reset, so that devices fall back to narrow/async */
181 siop_resetbus(&sc->sc_c);
182 /*
183 * siop_reset() will reset the chip, thus clearing pending interrupts
184 */
185 siop_reset(sc);
186 #ifdef DUMP_SCRIPT
187 siop_dump_script(sc);
188 #endif
189
190 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
191 }
192
193 void
194 siop_reset(sc)
195 struct siop_softc *sc;
196 {
197 int i, j;
198 struct siop_lunsw *lunsw;
199
200 siop_common_reset(&sc->sc_c);
201
202 /* copy and patch the script */
203 if (sc->sc_c.features & SF_CHIP_RAM) {
204 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
205 siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
206 for (j = 0; j <
207 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
208 j++) {
209 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
210 E_abs_msgin_Used[j] * 4,
211 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
212 }
213 if (sc->sc_c.features & SF_CHIP_LED0) {
214 bus_space_write_region_4(sc->sc_c.sc_ramt,
215 sc->sc_c.sc_ramh,
216 Ent_led_on1, siop_led_on,
217 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
218 bus_space_write_region_4(sc->sc_c.sc_ramt,
219 sc->sc_c.sc_ramh,
220 Ent_led_on2, siop_led_on,
221 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
222 bus_space_write_region_4(sc->sc_c.sc_ramt,
223 sc->sc_c.sc_ramh,
224 Ent_led_off, siop_led_off,
225 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
226 }
227 } else {
228 for (j = 0;
229 j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
230 sc->sc_c.sc_script[j] = htole32(siop_script[j]);
231 }
232 for (j = 0; j <
233 (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
234 j++) {
235 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
236 htole32(sc->sc_c.sc_scriptaddr + Ent_msgin_space);
237 }
238 if (sc->sc_c.features & SF_CHIP_LED0) {
239 for (j = 0; j < (sizeof(siop_led_on) /
240 sizeof(siop_led_on[0])); j++)
241 sc->sc_c.sc_script[
242 Ent_led_on1 / sizeof(siop_led_on[0]) + j
243 ] = htole32(siop_led_on[j]);
244 for (j = 0; j < (sizeof(siop_led_on) /
245 sizeof(siop_led_on[0])); j++)
246 sc->sc_c.sc_script[
247 Ent_led_on2 / sizeof(siop_led_on[0]) + j
248 ] = htole32(siop_led_on[j]);
249 for (j = 0; j < (sizeof(siop_led_off) /
250 sizeof(siop_led_off[0])); j++)
251 sc->sc_c.sc_script[
252 Ent_led_off / sizeof(siop_led_off[0]) + j
253 ] = htole32(siop_led_off[j]);
254 }
255 }
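/*
 * script words between script_free_lo and script_free_hi are free for
 * the dynamically added lun switch and reselect/tag entries
 */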
256 sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
257 sc->script_free_hi = sc->sc_c.ram_size / 4;
258 sc->sc_ntargets = 0;
259
260 /* free used and unused lun switches */
261 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
262 #ifdef SIOP_DEBUG
263 printf("%s: free lunsw at offset %d\n",
264 sc->sc_c.sc_dev.dv_xname, lunsw->lunsw_off);
265 #endif
266 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
267 free(lunsw, M_DEVBUF);
268 }
269 TAILQ_INIT(&sc->lunsw_list);
270 /* restore reselect switch */
271 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
272 struct siop_target *target;
273 if (sc->sc_c.targets[i] == NULL)
274 continue;
275 #ifdef SIOP_DEBUG
276 printf("%s: restore sw for target %d\n",
277 sc->sc_c.sc_dev.dv_xname, i);
278 #endif
279 target = (struct siop_target *)sc->sc_c.targets[i];
280 free(target->lunsw, M_DEVBUF);
281 target->lunsw = siop_get_lunsw(sc);
282 if (target->lunsw == NULL) {
283 printf("%s: can't alloc lunsw for target %d\n",
284 sc->sc_c.sc_dev.dv_xname, i);
285 break;
286 }
287 siop_add_reselsw(sc, i);
288 }
289
290 /* start script */
291 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
292 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
293 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
294 }
295 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
296 sc->sc_c.sc_scriptaddr + Ent_reselect);
297 }
298
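/*
 * CALL_SCRIPT() (re)starts the SCRIPTS processor at the given entry point
 * by writing the DSP register; the #if 0 variant additionally prints the
 * DSA and DSP being used.
 */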
299 #if 0
300 #define CALL_SCRIPT(ent) do {\
301 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
302 siop_cmd->cmd_c.dsa, \
303 sc->sc_c.sc_scriptaddr + ent); \
304 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
305 } while (0)
306 #else
307 #define CALL_SCRIPT(ent) do {\
308 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
309 } while (0)
310 #endif
311
312 int
313 siop_intr(v)
314 void *v;
315 {
316 struct siop_softc *sc = v;
317 struct siop_target *siop_target;
318 struct siop_cmd *siop_cmd;
319 struct siop_lun *siop_lun;
320 struct scsipi_xfer *xs;
321 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
322 u_int32_t irqcode;
323 int need_reset = 0;
324 int offset, target, lun, tag;
325 bus_addr_t dsa;
326 struct siop_cbd *cbdp;
327 int freetarget = 0;
328 int restart = 0;
329
330 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
331 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
332 return 0;
333 INCSTAT(siop_stat_intr);
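/* INTF ("interrupt on the fly") is cleared by writing the bit back */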
334 if (istat & ISTAT_INTF) {
335 printf("INTRF\n");
336 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
337 SIOP_ISTAT, ISTAT_INTF);
338 }
339 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
340 (ISTAT_DIP | ISTAT_ABRT)) {
341 /* clear abort */
342 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
343 SIOP_ISTAT, 0);
344 }
345 /* use DSA to find the current siop_cmd */
346 siop_cmd = NULL;
347 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
348 TAILQ_FOREACH(cbdp, &sc->cmds, next) {
349 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
350 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
351 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
352 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
353 siop_table_sync(siop_cmd,
354 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
355 break;
356 }
357 }
358 if (siop_cmd) {
359 xs = siop_cmd->cmd_c.xs;
360 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
361 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
362 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
363 tag = siop_cmd->cmd_c.tag;
364 siop_lun = siop_target->siop_lun[lun];
365 #ifdef DIAGNOSTIC
366 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
367 printf("siop_cmd (lun %d) for DSA 0x%x "
368 "not active (%d)\n", lun, (u_int)dsa,
369 siop_cmd->cmd_c.status);
370 xs = NULL;
371 siop_target = NULL;
372 target = -1;
373 lun = -1;
374 tag = -1;
375 siop_lun = NULL;
376 siop_cmd = NULL;
377 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
378 printf("siop_cmd (lun %d tag %d) not in siop_lun "
379 "active (%p != %p)\n", lun, tag, siop_cmd,
380 siop_lun->siop_tag[tag].active);
381 }
382 #endif
383 } else {
384 xs = NULL;
385 siop_target = NULL;
386 target = -1;
387 lun = -1;
388 tag = -1;
389 siop_lun = NULL;
390 }
391 if (istat & ISTAT_DIP) {
392 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
393 SIOP_DSTAT);
394 if (dstat & DSTAT_ABRT) {
395 /* was probably generated by a bus reset IOCTL */
396 if ((dstat & DSTAT_DFE) == 0)
397 siop_clearfifo(&sc->sc_c);
398 goto reset;
399 }
400 if (dstat & DSTAT_SSI) {
401 printf("single step dsp 0x%08x dsa 0x08%x\n",
402 (int)(bus_space_read_4(sc->sc_c.sc_rt,
403 sc->sc_c.sc_rh, SIOP_DSP) -
404 sc->sc_c.sc_scriptaddr),
405 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
406 SIOP_DSA));
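/*
 * single-step debug: if no other interrupt is pending, resume the
 * script at the next instruction by setting DCNTL_STD
 */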
407 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
408 (istat & ISTAT_SIP) == 0) {
409 bus_space_write_1(sc->sc_c.sc_rt,
410 sc->sc_c.sc_rh, SIOP_DCNTL,
411 bus_space_read_1(sc->sc_c.sc_rt,
412 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
413 }
414 return 1;
415 }
416
417 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
418 printf("DMA IRQ:");
419 if (dstat & DSTAT_IID)
420 printf(" Illegal instruction");
421 if (dstat & DSTAT_BF)
422 printf(" bus fault");
423 if (dstat & DSTAT_MDPE)
424 printf(" parity");
425 if (dstat & DSTAT_DFE)
426 printf(" DMA fifo empty");
427 else
428 siop_clearfifo(&sc->sc_c);
429 printf(", DSP=0x%x DSA=0x%x: ",
430 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
431 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
432 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
433 if (siop_cmd)
434 printf("last msg_in=0x%x status=0x%x\n",
435 siop_cmd->cmd_tables->msg_in[0],
436 le32toh(siop_cmd->cmd_tables->status));
437 else
438 printf("%s: current DSA invalid\n",
439 sc->sc_c.sc_dev.dv_xname);
440 need_reset = 1;
441 }
442 }
443 if (istat & ISTAT_SIP) {
444 if (istat & ISTAT_DIP)
445 delay(10);
446 /*
447 * Can't read sist0 & sist1 independently, or we would have to
448 * insert a delay between the two reads
449 */
450 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
451 SIOP_SIST0);
452 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
453 SIOP_SSTAT1);
454 #ifdef SIOP_DEBUG_INTR
455 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
456 "DSA=0x%x DSP=0x%lx\n", sist,
457 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
458 SIOP_SSTAT1),
459 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
460 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
461 SIOP_DSP) -
462 sc->sc_c.sc_scriptaddr));
463 #endif
464 if (sist & SIST0_RST) {
465 siop_handle_reset(sc);
466 /* no table to flush here */
467 return 1;
468 }
469 if (sist & SIST0_SGE) {
470 if (siop_cmd)
471 scsipi_printaddr(xs->xs_periph);
472 else
473 printf("%s:", sc->sc_c.sc_dev.dv_xname);
474 printf("scsi gross error\n");
475 goto reset;
476 }
477 if ((sist & SIST0_MA) && need_reset == 0) {
478 if (siop_cmd) {
479 int scratcha0;
480 dstat = bus_space_read_1(sc->sc_c.sc_rt,
481 sc->sc_c.sc_rh, SIOP_DSTAT);
482 /*
483 * first restore DSA, in case we were in a S/G
484 * operation.
485 */
486 bus_space_write_4(sc->sc_c.sc_rt,
487 sc->sc_c.sc_rh,
488 SIOP_DSA, siop_cmd->cmd_c.dsa);
489 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
490 sc->sc_c.sc_rh, SIOP_SCRATCHA);
491 switch (sstat1 & SSTAT1_PHASE_MASK) {
492 case SSTAT1_PHASE_STATUS:
493 /*
494 * the previous phase may be aborted for any reason
495 * (for example, the target has less data to
496 * transfer than requested). Compute the resid and
497 * just go to status; the command should
498 * terminate.
499 */
500 INCSTAT(siop_stat_intr_shortxfer);
501 if (scratcha0 & A_flag_data)
502 siop_ma(&siop_cmd->cmd_c);
503 else if ((dstat & DSTAT_DFE) == 0)
504 siop_clearfifo(&sc->sc_c);
505 CALL_SCRIPT(Ent_status);
506 return 1;
507 case SSTAT1_PHASE_MSGIN:
508 /*
509 * the target may be ready to disconnect.
510 * Compute the resid, which will be used later
511 * if a save data pointer is needed.
512 */
513 INCSTAT(siop_stat_intr_xferdisc);
514 if (scratcha0 & A_flag_data)
515 siop_ma(&siop_cmd->cmd_c);
516 else if ((dstat & DSTAT_DFE) == 0)
517 siop_clearfifo(&sc->sc_c);
518 bus_space_write_1(sc->sc_c.sc_rt,
519 sc->sc_c.sc_rh, SIOP_SCRATCHA,
520 scratcha0 & ~A_flag_data);
521 CALL_SCRIPT(Ent_msgin);
522 return 1;
523 }
524 printf("%s: unexpected phase mismatch %d\n",
525 sc->sc_c.sc_dev.dv_xname,
526 sstat1 & SSTAT1_PHASE_MASK);
527 } else {
528 printf("%s: phase mismatch without command\n",
529 sc->sc_c.sc_dev.dv_xname);
530 }
531 need_reset = 1;
532 }
533 if (sist & SIST0_PAR) {
534 /* parity error, reset */
535 if (siop_cmd)
536 scsipi_printaddr(xs->xs_periph);
537 else
538 printf("%s:", sc->sc_c.sc_dev.dv_xname);
539 printf("parity error\n");
540 goto reset;
541 }
542 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
543 /* selection time out, assume there's no device here */
544 if (siop_cmd) {
545 siop_cmd->cmd_c.status = CMDST_DONE;
546 xs->error = XS_SELTIMEOUT;
547 freetarget = 1;
548 goto end;
549 } else {
550 printf("%s: selection timeout without "
551 "command\n", sc->sc_c.sc_dev.dv_xname);
552 need_reset = 1;
553 }
554 }
555 if (sist & SIST0_UDC) {
556 /*
557 * unexpected disconnect. Usually the target signals
558 * a fatal condition this way. Attempt to get sense.
559 */
560 if (siop_cmd) {
561 siop_cmd->cmd_tables->status =
562 htole32(SCSI_CHECK);
563 goto end;
564 }
565 printf("%s: unexpected disconnect without "
566 "command\n", sc->sc_c.sc_dev.dv_xname);
567 goto reset;
568 }
569 if (sist & (SIST1_SBMC << 8)) {
570 /* SCSI bus mode change */
571 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
572 goto reset;
573 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
574 /*
575 * we have a script interrupt, it will
576 * restart the script.
577 */
578 goto scintr;
579 }
580 /*
581 * else we have to restart it ourselves, at the
582 * interrupted instruction.
583 */
584 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
585 SIOP_DSP,
586 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
587 SIOP_DSP) - 8);
588 return 1;
589 }
590 /* Else it's an unhandled exception (for now). */
591 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
592 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
593 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
594 SIOP_SSTAT1),
595 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
596 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
597 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
598 if (siop_cmd) {
599 siop_cmd->cmd_c.status = CMDST_DONE;
600 xs->error = XS_SELTIMEOUT;
601 goto end;
602 }
603 need_reset = 1;
604 }
605 if (need_reset) {
606 reset:
607 /* fatal error, reset the bus */
608 siop_resetbus(&sc->sc_c);
609 /* no table to flush here */
610 return 1;
611 }
612
613 scintr:
614 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
615 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
616 SIOP_DSPS);
617 #ifdef SIOP_DEBUG_INTR
618 printf("script interrupt 0x%x\n", irqcode);
619 #endif
620 /*
621 * no command, or an inactive command, is only valid for a
622 * reselect interrupt
623 */
624 if ((irqcode & 0x80) == 0) {
625 if (siop_cmd == NULL) {
626 printf(
627 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
628 sc->sc_c.sc_dev.dv_xname, irqcode);
629 goto reset;
630 }
631 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
632 printf("%s: command with invalid status "
633 "(IRQ code 0x%x current status %d) !\n",
634 sc->sc_c.sc_dev.dv_xname,
635 irqcode, siop_cmd->cmd_c.status);
636 xs = NULL;
637 }
638 }
639 switch(irqcode) {
640 case A_int_err:
641 printf("error, DSP=0x%x\n",
642 (int)(bus_space_read_4(sc->sc_c.sc_rt,
643 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
644 if (xs) {
645 xs->error = XS_SELTIMEOUT;
646 goto end;
647 } else {
648 goto reset;
649 }
650 case A_int_reseltarg:
651 printf("%s: reselect with invalid target\n",
652 sc->sc_c.sc_dev.dv_xname);
653 goto reset;
654 case A_int_resellun:
655 INCSTAT(siop_stat_intr_lunresel);
656 target = bus_space_read_1(sc->sc_c.sc_rt,
657 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
658 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
659 SIOP_SCRATCHA + 1);
660 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
661 SIOP_SCRATCHA + 2);
662 siop_target =
663 (struct siop_target *)sc->sc_c.targets[target];
664 if (siop_target == NULL) {
665 printf("%s: reselect with invalid target %d\n",
666 sc->sc_c.sc_dev.dv_xname, target);
667 goto reset;
668 }
669 siop_lun = siop_target->siop_lun[lun];
670 if (siop_lun == NULL) {
671 printf("%s: target %d reselect with invalid "
672 "lun %d\n", sc->sc_c.sc_dev.dv_xname,
673 target, lun);
674 goto reset;
675 }
676 if (siop_lun->siop_tag[tag].active == NULL) {
677 printf("%s: target %d lun %d tag %d reselect "
678 "without command\n",
679 sc->sc_c.sc_dev.dv_xname,
680 target, lun, tag);
681 goto reset;
682 }
683 siop_cmd = siop_lun->siop_tag[tag].active;
684 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
685 SIOP_DSP, siop_cmd->cmd_c.dsa +
686 sizeof(struct siop_common_xfer) +
687 Ent_ldsa_reload_dsa);
688 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
689 return 1;
690 case A_int_reseltag:
691 printf("%s: reselect with invalid tag\n",
692 sc->sc_c.sc_dev.dv_xname);
693 goto reset;
694 case A_int_msgin:
695 {
696 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
697 sc->sc_c.sc_rh, SIOP_SFBR);
698 if (msgin == MSG_MESSAGE_REJECT) {
699 int msg, extmsg;
700 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
701 /*
702 * message was part of an identify +
703 * something else. Identify shouldn't
704 * have been rejected.
705 */
706 msg =
707 siop_cmd->cmd_tables->msg_out[1];
708 extmsg =
709 siop_cmd->cmd_tables->msg_out[3];
710 } else {
711 msg = siop_cmd->cmd_tables->msg_out[0];
712 extmsg =
713 siop_cmd->cmd_tables->msg_out[2];
714 }
715 if (msg == MSG_MESSAGE_REJECT) {
716 /* MSG_REJECT for a MSG_REJECT !*/
717 if (xs)
718 scsipi_printaddr(xs->xs_periph);
719 else
720 printf("%s: ",
721 sc->sc_c.sc_dev.dv_xname);
722 printf("our reject message was "
723 "rejected\n");
724 goto reset;
725 }
726 if (msg == MSG_EXTENDED &&
727 extmsg == MSG_EXT_WDTR) {
728 /* WDTR rejected, initiate sync */
729 if ((siop_target->target_c.flags &
730 TARF_SYNC) == 0) {
731 siop_target->target_c.status =
732 TARST_OK;
733 siop_update_xfer_mode(&sc->sc_c,
734 target);
735 /* no table to flush here */
736 CALL_SCRIPT(Ent_msgin_ack);
737 return 1;
738 }
739 siop_target->target_c.status =
740 TARST_SYNC_NEG;
741 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
742 sc->sc_c.st_minsync,
743 sc->sc_c.maxoff);
744 siop_table_sync(siop_cmd,
745 BUS_DMASYNC_PREREAD |
746 BUS_DMASYNC_PREWRITE);
747 CALL_SCRIPT(Ent_send_msgout);
748 return 1;
749 } else if (msg == MSG_EXTENDED &&
750 extmsg == MSG_EXT_SDTR) {
751 /* sync rejected */
752 siop_target->target_c.offset = 0;
753 siop_target->target_c.period = 0;
754 siop_target->target_c.status = TARST_OK;
755 siop_update_xfer_mode(&sc->sc_c,
756 target);
757 /* no table to flush here */
758 CALL_SCRIPT(Ent_msgin_ack);
759 return 1;
760 } else if (msg == MSG_SIMPLE_Q_TAG ||
761 msg == MSG_HEAD_OF_Q_TAG ||
762 msg == MSG_ORDERED_Q_TAG) {
763 if (siop_handle_qtag_reject(
764 siop_cmd) == -1)
765 goto reset;
766 CALL_SCRIPT(Ent_msgin_ack);
767 return 1;
768 }
769 if (xs)
770 scsipi_printaddr(xs->xs_periph);
771 else
772 printf("%s: ",
773 sc->sc_c.sc_dev.dv_xname);
774 if (msg == MSG_EXTENDED) {
775 printf("scsi message reject, extended "
776 "message sent was 0x%x\n", extmsg);
777 } else {
778 printf("scsi message reject, message "
779 "sent was 0x%x\n", msg);
780 }
781 /* no table to flush here */
782 CALL_SCRIPT(Ent_msgin_ack);
783 return 1;
784 }
785 if (msgin == MSG_IGN_WIDE_RESIDUE) {
786 /* use the extmsgdata table to get the second byte */
787 siop_cmd->cmd_tables->t_extmsgdata.count =
788 htole32(1);
789 siop_table_sync(siop_cmd,
790 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
791 CALL_SCRIPT(Ent_get_extmsgdata);
792 return 1;
793 }
794 if (xs)
795 scsipi_printaddr(xs->xs_periph);
796 else
797 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
798 printf("unhandled message 0x%x\n",
799 siop_cmd->cmd_tables->msg_in[0]);
800 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
801 siop_cmd->cmd_tables->t_msgout.count= htole32(1);
802 siop_table_sync(siop_cmd,
803 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
804 CALL_SCRIPT(Ent_send_msgout);
805 return 1;
806 }
807 case A_int_extmsgin:
808 #ifdef SIOP_DEBUG_INTR
809 printf("extended message: msg 0x%x len %d\n",
810 siop_cmd->cmd_tables->msg_in[2],
811 siop_cmd->cmd_tables->msg_in[1]);
812 #endif
813 if (siop_cmd->cmd_tables->msg_in[1] >
814 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
815 printf("%s: extended message too big (%d)\n",
816 sc->sc_c.sc_dev.dv_xname,
817 siop_cmd->cmd_tables->msg_in[1]);
818 siop_cmd->cmd_tables->t_extmsgdata.count =
819 htole32(siop_cmd->cmd_tables->msg_in[1] - 1);
820 siop_table_sync(siop_cmd,
821 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
822 CALL_SCRIPT(Ent_get_extmsgdata);
823 return 1;
824 case A_int_extmsgdata:
825 #ifdef SIOP_DEBUG_INTR
826 {
827 int i;
828 printf("extended message: 0x%x, data:",
829 siop_cmd->cmd_tables->msg_in[2]);
830 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
831 i++)
832 printf(" 0x%x",
833 siop_cmd->cmd_tables->msg_in[i]);
834 printf("\n");
835 }
836 #endif
837 if (siop_cmd->cmd_tables->msg_in[0] ==
838 MSG_IGN_WIDE_RESIDUE) {
839 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
840 if (siop_cmd->cmd_tables->msg_in[3] != 1)
841 printf("MSG_IGN_WIDE_RESIDUE: "
842 "bad len %d\n",
843 siop_cmd->cmd_tables->msg_in[3]);
844 switch (siop_iwr(&siop_cmd->cmd_c)) {
845 case SIOP_NEG_MSGOUT:
846 siop_table_sync(siop_cmd,
847 BUS_DMASYNC_PREREAD |
848 BUS_DMASYNC_PREWRITE);
849 CALL_SCRIPT(Ent_send_msgout);
850 return(1);
851 case SIOP_NEG_ACK:
852 CALL_SCRIPT(Ent_msgin_ack);
853 return(1);
854 default:
855 panic("invalid retval from "
856 "siop_iwr()");
857 }
858 return(1);
859 }
860 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
861 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
862 case SIOP_NEG_MSGOUT:
863 siop_update_scntl3(sc,
864 siop_cmd->cmd_c.siop_target);
865 siop_table_sync(siop_cmd,
866 BUS_DMASYNC_PREREAD |
867 BUS_DMASYNC_PREWRITE);
868 CALL_SCRIPT(Ent_send_msgout);
869 return(1);
870 case SIOP_NEG_ACK:
871 siop_update_scntl3(sc,
872 siop_cmd->cmd_c.siop_target);
873 CALL_SCRIPT(Ent_msgin_ack);
874 return(1);
875 default:
876 panic("invalid retval from "
877 "siop_wdtr_neg()");
878 }
879 return(1);
880 }
881 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
882 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
883 case SIOP_NEG_MSGOUT:
884 siop_update_scntl3(sc,
885 siop_cmd->cmd_c.siop_target);
886 siop_table_sync(siop_cmd,
887 BUS_DMASYNC_PREREAD |
888 BUS_DMASYNC_PREWRITE);
889 CALL_SCRIPT(Ent_send_msgout);
890 return(1);
891 case SIOP_NEG_ACK:
892 siop_update_scntl3(sc,
893 siop_cmd->cmd_c.siop_target);
894 CALL_SCRIPT(Ent_msgin_ack);
895 return(1);
896 default:
897 panic("invalid retval from "
898 "siop_wdtr_neg()");
899 }
900 return(1);
901 }
902 /* send a message reject */
903 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
904 siop_cmd->cmd_tables->t_msgout.count = htole32(1);
905 siop_table_sync(siop_cmd,
906 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
907 CALL_SCRIPT(Ent_send_msgout);
908 return 1;
909 case A_int_disc:
910 INCSTAT(siop_stat_intr_sdp);
911 offset = bus_space_read_1(sc->sc_c.sc_rt,
912 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
913 #ifdef SIOP_DEBUG_DR
914 printf("disconnect offset %d\n", offset);
915 #endif
916 siop_sdp(&siop_cmd->cmd_c, offset);
917 /* we start again with no offset */
918 siop_cmd->saved_offset = SIOP_NOOFFSET;
919 siop_table_sync(siop_cmd,
920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
921 CALL_SCRIPT(Ent_script_sched);
922 return 1;
923 case A_int_saveoffset:
924 INCSTAT(siop_stat_intr_saveoffset);
925 offset = bus_space_read_1(sc->sc_c.sc_rt,
926 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
927 #ifdef SIOP_DEBUG_DR
928 printf("saveoffset offset %d\n", offset);
929 #endif
930 siop_cmd->saved_offset = offset;
931 CALL_SCRIPT(Ent_script_sched);
932 return 1;
933 case A_int_resfail:
934 printf("reselect failed\n");
935 CALL_SCRIPT(Ent_script_sched);
936 return 1;
937 case A_int_done:
938 if (xs == NULL) {
939 printf("%s: done without command, DSA=0x%lx\n",
940 sc->sc_c.sc_dev.dv_xname,
941 (u_long)siop_cmd->cmd_c.dsa);
942 siop_cmd->cmd_c.status = CMDST_FREE;
943 CALL_SCRIPT(Ent_script_sched);
944 return 1;
945 }
946 #ifdef SIOP_DEBUG_INTR
947 printf("done, DSA=0x%lx target id 0x%x last msg "
948 "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
949 le32toh(siop_cmd->cmd_tables->id),
950 siop_cmd->cmd_tables->msg_in[0],
951 le32toh(siop_cmd->cmd_tables->status));
952 #endif
953 INCSTAT(siop_stat_intr_done);
954 /* update resid. */
955 offset = bus_space_read_1(sc->sc_c.sc_rt,
956 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
957 /*
958 * if we got a disconnect between the last data phase
959 * and the status phase, offset will be 0. In this
960 * case, siop_cmd->saved_offset will have the proper
961 * value if it got updated by the controller
962 */
963 if (offset == 0 &&
964 siop_cmd->saved_offset != SIOP_NOOFFSET)
965 offset = siop_cmd->saved_offset;
966 siop_update_resid(&siop_cmd->cmd_c, offset);
967 siop_cmd->cmd_c.status = CMDST_DONE;
968 goto end;
969 default:
970 printf("unknown irqcode %x\n", irqcode);
971 if (xs) {
972 xs->error = XS_SELTIMEOUT;
973 goto end;
974 }
975 goto reset;
976 }
977 return 1;
978 }
979 /* We just shouldn't get here */
980 panic("siop_intr: I shouldn't be there !");
981
982 end:
983 /*
984 * restart the script now if the command completed properly.
985 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
986 * the queue
987 */
988 xs->status = le32toh(siop_cmd->cmd_tables->status);
989 if (xs->status == SCSI_OK)
990 CALL_SCRIPT(Ent_script_sched);
991 else
992 restart = 1;
993 siop_lun->siop_tag[tag].active = NULL;
994 siop_scsicmd_end(siop_cmd);
995 if (freetarget && siop_target->target_c.status == TARST_PROBING)
996 siop_del_dev(sc, target, lun);
997 if (restart)
998 CALL_SCRIPT(Ent_script_sched);
999 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1000 /* a command terminated, so we have free slots now */
1001 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1002 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1003 }
1004
1005 return 1;
1006 }
1007
1008 void
1009 siop_scsicmd_end(siop_cmd)
1010 struct siop_cmd *siop_cmd;
1011 {
1012 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1013 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1014
1015 switch(xs->status) {
1016 case SCSI_OK:
1017 xs->error = XS_NOERROR;
1018 break;
1019 case SCSI_BUSY:
1020 xs->error = XS_BUSY;
1021 break;
1022 case SCSI_CHECK:
1023 xs->error = XS_BUSY;
1024 /* remove commands in the queue and scheduler */
1025 siop_unqueue(sc, xs->xs_periph->periph_target,
1026 xs->xs_periph->periph_lun);
1027 break;
1028 case SCSI_QUEUE_FULL:
1029 INCSTAT(siop_stat_intr_qfull);
1030 #ifdef SIOP_DEBUG
1031 printf("%s:%d:%d: queue full (tag %d)\n",
1032 sc->sc_c.sc_dev.dv_xname,
1033 xs->xs_periph->periph_target,
1034 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1035 #endif
1036 xs->error = XS_BUSY;
1037 break;
1038 case SCSI_SIOP_NOCHECK:
1039 /*
1040 * don't check status, xs->error is already valid
1041 */
1042 break;
1043 case SCSI_SIOP_NOSTATUS:
1044 /*
1045 * the status byte was not updated, cmd was
1046 * aborted
1047 */
1048 xs->error = XS_SELTIMEOUT;
1049 break;
1050 default:
1051 scsipi_printaddr(xs->xs_periph);
1052 printf("invalid status code %d\n", xs->status);
1053 xs->error = XS_DRIVER_STUFFUP;
1054 }
1055 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1056 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
1057 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1058 (xs->xs_control & XS_CTL_DATA_IN) ?
1059 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1060 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1061 }
1062 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1063 if ((xs->xs_control & XS_CTL_POLL) == 0)
1064 callout_stop(&xs->xs_callout);
1065 siop_cmd->cmd_c.status = CMDST_FREE;
1066 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1067 #if 0
1068 if (xs->resid != 0)
1069 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1070 #endif
1071 scsipi_done (xs);
1072 }
1073
1074 void
1075 siop_unqueue(sc, target, lun)
1076 struct siop_softc *sc;
1077 int target;
1078 int lun;
1079 {
1080 int slot, tag;
1081 struct siop_cmd *siop_cmd;
1082 struct siop_lun *siop_lun =
1083 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1084
1085 /* first make sure to read valid data */
1086 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1087
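/*
 * each scheduler slot is two script words: a JUMP instruction and the
 * address of the command's ldsa_select entry; a first word of
 * 0x80000000 (JUMP IF FALSE, never taken) marks a free slot
 */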
1088 for (tag = 1; tag < SIOP_NTAG; tag++) {
1089 /* look for commands in the scheduler, not yet started */
1090 if (siop_lun->siop_tag[tag].active == NULL)
1091 continue;
1092 siop_cmd = siop_lun->siop_tag[tag].active;
1093 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1094 if (siop_script_read(sc,
1095 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1096 siop_cmd->cmd_c.dsa +
1097 sizeof(struct siop_common_xfer) +
1098 Ent_ldsa_select)
1099 break;
1100 }
1101 if (slot > sc->sc_currschedslot)
1102 continue; /* didn't find it */
1103 if (siop_script_read(sc,
1104 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1105 continue; /* already started */
1106 /* clear the slot */
1107 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1108 0x80000000);
1109 /* ask to requeue */
1110 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1111 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1112 siop_lun->siop_tag[tag].active = NULL;
1113 siop_scsicmd_end(siop_cmd);
1114 }
1115 /* update sc_currschedslot */
1116 sc->sc_currschedslot = 0;
1117 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1118 if (siop_script_read(sc,
1119 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1120 sc->sc_currschedslot = slot;
1121 }
1122 }
1123
1124 /*
1125 * handle a rejected queue tag message: the command will run untagged,
1126 * so we have to adjust the reselect script.
1127 */
1128 int
1129 siop_handle_qtag_reject(siop_cmd)
1130 struct siop_cmd *siop_cmd;
1131 {
1132 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1133 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1134 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1135 int tag = siop_cmd->cmd_tables->msg_out[2];
1136 struct siop_lun *siop_lun =
1137 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1138
1139 #ifdef SIOP_DEBUG
1140 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1141 sc->sc_c.sc_dev.dv_xname, target, lun, tag, siop_cmd->cmd_c.tag,
1142 siop_cmd->cmd_c.status);
1143 #endif
1144
1145 if (siop_lun->siop_tag[0].active != NULL) {
1146 printf("%s: untagged command already running for target %d "
1147 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1148 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1149 return -1;
1150 }
1151 /* clear tag slot */
1152 siop_lun->siop_tag[tag].active = NULL;
1153 /* add command to non-tagged slot */
1154 siop_lun->siop_tag[0].active = siop_cmd;
1155 siop_cmd->cmd_c.tag = 0;
1156 /* adjust reselect script if there is one */
1157 if (siop_lun->siop_tag[0].reseloff > 0) {
1158 siop_script_write(sc,
1159 siop_lun->siop_tag[0].reseloff + 1,
1160 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1161 Ent_ldsa_reload_dsa);
1162 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1163 }
1164 return 0;
1165 }
1166
1167 /*
1168 * handle a bus reset: reset the chip, unqueue all active commands,
1169 * free all target structs and report lossage to the upper layer.
1170 * As the upper layer may requeue immediately, we have to first store
1171 * all active commands in a temporary queue.
1172 */
1173 void
1174 siop_handle_reset(sc)
1175 struct siop_softc *sc;
1176 {
1177 struct siop_cmd *siop_cmd;
1178 struct siop_lun *siop_lun;
1179 int target, lun, tag;
1180 /*
1181 * scsi bus reset. reset the chip and restart
1182 * the queue. Need to clean up all active commands
1183 */
1184 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1185 /* stop, reset and restart the chip */
1186 siop_reset(sc);
1187 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1188 /* chip has been reset, all slots are free now */
1189 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1190 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1191 }
1192 /*
1193 * Process all commands: first, the commands being executed
1194 */
1195 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1196 target++) {
1197 if (sc->sc_c.targets[target] == NULL)
1198 continue;
1199 for (lun = 0; lun < 8; lun++) {
1200 struct siop_target *siop_target =
1201 (struct siop_target *)sc->sc_c.targets[target];
1202 siop_lun = siop_target->siop_lun[lun];
1203 if (siop_lun == NULL)
1204 continue;
1205 for (tag = 0; tag <
1206 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1207 SIOP_NTAG : 1);
1208 tag++) {
1209 siop_cmd = siop_lun->siop_tag[tag].active;
1210 if (siop_cmd == NULL)
1211 continue;
1212 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1213 printf("command with tag id %d reset\n", tag);
1214 siop_cmd->cmd_c.xs->error =
1215 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1216 XS_TIMEOUT : XS_RESET;
1217 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1218 siop_lun->siop_tag[tag].active = NULL;
1219 siop_cmd->cmd_c.status = CMDST_DONE;
1220 siop_scsicmd_end(siop_cmd);
1221 }
1222 }
1223 sc->sc_c.targets[target]->status = TARST_ASYNC;
1224 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1225 sc->sc_c.targets[target]->period =
1226 sc->sc_c.targets[target]->offset = 0;
1227 siop_update_xfer_mode(&sc->sc_c, target);
1228 }
1229
1230 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1231 }
1232
1233 void
1234 siop_scsipi_request(chan, req, arg)
1235 struct scsipi_channel *chan;
1236 scsipi_adapter_req_t req;
1237 void *arg;
1238 {
1239 struct scsipi_xfer *xs;
1240 struct scsipi_periph *periph;
1241 struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1242 struct siop_cmd *siop_cmd;
1243 struct siop_target *siop_target;
1244 int s, error, i;
1245 int target;
1246 int lun;
1247
1248 switch (req) {
1249 case ADAPTER_REQ_RUN_XFER:
1250 xs = arg;
1251 periph = xs->xs_periph;
1252 target = periph->periph_target;
1253 lun = periph->periph_lun;
1254
1255 s = splbio();
1256 #ifdef SIOP_DEBUG_SCHED
1257 printf("starting cmd for %d:%d\n", target, lun);
1258 #endif
1259 siop_cmd = TAILQ_FIRST(&sc->free_list);
1260 if (siop_cmd == NULL) {
1261 xs->error = XS_RESOURCE_SHORTAGE;
1262 scsipi_done(xs);
1263 splx(s);
1264 return;
1265 }
1266 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1267 #ifdef DIAGNOSTIC
1268 if (siop_cmd->cmd_c.status != CMDST_FREE)
1269 panic("siop_scsicmd: new cmd not free");
1270 #endif
1271 siop_target = (struct siop_target*)sc->sc_c.targets[target];
1272 if (siop_target == NULL) {
1273 #ifdef SIOP_DEBUG
1274 printf("%s: alloc siop_target for target %d\n",
1275 sc->sc_c.sc_dev.dv_xname, target);
1276 #endif
1277 sc->sc_c.targets[target] =
1278 malloc(sizeof(struct siop_target),
1279 M_DEVBUF, M_NOWAIT);
1280 if (sc->sc_c.targets[target] == NULL) {
1281 printf("%s: can't malloc memory for "
1282 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1283 target);
1284 xs->error = XS_RESOURCE_SHORTAGE;
1285 scsipi_done(xs);
1286 splx(s);
1287 return;
1288 }
1289 siop_target =
1290 (struct siop_target*)sc->sc_c.targets[target];
1291 siop_target->target_c.status = TARST_PROBING;
1292 siop_target->target_c.flags = 0;
1293 siop_target->target_c.id =
1294 sc->sc_c.clock_div << 24; /* scntl3 */
1295 siop_target->target_c.id |= target << 16; /* id */
1296 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1297
1298 /* get a lun switch script */
1299 siop_target->lunsw = siop_get_lunsw(sc);
1300 if (siop_target->lunsw == NULL) {
1301 printf("%s: can't alloc lunsw for target %d\n",
1302 sc->sc_c.sc_dev.dv_xname, target);
1303 xs->error = XS_RESOURCE_SHORTAGE;
1304 scsipi_done(xs);
1305 splx(s);
1306 return;
1307 }
1308 for (i=0; i < 8; i++)
1309 siop_target->siop_lun[i] = NULL;
1310 siop_add_reselsw(sc, target);
1311 }
1312 if (siop_target->siop_lun[lun] == NULL) {
1313 siop_target->siop_lun[lun] =
1314 malloc(sizeof(struct siop_lun), M_DEVBUF,
1315 M_NOWAIT|M_ZERO);
1316 if (siop_target->siop_lun[lun] == NULL) {
1317 printf("%s: can't alloc siop_lun for "
1318 "target %d lun %d\n",
1319 sc->sc_c.sc_dev.dv_xname, target, lun);
1320 xs->error = XS_RESOURCE_SHORTAGE;
1321 scsipi_done(xs);
1322 splx(s);
1323 return;
1324 }
1325 }
1326 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1327 siop_cmd->cmd_c.xs = xs;
1328 siop_cmd->cmd_c.flags = 0;
1329 siop_cmd->cmd_c.status = CMDST_READY;
1330
1331 /* load the DMA maps */
1332 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1333 siop_cmd->cmd_c.dmamap_cmd,
1334 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1335 if (error) {
1336 printf("%s: unable to load cmd DMA map: %d\n",
1337 sc->sc_c.sc_dev.dv_xname, error);
1338 xs->error = XS_DRIVER_STUFFUP;
1339 scsipi_done(xs);
1340 splx(s);
1341 return;
1342 }
1343 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1344 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1345 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1346 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1347 ((xs->xs_control & XS_CTL_DATA_IN) ?
1348 BUS_DMA_READ : BUS_DMA_WRITE));
1349 if (error) {
1350 printf("%s: unable to load cmd DMA map: %d",
1351 sc->sc_c.sc_dev.dv_xname, error);
1352 xs->error = XS_DRIVER_STUFFUP;
1353 scsipi_done(xs);
1354 bus_dmamap_unload(sc->sc_c.sc_dmat,
1355 siop_cmd->cmd_c.dmamap_cmd);
1356 splx(s);
1357 return;
1358 }
1359 bus_dmamap_sync(sc->sc_c.sc_dmat,
1360 siop_cmd->cmd_c.dmamap_data, 0,
1361 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1362 (xs->xs_control & XS_CTL_DATA_IN) ?
1363 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1364 }
1365 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1366 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1367 BUS_DMASYNC_PREWRITE);
1368
1369 if (xs->xs_tag_type) {
1370 /* use tag_id + 1, tag 0 is reserved for untagged cmds */
1371 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1372 } else {
1373 siop_cmd->cmd_c.tag = 0;
1374 }
1375 siop_setuptables(&siop_cmd->cmd_c);
1376 siop_cmd->saved_offset = SIOP_NOOFFSET;
1377 siop_table_sync(siop_cmd,
1378 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1379 siop_start(sc, siop_cmd);
1380 if (xs->xs_control & XS_CTL_POLL) {
1381 /* poll for command completion */
1382 while ((xs->xs_status & XS_STS_DONE) == 0) {
1383 delay(1000);
1384 siop_intr(sc);
1385 }
1386 }
1387 splx(s);
1388 return;
1389
1390 case ADAPTER_REQ_GROW_RESOURCES:
1391 #ifdef SIOP_DEBUG
1392 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1393 sc->sc_c.sc_adapt.adapt_openings);
1394 #endif
1395 siop_morecbd(sc);
1396 return;
1397
1398 case ADAPTER_REQ_SET_XFER_MODE:
1399 {
1400 struct scsipi_xfer_mode *xm = arg;
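/*
 * record the periph capabilities (tagged queueing, wide, sync) in the
 * target flags and, if a negotiation is needed, reset the target state
 * to TARST_ASYNC so the transfer mode gets renegotiated
 */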
1401 if (sc->sc_c.targets[xm->xm_target] == NULL)
1402 return;
1403 s = splbio();
1404 if (xm->xm_mode & PERIPH_CAP_TQING)
1405 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1406 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1407 (sc->sc_c.features & SF_BUS_WIDE))
1408 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1409 if (xm->xm_mode & PERIPH_CAP_SYNC)
1410 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1411 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1412 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1413 sc->sc_c.targets[xm->xm_target]->status =
1414 TARST_ASYNC;
1415
1416 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1417 if (scsipi_lookup_periph(chan,
1418 xm->xm_target, lun) != NULL) {
1419 /* allocate a lun sw entry for this device */
1420 siop_add_dev(sc, xm->xm_target, lun);
1421 }
1422 }
1423
1424 splx(s);
1425 }
1426 }
1427 }
1428
1429 static void
1430 siop_start(sc, siop_cmd)
1431 struct siop_softc *sc;
1432 struct siop_cmd *siop_cmd;
1433 {
1434 struct siop_lun *siop_lun;
1435 struct siop_xfer *siop_xfer;
1436 u_int32_t dsa;
1437 int timeout;
1438 int target, lun, slot;
1439
1440 /*
1441 * first make sure to read valid data
1442 */
1443 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1444
1445 /*
1446 * The queue management here is a bit tricky: the script always looks
1447 * at the slots from first to last, so if we always used the first
1448 * free slot, commands could stay at the tail of the queue ~forever.
1449 * The algorithm used here is to restart from the head when we know
1450 * that the queue is empty, and only add commands after the last one.
1451 * When we're at the end of the queue, wait for the script to clear it.
1452 * The best thing to do here would be to implement a circular queue,
1453 * but using only 53c720 features this can be "interesting".
1454 * A mid-way solution could be to implement 2 queues and swap orders.
1455 */
1456 slot = sc->sc_currschedslot;
1457 /*
1458 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1459 * free. As this is the last used slot, all previous slots are free,
1460 * so we can restart from 0.
1461 */
1462 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1463 0x80000000) {
1464 slot = sc->sc_currschedslot = 0;
1465 } else {
1466 slot++;
1467 }
1468 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1469 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1470 siop_lun =
1471 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1472 /* if non-tagged command active, panic: this shouldn't happen */
1473 if (siop_lun->siop_tag[0].active != NULL) {
1474 panic("siop_start: tagged cmd while untagged running");
1475 }
1476 #ifdef DIAGNOSTIC
1477 /* sanity check the tag if needed */
1478 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1479 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1480 panic("siop_start: tag not free");
1481 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1482 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1483 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1484 panic("siop_start: invalid tag id");
1485 }
1486 }
1487 #endif
1488 /*
1489 * find a free scheduler slot and load it.
1490 */
1491 for (; slot < SIOP_NSLOTS; slot++) {
1492 /*
1493 * If cmd is 0x80000000 the slot is free
1494 */
1495 if (siop_script_read(sc,
1496 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1497 0x80000000)
1498 break;
1499 }
1500 if (slot == SIOP_NSLOTS) {
1501 /*
1502 * no more free slots, no need to continue: freeze the queue
1503 * and requeue this command.
1504 */
1505 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1506 sc->sc_flags |= SCF_CHAN_NOSLOT;
1507 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1508 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1509 siop_scsicmd_end(siop_cmd);
1510 return;
1511 }
1512 #ifdef SIOP_DEBUG_SCHED
1513 printf("using slot %d for DSA 0x%lx\n", slot,
1514 (u_long)siop_cmd->cmd_c.dsa);
1515 #endif
1516 /* mark command as active */
1517 if (siop_cmd->cmd_c.status == CMDST_READY)
1518 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1519 else
1520 panic("siop_start: bad status");
1521 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1522 /* patch scripts with DSA addr */
1523 dsa = siop_cmd->cmd_c.dsa;
1524 /* first reselect switch, if we have an entry */
1525 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1526 siop_script_write(sc,
1527 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1528 dsa + sizeof(struct siop_common_xfer) +
1529 Ent_ldsa_reload_dsa);
1530 /* CMD script: MOVE MEMORY addr */
1531 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1532 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1533 htole32(sc->sc_c.sc_scriptaddr + Ent_script_sched_slot0 + slot * 8);
1534 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1535 /* scheduler slot: JUMP ldsa_select */
1536 siop_script_write(sc,
1537 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1538 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1539 /* handle timeout */
1540 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1541 /* start expire timer */
1542 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1543 if (timeout == 0)
1544 timeout = 1;
1545 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1546 timeout, siop_timeout, siop_cmd);
1547 }
1548 /*
1549 * Change JUMP cmd so that this slot will be handled
1550 */
1551 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1552 0x80080000);
1553 sc->sc_currschedslot = slot;
1554
1555 /* make sure SCRIPT processor will read valid data */
1556 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1557 /* Signal script it has some work to do */
1558 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1559 SIOP_ISTAT, ISTAT_SIGP);
1560 /* and wait for IRQ */
1561 return;
1562 }
1563
1564 void
1565 siop_timeout(v)
1566 void *v;
1567 {
1568 struct siop_cmd *siop_cmd = v;
1569 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1570 int s;
1571
1572 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1573 printf("command timeout, CDB: ");
1574 scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1575 printf("\n");
1576
1577 s = splbio();
1578 /* reset the scsi bus */
1579 siop_resetbus(&sc->sc_c);
1580
1581 /* deactivate callout */
1582 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1584 /*
1585 * mark command as being timed out and just return;
1586 * the bus reset will generate an interrupt,
1587 * which will be handled in siop_intr()
1588 */
1589 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1590 splx(s);
1591 return;
1592
1593 }
1594
1595 void
1596 siop_dump_script(sc)
1597 struct siop_softc *sc;
1598 {
1599 int i;
1600 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1601 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1602 le32toh(sc->sc_c.sc_script[i]),
1603 le32toh(sc->sc_c.sc_script[i+1]));
1604 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1605 0xc0000000) {
1606 i++;
1607 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1608 }
1609 printf("\n");
1610 }
1611 }
1612
1613 void
1614 siop_morecbd(sc)
1615 struct siop_softc *sc;
1616 {
1617 int error, i, j, s;
1618 bus_dma_segment_t seg;
1619 int rseg;
1620 struct siop_cbd *newcbd;
1621 struct siop_xfer *xfer;
1622 bus_addr_t dsa;
1623 u_int32_t *scr;
1624
1625 /* allocate a new list head */
1626 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1627 if (newcbd == NULL) {
1628 printf("%s: can't allocate memory for command descriptors "
1629 "head\n", sc->sc_c.sc_dev.dv_xname);
1630 return;
1631 }
1632
1633 /* allocate cmd list */
1634 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1635 M_DEVBUF, M_NOWAIT|M_ZERO);
1636 if (newcbd->cmds == NULL) {
1637 printf("%s: can't allocate memory for command descriptors\n",
1638 sc->sc_c.sc_dev.dv_xname);
1639 goto bad3;
1640 }
1641 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1642 1, &rseg, BUS_DMA_NOWAIT);
1643 if (error) {
1644 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1645 sc->sc_c.sc_dev.dv_xname, error);
1646 goto bad2;
1647 }
1648 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1649 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1650 if (error) {
1651 printf("%s: unable to map cbd DMA memory, error = %d\n",
1652 sc->sc_c.sc_dev.dv_xname, error);
1653 goto bad2;
1654 }
1655 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1656 BUS_DMA_NOWAIT, &newcbd->xferdma);
1657 if (error) {
1658 printf("%s: unable to create cbd DMA map, error = %d\n",
1659 sc->sc_c.sc_dev.dv_xname, error);
1660 goto bad1;
1661 }
1662 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1663 PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1664 if (error) {
1665 printf("%s: unable to load cbd DMA map, error = %d\n",
1666 sc->sc_c.sc_dev.dv_xname, error);
1667 goto bad0;
1668 }
1669 #ifdef DEBUG
1670 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1671 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1672 #endif
1673 for (i = 0; i < SIOP_NCMDPB; i++) {
1674 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1675 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1676 &newcbd->cmds[i].cmd_c.dmamap_data);
1677 if (error) {
1678 printf("%s: unable to create data DMA map for cbd: "
1679 "error %d\n",
1680 sc->sc_c.sc_dev.dv_xname, error);
1681 goto bad0;
1682 }
1683 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1684 sizeof(struct scsipi_generic), 1,
1685 sizeof(struct scsipi_generic), 0,
1686 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1687 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1688 if (error) {
1689 printf("%s: unable to create cmd DMA map for cbd %d\n",
1690 sc->sc_c.sc_dev.dv_xname, error);
1691 goto bad0;
1692 }
1693 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1694 newcbd->cmds[i].siop_cbdp = newcbd;
1695 xfer = &newcbd->xfers[i];
1696 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1697 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1698 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1699 i * sizeof(struct siop_xfer);
1700 newcbd->cmds[i].cmd_c.dsa = dsa;
1701 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1702 xfer->siop_tables.t_msgout.count= htole32(1);
1703 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1704 xfer->siop_tables.t_msgin.count= htole32(1);
1705 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1706 offsetof(struct siop_common_xfer, msg_in));
1707 xfer->siop_tables.t_extmsgin.count= htole32(2);
1708 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1709 offsetof(struct siop_common_xfer, msg_in) + 1);
1710 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1711 offsetof(struct siop_common_xfer, msg_in) + 3);
1712 xfer->siop_tables.t_status.count= htole32(1);
1713 xfer->siop_tables.t_status.addr = htole32(dsa +
1714 offsetof(struct siop_common_xfer, status));
1715 /* The select/reselect script */
1716 scr = &xfer->resel[0];
1717 for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1718 scr[j] = htole32(load_dsa[j]);
1719 /*
1720 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1721 * octet, reg offset is the third.
1722 */
1723 scr[Ent_rdsa0 / 4] =
1724 htole32(0x78100000 | ((dsa & 0x000000ff) << 8));
1725 scr[Ent_rdsa1 / 4] =
1726 htole32(0x78110000 | ( dsa & 0x0000ff00 ));
1727 scr[Ent_rdsa2 / 4] =
1728 htole32(0x78120000 | ((dsa & 0x00ff0000) >> 8));
1729 scr[Ent_rdsa3 / 4] =
1730 htole32(0x78130000 | ((dsa & 0xff000000) >> 16));
1731 scr[E_ldsa_abs_reselected_Used[0]] =
1732 htole32(sc->sc_c.sc_scriptaddr + Ent_reselected);
1733 scr[E_ldsa_abs_reselect_Used[0]] =
1734 htole32(sc->sc_c.sc_scriptaddr + Ent_reselect);
1735 scr[E_ldsa_abs_selected_Used[0]] =
1736 htole32(sc->sc_c.sc_scriptaddr + Ent_selected);
1737 scr[E_ldsa_abs_data_Used[0]] =
1738 htole32(dsa + sizeof(struct siop_common_xfer) +
1739 Ent_ldsa_data);
1740 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1741 scr[Ent_ldsa_data / 4] = htole32(0x80000000);
1742 s = splbio();
1743 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1744 splx(s);
1745 #ifdef SIOP_DEBUG
1746 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1747 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1748 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1749 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1750 #endif
1751 }
1752 s = splbio();
1753 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1754 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1755 splx(s);
1756 return;
1757 bad0:
1758 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1759 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1760 bad1:
1761 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1762 bad2:
1763 free(newcbd->cmds, M_DEVBUF);
1764 bad3:
1765 free(newcbd, M_DEVBUF);
1766 return;
1767 }
1768
1769 struct siop_lunsw *
1770 siop_get_lunsw(sc)
1771 struct siop_softc *sc;
1772 {
1773 struct siop_lunsw *lunsw;
1774 int i;
1775
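/* check that a new lun switch still fits in the free script area */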
1776 if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1777 sc->script_free_hi)
1778 return NULL;
1779 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1780 if (lunsw != NULL) {
1781 #ifdef SIOP_DEBUG
1782 printf("siop_get_lunsw got lunsw at offset %d\n",
1783 lunsw->lunsw_off);
1784 #endif
1785 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1786 return lunsw;
1787 }
1788 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1789 if (lunsw == NULL)
1790 return NULL;
1791 #ifdef SIOP_DEBUG
1792 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1793 #endif
1794 if (sc->sc_c.features & SF_CHIP_RAM) {
1795 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1796 sc->script_free_lo * 4, lun_switch,
1797 sizeof(lun_switch) / sizeof(lun_switch[0]));
1798 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1799 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1800 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1801 } else {
1802 for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1803 i++)
1804 sc->sc_c.sc_script[sc->script_free_lo + i] =
1805 htole32(lun_switch[i]);
1806 sc->sc_c.sc_script[
1807 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1808 htole32(sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1809 }
1810 lunsw->lunsw_off = sc->script_free_lo;
1811 lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1812 sc->script_free_lo += lunsw->lunsw_size;
1813 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1814 return lunsw;
1815 }
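
/*
 * Illustrative sketch only, not driver code: the general pattern used by
 * siop_get_lunsw() above, i.e. copy a script template into free script
 * space and patch its absolute-address slot(s).  All names below are
 * hypothetical; the driver does this with lun_switch[] and
 * E_abs_lunsw_return_Used[], converts each word with htole32(), and uses
 * bus_space_write_region_4() instead when the script lives in on-chip RAM.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
load_script_template(uint32_t *script, size_t off,	/* destination, word offset */
    const uint32_t *tmpl, size_t nwords,		/* template to copy */
    const size_t *reloc_idx, size_t nreloc,		/* word indexes to patch */
    uint32_t scriptaddr, uint32_t entry_off)		/* absolute base + entry */
{
	size_t i;

	for (i = 0; i < nwords; i++)
		script[off + i] = tmpl[i];
	for (i = 0; i < nreloc; i++)
		script[off + reloc_idx[i]] = scriptaddr + entry_off;
}
#endif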
1816
1817 void
1818 siop_add_reselsw(sc, target)
1819 struct siop_softc *sc;
1820 int target;
1821 {
1822 int i, j;
1823 struct siop_target *siop_target;
1824 struct siop_lun *siop_lun;
1825
1826 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1827 	/*
1828 	 * add an entry to the resel switch
1829 	 */
1830 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1831 for (i = 0; i < 15; i++) {
1832 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1833 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1834 == 0xff) { /* it's free */
1835 #ifdef SIOP_DEBUG
1836 printf("siop: target %d slot %d offset %d\n",
1837 target, i, siop_target->reseloff);
1838 #endif
1839 /* JUMP abs_foo, IF target | 0x80; */
1840 siop_script_write(sc, siop_target->reseloff,
1841 0x800c0080 | target);
1842 siop_script_write(sc, siop_target->reseloff + 1,
1843 sc->sc_c.sc_scriptaddr +
1844 siop_target->lunsw->lunsw_off * 4 +
1845 Ent_lun_switch_entry);
1846 break;
1847 }
1848 }
1849 if (i == 15) /* no free slot, shouldn't happen */
1850 panic("siop: resel switch full");
1851
1852 sc->sc_ntargets++;
1853 for (i = 0; i < 8; i++) {
1854 siop_lun = siop_target->siop_lun[i];
1855 if (siop_lun == NULL)
1856 continue;
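		/* this lun was set up before: clear and rebuild its resel entries */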
1857 if (siop_lun->reseloff > 0) {
1858 siop_lun->reseloff = 0;
1859 for (j = 0; j < SIOP_NTAG; j++)
1860 siop_lun->siop_tag[j].reseloff = 0;
1861 siop_add_dev(sc, target, i);
1862 }
1863 }
1864 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1865 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1866 }
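
/*
 * Illustrative sketch only, not driver code: the layout of the reselect
 * switch slots scanned by siop_add_reselsw() above.  Each slot is two
 * script words: a "JUMP ..., IF (target | 0x80)" word (0x800c0080 | target,
 * per the comment above) followed by the absolute address of that target's
 * lun switch.  A low byte of 0xff marks a free slot, which is also the
 * value siop_del_dev() below writes back when a target goes away.
 */
#if 0
#include <stdint.h>

/* First word of a reselect-switch slot claimed for a given target id. */
static uint32_t
resel_slot_word(int target)
{
	return 0x800c0080 | (uint32_t)target;
}

/* A slot is free while its id byte still holds the 0xff filler. */
static int
resel_slot_free(uint32_t word)
{
	return (word & 0xff) == 0xff;
}
#endif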
1867
1868 void
1869 siop_update_scntl3(sc, _siop_target)
1870 struct siop_softc *sc;
1871 struct siop_common_target *_siop_target;
1872 {
1873 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1874 /* MOVE target->id >> 24 TO SCNTL3 */
1875 siop_script_write(sc,
1876 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1877 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1878 /* MOVE target->id >> 8 TO SXFER */
1879 siop_script_write(sc,
1880 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1881 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1882 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1883 }
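
/*
 * Illustrative sketch only, not driver code: judging from the two MOVE
 * instructions rebuilt by siop_update_scntl3() above, the per-target id
 * word carries the SCNTL3 value in bits 24-31 and the SXFER value in bits
 * 8-15; the rewritten script words are plain "MOVE data8 TO reg"
 * instructions targeting register offsets 0x03 (SCNTL3) and 0x05 (SXFER).
 */
#if 0
#include <stdint.h>

static uint32_t
move_scntl3_word(uint32_t id)
{
	/* data8 (bits 8-15 of the instruction word) = id >> 24 */
	return 0x78030000 | ((id >> 16) & 0x0000ff00);
}

static uint32_t
move_sxfer_word(uint32_t id)
{
	/* data8 = (id >> 8) & 0xff, already in place in bits 8-15 */
	return 0x78050000 | (id & 0x0000ff00);
}
#endif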
1884
1885 void
1886 siop_add_dev(sc, target, lun)
1887 struct siop_softc *sc;
1888 int target;
1889 int lun;
1890 {
1891 struct siop_lunsw *lunsw;
1892 struct siop_target *siop_target =
1893 (struct siop_target *)sc->sc_c.targets[target];
1894 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1895 int i, ntargets;
1896
1897 if (siop_lun->reseloff > 0)
1898 return;
1899 lunsw = siop_target->lunsw;
1900 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1901 /*
1902 * can't extend this slot. Probably not worth trying to deal
1903 * with this case
1904 */
1905 #ifdef DEBUG
1906 printf("%s:%d:%d: can't allocate a lun sw slot\n",
1907 sc->sc_c.sc_dev.dv_xname, target, lun);
1908 #endif
1909 return;
1910 }
1911 /* count how many free targets we still have to probe */
1912 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1913
1914 	/*
1915 	 * we need 8 bytes for the additional lun sw entry, and
1916 	 * possibly sizeof(tag_switch) for the tag switch entry.
1917 	 * Keep enough free space for the targets that could still be
1918 	 * probed later (see the sketch after this function).
1919 	 */
1920 if (sc->script_free_lo + 2 +
1921 (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1922 ((siop_target->target_c.flags & TARF_TAG) ?
1923 sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1924 sc->script_free_hi)) {
1925 /*
1926 * not enough space, probably not worth dealing with it.
1927 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1928 */
1929 #ifdef DEBUG
1930 printf("%s:%d:%d: not enough memory for a lun sw slot\n",
1931 sc->sc_c.sc_dev.dv_xname, target, lun);
1932 #endif
1933 return;
1934 }
1935 #ifdef SIOP_DEBUG
1936 printf("%s:%d:%d: allocate lun sw entry\n",
1937 sc->sc_c.sc_dev.dv_xname, target, lun);
1938 #endif
1939 /* INT int_resellun */
1940 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1941 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1942 /* Now the slot entry: JUMP abs_foo, IF lun */
1943 siop_script_write(sc, sc->script_free_lo - 2,
1944 0x800c0000 | lun);
1945 siop_script_write(sc, sc->script_free_lo - 1, 0);
1946 siop_lun->reseloff = sc->script_free_lo - 2;
1947 lunsw->lunsw_size += 2;
1948 sc->script_free_lo += 2;
1949 if (siop_target->target_c.flags & TARF_TAG) {
1950 /* we need a tag switch */
1951 sc->script_free_hi -=
1952 sizeof(tag_switch) / sizeof(tag_switch[0]);
1953 if (sc->sc_c.features & SF_CHIP_RAM) {
1954 bus_space_write_region_4(sc->sc_c.sc_ramt,
1955 sc->sc_c.sc_ramh,
1956 sc->script_free_hi * 4, tag_switch,
1957 sizeof(tag_switch) / sizeof(tag_switch[0]));
1958 } else {
1959 			for (i = 0;
1960 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1961 i++) {
1962 sc->sc_c.sc_script[sc->script_free_hi + i] =
1963 htole32(tag_switch[i]);
1964 }
1965 }
1966 siop_script_write(sc,
1967 siop_lun->reseloff + 1,
1968 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1969 Ent_tag_switch_entry);
1970
1971 for (i = 0; i < SIOP_NTAG; i++) {
1972 siop_lun->siop_tag[i].reseloff =
1973 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1974 }
1975 } else {
1976 /* non-tag case; just work with the lun switch */
1977 siop_lun->siop_tag[0].reseloff =
1978 siop_target->siop_lun[lun]->reseloff;
1979 }
1980 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1981 }
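
/*
 * Illustrative sketch only, not driver code: the free-space budget checked
 * by siop_add_dev() above before growing a lun switch.  "lo" and "hi" stand
 * for script_free_lo/script_free_hi (in script words): lo grows upward as
 * lun switch entries are appended, hi shrinks downward as tag switches are
 * carved from the top, each still-unprobed target may need a whole
 * lun_switch, and this lun's entry needs 2 more words.  lun_switch_sz and
 * tag_switch_sz stand for the sizeof(x)/sizeof(x[0]) expressions above.
 */
#if 0
static int
lunsw_space_ok(int lo, int hi, int ntargets_left, int want_tags,
    int lun_switch_sz, int tag_switch_sz)
{
	if (want_tags)
		hi -= tag_switch_sz;	/* a tag switch would come off the top */
	return lo + 2 + ntargets_left * lun_switch_sz < hi;
}
#endif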
1982
1983 void
1984 siop_del_dev(sc, target, lun)
1985 struct siop_softc *sc;
1986 int target;
1987 int lun;
1988 {
1989 int i;
1990 struct siop_target *siop_target;
1991 #ifdef SIOP_DEBUG
1992 printf("%s:%d:%d: free lun sw entry\n",
1993 sc->sc_c.sc_dev.dv_xname, target, lun);
1994 #endif
1995 if (sc->sc_c.targets[target] == NULL)
1996 return;
1997 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1998 free(siop_target->siop_lun[lun], M_DEVBUF);
1999 siop_target->siop_lun[lun] = NULL;
2000 /* XXX compact sw entry too ? */
2001 /* check if we can free the whole target */
2002 for (i = 0; i < 8; i++) {
2003 if (siop_target->siop_lun[i] != NULL)
2004 return;
2005 }
2006 #ifdef SIOP_DEBUG
2007 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
2008 sc->sc_c.sc_dev.dv_xname, target, lun,
2009 siop_target->lunsw->lunsw_off);
2010 #endif
2011 	/*
2012 	 * no LUNs left on this target: free the target struct
2013 	 * and its resel switch entry
2014 	 */
2015 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
2016 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2017 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2018 free(sc->sc_c.targets[target], M_DEVBUF);
2019 sc->sc_c.targets[target] = NULL;
2020 sc->sc_ntargets--;
2021 }
2022
2023 #ifdef SIOP_STATS
2024 void
2025 siop_printstats()
2026 {
2027 printf("siop_stat_intr %d\n", siop_stat_intr);
2028 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2029 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2030 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2031 printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
2032 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2033 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2034 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2035 }
2036 #endif
2037