1 /* $NetBSD: siop.c,v 1.94.4.2 2011/03/05 20:53:20 rmind Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.94.4.2 2011/03/05 20:53:20 rmind Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/kernel.h>
39
40 #include <machine/endian.h>
41 #include <sys/bus.h>
42
43 #include <dev/microcode/siop/siop.out>
44
45 #include <dev/scsipi/scsi_all.h>
46 #include <dev/scsipi/scsi_message.h>
47 #include <dev/scsipi/scsipi_all.h>
48
49 #include <dev/scsipi/scsiconf.h>
50
51 #include <dev/ic/siopreg.h>
52 #include <dev/ic/siopvar_common.h>
53 #include <dev/ic/siopvar.h>
54
55 #include "opt_siop.h"
56
57 /*
58 #define SIOP_DEBUG
59 #define SIOP_DEBUG_DR
60 #define SIOP_DEBUG_INTR
61 #define SIOP_DEBUG_SCHED
62 #define SIOP_DUMP_SCRIPT
63 */
64
65 #define SIOP_STATS
66
67 #ifndef SIOP_DEFAULT_TARGET
68 #define SIOP_DEFAULT_TARGET 7
69 #endif
70
71 /* number of cmd descriptors per block */
72 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
73
74 /* Number of scheduler slots (needs to match the script) */
75 #define SIOP_NSLOTS 40
76
77 void siop_reset(struct siop_softc *);
78 void siop_handle_reset(struct siop_softc *);
79 int siop_handle_qtag_reject(struct siop_cmd *);
80 void siop_scsicmd_end(struct siop_cmd *);
81 void siop_unqueue(struct siop_softc *, int, int);
82 static void siop_start(struct siop_softc *, struct siop_cmd *);
83 void siop_timeout(void *);
84 int siop_scsicmd(struct scsipi_xfer *);
85 void siop_scsipi_request(struct scsipi_channel *,
86 scsipi_adapter_req_t, void *);
87 void siop_dump_script(struct siop_softc *);
88 void siop_morecbd(struct siop_softc *);
89 struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
90 void siop_add_reselsw(struct siop_softc *, int);
91 void siop_update_scntl3(struct siop_softc *,
92 struct siop_common_target *);
93
94 #ifdef SIOP_STATS
95 static int siop_stat_intr = 0;
96 static int siop_stat_intr_shortxfer = 0;
97 static int siop_stat_intr_sdp = 0;
98 static int siop_stat_intr_saveoffset = 0;
99 static int siop_stat_intr_done = 0;
100 static int siop_stat_intr_xferdisc = 0;
101 static int siop_stat_intr_lunresel = 0;
102 static int siop_stat_intr_qfull = 0;
103 void siop_printstats(void);
104 #define INCSTAT(x) x++
105 #else
106 #define INCSTAT(x)
107 #endif
108
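/*
 * The SCRIPTS code runs either from on-chip RAM (SF_CHIP_RAM) or from a
 * host-memory page mapped at sc_scriptdma. The helpers below hide that
 * difference: reads and writes go through bus_space when on-chip RAM is
 * used, and through the in-memory copy (with byte-order conversion)
 * otherwise; siop_script_sync() is a no-op in the on-chip RAM case.
 */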
109 static inline void siop_script_sync(struct siop_softc *, int);
110 static inline void
111 siop_script_sync(struct siop_softc *sc, int ops)
112 {
113
114 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
115 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
116 PAGE_SIZE, ops);
117 }
118
119 static inline uint32_t siop_script_read(struct siop_softc *, u_int);
120 static inline uint32_t
121 siop_script_read(struct siop_softc *sc, u_int offset)
122 {
123
124 if (sc->sc_c.features & SF_CHIP_RAM) {
125 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
126 offset * 4);
127 } else {
128 return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]);
129 }
130 }
131
132 static inline void siop_script_write(struct siop_softc *, u_int,
133 uint32_t);
134 static inline void
135 siop_script_write(struct siop_softc *sc, u_int offset, uint32_t val)
136 {
137
138 if (sc->sc_c.features & SF_CHIP_RAM) {
139 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
140 offset * 4, val);
141 } else {
142 sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val);
143 }
144 }
145
146 void
147 siop_attach(struct siop_softc *sc)
148 {
149
150 if (siop_common_attach(&sc->sc_c) != 0)
151 return;
152
153 TAILQ_INIT(&sc->free_list);
154 TAILQ_INIT(&sc->cmds);
155 TAILQ_INIT(&sc->lunsw_list);
156 sc->sc_currschedslot = 0;
157 #ifdef SIOP_DEBUG
158 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
159 device_xname(sc->sc_c.sc_dev), (int)sizeof(siop_script),
160 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
161 #endif
162
163 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
164 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
165
166 /* Do a bus reset, so that devices fall back to narrow/async */
167 siop_resetbus(&sc->sc_c);
168 /*
169 * siop_reset() will reset the chip, thus clearing pending interrupts
170 */
171 siop_reset(sc);
172 #ifdef SIOP_DUMP_SCRIPT
173 siop_dump_script(sc);
174 #endif
175
176 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
177 }
178
179 void
180 siop_reset(struct siop_softc *sc)
181 {
182 int i, j;
183 struct siop_lunsw *lunsw;
184
185 siop_common_reset(&sc->sc_c);
186
187 /* copy and patch the script */
188 if (sc->sc_c.features & SF_CHIP_RAM) {
189 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
190 siop_script, __arraycount(siop_script));
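/*
 * E_abs_msgin_Used is a list of script word offsets, emitted by the
 * script tool, where the absolute address of the msgin_space buffer
 * has to be patched in.
 */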
191 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
192 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
193 E_abs_msgin_Used[j] * 4,
194 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
195 }
196 if (sc->sc_c.features & SF_CHIP_LED0) {
197 bus_space_write_region_4(sc->sc_c.sc_ramt,
198 sc->sc_c.sc_ramh,
199 Ent_led_on1, siop_led_on,
200 __arraycount(siop_led_on));
201 bus_space_write_region_4(sc->sc_c.sc_ramt,
202 sc->sc_c.sc_ramh,
203 Ent_led_on2, siop_led_on,
204 __arraycount(siop_led_on));
205 bus_space_write_region_4(sc->sc_c.sc_ramt,
206 sc->sc_c.sc_ramh,
207 Ent_led_off, siop_led_off,
208 __arraycount(siop_led_off));
209 }
210 } else {
211 for (j = 0; j < __arraycount(siop_script); j++) {
212 sc->sc_c.sc_script[j] =
213 siop_htoc32(&sc->sc_c, siop_script[j]);
214 }
215 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
216 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
217 siop_htoc32(&sc->sc_c,
218 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
219 }
220 if (sc->sc_c.features & SF_CHIP_LED0) {
221 for (j = 0; j < __arraycount(siop_led_on); j++)
222 sc->sc_c.sc_script[
223 Ent_led_on1 / sizeof(siop_led_on[0]) + j
224 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
225 for (j = 0; j < __arraycount(siop_led_on); j++)
226 sc->sc_c.sc_script[
227 Ent_led_on2 / sizeof(siop_led_on[0]) + j
228 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
229 for (j = 0; j < __arraycount(siop_led_off); j++)
230 sc->sc_c.sc_script[
231 Ent_led_off / sizeof(siop_led_off[0]) + j
232 ] = siop_htoc32(&sc->sc_c, siop_led_off[j]);
233 }
234 }
235 sc->script_free_lo = __arraycount(siop_script);
236 sc->script_free_hi = sc->sc_c.ram_size / 4;
237 sc->sc_ntargets = 0;
238
239 /* free used and unused lun switches */
240 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
241 #ifdef SIOP_DEBUG
242 printf("%s: free lunsw at offset %d\n",
243 device_xname(sc->sc_c.sc_dev), lunsw->lunsw_off);
244 #endif
245 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
246 free(lunsw, M_DEVBUF);
247 }
248 TAILQ_INIT(&sc->lunsw_list);
249 /* restore reselect switch */
250 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
251 struct siop_target *target;
252 if (sc->sc_c.targets[i] == NULL)
253 continue;
254 #ifdef SIOP_DEBUG
255 printf("%s: restore sw for target %d\n",
256 device_xname(sc->sc_c.sc_dev), i);
257 #endif
258 target = (struct siop_target *)sc->sc_c.targets[i];
259 free(target->lunsw, M_DEVBUF);
260 target->lunsw = siop_get_lunsw(sc);
261 if (target->lunsw == NULL) {
262 aprint_error_dev(sc->sc_c.sc_dev,
263 "can't alloc lunsw for target %d\n", i);
264 break;
265 }
266 siop_add_reselsw(sc, i);
267 }
268
269 /* start script */
270 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
271 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
272 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
273 }
274 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
275 sc->sc_c.sc_scriptaddr + Ent_reselect);
276 }
277
278 #if 0
279 #define CALL_SCRIPT(ent) do { \
280 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
281 siop_cmd->cmd_c.dsa, \
282 sc->sc_c.sc_scriptaddr + ent); \
283 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
284 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
285 } while (/* CONSTCOND */0)
286 #else
287 #define CALL_SCRIPT(ent) do { \
288 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
289 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
290 } while (/* CONSTCOND */0)
291 #endif
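/*
 * CALL_SCRIPT() (re)starts the SCRIPTS processor by writing the physical
 * address of the given script entry point to the DSP register; execution
 * then resumes at that label.
 */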
292
293 int
294 siop_intr(void *v)
295 {
296 struct siop_softc *sc = v;
297 struct siop_target *siop_target;
298 struct siop_cmd *siop_cmd;
299 struct siop_lun *siop_lun;
300 struct scsipi_xfer *xs;
301 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
302 uint32_t irqcode;
303 int need_reset = 0;
304 int offset, target, lun, tag;
305 bus_addr_t dsa;
306 struct siop_cbd *cbdp;
307 int freetarget = 0;
308 int restart = 0;
309
310 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
311 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
312 return 0;
313 INCSTAT(siop_stat_intr);
314 if (istat & ISTAT_INTF) {
315 printf("INTRF\n");
316 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
317 SIOP_ISTAT, ISTAT_INTF);
318 }
319 if ((istat & (ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
320 (ISTAT_DIP | ISTAT_ABRT)) {
321 /* clear abort */
322 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
323 SIOP_ISTAT, 0);
324 }
325 /* use DSA to find the current siop_cmd */
326 siop_cmd = NULL;
327 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
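/*
 * Each siop_cbd block maps one PAGE_SIZE chunk of siop_xfer structures,
 * so the DSA reported by the chip falls inside exactly one block's DMA
 * segment; the offset within that page selects the siop_cmd.
 */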
328 TAILQ_FOREACH(cbdp, &sc->cmds, next) {
329 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
330 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
331 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
332 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
333 siop_table_sync(siop_cmd,
334 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
335 break;
336 }
337 }
338 if (siop_cmd) {
339 xs = siop_cmd->cmd_c.xs;
340 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
341 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
342 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
343 tag = siop_cmd->cmd_c.tag;
344 siop_lun = siop_target->siop_lun[lun];
345 #ifdef DIAGNOSTIC
346 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
347 printf("siop_cmd (lun %d) for DSA 0x%x "
348 "not active (%d)\n", lun, (u_int)dsa,
349 siop_cmd->cmd_c.status);
350 xs = NULL;
351 siop_target = NULL;
352 target = -1;
353 lun = -1;
354 tag = -1;
355 siop_lun = NULL;
356 siop_cmd = NULL;
357 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
358 printf("siop_cmd (lun %d tag %d) not in siop_lun "
359 "active (%p != %p)\n", lun, tag, siop_cmd,
360 siop_lun->siop_tag[tag].active);
361 }
362 #endif
363 } else {
364 xs = NULL;
365 siop_target = NULL;
366 target = -1;
367 lun = -1;
368 tag = -1;
369 siop_lun = NULL;
370 }
371 if (istat & ISTAT_DIP) {
372 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
373 SIOP_DSTAT);
374 if (dstat & DSTAT_ABRT) {
375 /* was probably generated by a bus reset IOCTL */
376 if ((dstat & DSTAT_DFE) == 0)
377 siop_clearfifo(&sc->sc_c);
378 goto reset;
379 }
380 if (dstat & DSTAT_SSI) {
381 printf("single step dsp 0x%08x dsa 0x08%x\n",
382 (int)(bus_space_read_4(sc->sc_c.sc_rt,
383 sc->sc_c.sc_rh, SIOP_DSP) -
384 sc->sc_c.sc_scriptaddr),
385 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
386 SIOP_DSA));
387 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
388 (istat & ISTAT_SIP) == 0) {
389 bus_space_write_1(sc->sc_c.sc_rt,
390 sc->sc_c.sc_rh, SIOP_DCNTL,
391 bus_space_read_1(sc->sc_c.sc_rt,
392 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
393 }
394 return 1;
395 }
396
397 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
398 printf("DMA IRQ:");
399 if (dstat & DSTAT_IID)
400 printf(" Illegal instruction");
401 if (dstat & DSTAT_BF)
402 printf(" bus fault");
403 if (dstat & DSTAT_MDPE)
404 printf(" parity");
405 if (dstat & DSTAT_DFE)
406 printf(" DMA fifo empty");
407 else
408 siop_clearfifo(&sc->sc_c);
409 printf(", DSP=0x%x DSA=0x%x: ",
410 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
411 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
412 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
413 if (siop_cmd)
414 printf("last msg_in=0x%x status=0x%x\n",
415 siop_cmd->cmd_tables->msg_in[0],
416 siop_ctoh32(&sc->sc_c,
417 siop_cmd->cmd_tables->status));
418 else
419 aprint_error_dev(sc->sc_c.sc_dev,
420 "current DSA invalid\n");
421 need_reset = 1;
422 }
423 }
424 if (istat & ISTAT_SIP) {
425 if (istat & ISTAT_DIP)
426 delay(10);
427 /*
428 * Can't read SIST0 and SIST1 independently without inserting
429 * a delay between the reads, so fetch both with one 16-bit read.
430 */
431 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
432 SIOP_SIST0);
433 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
434 SIOP_SSTAT1);
435 #ifdef SIOP_DEBUG_INTR
436 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
437 "DSA=0x%x DSP=0x%lx\n", sist,
438 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
439 SIOP_SSTAT1),
440 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
441 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
442 SIOP_DSP) -
443 sc->sc_c.sc_scriptaddr));
444 #endif
445 if (sist & SIST0_RST) {
446 siop_handle_reset(sc);
447 /* no table to flush here */
448 return 1;
449 }
450 if (sist & SIST0_SGE) {
451 if (siop_cmd)
452 scsipi_printaddr(xs->xs_periph);
453 else
454 printf("%s:", device_xname(sc->sc_c.sc_dev));
455 printf("scsi gross error\n");
456 goto reset;
457 }
458 if ((sist & SIST0_MA) && need_reset == 0) {
459 if (siop_cmd) {
460 int scratcha0;
461 dstat = bus_space_read_1(sc->sc_c.sc_rt,
462 sc->sc_c.sc_rh, SIOP_DSTAT);
463 /*
464 * first restore DSA, in case we were in an S/G
465 * operation.
466 */
467 bus_space_write_4(sc->sc_c.sc_rt,
468 sc->sc_c.sc_rh,
469 SIOP_DSA, siop_cmd->cmd_c.dsa);
470 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
471 sc->sc_c.sc_rh, SIOP_SCRATCHA);
472 switch (sstat1 & SSTAT1_PHASE_MASK) {
473 case SSTAT1_PHASE_STATUS:
474 /*
475 * the previous phase may be aborted for any reason
476 * (for example, the target has less data to
477 * transfer than requested). Compute the resid and
478 * just go to status; the command should
479 * terminate.
480 */
481 INCSTAT(siop_stat_intr_shortxfer);
482 if (scratcha0 & A_flag_data)
483 siop_ma(&siop_cmd->cmd_c);
484 else if ((dstat & DSTAT_DFE) == 0)
485 siop_clearfifo(&sc->sc_c);
486 CALL_SCRIPT(Ent_status);
487 return 1;
488 case SSTAT1_PHASE_MSGIN:
489 /*
490 * the target may be ready to disconnect.
491 * Compute the resid, which will be used later
492 * if a save data pointer is needed.
493 */
494 INCSTAT(siop_stat_intr_xferdisc);
495 if (scratcha0 & A_flag_data)
496 siop_ma(&siop_cmd->cmd_c);
497 else if ((dstat & DSTAT_DFE) == 0)
498 siop_clearfifo(&sc->sc_c);
499 bus_space_write_1(sc->sc_c.sc_rt,
500 sc->sc_c.sc_rh, SIOP_SCRATCHA,
501 scratcha0 & ~A_flag_data);
502 CALL_SCRIPT(Ent_msgin);
503 return 1;
504 }
505 aprint_error_dev(sc->sc_c.sc_dev,
506 "unexpected phase mismatch %d\n",
507 sstat1 & SSTAT1_PHASE_MASK);
508 } else {
509 aprint_error_dev(sc->sc_c.sc_dev,
510 "phase mismatch without command\n");
511 }
512 need_reset = 1;
513 }
514 if (sist & SIST0_PAR) {
515 /* parity error, reset */
516 if (siop_cmd)
517 scsipi_printaddr(xs->xs_periph);
518 else
519 printf("%s:", device_xname(sc->sc_c.sc_dev));
520 printf("parity error\n");
521 goto reset;
522 }
523 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
524 /* selection time out, assume there's no device here */
525 if (siop_cmd) {
526 siop_cmd->cmd_c.status = CMDST_DONE;
527 xs->error = XS_SELTIMEOUT;
528 freetarget = 1;
529 goto end;
530 } else {
531 aprint_error_dev(sc->sc_c.sc_dev,
532 "selection timeout without "
533 "command\n");
534 need_reset = 1;
535 }
536 }
537 if (sist & SIST0_UDC) {
538 /*
539 * unexpected disconnect. Usually the target signals
540 * a fatal condition this way. Attempt to get sense.
541 */
542 if (siop_cmd) {
543 siop_cmd->cmd_tables->status =
544 siop_htoc32(&sc->sc_c, SCSI_CHECK);
545 goto end;
546 }
547 aprint_error_dev(sc->sc_c.sc_dev,
548 "unexpected disconnect without "
549 "command\n");
550 goto reset;
551 }
552 if (sist & (SIST1_SBMC << 8)) {
553 /* SCSI bus mode change */
554 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
555 goto reset;
556 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
557 /*
558 * we have a script interrupt; it will
559 * restart the script.
560 */
561 goto scintr;
562 }
563 /*
564 * else we have to restart it ourselves, at the
565 * interrupted instruction.
566 */
567 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
568 SIOP_DSP,
569 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
570 SIOP_DSP) - 8);
571 return 1;
572 }
573 /* Else it's an unhandled exception (for now). */
574 aprint_error_dev(sc->sc_c.sc_dev,
575 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
576 "DSA=0x%x DSP=0x%x\n", sist,
577 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
578 SIOP_SSTAT1),
579 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
580 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
581 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
582 if (siop_cmd) {
583 siop_cmd->cmd_c.status = CMDST_DONE;
584 xs->error = XS_SELTIMEOUT;
585 goto end;
586 }
587 need_reset = 1;
588 }
589 if (need_reset) {
590 reset:
591 /* fatal error, reset the bus */
592 siop_resetbus(&sc->sc_c);
593 /* no table to flush here */
594 return 1;
595 }
596
597 scintr:
598 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
599 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
600 SIOP_DSPS);
601 #ifdef SIOP_DEBUG_INTR
602 printf("script interrupt 0x%x\n", irqcode);
603 #endif
604 /*
605 * no command, or an inactive command, is only valid for a
606 * reselect interrupt.
607 */
608 if ((irqcode & 0x80) == 0) {
609 if (siop_cmd == NULL) {
610 aprint_error_dev(sc->sc_c.sc_dev,
611 "script interrupt (0x%x) with "
612 "invalid DSA !!!\n",
613 irqcode);
614 goto reset;
615 }
616 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
617 aprint_error_dev(sc->sc_c.sc_dev,
618 "command with invalid status "
619 "(IRQ code 0x%x current status %d) !\n",
620 irqcode, siop_cmd->cmd_c.status);
621 xs = NULL;
622 }
623 }
624 switch(irqcode) {
625 case A_int_err:
626 printf("error, DSP=0x%x\n",
627 (int)(bus_space_read_4(sc->sc_c.sc_rt,
628 sc->sc_c.sc_rh, SIOP_DSP) -
629 sc->sc_c.sc_scriptaddr));
630 if (xs) {
631 xs->error = XS_SELTIMEOUT;
632 goto end;
633 } else {
634 goto reset;
635 }
636 case A_int_reseltarg:
637 aprint_error_dev(sc->sc_c.sc_dev,
638 "reselect with invalid target\n");
639 goto reset;
640 case A_int_resellun:
641 INCSTAT(siop_stat_intr_lunresel);
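/*
 * The reselect script stores the reselecting target ID in SCRATCHA0
 * (low 4 bits), the LUN in SCRATCHA1 and the tag (if any) in SCRATCHA2
 * before raising this interrupt.
 */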
642 target = bus_space_read_1(sc->sc_c.sc_rt,
643 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
644 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
645 SIOP_SCRATCHA + 1);
646 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
647 SIOP_SCRATCHA + 2);
648 siop_target =
649 (struct siop_target *)sc->sc_c.targets[target];
650 if (siop_target == NULL) {
651 printf("%s: reselect with invalid target %d\n",
652 device_xname(sc->sc_c.sc_dev), target);
653 goto reset;
654 }
655 siop_lun = siop_target->siop_lun[lun];
656 if (siop_lun == NULL) {
657 printf("%s: target %d reselect with invalid "
658 "lun %d\n", device_xname(sc->sc_c.sc_dev),
659 target, lun);
660 goto reset;
661 }
662 if (siop_lun->siop_tag[tag].active == NULL) {
663 printf("%s: target %d lun %d tag %d reselect "
664 "without command\n",
665 device_xname(sc->sc_c.sc_dev),
666 target, lun, tag);
667 goto reset;
668 }
669 siop_cmd = siop_lun->siop_tag[tag].active;
670 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
671 SIOP_DSP, siop_cmd->cmd_c.dsa +
672 sizeof(struct siop_common_xfer) +
673 Ent_ldsa_reload_dsa);
674 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
675 return 1;
676 case A_int_reseltag:
677 printf("%s: reselect with invalid tag\n",
678 device_xname(sc->sc_c.sc_dev));
679 goto reset;
680 case A_int_msgin:
681 {
682 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
683 sc->sc_c.sc_rh, SIOP_SFBR);
684
685 if (msgin == MSG_MESSAGE_REJECT) {
686 int msg, extmsg;
687 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
688 /*
689 * the message was part of an identify +
690 * something else. The identify shouldn't
691 * have been rejected.
692 */
693 msg =
694 siop_cmd->cmd_tables->msg_out[1];
695 extmsg =
696 siop_cmd->cmd_tables->msg_out[3];
697 } else {
698 msg = siop_cmd->cmd_tables->msg_out[0];
699 extmsg =
700 siop_cmd->cmd_tables->msg_out[2];
701 }
702 if (msg == MSG_MESSAGE_REJECT) {
703 /* MSG_REJECT for a MSG_REJECT! */
704 if (xs)
705 scsipi_printaddr(xs->xs_periph);
706 else
707 printf("%s: ", device_xname(
708 sc->sc_c.sc_dev));
709 printf("our reject message was "
710 "rejected\n");
711 goto reset;
712 }
713 if (msg == MSG_EXTENDED &&
714 extmsg == MSG_EXT_WDTR) {
715 /* WDTR rejected, initiate sync */
716 if ((siop_target->target_c.flags &
717 TARF_SYNC) == 0) {
718 siop_target->target_c.status =
719 TARST_OK;
720 siop_update_xfer_mode(&sc->sc_c,
721 target);
722 /* no table to flush here */
723 CALL_SCRIPT(Ent_msgin_ack);
724 return 1;
725 }
726 siop_target->target_c.status =
727 TARST_SYNC_NEG;
728 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
729 sc->sc_c.st_minsync,
730 sc->sc_c.maxoff);
731 siop_table_sync(siop_cmd,
732 BUS_DMASYNC_PREREAD |
733 BUS_DMASYNC_PREWRITE);
734 CALL_SCRIPT(Ent_send_msgout);
735 return 1;
736 } else if (msg == MSG_EXTENDED &&
737 extmsg == MSG_EXT_SDTR) {
738 /* sync rejected */
739 siop_target->target_c.offset = 0;
740 siop_target->target_c.period = 0;
741 siop_target->target_c.status = TARST_OK;
742 siop_update_xfer_mode(&sc->sc_c,
743 target);
744 /* no table to flush here */
745 CALL_SCRIPT(Ent_msgin_ack);
746 return 1;
747 } else if (msg == MSG_SIMPLE_Q_TAG ||
748 msg == MSG_HEAD_OF_Q_TAG ||
749 msg == MSG_ORDERED_Q_TAG) {
750 if (siop_handle_qtag_reject(
751 siop_cmd) == -1)
752 goto reset;
753 CALL_SCRIPT(Ent_msgin_ack);
754 return 1;
755 }
756 if (xs)
757 scsipi_printaddr(xs->xs_periph);
758 else
759 printf("%s: ",
760 device_xname(sc->sc_c.sc_dev));
761 if (msg == MSG_EXTENDED) {
762 printf("scsi message reject, extended "
763 "message sent was 0x%x\n", extmsg);
764 } else {
765 printf("scsi message reject, message "
766 "sent was 0x%x\n", msg);
767 }
768 /* no table to flush here */
769 CALL_SCRIPT(Ent_msgin_ack);
770 return 1;
771 }
772 if (msgin == MSG_IGN_WIDE_RESIDUE) {
773 /* use the extmsgdata table to get the second byte */
774 siop_cmd->cmd_tables->t_extmsgdata.count =
775 siop_htoc32(&sc->sc_c, 1);
776 siop_table_sync(siop_cmd,
777 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
778 CALL_SCRIPT(Ent_get_extmsgdata);
779 return 1;
780 }
781 if (xs)
782 scsipi_printaddr(xs->xs_periph);
783 else
784 printf("%s: ", device_xname(sc->sc_c.sc_dev));
785 printf("unhandled message 0x%x\n",
786 siop_cmd->cmd_tables->msg_in[0]);
787 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
788 siop_cmd->cmd_tables->t_msgout.count =
789 siop_htoc32(&sc->sc_c, 1);
790 siop_table_sync(siop_cmd,
791 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
792 CALL_SCRIPT(Ent_send_msgout);
793 return 1;
794 }
795 case A_int_extmsgin:
796 #ifdef SIOP_DEBUG_INTR
797 printf("extended message: msg 0x%x len %d\n",
798 siop_cmd->cmd_tables->msg_in[2],
799 siop_cmd->cmd_tables->msg_in[1]);
800 #endif
801 if (siop_cmd->cmd_tables->msg_in[1] >
802 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
803 aprint_error_dev(sc->sc_c.sc_dev,
804 "extended message too big (%d)\n",
805 siop_cmd->cmd_tables->msg_in[1]);
806 siop_cmd->cmd_tables->t_extmsgdata.count =
807 siop_htoc32(&sc->sc_c,
808 siop_cmd->cmd_tables->msg_in[1] - 1);
809 siop_table_sync(siop_cmd,
810 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
811 CALL_SCRIPT(Ent_get_extmsgdata);
812 return 1;
813 case A_int_extmsgdata:
814 #ifdef SIOP_DEBUG_INTR
815 {
816 int i;
817 printf("extended message: 0x%x, data:",
818 siop_cmd->cmd_tables->msg_in[2]);
819 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
820 i++)
821 printf(" 0x%x",
822 siop_cmd->cmd_tables->msg_in[i]);
823 printf("\n");
824 }
825 #endif
826 if (siop_cmd->cmd_tables->msg_in[0] ==
827 MSG_IGN_WIDE_RESIDUE) {
828 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
829 if (siop_cmd->cmd_tables->msg_in[3] != 1)
830 printf("MSG_IGN_WIDE_RESIDUE: "
831 "bad len %d\n",
832 siop_cmd->cmd_tables->msg_in[3]);
833 switch (siop_iwr(&siop_cmd->cmd_c)) {
834 case SIOP_NEG_MSGOUT:
835 siop_table_sync(siop_cmd,
836 BUS_DMASYNC_PREREAD |
837 BUS_DMASYNC_PREWRITE);
838 CALL_SCRIPT(Ent_send_msgout);
839 return(1);
840 case SIOP_NEG_ACK:
841 CALL_SCRIPT(Ent_msgin_ack);
842 return(1);
843 default:
844 panic("invalid retval from "
845 "siop_iwr()");
846 }
847 return(1);
848 }
849 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
850 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
851 case SIOP_NEG_MSGOUT:
852 siop_update_scntl3(sc,
853 siop_cmd->cmd_c.siop_target);
854 siop_table_sync(siop_cmd,
855 BUS_DMASYNC_PREREAD |
856 BUS_DMASYNC_PREWRITE);
857 CALL_SCRIPT(Ent_send_msgout);
858 return(1);
859 case SIOP_NEG_ACK:
860 siop_update_scntl3(sc,
861 siop_cmd->cmd_c.siop_target);
862 CALL_SCRIPT(Ent_msgin_ack);
863 return(1);
864 default:
865 panic("invalid retval from "
866 "siop_wdtr_neg()");
867 }
868 return(1);
869 }
870 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
871 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
872 case SIOP_NEG_MSGOUT:
873 siop_update_scntl3(sc,
874 siop_cmd->cmd_c.siop_target);
875 siop_table_sync(siop_cmd,
876 BUS_DMASYNC_PREREAD |
877 BUS_DMASYNC_PREWRITE);
878 CALL_SCRIPT(Ent_send_msgout);
879 return(1);
880 case SIOP_NEG_ACK:
881 siop_update_scntl3(sc,
882 siop_cmd->cmd_c.siop_target);
883 CALL_SCRIPT(Ent_msgin_ack);
884 return(1);
885 default:
886 panic("invalid retval from "
887 "siop_wdtr_neg()");
888 }
889 return(1);
890 }
891 /* send a message reject */
892 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
893 siop_cmd->cmd_tables->t_msgout.count =
894 siop_htoc32(&sc->sc_c, 1);
895 siop_table_sync(siop_cmd,
896 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
897 CALL_SCRIPT(Ent_send_msgout);
898 return 1;
899 case A_int_disc:
900 INCSTAT(siop_stat_intr_sdp);
901 offset = bus_space_read_1(sc->sc_c.sc_rt,
902 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
903 #ifdef SIOP_DEBUG_DR
904 printf("disconnect offset %d\n", offset);
905 #endif
906 siop_sdp(&siop_cmd->cmd_c, offset);
907 /* we start again with no offset */
908 siop_cmd->saved_offset = SIOP_NOOFFSET;
909 siop_table_sync(siop_cmd,
910 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
911 CALL_SCRIPT(Ent_script_sched);
912 return 1;
913 case A_int_saveoffset:
914 INCSTAT(siop_stat_intr_saveoffset);
915 offset = bus_space_read_1(sc->sc_c.sc_rt,
916 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
917 #ifdef SIOP_DEBUG_DR
918 printf("saveoffset offset %d\n", offset);
919 #endif
920 siop_cmd->saved_offset = offset;
921 CALL_SCRIPT(Ent_script_sched);
922 return 1;
923 case A_int_resfail:
924 printf("reselect failed\n");
925 CALL_SCRIPT(Ent_script_sched);
926 return 1;
927 case A_int_done:
928 if (xs == NULL) {
929 printf("%s: done without command, DSA=0x%lx\n",
930 device_xname(sc->sc_c.sc_dev),
931 (u_long)siop_cmd->cmd_c.dsa);
932 siop_cmd->cmd_c.status = CMDST_FREE;
933 CALL_SCRIPT(Ent_script_sched);
934 return 1;
935 }
936 #ifdef SIOP_DEBUG_INTR
937 printf("done, DSA=0x%lx target id 0x%x last msg "
938 "in=0x%x status=0x%x\n",
939 (u_long)siop_cmd->cmd_c.dsa,
940 siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id),
941 siop_cmd->cmd_tables->msg_in[0],
942 siop_ctoh32(&sc->sc_c,
943 siop_cmd->cmd_tables->status));
944 #endif
945 INCSTAT(siop_stat_intr_done);
946 /* update resid. */
947 offset = bus_space_read_1(sc->sc_c.sc_rt,
948 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
949 /*
950 * if we got a disconnect between the last data phase
951 * and the status phase, offset will be 0. In this
952 * case, siop_cmd->saved_offset will have the proper
953 * value if it got updated by the controller
954 */
955 if (offset == 0 &&
956 siop_cmd->saved_offset != SIOP_NOOFFSET)
957 offset = siop_cmd->saved_offset;
958 siop_update_resid(&siop_cmd->cmd_c, offset);
959 siop_cmd->cmd_c.status = CMDST_DONE;
960 goto end;
961 default:
962 printf("unknown irqcode %x\n", irqcode);
963 if (xs) {
964 xs->error = XS_SELTIMEOUT;
965 goto end;
966 }
967 goto reset;
968 }
969 return 1;
970 }
971 /* We just shouldn't get here */
972 panic("siop_intr: I shouldn't be here!");
973
974 end:
975 /*
976 * restart the script now if the command completed properly.
977 * Otherwise wait for siop_scsicmd_end(), as we may need to clean up
978 * the queue.
979 */
980 xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status);
981 if (xs->status == SCSI_OK)
982 CALL_SCRIPT(Ent_script_sched);
983 else
984 restart = 1;
985 siop_lun->siop_tag[tag].active = NULL;
986 siop_scsicmd_end(siop_cmd);
987 if (freetarget && siop_target->target_c.status == TARST_PROBING)
988 siop_del_dev(sc, target, lun);
989 if (restart)
990 CALL_SCRIPT(Ent_script_sched);
991 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
992 /* a command terminated, so we have free slots now */
993 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
994 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
995 }
996
997 return 1;
998 }
999
1000 void
1001 siop_scsicmd_end(struct siop_cmd *siop_cmd)
1002 {
1003 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1004 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1005
1006 switch(xs->status) {
1007 case SCSI_OK:
1008 xs->error = XS_NOERROR;
1009 break;
1010 case SCSI_BUSY:
1011 xs->error = XS_BUSY;
1012 break;
1013 case SCSI_CHECK:
1014 xs->error = XS_BUSY;
1015 /* remove commands in the queue and scheduler */
1016 siop_unqueue(sc, xs->xs_periph->periph_target,
1017 xs->xs_periph->periph_lun);
1018 break;
1019 case SCSI_QUEUE_FULL:
1020 INCSTAT(siop_stat_intr_qfull);
1021 #ifdef SIOP_DEBUG
1022 printf("%s:%d:%d: queue full (tag %d)\n",
1023 device_xname(sc->sc_c.sc_dev),
1024 xs->xs_periph->periph_target,
1025 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1026 #endif
1027 xs->error = XS_BUSY;
1028 break;
1029 case SCSI_SIOP_NOCHECK:
1030 /*
1031 * don't check status, xs->error is already valid
1032 */
1033 break;
1034 case SCSI_SIOP_NOSTATUS:
1035 /*
1036 * the status byte was not updated, cmd was
1037 * aborted
1038 */
1039 xs->error = XS_SELTIMEOUT;
1040 break;
1041 default:
1042 scsipi_printaddr(xs->xs_periph);
1043 printf("invalid status code %d\n", xs->status);
1044 xs->error = XS_DRIVER_STUFFUP;
1045 }
1046 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1047 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
1048 0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1049 (xs->xs_control & XS_CTL_DATA_IN) ?
1050 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1051 bus_dmamap_unload(sc->sc_c.sc_dmat,
1052 siop_cmd->cmd_c.dmamap_data);
1053 }
1054 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1055 if ((xs->xs_control & XS_CTL_POLL) == 0)
1056 callout_stop(&xs->xs_callout);
1057 siop_cmd->cmd_c.status = CMDST_FREE;
1058 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1059 #if 0
1060 if (xs->resid != 0)
1061 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1062 #endif
1063 scsipi_done(xs);
1064 }
1065
1066 void
1067 siop_unqueue(struct siop_softc *sc, int target, int lun)
1068 {
1069 int slot, tag;
1070 struct siop_cmd *siop_cmd;
1071 struct siop_lun *siop_lun =
1072 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1073
1074 /* first make sure to read valid data */
1075 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1076
1077 for (tag = 1; tag < SIOP_NTAG; tag++) {
1078 /* look for commands in the scheduler, not yet started */
1079 if (siop_lun->siop_tag[tag].active == NULL)
1080 continue;
1081 siop_cmd = siop_lun->siop_tag[tag].active;
1082 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1083 if (siop_script_read(sc,
1084 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1085 siop_cmd->cmd_c.dsa +
1086 sizeof(struct siop_common_xfer) +
1087 Ent_ldsa_select)
1088 break;
1089 }
1090 if (slot > sc->sc_currschedslot)
1091 continue; /* didn't find it */
1092 if (siop_script_read(sc,
1093 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1094 continue; /* already started */
1095 /* clear the slot */
1096 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1097 0x80000000);
1098 /* ask to requeue */
1099 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1100 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1101 siop_lun->siop_tag[tag].active = NULL;
1102 siop_scsicmd_end(siop_cmd);
1103 }
1104 /* update sc_currschedslot */
1105 sc->sc_currschedslot = 0;
1106 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1107 if (siop_script_read(sc,
1108 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1109 sc->sc_currschedslot = slot;
1110 }
1111 }
1112
1113 /*
1114 * handle a rejected queue tag message: the command will run untagged,
1115 * so we have to adjust the reselect script.
1116 */
1117 int
1118 siop_handle_qtag_reject(struct siop_cmd *siop_cmd)
1119 {
1120 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1121 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1122 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1123 int tag = siop_cmd->cmd_tables->msg_out[2];
1124 struct siop_lun *siop_lun =
1125 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1126
1127 #ifdef SIOP_DEBUG
1128 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1129 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1130 siop_cmd->cmd_c.tag,
1131 siop_cmd->cmd_c.status);
1132 #endif
1133
1134 if (siop_lun->siop_tag[0].active != NULL) {
1135 printf("%s: untagged command already running for target %d "
1136 "lun %d (status %d)\n", device_xname(sc->sc_c.sc_dev),
1137 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1138 return -1;
1139 }
1140 /* clear tag slot */
1141 siop_lun->siop_tag[tag].active = NULL;
1142 /* add command to non-tagged slot */
1143 siop_lun->siop_tag[0].active = siop_cmd;
1144 siop_cmd->cmd_c.tag = 0;
1145 /* adjust reselect script if there is one */
1146 if (siop_lun->siop_tag[0].reseloff > 0) {
1147 siop_script_write(sc,
1148 siop_lun->siop_tag[0].reseloff + 1,
1149 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1150 Ent_ldsa_reload_dsa);
1151 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1152 }
1153 return 0;
1154 }
1155
1156 /*
1157 * handle a bus reset: reset the chip, unqueue all active commands, free
1158 * all target structs and report the lossage to the upper layer.
1159 * As the upper layer may requeue immediately, we have to first store
1160 * all active commands in a temporary queue.
1161 */
1162 void
1163 siop_handle_reset(struct siop_softc *sc)
1164 {
1165 struct siop_cmd *siop_cmd;
1166 struct siop_lun *siop_lun;
1167 int target, lun, tag;
1168
1169 /*
1170 * scsi bus reset: reset the chip and restart
1171 * the queue. We need to clean up all active commands.
1172 */
1173 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1174 /* stop, reset and restart the chip */
1175 siop_reset(sc);
1176 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1177 /* chip has been reset, all slots are free now */
1178 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1179 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1180 }
1181 /*
1182 * Process all commands: first commands being executed
1183 */
1184 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1185 target++) {
1186 if (sc->sc_c.targets[target] == NULL)
1187 continue;
1188 for (lun = 0; lun < 8; lun++) {
1189 struct siop_target *siop_target =
1190 (struct siop_target *)sc->sc_c.targets[target];
1191 siop_lun = siop_target->siop_lun[lun];
1192 if (siop_lun == NULL)
1193 continue;
1194 for (tag = 0; tag <
1195 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1196 SIOP_NTAG : 1);
1197 tag++) {
1198 siop_cmd = siop_lun->siop_tag[tag].active;
1199 if (siop_cmd == NULL)
1200 continue;
1201 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1202 printf("command with tag id %d reset\n", tag);
1203 siop_cmd->cmd_c.xs->error =
1204 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1205 XS_TIMEOUT : XS_RESET;
1206 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1207 siop_lun->siop_tag[tag].active = NULL;
1208 siop_cmd->cmd_c.status = CMDST_DONE;
1209 siop_scsicmd_end(siop_cmd);
1210 }
1211 }
1212 sc->sc_c.targets[target]->status = TARST_ASYNC;
1213 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1214 sc->sc_c.targets[target]->period =
1215 sc->sc_c.targets[target]->offset = 0;
1216 siop_update_xfer_mode(&sc->sc_c, target);
1217 }
1218
1219 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1220 }
1221
1222 void
1223 siop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1224 void *arg)
1225 {
1226 struct scsipi_xfer *xs;
1227 struct scsipi_periph *periph;
1228 struct siop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1229 struct siop_cmd *siop_cmd;
1230 struct siop_target *siop_target;
1231 int s, error, i;
1232 int target;
1233 int lun;
1234
1235 switch (req) {
1236 case ADAPTER_REQ_RUN_XFER:
1237 xs = arg;
1238 periph = xs->xs_periph;
1239 target = periph->periph_target;
1240 lun = periph->periph_lun;
1241
1242 s = splbio();
1243 #ifdef SIOP_DEBUG_SCHED
1244 printf("starting cmd for %d:%d\n", target, lun);
1245 #endif
1246 siop_cmd = TAILQ_FIRST(&sc->free_list);
1247 if (siop_cmd == NULL) {
1248 xs->error = XS_RESOURCE_SHORTAGE;
1249 scsipi_done(xs);
1250 splx(s);
1251 return;
1252 }
1253 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1254 #ifdef DIAGNOSTIC
1255 if (siop_cmd->cmd_c.status != CMDST_FREE)
1256 panic("siop_scsicmd: new cmd not free");
1257 #endif
1258 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1259 if (siop_target == NULL) {
1260 #ifdef SIOP_DEBUG
1261 printf("%s: alloc siop_target for target %d\n",
1262 device_xname(sc->sc_c.sc_dev), target);
1263 #endif
1264 sc->sc_c.targets[target] =
1265 malloc(sizeof(struct siop_target),
1266 M_DEVBUF, M_NOWAIT|M_ZERO);
1267 if (sc->sc_c.targets[target] == NULL) {
1268 aprint_error_dev(sc->sc_c.sc_dev,
1269 "can't malloc memory for "
1270 "target %d\n", target);
1271 xs->error = XS_RESOURCE_SHORTAGE;
1272 scsipi_done(xs);
1273 TAILQ_INSERT_TAIL(&sc->free_list,
1274 siop_cmd, next);
1275 splx(s);
1276 return;
1277 }
1278 siop_target =
1279 (struct siop_target *)sc->sc_c.targets[target];
1280 siop_target->target_c.status = TARST_PROBING;
1281 siop_target->target_c.flags = 0;
1282 siop_target->target_c.id =
1283 sc->sc_c.clock_div << 24; /* scntl3 */
1284 siop_target->target_c.id |= target << 16; /* id */
1285 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1286
1287 /* get a lun switch script */
1288 siop_target->lunsw = siop_get_lunsw(sc);
1289 if (siop_target->lunsw == NULL) {
1290 aprint_error_dev(sc->sc_c.sc_dev,
1291 "can't alloc lunsw for target %d\n",
1292 target);
1293 xs->error = XS_RESOURCE_SHORTAGE;
1294 scsipi_done(xs);
1295 TAILQ_INSERT_TAIL(&sc->free_list,
1296 siop_cmd, next);
1297 splx(s);
1298 return;
1299 }
1300 for (i=0; i < 8; i++)
1301 siop_target->siop_lun[i] = NULL;
1302 siop_add_reselsw(sc, target);
1303 }
1304 if (siop_target->siop_lun[lun] == NULL) {
1305 siop_target->siop_lun[lun] =
1306 malloc(sizeof(struct siop_lun), M_DEVBUF,
1307 M_NOWAIT|M_ZERO);
1308 if (siop_target->siop_lun[lun] == NULL) {
1309 aprint_error_dev(sc->sc_c.sc_dev,
1310 "can't alloc siop_lun for "
1311 "target %d lun %d\n",
1312 target, lun);
1313 xs->error = XS_RESOURCE_SHORTAGE;
1314 scsipi_done(xs);
1315 TAILQ_INSERT_TAIL(&sc->free_list,
1316 siop_cmd, next);
1317 splx(s);
1318 return;
1319 }
1320 }
1321 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1322 siop_cmd->cmd_c.xs = xs;
1323 siop_cmd->cmd_c.flags = 0;
1324 siop_cmd->cmd_c.status = CMDST_READY;
1325
1326 /* load the DMA maps */
1327 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1328 siop_cmd->cmd_c.dmamap_cmd,
1329 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1330 if (error) {
1331 aprint_error_dev(sc->sc_c.sc_dev,
1332 "unable to load cmd DMA map: %d\n",
1333 error);
1334 xs->error = (error == EAGAIN) ?
1335 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1336 scsipi_done(xs);
1337 siop_cmd->cmd_c.status = CMDST_FREE;
1338 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1339 splx(s);
1340 return;
1341 }
1342 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1343 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1344 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1345 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1346 ((xs->xs_control & XS_CTL_DATA_IN) ?
1347 BUS_DMA_READ : BUS_DMA_WRITE));
1348 if (error) {
1349 aprint_error_dev(sc->sc_c.sc_dev,
1350 "unable to load data DMA map: %d\n",
1351 error);
1352 xs->error = (error == EAGAIN) ?
1353 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1354 scsipi_done(xs);
1355 bus_dmamap_unload(sc->sc_c.sc_dmat,
1356 siop_cmd->cmd_c.dmamap_cmd);
1357 siop_cmd->cmd_c.status = CMDST_FREE;
1358 TAILQ_INSERT_TAIL(&sc->free_list,
1359 siop_cmd, next);
1360 splx(s);
1361 return;
1362 }
1363 bus_dmamap_sync(sc->sc_c.sc_dmat,
1364 siop_cmd->cmd_c.dmamap_data, 0,
1365 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1366 (xs->xs_control & XS_CTL_DATA_IN) ?
1367 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1368 }
1369 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1370 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1371 BUS_DMASYNC_PREWRITE);
1372
1373 if (xs->xs_tag_type) {
1374 /* use tag_id + 1; tag 0 is reserved for untagged cmds */
1375 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1376 } else {
1377 siop_cmd->cmd_c.tag = 0;
1378 }
1379 siop_setuptables(&siop_cmd->cmd_c);
1380 siop_cmd->saved_offset = SIOP_NOOFFSET;
1381 siop_table_sync(siop_cmd,
1382 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1383 siop_start(sc, siop_cmd);
1384 if (xs->xs_control & XS_CTL_POLL) {
1385 /* poll for command completion */
1386 while ((xs->xs_status & XS_STS_DONE) == 0) {
1387 delay(1000);
1388 siop_intr(sc);
1389 }
1390 }
1391 splx(s);
1392 return;
1393
1394 case ADAPTER_REQ_GROW_RESOURCES:
1395 #ifdef SIOP_DEBUG
1396 printf("%s grow resources (%d)\n",
1397 device_xname(sc->sc_c.sc_dev),
1398 sc->sc_c.sc_adapt.adapt_openings);
1399 #endif
1400 siop_morecbd(sc);
1401 return;
1402
1403 case ADAPTER_REQ_SET_XFER_MODE:
1404 {
1405 struct scsipi_xfer_mode *xm = arg;
1406 if (sc->sc_c.targets[xm->xm_target] == NULL)
1407 return;
1408 s = splbio();
1409 if (xm->xm_mode & PERIPH_CAP_TQING)
1410 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1411 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1412 (sc->sc_c.features & SF_BUS_WIDE))
1413 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1414 if (xm->xm_mode & PERIPH_CAP_SYNC)
1415 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1416 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1417 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1418 sc->sc_c.targets[xm->xm_target]->status =
1419 TARST_ASYNC;
1420
1421 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1422 if (scsipi_lookup_periph(chan,
1423 xm->xm_target, lun) != NULL) {
1424 /* allocate a lun sw entry for this device */
1425 siop_add_dev(sc, xm->xm_target, lun);
1426 }
1427 }
1428
1429 splx(s);
1430 }
1431 }
1432 }
1433
1434 static void
1435 siop_start(struct siop_softc *sc, struct siop_cmd *siop_cmd)
1436 {
1437 struct siop_lun *siop_lun;
1438 struct siop_xfer *siop_xfer;
1439 uint32_t dsa;
1440 int timeout;
1441 int target, lun, slot;
1442
1443 /*
1444 * first make sure to read valid data
1445 */
1446 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1447
1448 /*
1449 * The queue management here is a bit tricky: the script always looks
1450 * at the slots from first to last, so if we always used the first
1451 * free slot, commands could stay at the tail of the queue ~forever.
1452 * The algorithm used here is to restart from the head when we know
1453 * that the queue is empty, and only add commands after the last one.
1454 * When we're at the end of the queue, wait for the script to clear it.
1455 * The best thing to do here would be to implement a circular queue,
1456 * but using only 53c720 features this can be "interesting".
1457 * A mid-way solution could be to implement 2 queues and swap orders.
1458 */
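/*
 * Each scheduler slot is two script words: word 0 is a JUMP opcode used
 * as a flag (0x80000000 = free/consumed, 0x80080000 = armed so the script
 * takes the jump), and word 1 is the address of the command's ldsa_select
 * entry, patched in below.
 */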
1459 slot = sc->sc_currschedslot;
1460 /*
1461 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1462 * free. As this is the last used slot, all previous slots are free
1463 * as well, so we can restart from 0.
1464 */
1465 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1466 0x80000000) {
1467 slot = sc->sc_currschedslot = 0;
1468 } else {
1469 slot++;
1470 }
1471 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1472 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1473 siop_lun =
1474 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1475 /* if non-tagged command active, panic: this shouldn't happen */
1476 if (siop_lun->siop_tag[0].active != NULL) {
1477 panic("siop_start: tagged cmd while untagged running");
1478 }
1479 #ifdef DIAGNOSTIC
1480 /* sanity check the tag if needed */
1481 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1482 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1483 panic("siop_start: tag not free");
1484 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1485 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1486 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1487 panic("siop_start: invalid tag id");
1488 }
1489 }
1490 #endif
1491 /*
1492 * find a free scheduler slot and load it.
1493 */
1494 for (; slot < SIOP_NSLOTS; slot++) {
1495 /*
1496 * If the cmd is 0x80000000 the slot is free
1497 */
1498 if (siop_script_read(sc,
1499 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1500 0x80000000)
1501 break;
1502 }
1503 if (slot == SIOP_NSLOTS) {
1504 /*
1505 * no more free slots, no need to continue. Freeze the queue
1506 * and requeue this command.
1507 */
1508 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1509 sc->sc_flags |= SCF_CHAN_NOSLOT;
1510 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1511 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1512 siop_scsicmd_end(siop_cmd);
1513 return;
1514 }
1515 #ifdef SIOP_DEBUG_SCHED
1516 printf("using slot %d for DSA 0x%lx\n", slot,
1517 (u_long)siop_cmd->cmd_c.dsa);
1518 #endif
1519 /* mark command as active */
1520 if (siop_cmd->cmd_c.status == CMDST_READY)
1521 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1522 else
1523 panic("siop_start: bad status");
1524 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1525 /* patch scripts with DSA addr */
1526 dsa = siop_cmd->cmd_c.dsa;
1527 /* first reselect switch, if we have an entry */
1528 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1529 siop_script_write(sc,
1530 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1531 dsa + sizeof(struct siop_common_xfer) +
1532 Ent_ldsa_reload_dsa);
1533 /* CMD script: MOVE MEMORY addr */
1534 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1535 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1536 siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr +
1537 Ent_script_sched_slot0 + slot * 8);
1538 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1539 /* scheduler slot: JUMP ldsa_select */
1540 siop_script_write(sc,
1541 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1542 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1543 /* handle timeout */
1544 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1545 /* start expire timer */
1546 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1547 if (timeout == 0)
1548 timeout = 1;
1549 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1550 timeout, siop_timeout, siop_cmd);
1551 }
1552 /*
1553 * Change JUMP cmd so that this slot will be handled
1554 */
1555 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1556 0x80080000);
1557 sc->sc_currschedslot = slot;
1558
1559 /* make sure SCRIPT processor will read valid data */
1560 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1561 /* Signal script it has some work to do */
1562 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1563 SIOP_ISTAT, ISTAT_SIGP);
1564 /* and wait for IRQ */
1565 }
1566
1567 void
1568 siop_timeout(void *v)
1569 {
1570 struct siop_cmd *siop_cmd = v;
1571 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1572 int s;
1573
1574 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1575 printf("command timeout, CDB: ");
1576 scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1577 printf("\n");
1578
1579 s = splbio();
1580 /* reset the scsi bus */
1581 siop_resetbus(&sc->sc_c);
1582
1583 /* deactivate callout */
1584 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1586 /*
1587 * mark the command as having timed out and just return;
1588 * the bus reset will generate an interrupt,
1589 * which will be handled in siop_intr()
1590 */
1591 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1592 splx(s);
1593 }
1594
1595 void
1596 siop_dump_script(struct siop_softc *sc)
1597 {
1598 int i;
1599
1600 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1601 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1602 siop_script_read(sc, i),
1603 siop_script_read(sc, i + 1));
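/*
 * Memory-to-memory MOVE instructions (top three bits 110) are three
 * words long; print the extra word and skip it.
 */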
1604 if ((siop_script_read(sc, i) & 0xe0000000) == 0xc0000000) {
1605 i++;
1606 printf(" 0x%08x", siop_script_read(sc, i + 1));
1607 }
1608 printf("\n");
1609 }
1610 }
1611
1612 void
1613 siop_morecbd(struct siop_softc *sc)
1614 {
1615 int error, off, i, j, s;
1616 bus_dma_segment_t seg;
1617 int rseg;
1618 struct siop_cbd *newcbd;
1619 struct siop_xfer *xfer;
1620 bus_addr_t dsa;
1621 uint32_t *scr;
1622
1623 /* allocate a new list head */
1624 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1625 if (newcbd == NULL) {
1626 aprint_error_dev(sc->sc_c.sc_dev,
1627 "can't allocate memory for command descriptors head\n");
1628 return;
1629 }
1630
1631 /* allocate cmd list */
1632 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1633 M_DEVBUF, M_NOWAIT|M_ZERO);
1634 if (newcbd->cmds == NULL) {
1635 aprint_error_dev(sc->sc_c.sc_dev,
1636 "can't allocate memory for command descriptors\n");
1637 goto bad3;
1638 }
1639 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE,
1640 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1641 if (error) {
1642 aprint_error_dev(sc->sc_c.sc_dev,
1643 "unable to allocate cbd DMA memory, error = %d\n",
1644 error);
1645 goto bad2;
1646 }
1647 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1648 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1649 if (error) {
1650 aprint_error_dev(sc->sc_c.sc_dev,
1651 "unable to map cbd DMA memory, error = %d\n",
1652 error);
1653 goto bad2;
1654 }
1655 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1656 BUS_DMA_NOWAIT, &newcbd->xferdma);
1657 if (error) {
1658 aprint_error_dev(sc->sc_c.sc_dev,
1659 "unable to create cbd DMA map, error = %d\n",
1660 error);
1661 goto bad1;
1662 }
1663 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1664 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1665 if (error) {
1666 aprint_error_dev(sc->sc_c.sc_dev,
1667 "unable to load cbd DMA map, error = %d\n",
1668 error);
1669 goto bad0;
1670 }
1671 #ifdef SIOP_DEBUG
1672 printf("%s: alloc newcdb at PHY addr 0x%lx\n",
1673 device_xname(sc->sc_c.sc_dev),
1674 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1675 #endif
1676 off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
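/*
 * 'off' adjusts the t_status address below: on big-endian chips
 * (SF_CHIP_BE) the status byte is taken from the opposite end of its
 * 32-bit word, hence the 3-byte offset.
 */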
1677 for (i = 0; i < SIOP_NCMDPB; i++) {
1678 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1679 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1680 &newcbd->cmds[i].cmd_c.dmamap_data);
1681 if (error) {
1682 aprint_error_dev(sc->sc_c.sc_dev,
1683 "unable to create data DMA map for cbd: "
1684 "error %d\n", error);
1685 goto bad0;
1686 }
1687 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1688 sizeof(struct scsipi_generic), 1,
1689 sizeof(struct scsipi_generic), 0,
1690 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1691 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1692 if (error) {
1693 aprint_error_dev(sc->sc_c.sc_dev,
1694 "unable to create cmd DMA map for cbd %d\n", error);
1695 goto bad0;
1696 }
1697 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1698 newcbd->cmds[i].siop_cbdp = newcbd;
1699 xfer = &newcbd->xfers[i];
1700 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1701 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1702 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1703 i * sizeof(struct siop_xfer);
1704 newcbd->cmds[i].cmd_c.dsa = dsa;
1705 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1706 xfer->siop_tables.t_msgout.count= siop_htoc32(&sc->sc_c, 1);
1707 xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
1708 xfer->siop_tables.t_msgin.count= siop_htoc32(&sc->sc_c, 1);
1709 xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
1710 dsa + offsetof(struct siop_common_xfer, msg_in));
1711 xfer->siop_tables.t_extmsgin.count= siop_htoc32(&sc->sc_c, 2);
1712 xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
1713 dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
1714 xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
1715 dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
1716 xfer->siop_tables.t_status.count= siop_htoc32(&sc->sc_c, 1);
1717 xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
1718 dsa + offsetof(struct siop_common_xfer, status) + off);
1719 /* The select/reselect script */
1720 scr = &xfer->resel[0];
1721 for (j = 0; j < __arraycount(load_dsa); j++)
1722 scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
1723 /*
1724 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1725 * octet, reg offset is the third.
1726 */
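/*
 * For example, if dsa were 0x12345678, Ent_rdsa0 would be patched to
 * 0x78107800: opcode 0x78 (move data8 to reg), register 0x10 (DSA0),
 * data8 0x78.
 */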
1727 scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
1728 0x78100000 | ((dsa & 0x000000ff) << 8));
1729 scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
1730 0x78110000 | ( dsa & 0x0000ff00 ));
1731 scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
1732 0x78120000 | ((dsa & 0x00ff0000) >> 8));
1733 scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
1734 0x78130000 | ((dsa & 0xff000000) >> 16));
1735 scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
1736 sc->sc_c.sc_scriptaddr + Ent_reselected);
1737 scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
1738 sc->sc_c.sc_scriptaddr + Ent_reselect);
1739 scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
1740 sc->sc_c.sc_scriptaddr + Ent_selected);
1741 scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
1742 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
1743 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1744 scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
1745 s = splbio();
1746 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1747 splx(s);
1748 #ifdef SIOP_DEBUG
1749 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1750 siop_ctoh32(&sc->sc_c,
1751 newcbd->cmds[i].cmd_tables->t_msgin.addr),
1752 siop_ctoh32(&sc->sc_c,
1753 newcbd->cmds[i].cmd_tables->t_msgout.addr),
1754 siop_ctoh32(&sc->sc_c,
1755 newcbd->cmds[i].cmd_tables->t_status.addr));
1756 #endif
1757 }
1758 s = splbio();
1759 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1760 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1761 splx(s);
1762 return;
1763 bad0:
1764 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1765 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1766 bad1:
1767 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1768 bad2:
1769 free(newcbd->cmds, M_DEVBUF);
1770 bad3:
1771 free(newcbd, M_DEVBUF);
1772 }
1773
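/*
 * Per-target lun switch fragments are carved out of the free script area
 * between script_free_lo and script_free_hi (initialized in siop_reset()):
 * lun switches grow upward from script_free_lo, and freed switches are
 * kept on lunsw_list for reuse.
 */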
1774 struct siop_lunsw *
1775 siop_get_lunsw(struct siop_softc *sc)
1776 {
1777 struct siop_lunsw *lunsw;
1778 int i;
1779
1780 if (sc->script_free_lo + __arraycount(lun_switch) >= sc->script_free_hi)
1781 return NULL;
	lunsw = TAILQ_FIRST(&sc->lunsw_list);
	if (lunsw != NULL) {
#ifdef SIOP_DEBUG
		printf("siop_get_lunsw got lunsw at offset %d\n",
		    lunsw->lunsw_off);
#endif
		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
		return lunsw;
	}
	lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lunsw == NULL)
		return NULL;
#ifdef SIOP_DEBUG
	printf("allocating lunsw at offset %d\n", sc->script_free_lo);
#endif
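	/*
	 * Copy the lun_switch template into script space: directly into
	 * on-chip RAM when the chip has some (SF_CHIP_RAM), otherwise
	 * into the host-resident script in the byte order the chip
	 * expects.  The abs_lunsw_return relocation is then patched with
	 * the absolute address of the lunsw_return entry.
	 */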
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    sc->script_free_lo * 4, lun_switch,
		    __arraycount(lun_switch));
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
		    sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	} else {
		for (i = 0; i < __arraycount(lun_switch); i++)
			sc->sc_c.sc_script[sc->script_free_lo + i] =
			    siop_htoc32(&sc->sc_c, lun_switch[i]);
		sc->sc_c.sc_script[
		    sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
		    siop_htoc32(&sc->sc_c,
			sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	}
	lunsw->lunsw_off = sc->script_free_lo;
	lunsw->lunsw_size = __arraycount(lun_switch);
	sc->script_free_lo += lunsw->lunsw_size;
	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return lunsw;
}

void
siop_add_reselsw(struct siop_softc *sc, int target)
{
	int i, j;
	struct siop_target *siop_target;
	struct siop_lun *siop_lun;

	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	/*
	 * add an entry to resel switch
	 */
	siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
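	/*
	 * The resel switch is a table of two-word entries starting at
	 * resel_targ0; a low byte of 0xff marks a free entry.  Each used
	 * entry compares the reselecting target's ID (with what is
	 * presumably the SSID valid bit, 0x80) and jumps to that
	 * target's lun switch.
	 */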
	for (i = 0; i < 15; i++) {
		siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
		if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
		    == 0xff) { /* it's free */
#ifdef SIOP_DEBUG
			printf("siop: target %d slot %d offset %d\n",
			    target, i, siop_target->reseloff);
#endif
			/* JUMP abs_foo, IF target | 0x80; */
			siop_script_write(sc, siop_target->reseloff,
			    0x800c0080 | target);
			siop_script_write(sc, siop_target->reseloff + 1,
			    sc->sc_c.sc_scriptaddr +
			    siop_target->lunsw->lunsw_off * 4 +
			    Ent_lun_switch_entry);
			break;
		}
	}
	if (i == 15) /* no free slot, shouldn't happen */
		panic("siop: resel switch full");

	sc->sc_ntargets++;
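	/*
	 * Re-register any LUNs that already had a reselect entry, so
	 * their entries are rebuilt against the (possibly new) lun switch.
	 */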
	for (i = 0; i < 8; i++) {
		siop_lun = siop_target->siop_lun[i];
		if (siop_lun == NULL)
			continue;
		if (siop_lun->reseloff > 0) {
			siop_lun->reseloff = 0;
			for (j = 0; j < SIOP_NTAG; j++)
				siop_lun->siop_tag[j].reseloff = 0;
			siop_add_dev(sc, target, i);
		}
	}
	siop_update_scntl3(sc, sc->sc_c.targets[target]);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_update_scntl3(struct siop_softc *sc,
    struct siop_common_target *_siop_target)
{
	struct siop_target *siop_target = (struct siop_target *)_siop_target;

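	/*
	 * target_c.id caches the per-target register values (SCNTL3 is
	 * assumed to sit in its top byte and SXFER in bits 15-8, matching
	 * the chip's select table layout); patch the target's lun switch
	 * so reselections restore the negotiated transfer parameters.
	 */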
	/* MOVE target->id >> 24 TO SCNTL3 */
	siop_script_write(sc,
	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
	    0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
	/* MOVE target->id >> 8 TO SXFER */
	siop_script_write(sc,
	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
	    0x78050000 | (siop_target->target_c.id & 0x0000ff00));
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_add_dev(struct siop_softc *sc, int target, int lun)
{
	struct siop_lunsw *lunsw;
	struct siop_target *siop_target =
	    (struct siop_target *)sc->sc_c.targets[target];
	struct siop_lun *siop_lun = siop_target->siop_lun[lun];
	int i, ntargets;

	if (siop_lun->reseloff > 0)
		return;
	lunsw = siop_target->lunsw;
	if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
		/*
		 * can't extend this slot. Probably not worth trying to deal
		 * with this case
		 */
#ifdef SIOP_DEBUG
		aprint_error_dev(sc->sc_c.sc_dev,
		    "%d:%d: can't allocate a lun sw slot\n", target, lun);
#endif
		return;
	}
	/* count how many targets we have left to probe */
	ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;

	/*
	 * we need 8 bytes for the additional lun switch entry, and
	 * possibly sizeof(tag_switch) for the tag switch entry.
	 * Keep enough free space for the targets that could still be
	 * probed later.
	 */
	if (sc->script_free_lo + 2 +
	    (ntargets * __arraycount(lun_switch)) >=
	    ((siop_target->target_c.flags & TARF_TAG) ?
	    sc->script_free_hi - __arraycount(tag_switch) :
	    sc->script_free_hi)) {
		/*
		 * not enough space, probably not worth dealing with it.
		 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
		 */
#ifdef SIOP_DEBUG
		aprint_error_dev(sc->sc_c.sc_dev,
		    "%d:%d: not enough memory for a lun sw slot\n",
		    target, lun);
#endif
		return;
	}
#ifdef SIOP_DEBUG
	printf("%s:%d:%d: allocate lun sw entry\n",
	    device_xname(sc->sc_c.sc_dev), target, lun);
#endif
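	/*
	 * The lun switch is terminated by an 'INT int_resellun' catch-all,
	 * which at this point sits at script_free_lo - 2.  Append a fresh
	 * catch-all first, then overwrite the old one with the new per-lun
	 * entry; its jump address is left at 0 here and is expected to be
	 * patched later (with the tag switch address below, or with a
	 * command's reselect script when one is queued).
	 */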
	/* INT int_resellun */
	siop_script_write(sc, sc->script_free_lo, 0x98080000);
	siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
	/* Now the slot entry: JUMP abs_foo, IF lun */
	siop_script_write(sc, sc->script_free_lo - 2,
	    0x800c0000 | lun);
	siop_script_write(sc, sc->script_free_lo - 1, 0);
	siop_lun->reseloff = sc->script_free_lo - 2;
	lunsw->lunsw_size += 2;
	sc->script_free_lo += 2;
	if (siop_target->target_c.flags & TARF_TAG) {
		/* we need a tag switch */
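		/*
		 * The tag switch is carved out of the high end of the
		 * script space (script_free_hi grows downward) and copied
		 * there, into on-chip RAM when available.  Each tag then
		 * gets its own two-word reselect entry at resel_tag0.
		 */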
		sc->script_free_hi -= __arraycount(tag_switch);
		if (sc->sc_c.features & SF_CHIP_RAM) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    sc->script_free_hi * 4, tag_switch,
			    __arraycount(tag_switch));
		} else {
			for (i = 0; i < __arraycount(tag_switch); i++) {
				sc->sc_c.sc_script[sc->script_free_hi + i] =
				    siop_htoc32(&sc->sc_c, tag_switch[i]);
			}
		}
		siop_script_write(sc,
		    siop_lun->reseloff + 1,
		    sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
		    Ent_tag_switch_entry);

		for (i = 0; i < SIOP_NTAG; i++) {
			siop_lun->siop_tag[i].reseloff =
			    sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
		}
	} else {
		/* non-tag case; just work with the lun switch */
		siop_lun->siop_tag[0].reseloff =
		    siop_target->siop_lun[lun]->reseloff;
	}
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_del_dev(struct siop_softc *sc, int target, int lun)
{
	int i;
	struct siop_target *siop_target;

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: free lun sw entry\n",
	    device_xname(sc->sc_c.sc_dev), target, lun);
#endif
	if (sc->sc_c.targets[target] == NULL)
		return;
	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	free(siop_target->siop_lun[lun], M_DEVBUF);
	siop_target->siop_lun[lun] = NULL;
	/* XXX compact sw entry too ? */
	/* check if we can free the whole target */
	for (i = 0; i < 8; i++) {
		if (siop_target->siop_lun[i] != NULL)
			return;
	}
#ifdef SIOP_DEBUG
	printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
	    device_xname(sc->sc_c.sc_dev), target, lun,
	    siop_target->lunsw->lunsw_off);
#endif
	/*
	 * No LUN left: free the target struct and its resel
	 * switch entry.
	 */
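	/*
	 * Writing 0x800c00ff ('JUMP, IF 0xff') restores the pattern that
	 * siop_add_reselsw recognizes as a free resel switch slot, and
	 * the lun switch is put back on lunsw_list for reuse.
	 */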
	siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
	TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
	free(sc->sc_c.targets[target], M_DEVBUF);
	sc->sc_c.targets[target] = NULL;
	sc->sc_ntargets--;
}

#ifdef SIOP_STATS
void
siop_printstats(void)
{

	printf("siop_stat_intr %d\n", siop_stat_intr);
	printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
	printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
	printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
	printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
	printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
	printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
	printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
}
#endif