1 /* $NetBSD: siop.c,v 1.97 2010/09/09 14:50:25 jakllsch Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.97 2010/09/09 14:50:25 jakllsch Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/kernel.h>
39
40 #include <uvm/uvm_extern.h>
41
42 #include <machine/endian.h>
43 #include <sys/bus.h>
44
45 #include <dev/microcode/siop/siop.out>
46
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsi_message.h>
49 #include <dev/scsipi/scsipi_all.h>
50
51 #include <dev/scsipi/scsiconf.h>
52
53 #include <dev/ic/siopreg.h>
54 #include <dev/ic/siopvar_common.h>
55 #include <dev/ic/siopvar.h>
56
57 #include "opt_siop.h"
58
59 /*
60 #define SIOP_DEBUG
61 #define SIOP_DEBUG_DR
62 #define SIOP_DEBUG_INTR
63 #define SIOP_DEBUG_SCHED
64 #define SIOP_DUMP_SCRIPT
65 */
66
67 #define SIOP_STATS
68
69 #ifndef SIOP_DEFAULT_TARGET
70 #define SIOP_DEFAULT_TARGET 7
71 #endif
72
73 /* number of cmd descriptors per block */
74 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
75
76 /* Number of scheduler slots (needs to match the script) */
77 #define SIOP_NSLOTS 40
78
79 void siop_reset(struct siop_softc *);
80 void siop_handle_reset(struct siop_softc *);
81 int siop_handle_qtag_reject(struct siop_cmd *);
82 void siop_scsicmd_end(struct siop_cmd *);
83 void siop_unqueue(struct siop_softc *, int, int);
84 static void siop_start(struct siop_softc *, struct siop_cmd *);
85 void siop_timeout(void *);
86 int siop_scsicmd(struct scsipi_xfer *);
87 void siop_scsipi_request(struct scsipi_channel *,
88 scsipi_adapter_req_t, void *);
89 void siop_dump_script(struct siop_softc *);
90 void siop_morecbd(struct siop_softc *);
91 struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
92 void siop_add_reselsw(struct siop_softc *, int);
93 void siop_update_scntl3(struct siop_softc *,
94 struct siop_common_target *);
95
96 #ifdef SIOP_STATS
97 static int siop_stat_intr = 0;
98 static int siop_stat_intr_shortxfer = 0;
99 static int siop_stat_intr_sdp = 0;
100 static int siop_stat_intr_saveoffset = 0;
101 static int siop_stat_intr_done = 0;
102 static int siop_stat_intr_xferdisc = 0;
103 static int siop_stat_intr_lunresel = 0;
104 static int siop_stat_intr_qfull = 0;
105 void siop_printstats(void);
106 #define INCSTAT(x) x++
107 #else
108 #define INCSTAT(x)
109 #endif
110
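/*
 * Helpers to access the SCRIPTS program: with SF_CHIP_RAM the script
 * lives in on-chip RAM and is reached through bus_space, otherwise it
 * sits in a host-memory DMA buffer and needs endian conversion and
 * DMA syncs.
 */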
111 static inline void siop_script_sync(struct siop_softc *, int);
112 static inline void
113 siop_script_sync(struct siop_softc *sc, int ops)
114 {
115
116 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
117 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
118 PAGE_SIZE, ops);
119 }
120
121 static inline uint32_t siop_script_read(struct siop_softc *, u_int);
122 static inline uint32_t
123 siop_script_read(struct siop_softc *sc, u_int offset)
124 {
125
126 if (sc->sc_c.features & SF_CHIP_RAM) {
127 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
128 offset * 4);
129 } else {
130 return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]);
131 }
132 }
133
134 static inline void siop_script_write(struct siop_softc *, u_int,
135 uint32_t);
136 static inline void
137 siop_script_write(struct siop_softc *sc, u_int offset, uint32_t val)
138 {
139
140 if (sc->sc_c.features & SF_CHIP_RAM) {
141 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
142 offset * 4, val);
143 } else {
144 sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val);
145 }
146 }
147
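/*
 * Attach: run the common attach code, initialize the free command,
 * command block and lun switch lists, hook up the scsipi request
 * entry point, reset the bus and chip, then attach the channel.
 */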
148 void
149 siop_attach(struct siop_softc *sc)
150 {
151
152 if (siop_common_attach(&sc->sc_c) != 0)
153 return;
154
155 TAILQ_INIT(&sc->free_list);
156 TAILQ_INIT(&sc->cmds);
157 TAILQ_INIT(&sc->lunsw_list);
158 sc->sc_currschedslot = 0;
159 #ifdef SIOP_DEBUG
160 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
161 device_xname(sc->sc_c.sc_dev), (int)sizeof(siop_script),
162 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
163 #endif
164
165 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
166 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
167
168 /* Do a bus reset, so that devices fall back to narrow/async */
169 siop_resetbus(&sc->sc_c);
170 /*
171 * siop_reset() will reset the chip, thus clearing pending interrupts
172 */
173 siop_reset(sc);
174 #ifdef SIOP_DUMP_SCRIPT
175 siop_dump_script(sc);
176 #endif
177
178 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
179 }
180
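/*
 * Reset the chip, copy and patch the script (into on-chip RAM or the
 * host-memory buffer), free and rebuild the per-target lun switches,
 * and restart the script at the reselect entry.
 */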
181 void
182 siop_reset(struct siop_softc *sc)
183 {
184 int i, j;
185 struct siop_lunsw *lunsw;
186
187 siop_common_reset(&sc->sc_c);
188
189 /* copy and patch the script */
190 if (sc->sc_c.features & SF_CHIP_RAM) {
191 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
192 siop_script, __arraycount(siop_script));
193 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
194 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
195 E_abs_msgin_Used[j] * 4,
196 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
197 }
198 if (sc->sc_c.features & SF_CHIP_LED0) {
199 bus_space_write_region_4(sc->sc_c.sc_ramt,
200 sc->sc_c.sc_ramh,
201 Ent_led_on1, siop_led_on,
202 __arraycount(siop_led_on));
203 bus_space_write_region_4(sc->sc_c.sc_ramt,
204 sc->sc_c.sc_ramh,
205 Ent_led_on2, siop_led_on,
206 __arraycount(siop_led_on));
207 bus_space_write_region_4(sc->sc_c.sc_ramt,
208 sc->sc_c.sc_ramh,
209 Ent_led_off, siop_led_off,
210 __arraycount(siop_led_off));
211 }
212 } else {
213 for (j = 0; j < __arraycount(siop_script); j++) {
214 sc->sc_c.sc_script[j] =
215 siop_htoc32(&sc->sc_c, siop_script[j]);
216 }
217 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
218 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
219 siop_htoc32(&sc->sc_c,
220 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
221 }
222 if (sc->sc_c.features & SF_CHIP_LED0) {
223 for (j = 0; j < __arraycount(siop_led_on); j++)
224 sc->sc_c.sc_script[
225 Ent_led_on1 / sizeof(siop_led_on[0]) + j
226 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
227 for (j = 0; j < __arraycount(siop_led_on); j++)
228 sc->sc_c.sc_script[
229 Ent_led_on2 / sizeof(siop_led_on[0]) + j
230 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
231 for (j = 0; j < __arraycount(siop_led_off); j++)
232 sc->sc_c.sc_script[
233 Ent_led_off / sizeof(siop_led_off[0]) + j
234 ] = siop_htoc32(&sc->sc_c, siop_led_off[j]);
235 }
236 }
237 sc->script_free_lo = __arraycount(siop_script);
238 sc->script_free_hi = sc->sc_c.ram_size / 4;
239 sc->sc_ntargets = 0;
240
241 /* free used and unused lun switches */
242 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
243 #ifdef SIOP_DEBUG
244 printf("%s: free lunsw at offset %d\n",
245 device_xname(sc->sc_c.sc_dev), lunsw->lunsw_off);
246 #endif
247 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
248 free(lunsw, M_DEVBUF);
249 }
250 TAILQ_INIT(&sc->lunsw_list);
251 /* restore reselect switch */
252 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
253 struct siop_target *target;
254 if (sc->sc_c.targets[i] == NULL)
255 continue;
256 #ifdef SIOP_DEBUG
257 printf("%s: restore sw for target %d\n",
258 device_xname(sc->sc_c.sc_dev), i);
259 #endif
260 target = (struct siop_target *)sc->sc_c.targets[i];
261 free(target->lunsw, M_DEVBUF);
262 target->lunsw = siop_get_lunsw(sc);
263 if (target->lunsw == NULL) {
264 aprint_error_dev(sc->sc_c.sc_dev,
265 "can't alloc lunsw for target %d\n", i);
266 break;
267 }
268 siop_add_reselsw(sc, i);
269 }
270
271 /* start script */
272 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
273 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
274 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
275 }
276 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
277 sc->sc_c.sc_scriptaddr + Ent_reselect);
278 }
279
280 #if 0
281 #define CALL_SCRIPT(ent) do { \
282 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
283 siop_cmd->cmd_c.dsa, \
284 sc->sc_c.sc_scriptaddr + ent); \
285 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
286 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
287 } while (/* CONSTCOND */0)
288 #else
289 #define CALL_SCRIPT(ent) do { \
290 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
291 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
292 } while (/* CONSTCOND */0)
293 #endif
294
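/*
 * Interrupt handler: read ISTAT, locate the current siop_cmd from the
 * DSA register, handle DMA (DSTAT) and SCSI (SIST) conditions, then
 * dispatch on the script interrupt code read from DSPS.
 */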
295 int
296 siop_intr(void *v)
297 {
298 struct siop_softc *sc = v;
299 struct siop_target *siop_target;
300 struct siop_cmd *siop_cmd;
301 struct siop_lun *siop_lun;
302 struct scsipi_xfer *xs;
303 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
304 uint32_t irqcode;
305 int need_reset = 0;
306 int offset, target, lun, tag;
307 bus_addr_t dsa;
308 struct siop_cbd *cbdp;
309 int freetarget = 0;
310 int restart = 0;
311
312 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
313 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
314 return 0;
315 INCSTAT(siop_stat_intr);
316 if (istat & ISTAT_INTF) {
317 printf("INTRF\n");
318 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
319 SIOP_ISTAT, ISTAT_INTF);
320 }
321 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
322 (ISTAT_DIP | ISTAT_ABRT)) {
323 /* clear abort */
324 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
325 SIOP_ISTAT, 0);
326 }
327 /* use DSA to find the current siop_cmd */
328 siop_cmd = NULL;
329 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
330 TAILQ_FOREACH(cbdp, &sc->cmds, next) {
331 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
332 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
333 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
334 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
335 siop_table_sync(siop_cmd,
336 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
337 break;
338 }
339 }
340 if (siop_cmd) {
341 xs = siop_cmd->cmd_c.xs;
342 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
343 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
344 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
345 tag = siop_cmd->cmd_c.tag;
346 siop_lun = siop_target->siop_lun[lun];
347 #ifdef DIAGNOSTIC
348 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
349 printf("siop_cmd (lun %d) for DSA 0x%x "
350 "not active (%d)\n", lun, (u_int)dsa,
351 siop_cmd->cmd_c.status);
352 xs = NULL;
353 siop_target = NULL;
354 target = -1;
355 lun = -1;
356 tag = -1;
357 siop_lun = NULL;
358 siop_cmd = NULL;
359 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
360 printf("siop_cmd (lun %d tag %d) not in siop_lun "
361 "active (%p != %p)\n", lun, tag, siop_cmd,
362 siop_lun->siop_tag[tag].active);
363 }
364 #endif
365 } else {
366 xs = NULL;
367 siop_target = NULL;
368 target = -1;
369 lun = -1;
370 tag = -1;
371 siop_lun = NULL;
372 }
373 if (istat & ISTAT_DIP) {
374 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
375 SIOP_DSTAT);
376 if (dstat & DSTAT_ABRT) {
377 /* was probably generated by a bus reset IOCTL */
378 if ((dstat & DSTAT_DFE) == 0)
379 siop_clearfifo(&sc->sc_c);
380 goto reset;
381 }
382 if (dstat & DSTAT_SSI) {
383 printf("single step dsp 0x%08x dsa 0x08%x\n",
384 (int)(bus_space_read_4(sc->sc_c.sc_rt,
385 sc->sc_c.sc_rh, SIOP_DSP) -
386 sc->sc_c.sc_scriptaddr),
387 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
388 SIOP_DSA));
389 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
390 (istat & ISTAT_SIP) == 0) {
391 bus_space_write_1(sc->sc_c.sc_rt,
392 sc->sc_c.sc_rh, SIOP_DCNTL,
393 bus_space_read_1(sc->sc_c.sc_rt,
394 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
395 }
396 return 1;
397 }
398
399 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
400 printf("DMA IRQ:");
401 if (dstat & DSTAT_IID)
402 printf(" Illegal instruction");
403 if (dstat & DSTAT_BF)
404 printf(" bus fault");
405 if (dstat & DSTAT_MDPE)
406 printf(" parity");
407 if (dstat & DSTAT_DFE)
408 printf(" DMA fifo empty");
409 else
410 siop_clearfifo(&sc->sc_c);
411 printf(", DSP=0x%x DSA=0x%x: ",
412 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
413 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
414 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
415 if (siop_cmd)
416 printf("last msg_in=0x%x status=0x%x\n",
417 siop_cmd->cmd_tables->msg_in[0],
418 siop_ctoh32(&sc->sc_c,
419 siop_cmd->cmd_tables->status));
420 else
421 aprint_error_dev(sc->sc_c.sc_dev,
422 "current DSA invalid\n");
423 need_reset = 1;
424 }
425 }
426 if (istat & ISTAT_SIP) {
427 if (istat & ISTAT_DIP)
428 delay(10);
429 /*
430 * sist0 & sist1 can't be read independently; we have to
431 * insert a delay between the two reads
432 */
433 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
434 SIOP_SIST0);
435 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
436 SIOP_SSTAT1);
437 #ifdef SIOP_DEBUG_INTR
438 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
439 "DSA=0x%x DSP=0x%lx\n", sist,
440 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
441 SIOP_SSTAT1),
442 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
443 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 SIOP_DSP) -
445 sc->sc_c.sc_scriptaddr));
446 #endif
447 if (sist & SIST0_RST) {
448 siop_handle_reset(sc);
449 /* no table to flush here */
450 return 1;
451 }
452 if (sist & SIST0_SGE) {
453 if (siop_cmd)
454 scsipi_printaddr(xs->xs_periph);
455 else
456 printf("%s:", device_xname(sc->sc_c.sc_dev));
457 printf("scsi gross error\n");
458 goto reset;
459 }
460 if ((sist & SIST0_MA) && need_reset == 0) {
461 if (siop_cmd) {
462 int scratcha0;
463 dstat = bus_space_read_1(sc->sc_c.sc_rt,
464 sc->sc_c.sc_rh, SIOP_DSTAT);
465 /*
466 * first restore DSA, in case we were in an S/G
467 * operation.
468 */
469 bus_space_write_4(sc->sc_c.sc_rt,
470 sc->sc_c.sc_rh,
471 SIOP_DSA, siop_cmd->cmd_c.dsa);
472 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
473 sc->sc_c.sc_rh, SIOP_SCRATCHA);
474 switch (sstat1 & SSTAT1_PHASE_MASK) {
475 case SSTAT1_PHASE_STATUS:
476 /*
477 * previous phase may be aborted for any reason
478 * (for example, the target has less data to
479 * transfer than requested). Compute resid and
480 * just go to status, the command should
481 * terminate.
482 */
483 INCSTAT(siop_stat_intr_shortxfer);
484 if (scratcha0 & A_flag_data)
485 siop_ma(&siop_cmd->cmd_c);
486 else if ((dstat & DSTAT_DFE) == 0)
487 siop_clearfifo(&sc->sc_c);
488 CALL_SCRIPT(Ent_status);
489 return 1;
490 case SSTAT1_PHASE_MSGIN:
491 /*
492 * target may be ready to disconnect.
493 * Compute resid, which will be used later
494 * if a save data pointer is needed.
495 */
496 INCSTAT(siop_stat_intr_xferdisc);
497 if (scratcha0 & A_flag_data)
498 siop_ma(&siop_cmd->cmd_c);
499 else if ((dstat & DSTAT_DFE) == 0)
500 siop_clearfifo(&sc->sc_c);
501 bus_space_write_1(sc->sc_c.sc_rt,
502 sc->sc_c.sc_rh, SIOP_SCRATCHA,
503 scratcha0 & ~A_flag_data);
504 CALL_SCRIPT(Ent_msgin);
505 return 1;
506 }
507 aprint_error_dev(sc->sc_c.sc_dev,
508 "unexpected phase mismatch %d\n",
509 sstat1 & SSTAT1_PHASE_MASK);
510 } else {
511 aprint_error_dev(sc->sc_c.sc_dev,
512 "phase mismatch without command\n");
513 }
514 need_reset = 1;
515 }
516 if (sist & SIST0_PAR) {
517 /* parity error, reset */
518 if (siop_cmd)
519 scsipi_printaddr(xs->xs_periph);
520 else
521 printf("%s:", device_xname(sc->sc_c.sc_dev));
522 printf("parity error\n");
523 goto reset;
524 }
525 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
526 /* selection time out, assume there's no device here */
527 if (siop_cmd) {
528 siop_cmd->cmd_c.status = CMDST_DONE;
529 xs->error = XS_SELTIMEOUT;
530 freetarget = 1;
531 goto end;
532 } else {
533 aprint_error_dev(sc->sc_c.sc_dev,
534 "selection timeout without "
535 "command\n");
536 need_reset = 1;
537 }
538 }
539 if (sist & SIST0_UDC) {
540 /*
541 * unexpected disconnect. Usually the target signals
542 * a fatal condition this way. Attempt to get sense.
543 */
544 if (siop_cmd) {
545 siop_cmd->cmd_tables->status =
546 siop_htoc32(&sc->sc_c, SCSI_CHECK);
547 goto end;
548 }
549 aprint_error_dev(sc->sc_c.sc_dev,
550 "unexpected disconnect without "
551 "command\n");
552 goto reset;
553 }
554 if (sist & (SIST1_SBMC << 8)) {
555 /* SCSI bus mode change */
556 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
557 goto reset;
558 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
559 /*
560 * we have a script interrupt, it will
561 * restart the script.
562 */
563 goto scintr;
564 }
565 /*
566 * else we have to restart it ourselves, at the
567 * interrupted instruction.
568 */
569 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
570 SIOP_DSP,
571 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
572 SIOP_DSP) - 8);
573 return 1;
574 }
575 /* Else it's an unhandled exception (for now). */
576 aprint_error_dev(sc->sc_c.sc_dev,
577 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
578 "DSA=0x%x DSP=0x%x\n", sist,
579 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
580 SIOP_SSTAT1),
581 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
582 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
583 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
584 if (siop_cmd) {
585 siop_cmd->cmd_c.status = CMDST_DONE;
586 xs->error = XS_SELTIMEOUT;
587 goto end;
588 }
589 need_reset = 1;
590 }
591 if (need_reset) {
592 reset:
593 /* fatal error, reset the bus */
594 siop_resetbus(&sc->sc_c);
595 /* no table to flush here */
596 return 1;
597 }
598
599 scintr:
600 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
601 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
602 SIOP_DSPS);
603 #ifdef SIOP_DEBUG_INTR
604 printf("script interrupt 0x%x\n", irqcode);
605 #endif
606 /*
607 * having no command, or an inactive command, is only valid
608 * for a reselect interrupt
609 */
610 if ((irqcode & 0x80) == 0) {
611 if (siop_cmd == NULL) {
612 aprint_error_dev(sc->sc_c.sc_dev,
613 "script interrupt (0x%x) with "
614 "invalid DSA !!!\n",
615 irqcode);
616 goto reset;
617 }
618 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
619 aprint_error_dev(sc->sc_c.sc_dev,
620 "command with invalid status "
621 "(IRQ code 0x%x current status %d) !\n",
622 irqcode, siop_cmd->cmd_c.status);
623 xs = NULL;
624 }
625 }
626 switch(irqcode) {
627 case A_int_err:
628 printf("error, DSP=0x%x\n",
629 (int)(bus_space_read_4(sc->sc_c.sc_rt,
630 sc->sc_c.sc_rh, SIOP_DSP) -
631 sc->sc_c.sc_scriptaddr));
632 if (xs) {
633 xs->error = XS_SELTIMEOUT;
634 goto end;
635 } else {
636 goto reset;
637 }
638 case A_int_reseltarg:
639 aprint_error_dev(sc->sc_c.sc_dev,
640 "reselect with invalid target\n");
641 goto reset;
642 case A_int_resellun:
643 INCSTAT(siop_stat_intr_lunresel);
644 target = bus_space_read_1(sc->sc_c.sc_rt,
645 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
646 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
647 SIOP_SCRATCHA + 1);
648 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
649 SIOP_SCRATCHA + 2);
650 siop_target =
651 (struct siop_target *)sc->sc_c.targets[target];
652 if (siop_target == NULL) {
653 printf("%s: reselect with invalid target %d\n",
654 device_xname(sc->sc_c.sc_dev), target);
655 goto reset;
656 }
657 siop_lun = siop_target->siop_lun[lun];
658 if (siop_lun == NULL) {
659 printf("%s: target %d reselect with invalid "
660 "lun %d\n", device_xname(sc->sc_c.sc_dev),
661 target, lun);
662 goto reset;
663 }
664 if (siop_lun->siop_tag[tag].active == NULL) {
665 printf("%s: target %d lun %d tag %d reselect "
666 "without command\n",
667 device_xname(sc->sc_c.sc_dev),
668 target, lun, tag);
669 goto reset;
670 }
671 siop_cmd = siop_lun->siop_tag[tag].active;
672 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
673 SIOP_DSP, siop_cmd->cmd_c.dsa +
674 sizeof(struct siop_common_xfer) +
675 Ent_ldsa_reload_dsa);
676 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
677 return 1;
678 case A_int_reseltag:
679 printf("%s: reselect with invalid tag\n",
680 device_xname(sc->sc_c.sc_dev));
681 goto reset;
682 case A_int_msgin:
683 {
684 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
685 sc->sc_c.sc_rh, SIOP_SFBR);
686
687 if (msgin == MSG_MESSAGE_REJECT) {
688 int msg, extmsg;
689 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
690 /*
691 * message was part of an identify +
692 * something else. Identify shouldn't
693 * have been rejected.
694 */
695 msg =
696 siop_cmd->cmd_tables->msg_out[1];
697 extmsg =
698 siop_cmd->cmd_tables->msg_out[3];
699 } else {
700 msg = siop_cmd->cmd_tables->msg_out[0];
701 extmsg =
702 siop_cmd->cmd_tables->msg_out[2];
703 }
704 if (msg == MSG_MESSAGE_REJECT) {
705 /* MSG_REJECT for a MSG_REJECT !*/
706 if (xs)
707 scsipi_printaddr(xs->xs_periph);
708 else
709 printf("%s: ", device_xname(
710 sc->sc_c.sc_dev));
711 printf("our reject message was "
712 "rejected\n");
713 goto reset;
714 }
715 if (msg == MSG_EXTENDED &&
716 extmsg == MSG_EXT_WDTR) {
717 /* WDTR rejected, initiate sync */
718 if ((siop_target->target_c.flags &
719 TARF_SYNC) == 0) {
720 siop_target->target_c.status =
721 TARST_OK;
722 siop_update_xfer_mode(&sc->sc_c,
723 target);
724 /* no table to flush here */
725 CALL_SCRIPT(Ent_msgin_ack);
726 return 1;
727 }
728 siop_target->target_c.status =
729 TARST_SYNC_NEG;
730 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
731 sc->sc_c.st_minsync,
732 sc->sc_c.maxoff);
733 siop_table_sync(siop_cmd,
734 BUS_DMASYNC_PREREAD |
735 BUS_DMASYNC_PREWRITE);
736 CALL_SCRIPT(Ent_send_msgout);
737 return 1;
738 } else if (msg == MSG_EXTENDED &&
739 extmsg == MSG_EXT_SDTR) {
740 /* sync rejected */
741 siop_target->target_c.offset = 0;
742 siop_target->target_c.period = 0;
743 siop_target->target_c.status = TARST_OK;
744 siop_update_xfer_mode(&sc->sc_c,
745 target);
746 /* no table to flush here */
747 CALL_SCRIPT(Ent_msgin_ack);
748 return 1;
749 } else if (msg == MSG_SIMPLE_Q_TAG ||
750 msg == MSG_HEAD_OF_Q_TAG ||
751 msg == MSG_ORDERED_Q_TAG) {
752 if (siop_handle_qtag_reject(
753 siop_cmd) == -1)
754 goto reset;
755 CALL_SCRIPT(Ent_msgin_ack);
756 return 1;
757 }
758 if (xs)
759 scsipi_printaddr(xs->xs_periph);
760 else
761 printf("%s: ",
762 device_xname(sc->sc_c.sc_dev));
763 if (msg == MSG_EXTENDED) {
764 printf("scsi message reject, extended "
765 "message sent was 0x%x\n", extmsg);
766 } else {
767 printf("scsi message reject, message "
768 "sent was 0x%x\n", msg);
769 }
770 /* no table to flush here */
771 CALL_SCRIPT(Ent_msgin_ack);
772 return 1;
773 }
774 if (msgin == MSG_IGN_WIDE_RESIDUE) {
775 /* use the extmsgdata table to get the second byte */
776 siop_cmd->cmd_tables->t_extmsgdata.count =
777 siop_htoc32(&sc->sc_c, 1);
778 siop_table_sync(siop_cmd,
779 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
780 CALL_SCRIPT(Ent_get_extmsgdata);
781 return 1;
782 }
783 if (xs)
784 scsipi_printaddr(xs->xs_periph);
785 else
786 printf("%s: ", device_xname(sc->sc_c.sc_dev));
787 printf("unhandled message 0x%x\n",
788 siop_cmd->cmd_tables->msg_in[0]);
789 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
790 siop_cmd->cmd_tables->t_msgout.count =
791 siop_htoc32(&sc->sc_c, 1);
792 siop_table_sync(siop_cmd,
793 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
794 CALL_SCRIPT(Ent_send_msgout);
795 return 1;
796 }
797 case A_int_extmsgin:
798 #ifdef SIOP_DEBUG_INTR
799 printf("extended message: msg 0x%x len %d\n",
800 siop_cmd->cmd_tables->msg_in[2],
801 siop_cmd->cmd_tables->msg_in[1]);
802 #endif
803 if (siop_cmd->cmd_tables->msg_in[1] >
804 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
805 aprint_error_dev(sc->sc_c.sc_dev,
806 "extended message too big (%d)\n",
807 siop_cmd->cmd_tables->msg_in[1]);
808 siop_cmd->cmd_tables->t_extmsgdata.count =
809 siop_htoc32(&sc->sc_c,
810 siop_cmd->cmd_tables->msg_in[1] - 1);
811 siop_table_sync(siop_cmd,
812 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
813 CALL_SCRIPT(Ent_get_extmsgdata);
814 return 1;
815 case A_int_extmsgdata:
816 #ifdef SIOP_DEBUG_INTR
817 {
818 int i;
819 printf("extended message: 0x%x, data:",
820 siop_cmd->cmd_tables->msg_in[2]);
821 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
822 i++)
823 printf(" 0x%x",
824 siop_cmd->cmd_tables->msg_in[i]);
825 printf("\n");
826 }
827 #endif
828 if (siop_cmd->cmd_tables->msg_in[0] ==
829 MSG_IGN_WIDE_RESIDUE) {
830 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
831 if (siop_cmd->cmd_tables->msg_in[3] != 1)
832 printf("MSG_IGN_WIDE_RESIDUE: "
833 "bad len %d\n",
834 siop_cmd->cmd_tables->msg_in[3]);
835 switch (siop_iwr(&siop_cmd->cmd_c)) {
836 case SIOP_NEG_MSGOUT:
837 siop_table_sync(siop_cmd,
838 BUS_DMASYNC_PREREAD |
839 BUS_DMASYNC_PREWRITE);
840 CALL_SCRIPT(Ent_send_msgout);
841 return(1);
842 case SIOP_NEG_ACK:
843 CALL_SCRIPT(Ent_msgin_ack);
844 return(1);
845 default:
846 panic("invalid retval from "
847 "siop_iwr()");
848 }
849 return(1);
850 }
851 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
852 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
853 case SIOP_NEG_MSGOUT:
854 siop_update_scntl3(sc,
855 siop_cmd->cmd_c.siop_target);
856 siop_table_sync(siop_cmd,
857 BUS_DMASYNC_PREREAD |
858 BUS_DMASYNC_PREWRITE);
859 CALL_SCRIPT(Ent_send_msgout);
860 return(1);
861 case SIOP_NEG_ACK:
862 siop_update_scntl3(sc,
863 siop_cmd->cmd_c.siop_target);
864 CALL_SCRIPT(Ent_msgin_ack);
865 return(1);
866 default:
867 panic("invalid retval from "
868 "siop_wdtr_neg()");
869 }
870 return(1);
871 }
872 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
873 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
874 case SIOP_NEG_MSGOUT:
875 siop_update_scntl3(sc,
876 siop_cmd->cmd_c.siop_target);
877 siop_table_sync(siop_cmd,
878 BUS_DMASYNC_PREREAD |
879 BUS_DMASYNC_PREWRITE);
880 CALL_SCRIPT(Ent_send_msgout);
881 return(1);
882 case SIOP_NEG_ACK:
883 siop_update_scntl3(sc,
884 siop_cmd->cmd_c.siop_target);
885 CALL_SCRIPT(Ent_msgin_ack);
886 return(1);
887 default:
888 panic("invalid retval from "
889 "siop_wdtr_neg()");
890 }
891 return(1);
892 }
893 /* send a message reject */
894 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
895 siop_cmd->cmd_tables->t_msgout.count =
896 siop_htoc32(&sc->sc_c, 1);
897 siop_table_sync(siop_cmd,
898 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
899 CALL_SCRIPT(Ent_send_msgout);
900 return 1;
901 case A_int_disc:
902 INCSTAT(siop_stat_intr_sdp);
903 offset = bus_space_read_1(sc->sc_c.sc_rt,
904 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
905 #ifdef SIOP_DEBUG_DR
906 printf("disconnect offset %d\n", offset);
907 #endif
908 siop_sdp(&siop_cmd->cmd_c, offset);
909 /* we start again with no offset */
910 siop_cmd->saved_offset = SIOP_NOOFFSET;
911 siop_table_sync(siop_cmd,
912 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
913 CALL_SCRIPT(Ent_script_sched);
914 return 1;
915 case A_int_saveoffset:
916 INCSTAT(siop_stat_intr_saveoffset);
917 offset = bus_space_read_1(sc->sc_c.sc_rt,
918 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
919 #ifdef SIOP_DEBUG_DR
920 printf("saveoffset offset %d\n", offset);
921 #endif
922 siop_cmd->saved_offset = offset;
923 CALL_SCRIPT(Ent_script_sched);
924 return 1;
925 case A_int_resfail:
926 printf("reselect failed\n");
927 CALL_SCRIPT(Ent_script_sched);
928 return 1;
929 case A_int_done:
930 if (xs == NULL) {
931 printf("%s: done without command, DSA=0x%lx\n",
932 device_xname(sc->sc_c.sc_dev),
933 (u_long)siop_cmd->cmd_c.dsa);
934 siop_cmd->cmd_c.status = CMDST_FREE;
935 CALL_SCRIPT(Ent_script_sched);
936 return 1;
937 }
938 #ifdef SIOP_DEBUG_INTR
939 printf("done, DSA=0x%lx target id 0x%x last msg "
940 "in=0x%x status=0x%x\n",
941 (u_long)siop_cmd->cmd_c.dsa,
942 siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id),
943 siop_cmd->cmd_tables->msg_in[0],
944 siop_ctoh32(&sc->sc_c,
945 siop_cmd->cmd_tables->status));
946 #endif
947 INCSTAT(siop_stat_intr_done);
948 /* update resid. */
949 offset = bus_space_read_1(sc->sc_c.sc_rt,
950 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
951 /*
952 * if we got a disconnect between the last data phase
953 * and the status phase, offset will be 0. In this
954 * case, siop_cmd->saved_offset will have the proper
955 * value if it got updated by the controller
956 */
957 if (offset == 0 &&
958 siop_cmd->saved_offset != SIOP_NOOFFSET)
959 offset = siop_cmd->saved_offset;
960 siop_update_resid(&siop_cmd->cmd_c, offset);
961 siop_cmd->cmd_c.status = CMDST_DONE;
962 goto end;
963 default:
964 printf("unknown irqcode %x\n", irqcode);
965 if (xs) {
966 xs->error = XS_SELTIMEOUT;
967 goto end;
968 }
969 goto reset;
970 }
971 return 1;
972 }
973 /* We just shouldn't get there */
974 panic("siop_intr: I shouldn't be there !");
975
976 end:
977 /*
978 * restart the script now if the command completed properly.
979 * Otherwise wait for siop_scsicmd_end(); we may need to clean up the
980 * queue
981 */
982 xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status);
983 if (xs->status == SCSI_OK)
984 CALL_SCRIPT(Ent_script_sched);
985 else
986 restart = 1;
987 siop_lun->siop_tag[tag].active = NULL;
988 siop_scsicmd_end(siop_cmd);
989 if (freetarget && siop_target->target_c.status == TARST_PROBING)
990 siop_del_dev(sc, target, lun);
991 if (restart)
992 CALL_SCRIPT(Ent_script_sched);
993 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
994 /* a command terminated, so we have free slots now */
995 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
996 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
997 }
998
999 return 1;
1000 }
1001
1002 void
1003 siop_scsicmd_end(struct siop_cmd *siop_cmd)
1004 {
1005 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1006 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1007
1008 switch(xs->status) {
1009 case SCSI_OK:
1010 xs->error = XS_NOERROR;
1011 break;
1012 case SCSI_BUSY:
1013 xs->error = XS_BUSY;
1014 break;
1015 case SCSI_CHECK:
1016 xs->error = XS_BUSY;
1017 /* remove commands in the queue and scheduler */
1018 siop_unqueue(sc, xs->xs_periph->periph_target,
1019 xs->xs_periph->periph_lun);
1020 break;
1021 case SCSI_QUEUE_FULL:
1022 INCSTAT(siop_stat_intr_qfull);
1023 #ifdef SIOP_DEBUG
1024 printf("%s:%d:%d: queue full (tag %d)\n",
1025 device_xname(sc->sc_c.sc_dev),
1026 xs->xs_periph->periph_target,
1027 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1028 #endif
1029 xs->error = XS_BUSY;
1030 break;
1031 case SCSI_SIOP_NOCHECK:
1032 /*
1033 * don't check status, xs->error is already valid
1034 */
1035 break;
1036 case SCSI_SIOP_NOSTATUS:
1037 /*
1038 * the status byte was not updated, cmd was
1039 * aborted
1040 */
1041 xs->error = XS_SELTIMEOUT;
1042 break;
1043 default:
1044 scsipi_printaddr(xs->xs_periph);
1045 printf("invalid status code %d\n", xs->status);
1046 xs->error = XS_DRIVER_STUFFUP;
1047 }
1048 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1049 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
1050 0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1051 (xs->xs_control & XS_CTL_DATA_IN) ?
1052 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1053 bus_dmamap_unload(sc->sc_c.sc_dmat,
1054 siop_cmd->cmd_c.dmamap_data);
1055 }
1056 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1057 if ((xs->xs_control & XS_CTL_POLL) == 0)
1058 callout_stop(&xs->xs_callout);
1059 siop_cmd->cmd_c.status = CMDST_FREE;
1060 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1061 #if 0
1062 if (xs->resid != 0)
1063 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1064 #endif
1065 scsipi_done(xs);
1066 }
1067
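/*
 * Pull back all commands for this target/lun that are still sitting in
 * the scheduler and not yet started: clear their slots, mark them
 * XS_REQUEUE and complete them, then recompute sc_currschedslot.
 */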
1068 void
1069 siop_unqueue(struct siop_softc *sc, int target, int lun)
1070 {
1071 int slot, tag;
1072 struct siop_cmd *siop_cmd;
1073 struct siop_lun *siop_lun =
1074 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1075
1076 /* first make sure to read valid data */
1077 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1078
1079 for (tag = 1; tag < SIOP_NTAG; tag++) {
1080 /* look for commands in the scheduler, not yet started */
1081 if (siop_lun->siop_tag[tag].active == NULL)
1082 continue;
1083 siop_cmd = siop_lun->siop_tag[tag].active;
1084 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1085 if (siop_script_read(sc,
1086 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1087 siop_cmd->cmd_c.dsa +
1088 sizeof(struct siop_common_xfer) +
1089 Ent_ldsa_select)
1090 break;
1091 }
1092 if (slot > sc->sc_currschedslot)
1093 continue; /* didn't find it */
1094 if (siop_script_read(sc,
1095 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1096 continue; /* already started */
1097 /* clear the slot */
1098 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1099 0x80000000);
1100 /* ask to requeue */
1101 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1102 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1103 siop_lun->siop_tag[tag].active = NULL;
1104 siop_scsicmd_end(siop_cmd);
1105 }
1106 /* update sc_currschedslot */
1107 sc->sc_currschedslot = 0;
1108 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1109 if (siop_script_read(sc,
1110 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1111 sc->sc_currschedslot = slot;
1112 }
1113 }
1114
1115 /*
1116 * handle a rejected queue tag message: the command will run untagged,
1117 * so we have to adjust the reselect script.
1118 */
1119 int
1120 siop_handle_qtag_reject(struct siop_cmd *siop_cmd)
1121 {
1122 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1123 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1124 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1125 int tag = siop_cmd->cmd_tables->msg_out[2];
1126 struct siop_lun *siop_lun =
1127 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1128
1129 #ifdef SIOP_DEBUG
1130 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1131 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1132 siop_cmd->cmd_c.tag,
1133 siop_cmd->cmd_c.status);
1134 #endif
1135
1136 if (siop_lun->siop_tag[0].active != NULL) {
1137 printf("%s: untagged command already running for target %d "
1138 "lun %d (status %d)\n", device_xname(sc->sc_c.sc_dev),
1139 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1140 return -1;
1141 }
1142 /* clear tag slot */
1143 siop_lun->siop_tag[tag].active = NULL;
1144 /* add command to non-tagged slot */
1145 siop_lun->siop_tag[0].active = siop_cmd;
1146 siop_cmd->cmd_c.tag = 0;
1147 /* adjust reselect script if there is one */
1148 if (siop_lun->siop_tag[0].reseloff > 0) {
1149 siop_script_write(sc,
1150 siop_lun->siop_tag[0].reseloff + 1,
1151 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1152 Ent_ldsa_reload_dsa);
1153 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1154 }
1155 return 0;
1156 }
1157
1158 /*
1159 * handle a bus reset: reset chip, unqueue all active commands, free all
1160 * target structs and report lossage to the upper layer.
1161 * As the upper layer may requeue immediately, we have to first store
1162 * all active commands in a temporary queue.
1163 */
1164 void
1165 siop_handle_reset(struct siop_softc *sc)
1166 {
1167 struct siop_cmd *siop_cmd;
1168 struct siop_lun *siop_lun;
1169 int target, lun, tag;
1170
1171 /*
1172 * scsi bus reset. reset the chip and restart
1173 * the queue. Need to clean up all active commands
1174 */
1175 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1176 /* stop, reset and restart the chip */
1177 siop_reset(sc);
1178 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1179 /* chip has been reset, all slots are free now */
1180 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1181 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1182 }
1183 /*
1184 * Process all commands: first commands being executed
1185 */
1186 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1187 target++) {
1188 if (sc->sc_c.targets[target] == NULL)
1189 continue;
1190 for (lun = 0; lun < 8; lun++) {
1191 struct siop_target *siop_target =
1192 (struct siop_target *)sc->sc_c.targets[target];
1193 siop_lun = siop_target->siop_lun[lun];
1194 if (siop_lun == NULL)
1195 continue;
1196 for (tag = 0; tag <
1197 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1198 SIOP_NTAG : 1);
1199 tag++) {
1200 siop_cmd = siop_lun->siop_tag[tag].active;
1201 if (siop_cmd == NULL)
1202 continue;
1203 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1204 printf("command with tag id %d reset\n", tag);
1205 siop_cmd->cmd_c.xs->error =
1206 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1207 XS_TIMEOUT : XS_RESET;
1208 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1209 siop_lun->siop_tag[tag].active = NULL;
1210 siop_cmd->cmd_c.status = CMDST_DONE;
1211 siop_scsicmd_end(siop_cmd);
1212 }
1213 }
1214 sc->sc_c.targets[target]->status = TARST_ASYNC;
1215 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1216 sc->sc_c.targets[target]->period =
1217 sc->sc_c.targets[target]->offset = 0;
1218 siop_update_xfer_mode(&sc->sc_c, target);
1219 }
1220
1221 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1222 }
1223
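/*
 * scsipi entry point: ADAPTER_REQ_RUN_XFER allocates per-target/lun
 * state as needed, loads the DMA maps and starts the command;
 * ADAPTER_REQ_GROW_RESOURCES allocates more command descriptors;
 * ADAPTER_REQ_SET_XFER_MODE updates the target flags and registers
 * the luns in the script.
 */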
1224 void
1225 siop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1226 void *arg)
1227 {
1228 struct scsipi_xfer *xs;
1229 struct scsipi_periph *periph;
1230 struct siop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1231 struct siop_cmd *siop_cmd;
1232 struct siop_target *siop_target;
1233 int s, error, i;
1234 int target;
1235 int lun;
1236
1237 switch (req) {
1238 case ADAPTER_REQ_RUN_XFER:
1239 xs = arg;
1240 periph = xs->xs_periph;
1241 target = periph->periph_target;
1242 lun = periph->periph_lun;
1243
1244 s = splbio();
1245 #ifdef SIOP_DEBUG_SCHED
1246 printf("starting cmd for %d:%d\n", target, lun);
1247 #endif
1248 siop_cmd = TAILQ_FIRST(&sc->free_list);
1249 if (siop_cmd == NULL) {
1250 xs->error = XS_RESOURCE_SHORTAGE;
1251 scsipi_done(xs);
1252 splx(s);
1253 return;
1254 }
1255 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1256 #ifdef DIAGNOSTIC
1257 if (siop_cmd->cmd_c.status != CMDST_FREE)
1258 panic("siop_scsicmd: new cmd not free");
1259 #endif
1260 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1261 if (siop_target == NULL) {
1262 #ifdef SIOP_DEBUG
1263 printf("%s: alloc siop_target for target %d\n",
1264 device_xname(sc->sc_c.sc_dev), target);
1265 #endif
1266 sc->sc_c.targets[target] =
1267 malloc(sizeof(struct siop_target),
1268 M_DEVBUF, M_NOWAIT|M_ZERO);
1269 if (sc->sc_c.targets[target] == NULL) {
1270 aprint_error_dev(sc->sc_c.sc_dev,
1271 "can't malloc memory for "
1272 "target %d\n", target);
1273 xs->error = XS_RESOURCE_SHORTAGE;
1274 scsipi_done(xs);
1275 TAILQ_INSERT_TAIL(&sc->free_list,
1276 siop_cmd, next);
1277 splx(s);
1278 return;
1279 }
1280 siop_target =
1281 (struct siop_target *)sc->sc_c.targets[target];
1282 siop_target->target_c.status = TARST_PROBING;
1283 siop_target->target_c.flags = 0;
1284 siop_target->target_c.id =
1285 sc->sc_c.clock_div << 24; /* scntl3 */
1286 siop_target->target_c.id |= target << 16; /* id */
1287 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1288
1289 /* get a lun switch script */
1290 siop_target->lunsw = siop_get_lunsw(sc);
1291 if (siop_target->lunsw == NULL) {
1292 aprint_error_dev(sc->sc_c.sc_dev,
1293 "can't alloc lunsw for target %d\n",
1294 target);
1295 xs->error = XS_RESOURCE_SHORTAGE;
1296 scsipi_done(xs);
1297 TAILQ_INSERT_TAIL(&sc->free_list,
1298 siop_cmd, next);
1299 splx(s);
1300 return;
1301 }
1302 for (i=0; i < 8; i++)
1303 siop_target->siop_lun[i] = NULL;
1304 siop_add_reselsw(sc, target);
1305 }
1306 if (siop_target->siop_lun[lun] == NULL) {
1307 siop_target->siop_lun[lun] =
1308 malloc(sizeof(struct siop_lun), M_DEVBUF,
1309 M_NOWAIT|M_ZERO);
1310 if (siop_target->siop_lun[lun] == NULL) {
1311 aprint_error_dev(sc->sc_c.sc_dev,
1312 "can't alloc siop_lun for "
1313 "target %d lun %d\n",
1314 target, lun);
1315 xs->error = XS_RESOURCE_SHORTAGE;
1316 scsipi_done(xs);
1317 TAILQ_INSERT_TAIL(&sc->free_list,
1318 siop_cmd, next);
1319 splx(s);
1320 return;
1321 }
1322 }
1323 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1324 siop_cmd->cmd_c.xs = xs;
1325 siop_cmd->cmd_c.flags = 0;
1326 siop_cmd->cmd_c.status = CMDST_READY;
1327
1328 /* load the DMA maps */
1329 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1330 siop_cmd->cmd_c.dmamap_cmd,
1331 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1332 if (error) {
1333 aprint_error_dev(sc->sc_c.sc_dev,
1334 "unable to load cmd DMA map: %d\n",
1335 error);
1336 xs->error = (error == EAGAIN) ?
1337 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1338 scsipi_done(xs);
1339 siop_cmd->cmd_c.status = CMDST_FREE;
1340 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1341 splx(s);
1342 return;
1343 }
1344 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1345 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1346 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1347 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1348 ((xs->xs_control & XS_CTL_DATA_IN) ?
1349 BUS_DMA_READ : BUS_DMA_WRITE));
1350 if (error) {
1351 aprint_error_dev(sc->sc_c.sc_dev,
1352 "unable to load data DMA map: %d\n",
1353 error);
1354 xs->error = (error == EAGAIN) ?
1355 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1356 scsipi_done(xs);
1357 bus_dmamap_unload(sc->sc_c.sc_dmat,
1358 siop_cmd->cmd_c.dmamap_cmd);
1359 siop_cmd->cmd_c.status = CMDST_FREE;
1360 TAILQ_INSERT_TAIL(&sc->free_list,
1361 siop_cmd, next);
1362 splx(s);
1363 return;
1364 }
1365 bus_dmamap_sync(sc->sc_c.sc_dmat,
1366 siop_cmd->cmd_c.dmamap_data, 0,
1367 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1368 (xs->xs_control & XS_CTL_DATA_IN) ?
1369 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1370 }
1371 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1372 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1373 BUS_DMASYNC_PREWRITE);
1374
1375 if (xs->xs_tag_type) {
1376 /* use tag_id + 1, tag 0 is reserved for untagged cmds */
1377 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1378 } else {
1379 siop_cmd->cmd_c.tag = 0;
1380 }
1381 siop_setuptables(&siop_cmd->cmd_c);
1382 siop_cmd->saved_offset = SIOP_NOOFFSET;
1383 siop_table_sync(siop_cmd,
1384 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1385 siop_start(sc, siop_cmd);
1386 if (xs->xs_control & XS_CTL_POLL) {
1387 /* poll for command completion */
1388 while ((xs->xs_status & XS_STS_DONE) == 0) {
1389 delay(1000);
1390 siop_intr(sc);
1391 }
1392 }
1393 splx(s);
1394 return;
1395
1396 case ADAPTER_REQ_GROW_RESOURCES:
1397 #ifdef SIOP_DEBUG
1398 printf("%s grow resources (%d)\n",
1399 device_xname(sc->sc_c.sc_dev),
1400 sc->sc_c.sc_adapt.adapt_openings);
1401 #endif
1402 siop_morecbd(sc);
1403 return;
1404
1405 case ADAPTER_REQ_SET_XFER_MODE:
1406 {
1407 struct scsipi_xfer_mode *xm = arg;
1408 if (sc->sc_c.targets[xm->xm_target] == NULL)
1409 return;
1410 s = splbio();
1411 if (xm->xm_mode & PERIPH_CAP_TQING)
1412 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1413 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1414 (sc->sc_c.features & SF_BUS_WIDE))
1415 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1416 if (xm->xm_mode & PERIPH_CAP_SYNC)
1417 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1418 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1419 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1420 sc->sc_c.targets[xm->xm_target]->status =
1421 TARST_ASYNC;
1422
1423 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1424 if (scsipi_lookup_periph(chan,
1425 xm->xm_target, lun) != NULL) {
1426 /* allocate a lun sw entry for this device */
1427 siop_add_dev(sc, xm->xm_target, lun);
1428 }
1429 }
1430
1431 splx(s);
1432 }
1433 }
1434 }
1435
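/*
 * Load the command into a free scheduler slot: patch the reselect
 * switch and the command's own script with its DSA, point the slot at
 * the command's ldsa_select entry, arm the slot's JUMP and set SIGP so
 * the script picks it up.
 */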
1436 static void
1437 siop_start(struct siop_softc *sc, struct siop_cmd *siop_cmd)
1438 {
1439 struct siop_lun *siop_lun;
1440 struct siop_xfer *siop_xfer;
1441 uint32_t dsa;
1442 int timeout;
1443 int target, lun, slot;
1444
1445 /*
1446 * first make sure to read valid data
1447 */
1448 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1449
1450 /*
1451 * The queue management here is a bit tricky: the script always looks
1452 * at the slot from first to last, so if we always use the first
1453 * free slot commands can stay at the tail of the queue ~forever.
1454 * The algorithm used here is to restart from the head when we know
1455 * that the queue is empty, and only add commands after the last one.
1456 * When we're at the end of the queue wait for the script to clear it.
1457 * The best thing to do here would be to implement a circular queue,
1458 * but using only 53c720 features this can be "interesting".
1459 * A mid-way solution could be to implement 2 queues and swap orders.
1460 */
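/*
 * Each scheduler slot is two script words: a JUMP instruction and the
 * address of the command's ldsa_select entry.  A first word of
 * 0x80000000 (JUMP foo, IF FALSE) marks a free slot; it is rewritten
 * to 0x80080000 below once the slot is loaded, so the script will
 * execute the jump.
 */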
1461 slot = sc->sc_currschedslot;
1462 /*
1463 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1464 * free. As this is the last used slot, all previous slots are free,
1465 * we can restart from 0.
1466 */
1467 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1468 0x80000000) {
1469 slot = sc->sc_currschedslot = 0;
1470 } else {
1471 slot++;
1472 }
1473 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1474 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1475 siop_lun =
1476 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1477 /* if non-tagged command active, panic: this shouldn't happen */
1478 if (siop_lun->siop_tag[0].active != NULL) {
1479 panic("siop_start: tagged cmd while untagged running");
1480 }
1481 #ifdef DIAGNOSTIC
1482 /* sanity check the tag if needed */
1483 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1484 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1485 panic("siop_start: tag not free");
1486 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1487 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1488 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1489 panic("siop_start: invalid tag id");
1490 }
1491 }
1492 #endif
1493 /*
1494 * find a free scheduler slot and load it.
1495 */
1496 for (; slot < SIOP_NSLOTS; slot++) {
1497 /*
1498 * If the cmd is 0x80000000 the slot is free
1499 */
1500 if (siop_script_read(sc,
1501 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1502 0x80000000)
1503 break;
1504 }
1505 if (slot == SIOP_NSLOTS) {
1506 /*
1507 * no more free slots, no need to continue. Freeze the queue
1508 * and requeue this command.
1509 */
1510 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1511 sc->sc_flags |= SCF_CHAN_NOSLOT;
1512 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1513 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1514 siop_scsicmd_end(siop_cmd);
1515 return;
1516 }
1517 #ifdef SIOP_DEBUG_SCHED
1518 printf("using slot %d for DSA 0x%lx\n", slot,
1519 (u_long)siop_cmd->cmd_c.dsa);
1520 #endif
1521 /* mark command as active */
1522 if (siop_cmd->cmd_c.status == CMDST_READY)
1523 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1524 else
1525 panic("siop_start: bad status");
1526 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1527 /* patch scripts with DSA addr */
1528 dsa = siop_cmd->cmd_c.dsa;
1529 /* first reselect switch, if we have an entry */
1530 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1531 siop_script_write(sc,
1532 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1533 dsa + sizeof(struct siop_common_xfer) +
1534 Ent_ldsa_reload_dsa);
1535 /* CMD script: MOVE MEMORY addr */
1536 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1537 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1538 siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr +
1539 Ent_script_sched_slot0 + slot * 8);
1540 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1541 /* scheduler slot: JUMP ldsa_select */
1542 siop_script_write(sc,
1543 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1544 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1545 /* handle timeout */
1546 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1547 /* start expire timer */
1548 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1549 if (timeout == 0)
1550 timeout = 1;
1551 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1552 timeout, siop_timeout, siop_cmd);
1553 }
1554 /*
1555 * Change JUMP cmd so that this slot will be handled
1556 */
1557 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1558 0x80080000);
1559 sc->sc_currschedslot = slot;
1560
1561 /* make sure SCRIPT processor will read valid data */
1562 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1563 /* Signal script it has some work to do */
1564 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1565 SIOP_ISTAT, ISTAT_SIGP);
1566 /* and wait for IRQ */
1567 }
1568
1569 void
1570 siop_timeout(void *v)
1571 {
1572 struct siop_cmd *siop_cmd = v;
1573 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1574 int s;
1575
1576 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1577 printf("command timeout, CDB: ");
1578 scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1579 printf("\n");
1580
1581 s = splbio();
1582 /* reset the scsi bus */
1583 siop_resetbus(&sc->sc_c);
1584
1585 /* deactivate callout */
1586 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1588 /*
1589 * mark command as being timed out and just return;
1590 * the bus reset will generate an interrupt,
1591 * it will be handled in siop_intr()
1592 */
1593 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1594 splx(s);
1595 }
1596
1597 void
1598 siop_dump_script(struct siop_softc *sc)
1599 {
1600 int i;
1601
1602 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1603 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1604 siop_script_read(sc, i),
1605 siop_script_read(sc, i + 1));
1606 if ((siop_script_read(sc, i) & 0xe0000000) == 0xc0000000) {
1607 i++;
1608 printf(" 0x%08x", siop_script_read(sc, i + 1));
1609 }
1610 printf("\n");
1611 }
1612 }
1613
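/*
 * Grow the command pool: allocate a new page of siop_xfer descriptors
 * and their siop_cmd entries, create and load the DMA maps, initialize
 * each command's tables and per-command select/reselect script, and
 * put the commands on the free list.
 */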
1614 void
1615 siop_morecbd(struct siop_softc *sc)
1616 {
1617 int error, off, i, j, s;
1618 bus_dma_segment_t seg;
1619 int rseg;
1620 struct siop_cbd *newcbd;
1621 struct siop_xfer *xfer;
1622 bus_addr_t dsa;
1623 uint32_t *scr;
1624
1625 /* allocate a new list head */
1626 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1627 if (newcbd == NULL) {
1628 aprint_error_dev(sc->sc_c.sc_dev,
1629 "can't allocate memory for command descriptors head\n");
1630 return;
1631 }
1632
1633 /* allocate cmd list */
1634 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1635 M_DEVBUF, M_NOWAIT|M_ZERO);
1636 if (newcbd->cmds == NULL) {
1637 aprint_error_dev(sc->sc_c.sc_dev,
1638 "can't allocate memory for command descriptors\n");
1639 goto bad3;
1640 }
1641 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE,
1642 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1643 if (error) {
1644 aprint_error_dev(sc->sc_c.sc_dev,
1645 "unable to allocate cbd DMA memory, error = %d\n",
1646 error);
1647 goto bad2;
1648 }
1649 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1650 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1651 if (error) {
1652 aprint_error_dev(sc->sc_c.sc_dev,
1653 "unable to map cbd DMA memory, error = %d\n",
1654 error);
1655 goto bad2;
1656 }
1657 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1658 BUS_DMA_NOWAIT, &newcbd->xferdma);
1659 if (error) {
1660 aprint_error_dev(sc->sc_c.sc_dev,
1661 "unable to create cbd DMA map, error = %d\n",
1662 error);
1663 goto bad1;
1664 }
1665 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1666 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1667 if (error) {
1668 aprint_error_dev(sc->sc_c.sc_dev,
1669 "unable to load cbd DMA map, error = %d\n",
1670 error);
1671 goto bad0;
1672 }
1673 #ifdef SIOP_DEBUG
1674 printf("%s: alloc newcdb at PHY addr 0x%lx\n",
1675 device_xname(sc->sc_c.sc_dev),
1676 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1677 #endif
1678 off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
1679 for (i = 0; i < SIOP_NCMDPB; i++) {
1680 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1681 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1682 &newcbd->cmds[i].cmd_c.dmamap_data);
1683 if (error) {
1684 aprint_error_dev(sc->sc_c.sc_dev,
1685 "unable to create data DMA map for cbd: "
1686 "error %d\n", error);
1687 goto bad0;
1688 }
1689 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1690 sizeof(struct scsipi_generic), 1,
1691 sizeof(struct scsipi_generic), 0,
1692 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1693 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1694 if (error) {
1695 aprint_error_dev(sc->sc_c.sc_dev,
1696 "unable to create cmd DMA map for cbd %d\n", error);
1697 goto bad0;
1698 }
1699 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1700 newcbd->cmds[i].siop_cbdp = newcbd;
1701 xfer = &newcbd->xfers[i];
1702 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1703 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1704 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1705 i * sizeof(struct siop_xfer);
1706 newcbd->cmds[i].cmd_c.dsa = dsa;
1707 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1708 xfer->siop_tables.t_msgout.count= siop_htoc32(&sc->sc_c, 1);
1709 xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
1710 xfer->siop_tables.t_msgin.count= siop_htoc32(&sc->sc_c, 1);
1711 xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
1712 dsa + offsetof(struct siop_common_xfer, msg_in));
1713 xfer->siop_tables.t_extmsgin.count= siop_htoc32(&sc->sc_c, 2);
1714 xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
1715 dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
1716 xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
1717 dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
1718 xfer->siop_tables.t_status.count= siop_htoc32(&sc->sc_c, 1);
1719 xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
1720 dsa + offsetof(struct siop_common_xfer, status) + off);
1721 /* The select/reselect script */
1722 scr = &xfer->resel[0];
1723 for (j = 0; j < __arraycount(load_dsa); j++)
1724 scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
1725 /*
1726 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1727 * octet, reg offset is the third.
1728 */
1729 scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
1730 0x78100000 | ((dsa & 0x000000ff) << 8));
1731 scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
1732 0x78110000 | ( dsa & 0x0000ff00 ));
1733 scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
1734 0x78120000 | ((dsa & 0x00ff0000) >> 8));
1735 scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
1736 0x78130000 | ((dsa & 0xff000000) >> 16));
1737 scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
1738 sc->sc_c.sc_scriptaddr + Ent_reselected);
1739 scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
1740 sc->sc_c.sc_scriptaddr + Ent_reselect);
1741 scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
1742 sc->sc_c.sc_scriptaddr + Ent_selected);
1743 scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
1744 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
1745 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1746 scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
1747 s = splbio();
1748 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1749 splx(s);
1750 #ifdef SIOP_DEBUG
1751 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1752 siop_ctoh32(&sc->sc_c,
1753 newcbd->cmds[i].cmd_tables->t_msgin.addr),
1754 siop_ctoh32(&sc->sc_c,
1755 newcbd->cmds[i].cmd_tables->t_msgout.addr),
1756 siop_ctoh32(&sc->sc_c,
1757 newcbd->cmds[i].cmd_tables->t_status.addr));
1758 #endif
1759 }
1760 s = splbio();
1761 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1762 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1763 splx(s);
1764 return;
1765 bad0:
1766 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1767 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1768 bad1:
1769 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1770 bad2:
1771 free(newcbd->cmds, M_DEVBUF);
1772 bad3:
1773 free(newcbd, M_DEVBUF);
1774 }
1775
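/*
 * Get a lun switch script fragment: reuse one from lunsw_list if
 * available, otherwise copy lun_switch[] into free script space
 * (on-chip RAM or the host copy) and patch its return address.
 */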
1776 struct siop_lunsw *
1777 siop_get_lunsw(struct siop_softc *sc)
1778 {
1779 struct siop_lunsw *lunsw;
1780 int i;
1781
1782 if (sc->script_free_lo + __arraycount(lun_switch) >= sc->script_free_hi)
1783 return NULL;
1784 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1785 if (lunsw != NULL) {
1786 #ifdef SIOP_DEBUG
1787 printf("siop_get_lunsw got lunsw at offset %d\n",
1788 lunsw->lunsw_off);
1789 #endif
1790 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1791 return lunsw;
1792 }
1793 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1794 if (lunsw == NULL)
1795 return NULL;
1796 #ifdef SIOP_DEBUG
1797 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1798 #endif
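/*
 * Copy the lun_switch template into on-chip SCRIPTS RAM when the chip
 * has it, otherwise into the host-memory script, and patch its
 * abs_lunsw_return reference with the absolute address of lunsw_return
 * in the main script.
 */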
1799 if (sc->sc_c.features & SF_CHIP_RAM) {
1800 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1801 sc->script_free_lo * 4, lun_switch,
1802 __arraycount(lun_switch));
1803 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1804 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1805 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1806 } else {
1807 for (i = 0; i < __arraycount(lun_switch); i++)
1808 sc->sc_c.sc_script[sc->script_free_lo + i] =
1809 siop_htoc32(&sc->sc_c, lun_switch[i]);
1810 sc->sc_c.sc_script[
1811 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1812 siop_htoc32(&sc->sc_c,
1813 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1814 }
1815 lunsw->lunsw_off = sc->script_free_lo;
1816 lunsw->lunsw_size = __arraycount(lun_switch);
1817 sc->script_free_lo += lunsw->lunsw_size;
1818 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1819 return lunsw;
1820 }
1821
1822 void
1823 siop_add_reselsw(struct siop_softc *sc, int target)
1824 {
1825 int i, j;
1826 struct siop_target *siop_target;
1827 struct siop_lun *siop_lun;
1828
1829 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1830 /*
1831 * add an entry to resel switch
1832 */
1833 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1834 for (i = 0; i < 15; i++) {
1835 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1836 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1837 == 0xff) { /* it's free */
1838 #ifdef SIOP_DEBUG
1839 printf("siop: target %d slot %d offset %d\n",
1840 target, i, siop_target->reseloff);
1841 #endif
1842 /* JUMP abs_foo, IF target | 0x80; */
1843 siop_script_write(sc, siop_target->reseloff,
1844 0x800c0080 | target);
1845 siop_script_write(sc, siop_target->reseloff + 1,
1846 sc->sc_c.sc_scriptaddr +
1847 siop_target->lunsw->lunsw_off * 4 +
1848 Ent_lun_switch_entry);
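/*
 * The first word is 'JUMP ..., IF 0x80 | target' (e.g. 0x800c0083
 * for target 3); the 0x80 presumably matches the valid bit of SSID,
 * from which the script loads the reselecting ID.  The second word
 * is the absolute address of this target's lun_switch_entry.
 */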
1849 break;
1850 }
1851 }
1852 if (i == 15) /* no free slot, shouldn't happen */
1853 panic("siop: resel switch full");
1854
1855 sc->sc_ntargets++;
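/*
 * Re-add luns that already had a resel entry; this appears to matter
 * when the switches are rebuilt, e.g. when siop_reset() re-runs
 * siop_add_reselsw() for each known target.
 */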
1856 for (i = 0; i < 8; i++) {
1857 siop_lun = siop_target->siop_lun[i];
1858 if (siop_lun == NULL)
1859 continue;
1860 if (siop_lun->reseloff > 0) {
1861 siop_lun->reseloff = 0;
1862 for (j = 0; j < SIOP_NTAG; j++)
1863 siop_lun->siop_tag[j].reseloff = 0;
1864 siop_add_dev(sc, target, i);
1865 }
1866 }
1867 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1868 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1869 }
1870
1871 void
1872 siop_update_scntl3(struct siop_softc *sc,
1873 struct siop_common_target *_siop_target)
1874 {
1875 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1876
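/*
 * target_c.id packs the per-target register values: SCNTL3 in its top
 * byte and SXFER in bits 8-15 (the target number sits in between), so
 * the extracted bytes below land in the data8 field of the two 'move
 * data8 to reg' instructions (registers 0x03 = SCNTL3, 0x05 = SXFER).
 */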
1877 /* MOVE target->id >> 24 TO SCNTL3 */
1878 siop_script_write(sc,
1879 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1880 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1881 /* MOVE target->id >> 8 TO SXFER */
1882 siop_script_write(sc,
1883 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1884 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1885 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1886 }
1887
1888 void
1889 siop_add_dev(struct siop_softc *sc, int target, int lun)
1890 {
1891 struct siop_lunsw *lunsw;
1892 struct siop_target *siop_target =
1893 (struct siop_target *)sc->sc_c.targets[target];
1894 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1895 int i, ntargets;
1896
1897 if (siop_lun->reseloff > 0)
1898 return;
1899 lunsw = siop_target->lunsw;
1900 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1901 /*
1902 * can't extend this lun switch in place (it is no longer the last
1903 * allocation below script_free_lo). Probably not worth handling.
1904 */
1905 #ifdef SIOP_DEBUG
1906 aprint_error_dev(sc->sc_c.sc_dev,
1907 "%d:%d: can't allocate a lun sw slot\n", target, lun);
1908 #endif
1909 return;
1910 }
1911 /* count how many targets (excluding the adapter itself) remain to be probed */
1912 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1913
1914 /*
1915 * we need 8 bytes (2 script words) for the additional lun switch
1916 * entry, and possibly sizeof(tag_switch) for the tag switch entry.
1917 * Keep enough free space for the targets that may still be
1918 * probed later.
1919 */
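/*
 * Illustrative numbers (hypothetical): with ntargets == 3 left to probe
 * and no tagged queueing on this target, the test below only lets the
 * entry be added when script_free_lo + 2 + 3 * __arraycount(lun_switch)
 * stays under script_free_hi.  When it fails, the lun keeps
 * reseloff == 0 and its reselections are presumably serviced through
 * the slower int_resellun interrupt instead of a direct script jump.
 */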
1920 if (sc->script_free_lo + 2 +
1921 (ntargets * __arraycount(lun_switch)) >=
1922 ((siop_target->target_c.flags & TARF_TAG) ?
1923 sc->script_free_hi - __arraycount(tag_switch) :
1924 sc->script_free_hi)) {
1925 /*
1926 * not enough space, probably not worth dealing with it.
1927 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1928 */
1929 #ifdef SIOP_DEBUG
1930 aprint_error_dev(sc->sc_c.sc_dev,
1931 "%d:%d: not enough memory for a lun sw slot\n",
1932 target, lun);
1933 #endif
1934 return;
1935 }
1936 #ifdef SIOP_DEBUG
1937 printf("%s:%d:%d: allocate lun sw entry\n",
1938 device_xname(sc->sc_c.sc_dev), target, lun);
1939 #endif
1940 /* INT int_resellun */
1941 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1942 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1943 /* Now the slot entry: JUMP abs_foo, IF lun */
1944 siop_script_write(sc, sc->script_free_lo - 2,
1945 0x800c0000 | lun);
1946 siop_script_write(sc, sc->script_free_lo - 1, 0);
1947 siop_lun->reseloff = sc->script_free_lo - 2;
1948 lunsw->lunsw_size += 2;
1949 sc->script_free_lo += 2;
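/*
 * Order matters here: a fresh 'INT int_resellun' terminator is written
 * first at the new end of the switch, then the old terminator (the last
 * two words, at script_free_lo - 2, guaranteed by the check above to be
 * the end of this lun switch) is overwritten with the new lun entry.
 * Its jump address is left 0 for now; it is patched later, either with
 * the tag switch below or, for untagged commands, when siop_start()
 * points it at the command's reload-dsa script.
 */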
1950 if (siop_target->target_c.flags & TARF_TAG) {
1951 /* we need a tag switch */
1952 sc->script_free_hi -= __arraycount(tag_switch);
1953 if (sc->sc_c.features & SF_CHIP_RAM) {
1954 bus_space_write_region_4(sc->sc_c.sc_ramt,
1955 sc->sc_c.sc_ramh,
1956 sc->script_free_hi * 4, tag_switch,
1957 __arraycount(tag_switch));
1958 } else {
1959 for (i = 0; i < __arraycount(tag_switch); i++) {
1960 sc->sc_c.sc_script[sc->script_free_hi + i] =
1961 siop_htoc32(&sc->sc_c, tag_switch[i]);
1962 }
1963 }
1964 siop_script_write(sc,
1965 siop_lun->reseloff + 1,
1966 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1967 Ent_tag_switch_entry);
1968
1969 for (i = 0; i < SIOP_NTAG; i++) {
1970 siop_lun->siop_tag[i].reseloff =
1971 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1972 }
1973 } else {
1974 /* non-tag case; just work with the lun switch */
1975 siop_lun->siop_tag[0].reseloff =
1976 siop_target->siop_lun[lun]->reseloff;
1977 }
1978 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1979 }
1980
1981 void
1982 siop_del_dev(struct siop_softc *sc, int target, int lun)
1983 {
1984 int i;
1985 struct siop_target *siop_target;
1986
1987 #ifdef SIOP_DEBUG
1988 printf("%s:%d:%d: free lun sw entry\n",
1989 device_xname(sc->sc_c.sc_dev), target, lun);
1990 #endif
1991 if (sc->sc_c.targets[target] == NULL)
1992 return;
1993 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1994 free(siop_target->siop_lun[lun], M_DEVBUF);
1995 siop_target->siop_lun[lun] = NULL;
1996 /* XXX compact sw entry too ? */
1997 /* check if we can free the whole target */
1998 for (i = 0; i < 8; i++) {
1999 if (siop_target->siop_lun[i] != NULL)
2000 return;
2001 }
2002 #ifdef SIOP_DEBUG
2003 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
2004 device_xname(sc->sc_c.sc_dev), target, lun,
2005 siop_target->lunsw->lunsw_off);
2006 #endif
2007 /*
2008 * no lun left on this target: free the target struct and its
2009 * resel switch entry
2010 */
2011 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
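/*
 * 0x800c00ff restores the 'free slot' pattern that siop_add_reselsw()
 * scans for ((word & 0xff) == 0xff), so this resel switch entry can be
 * handed to another target later.
 */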
2012 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2013 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2014 free(sc->sc_c.targets[target], M_DEVBUF);
2015 sc->sc_c.targets[target] = NULL;
2016 sc->sc_ntargets--;
2017 }
2018
2019 #ifdef SIOP_STATS
2020 void
2021 siop_printstats(void)
2022 {
2023
2024 printf("siop_stat_intr %d\n", siop_stat_intr);
2025 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2026 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2027 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2028 printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
2029 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2030 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2031 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2032 }
2033 #endif
2034