1 /* $NetBSD: siop.c,v 1.95 2010/04/09 19:25:52 jakllsch Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.95 2010/04/09 19:25:52 jakllsch Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/kernel.h>
39
40 #include <uvm/uvm_extern.h>
41
42 #include <machine/endian.h>
43 #include <sys/bus.h>
44
45 #include <dev/microcode/siop/siop.out>
46
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsi_message.h>
49 #include <dev/scsipi/scsipi_all.h>
50
51 #include <dev/scsipi/scsiconf.h>
52
53 #include <dev/ic/siopreg.h>
54 #include <dev/ic/siopvar_common.h>
55 #include <dev/ic/siopvar.h>
56
57 #include "opt_siop.h"
58
59 #ifndef DEBUG
60 #undef DEBUG
61 #endif
62 /*
63 #define SIOP_DEBUG
64 #define SIOP_DEBUG_DR
65 #define SIOP_DEBUG_INTR
66 #define SIOP_DEBUG_SCHED
67 #define DUMP_SCRIPT
68 */
69
70 #define SIOP_STATS
71
72 #ifndef SIOP_DEFAULT_TARGET
73 #define SIOP_DEFAULT_TARGET 7
74 #endif
75
76 /* number of cmd descriptors per block */
77 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
78
79 /* Number of scheduler slots (needs to match the script) */
80 #define SIOP_NSLOTS 40
81
82 void siop_reset(struct siop_softc *);
83 void siop_handle_reset(struct siop_softc *);
84 int siop_handle_qtag_reject(struct siop_cmd *);
85 void siop_scsicmd_end(struct siop_cmd *);
86 void siop_unqueue(struct siop_softc *, int, int);
87 static void siop_start(struct siop_softc *, struct siop_cmd *);
88 void siop_timeout(void *);
89 int siop_scsicmd(struct scsipi_xfer *);
90 void siop_scsipi_request(struct scsipi_channel *,
91 scsipi_adapter_req_t, void *);
92 void siop_dump_script(struct siop_softc *);
93 void siop_morecbd(struct siop_softc *);
94 struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
95 void siop_add_reselsw(struct siop_softc *, int);
96 void siop_update_scntl3(struct siop_softc *,
97 struct siop_common_target *);
98
99 #ifdef SIOP_STATS
100 static int siop_stat_intr = 0;
101 static int siop_stat_intr_shortxfer = 0;
102 static int siop_stat_intr_sdp = 0;
103 static int siop_stat_intr_saveoffset = 0;
104 static int siop_stat_intr_done = 0;
105 static int siop_stat_intr_xferdisc = 0;
106 static int siop_stat_intr_lunresel = 0;
107 static int siop_stat_intr_qfull = 0;
108 void siop_printstats(void);
109 #define INCSTAT(x) x++
110 #else
111 #define INCSTAT(x)
112 #endif
113
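/*
 * Script access helpers: the SCRIPTS program either lives in on-chip
 * RAM (SF_CHIP_RAM), accessed through bus_space, or in host DMA memory,
 * in which case it needs explicit DMA syncs and byte-order conversion.
 */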
114 static inline void siop_script_sync(struct siop_softc *, int);
115 static inline void
116 siop_script_sync(struct siop_softc *sc, int ops)
117 {
118
119 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
120 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
121 PAGE_SIZE, ops);
122 }
123
124 static inline uint32_t siop_script_read(struct siop_softc *, u_int);
125 static inline uint32_t
126 siop_script_read(struct siop_softc *sc, u_int offset)
127 {
128
129 if (sc->sc_c.features & SF_CHIP_RAM) {
130 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
131 offset * 4);
132 } else {
133 return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]);
134 }
135 }
136
137 static inline void siop_script_write(struct siop_softc *, u_int,
138 uint32_t);
139 static inline void
140 siop_script_write(struct siop_softc *sc, u_int offset, uint32_t val)
141 {
142
143 if (sc->sc_c.features & SF_CHIP_RAM) {
144 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
145 offset * 4, val);
146 } else {
147 sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val);
148 }
149 }
150
151 void
152 siop_attach(struct siop_softc *sc)
153 {
154
155 if (siop_common_attach(&sc->sc_c) != 0)
156 return;
157
158 TAILQ_INIT(&sc->free_list);
159 TAILQ_INIT(&sc->cmds);
160 TAILQ_INIT(&sc->lunsw_list);
161 sc->sc_currschedslot = 0;
162 #ifdef SIOP_DEBUG
163 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
164 device_xname(sc->sc_c.sc_dev), (int)sizeof(siop_script),
165 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
166 #endif
167
168 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
169 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
170
171 /* Do a bus reset, so that devices fall back to narrow/async */
172 siop_resetbus(&sc->sc_c);
173 /*
174 * siop_reset() will reset the chip, thus clearing pending interrupts
175 */
176 siop_reset(sc);
177 #ifdef DUMP_SCRIPT
178 siop_dump_script(sc);
179 #endif
180
181 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
182 }
183
184 void
185 siop_reset(struct siop_softc *sc)
186 {
187 int i, j;
188 struct siop_lunsw *lunsw;
189
190 siop_common_reset(&sc->sc_c);
191
192 /* copy and patch the script */
193 if (sc->sc_c.features & SF_CHIP_RAM) {
194 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
195 siop_script, __arraycount(siop_script));
196 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
197 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
198 E_abs_msgin_Used[j] * 4,
199 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
200 }
201 if (sc->sc_c.features & SF_CHIP_LED0) {
202 bus_space_write_region_4(sc->sc_c.sc_ramt,
203 sc->sc_c.sc_ramh,
204 Ent_led_on1, siop_led_on,
205 __arraycount(siop_led_on));
206 bus_space_write_region_4(sc->sc_c.sc_ramt,
207 sc->sc_c.sc_ramh,
208 Ent_led_on2, siop_led_on,
209 __arraycount(siop_led_on));
210 bus_space_write_region_4(sc->sc_c.sc_ramt,
211 sc->sc_c.sc_ramh,
212 Ent_led_off, siop_led_off,
213 __arraycount(siop_led_off));
214 }
215 } else {
216 for (j = 0; j < __arraycount(siop_script); j++) {
217 sc->sc_c.sc_script[j] =
218 siop_htoc32(&sc->sc_c, siop_script[j]);
219 }
220 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
221 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
222 siop_htoc32(&sc->sc_c,
223 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
224 }
225 if (sc->sc_c.features & SF_CHIP_LED0) {
226 for (j = 0; j < __arraycount(siop_led_on); j++)
227 sc->sc_c.sc_script[
228 Ent_led_on1 / sizeof(siop_led_on[0]) + j
229 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
230 for (j = 0; j < __arraycount(siop_led_on); j++)
231 sc->sc_c.sc_script[
232 Ent_led_on2 / sizeof(siop_led_on[0]) + j
233 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
234 for (j = 0; j < __arraycount(siop_led_off); j++)
235 sc->sc_c.sc_script[
236 Ent_led_off / sizeof(siop_led_off[0]) + j
237 ] = siop_htoc32(&sc->sc_c, siop_led_off[j]);
238 }
239 }
240 sc->script_free_lo = __arraycount(siop_script);
241 sc->script_free_hi = sc->sc_c.ram_size / 4;
242 sc->sc_ntargets = 0;
243
244 /* free used and unused lun switches */
245 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
246 #ifdef SIOP_DEBUG
247 printf("%s: free lunsw at offset %d\n",
248 device_xname(sc->sc_c.sc_dev), lunsw->lunsw_off);
249 #endif
250 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
251 free(lunsw, M_DEVBUF);
252 }
253 TAILQ_INIT(&sc->lunsw_list);
254 /* restore reselect switch */
255 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
256 struct siop_target *target;
257 if (sc->sc_c.targets[i] == NULL)
258 continue;
259 #ifdef SIOP_DEBUG
260 printf("%s: restore sw for target %d\n",
261 device_xname(sc->sc_c.sc_dev), i);
262 #endif
263 target = (struct siop_target *)sc->sc_c.targets[i];
264 free(target->lunsw, M_DEVBUF);
265 target->lunsw = siop_get_lunsw(sc);
266 if (target->lunsw == NULL) {
267 aprint_error_dev(sc->sc_c.sc_dev,
268 "can't alloc lunsw for target %d\n", i);
269 break;
270 }
271 siop_add_reselsw(sc, i);
272 }
273
274 /* start script */
275 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
276 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
277 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
278 }
279 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
280 sc->sc_c.sc_scriptaddr + Ent_reselect);
281 }
282
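/*
 * CALL_SCRIPT(ent): start the SCRIPTS processor at script entry 'ent'
 * by writing its bus address to the DSP register (the "#if 0" variant
 * below additionally logs the DSA and DSP values for debugging).
 */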
283 #if 0
284 #define CALL_SCRIPT(ent) do { \
285 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
286 siop_cmd->cmd_c.dsa, \
287 sc->sc_c.sc_scriptaddr + ent); \
288 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
289 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
290 } while (/* CONSTCOND */0)
291 #else
292 #define CALL_SCRIPT(ent) do { \
293 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
294 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
295 } while (/* CONSTCOND */0)
296 #endif
297
298 int
299 siop_intr(void *v)
300 {
301 struct siop_softc *sc = v;
302 struct siop_target *siop_target;
303 struct siop_cmd *siop_cmd;
304 struct siop_lun *siop_lun;
305 struct scsipi_xfer *xs;
306 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
307 uint32_t irqcode;
308 int need_reset = 0;
309 int offset, target, lun, tag;
310 bus_addr_t dsa;
311 struct siop_cbd *cbdp;
312 int freetarget = 0;
313 int restart = 0;
314
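 /*
  * ISTAT tells us where the interrupt comes from: INTF (interrupt on
  * the fly), DIP (DMA core, details in DSTAT) or SIP (SCSI core,
  * details in SIST0/SIST1).
  */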
315 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
316 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
317 return 0;
318 INCSTAT(siop_stat_intr);
319 if (istat & ISTAT_INTF) {
320 printf("INTRF\n");
321 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
322 SIOP_ISTAT, ISTAT_INTF);
323 }
324 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
325 (ISTAT_DIP | ISTAT_ABRT)) {
326 /* clear abort */
327 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
328 SIOP_ISTAT, 0);
329 }
330 /* use DSA to find the current siop_cmd */
331 siop_cmd = NULL;
332 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
333 TAILQ_FOREACH(cbdp, &sc->cmds, next) {
334 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
335 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
336 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
337 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
338 siop_table_sync(siop_cmd,
339 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
340 break;
341 }
342 }
343 if (siop_cmd) {
344 xs = siop_cmd->cmd_c.xs;
345 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
346 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
347 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
348 tag = siop_cmd->cmd_c.tag;
349 siop_lun = siop_target->siop_lun[lun];
350 #ifdef DIAGNOSTIC
351 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
352 printf("siop_cmd (lun %d) for DSA 0x%x "
353 "not active (%d)\n", lun, (u_int)dsa,
354 siop_cmd->cmd_c.status);
355 xs = NULL;
356 siop_target = NULL;
357 target = -1;
358 lun = -1;
359 tag = -1;
360 siop_lun = NULL;
361 siop_cmd = NULL;
362 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
363 printf("siop_cmd (lun %d tag %d) not in siop_lun "
364 "active (%p != %p)\n", lun, tag, siop_cmd,
365 siop_lun->siop_tag[tag].active);
366 }
367 #endif
368 } else {
369 xs = NULL;
370 siop_target = NULL;
371 target = -1;
372 lun = -1;
373 tag = -1;
374 siop_lun = NULL;
375 }
376 if (istat & ISTAT_DIP) {
377 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
378 SIOP_DSTAT);
379 if (dstat & DSTAT_ABRT) {
380 /* was probably generated by a bus reset IOCTL */
381 if ((dstat & DSTAT_DFE) == 0)
382 siop_clearfifo(&sc->sc_c);
383 goto reset;
384 }
385 if (dstat & DSTAT_SSI) {
386 printf("single step dsp 0x%08x dsa 0x%08x\n",
387 (int)(bus_space_read_4(sc->sc_c.sc_rt,
388 sc->sc_c.sc_rh, SIOP_DSP) -
389 sc->sc_c.sc_scriptaddr),
390 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
391 SIOP_DSA));
392 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
393 (istat & ISTAT_SIP) == 0) {
394 bus_space_write_1(sc->sc_c.sc_rt,
395 sc->sc_c.sc_rh, SIOP_DCNTL,
396 bus_space_read_1(sc->sc_c.sc_rt,
397 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
398 }
399 return 1;
400 }
401
402 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
403 printf("DMA IRQ:");
404 if (dstat & DSTAT_IID)
405 printf(" Illegal instruction");
406 if (dstat & DSTAT_BF)
407 printf(" bus fault");
408 if (dstat & DSTAT_MDPE)
409 printf(" parity");
410 if (dstat & DSTAT_DFE)
411 printf(" DMA fifo empty");
412 else
413 siop_clearfifo(&sc->sc_c);
414 printf(", DSP=0x%x DSA=0x%x: ",
415 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
416 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
417 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
418 if (siop_cmd)
419 printf("last msg_in=0x%x status=0x%x\n",
420 siop_cmd->cmd_tables->msg_in[0],
421 siop_ctoh32(&sc->sc_c,
422 siop_cmd->cmd_tables->status));
423 else
424 aprint_error_dev(sc->sc_c.sc_dev,
425 "current DSA invalid\n");
426 need_reset = 1;
427 }
428 }
429 if (istat & ISTAT_SIP) {
430 if (istat & ISTAT_DIP)
431 delay(10);
432 /*
433 * sist0 and sist1 can't be read independently without a delay
434 * between the accesses, so read both with one 16-bit access.
435 */
436 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
437 SIOP_SIST0);
438 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
439 SIOP_SSTAT1);
440 #ifdef SIOP_DEBUG_INTR
441 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
442 "DSA=0x%x DSP=0x%lx\n", sist,
443 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 SIOP_SSTAT1),
445 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
446 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
447 SIOP_DSP) -
448 sc->sc_c.sc_scriptaddr));
449 #endif
450 if (sist & SIST0_RST) {
451 siop_handle_reset(sc);
452 /* no table to flush here */
453 return 1;
454 }
455 if (sist & SIST0_SGE) {
456 if (siop_cmd)
457 scsipi_printaddr(xs->xs_periph);
458 else
459 printf("%s:", device_xname(sc->sc_c.sc_dev));
460 printf("scsi gross error\n");
461 goto reset;
462 }
463 if ((sist & SIST0_MA) && need_reset == 0) {
464 if (siop_cmd) {
465 int scratcha0;
466 dstat = bus_space_read_1(sc->sc_c.sc_rt,
467 sc->sc_c.sc_rh, SIOP_DSTAT);
468 /*
469 * first restore DSA, in case we were in a S/G
470 * operation.
471 */
472 bus_space_write_4(sc->sc_c.sc_rt,
473 sc->sc_c.sc_rh,
474 SIOP_DSA, siop_cmd->cmd_c.dsa);
475 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
476 sc->sc_c.sc_rh, SIOP_SCRATCHA);
477 switch (sstat1 & SSTAT1_PHASE_MASK) {
478 case SSTAT1_PHASE_STATUS:
479 /*
480 * The previous phase may be aborted for any reason
481 * (for example, the target has less data to
482 * transfer than requested). Compute the resid and
483 * just go to status; the command should
484 * terminate.
485 */
486 INCSTAT(siop_stat_intr_shortxfer);
487 if (scratcha0 & A_flag_data)
488 siop_ma(&siop_cmd->cmd_c);
489 else if ((dstat & DSTAT_DFE) == 0)
490 siop_clearfifo(&sc->sc_c);
491 CALL_SCRIPT(Ent_status);
492 return 1;
493 case SSTAT1_PHASE_MSGIN:
494 /*
495 * The target may be ready to disconnect.
496 * Compute the resid, which will be used later
497 * if a save data pointer is needed.
498 */
499 INCSTAT(siop_stat_intr_xferdisc);
500 if (scratcha0 & A_flag_data)
501 siop_ma(&siop_cmd->cmd_c);
502 else if ((dstat & DSTAT_DFE) == 0)
503 siop_clearfifo(&sc->sc_c);
504 bus_space_write_1(sc->sc_c.sc_rt,
505 sc->sc_c.sc_rh, SIOP_SCRATCHA,
506 scratcha0 & ~A_flag_data);
507 CALL_SCRIPT(Ent_msgin);
508 return 1;
509 }
510 aprint_error_dev(sc->sc_c.sc_dev,
511 "unexpected phase mismatch %d\n",
512 sstat1 & SSTAT1_PHASE_MASK);
513 } else {
514 aprint_error_dev(sc->sc_c.sc_dev,
515 "phase mismatch without command\n");
516 }
517 need_reset = 1;
518 }
519 if (sist & SIST0_PAR) {
520 /* parity error, reset */
521 if (siop_cmd)
522 scsipi_printaddr(xs->xs_periph);
523 else
524 printf("%s:", device_xname(sc->sc_c.sc_dev));
525 printf("parity error\n");
526 goto reset;
527 }
528 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
529 /* selection time out, assume there's no device here */
530 if (siop_cmd) {
531 siop_cmd->cmd_c.status = CMDST_DONE;
532 xs->error = XS_SELTIMEOUT;
533 freetarget = 1;
534 goto end;
535 } else {
536 aprint_error_dev(sc->sc_c.sc_dev,
537 "selection timeout without "
538 "command\n");
539 need_reset = 1;
540 }
541 }
542 if (sist & SIST0_UDC) {
543 /*
544 * unexpected disconnect. Usually the target signals
545 * a fatal condition this way. Attempt to get sense.
546 */
547 if (siop_cmd) {
548 siop_cmd->cmd_tables->status =
549 siop_htoc32(&sc->sc_c, SCSI_CHECK);
550 goto end;
551 }
552 aprint_error_dev(sc->sc_c.sc_dev,
553 "unexpected disconnect without "
554 "command\n");
555 goto reset;
556 }
557 if (sist & (SIST1_SBMC << 8)) {
558 /* SCSI bus mode change */
559 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
560 goto reset;
561 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
562 /*
563 * we have a script interrupt, it will
564 * restart the script.
565 */
566 goto scintr;
567 }
568 /*
569 * else we have to restart it ourselves, at the
570 * interrupted instruction.
571 */
572 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
573 SIOP_DSP,
574 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
575 SIOP_DSP) - 8);
576 return 1;
577 }
578 /* Else it's an unhandled exception (for now). */
579 aprint_error_dev(sc->sc_c.sc_dev,
580 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
581 "DSA=0x%x DSP=0x%x\n", sist,
582 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
583 SIOP_SSTAT1),
584 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
585 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
587 if (siop_cmd) {
588 siop_cmd->cmd_c.status = CMDST_DONE;
589 xs->error = XS_SELTIMEOUT;
590 goto end;
591 }
592 need_reset = 1;
593 }
594 if (need_reset) {
595 reset:
596 /* fatal error, reset the bus */
597 siop_resetbus(&sc->sc_c);
598 /* no table to flush here */
599 return 1;
600 }
601
602 scintr:
603 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
604 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
605 SIOP_DSPS);
606 #ifdef SIOP_DEBUG_INTR
607 printf("script interrupt 0x%x\n", irqcode);
608 #endif
609 /*
610 * A missing or inactive command is only valid for a
611 * reselect interrupt.
612 */
613 if ((irqcode & 0x80) == 0) {
614 if (siop_cmd == NULL) {
615 aprint_error_dev(sc->sc_c.sc_dev,
616 "script interrupt (0x%x) with "
617 "invalid DSA !!!\n",
618 irqcode);
619 goto reset;
620 }
621 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
622 aprint_error_dev(sc->sc_c.sc_dev,
623 "command with invalid status "
624 "(IRQ code 0x%x current status %d) !\n",
625 irqcode, siop_cmd->cmd_c.status);
626 xs = NULL;
627 }
628 }
629 switch(irqcode) {
630 case A_int_err:
631 printf("error, DSP=0x%x\n",
632 (int)(bus_space_read_4(sc->sc_c.sc_rt,
633 sc->sc_c.sc_rh, SIOP_DSP) -
634 sc->sc_c.sc_scriptaddr));
635 if (xs) {
636 xs->error = XS_SELTIMEOUT;
637 goto end;
638 } else {
639 goto reset;
640 }
641 case A_int_reseltarg:
642 aprint_error_dev(sc->sc_c.sc_dev,
643 "reselect with invalid target\n");
644 goto reset;
645 case A_int_resellun:
646 INCSTAT(siop_stat_intr_lunresel);
647 target = bus_space_read_1(sc->sc_c.sc_rt,
648 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
649 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
650 SIOP_SCRATCHA + 1);
651 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
652 SIOP_SCRATCHA + 2);
653 siop_target =
654 (struct siop_target *)sc->sc_c.targets[target];
655 if (siop_target == NULL) {
656 printf("%s: reselect with invalid target %d\n",
657 device_xname(sc->sc_c.sc_dev), target);
658 goto reset;
659 }
660 siop_lun = siop_target->siop_lun[lun];
661 if (siop_lun == NULL) {
662 printf("%s: target %d reselect with invalid "
663 "lun %d\n", device_xname(sc->sc_c.sc_dev),
664 target, lun);
665 goto reset;
666 }
667 if (siop_lun->siop_tag[tag].active == NULL) {
668 printf("%s: target %d lun %d tag %d reselect "
669 "without command\n",
670 device_xname(sc->sc_c.sc_dev),
671 target, lun, tag);
672 goto reset;
673 }
674 siop_cmd = siop_lun->siop_tag[tag].active;
675 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
676 SIOP_DSP, siop_cmd->cmd_c.dsa +
677 sizeof(struct siop_common_xfer) +
678 Ent_ldsa_reload_dsa);
679 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
680 return 1;
681 case A_int_reseltag:
682 printf("%s: reselect with invalid tag\n",
683 device_xname(sc->sc_c.sc_dev));
684 goto reset;
685 case A_int_msgin:
686 {
687 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
688 sc->sc_c.sc_rh, SIOP_SFBR);
689
690 if (msgin == MSG_MESSAGE_REJECT) {
691 int msg, extmsg;
692 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
693 /*
694 * message was part of an identify +
695 * something else. Identify shouldn't
696 * have been rejected.
697 */
698 msg =
699 siop_cmd->cmd_tables->msg_out[1];
700 extmsg =
701 siop_cmd->cmd_tables->msg_out[3];
702 } else {
703 msg = siop_cmd->cmd_tables->msg_out[0];
704 extmsg =
705 siop_cmd->cmd_tables->msg_out[2];
706 }
707 if (msg == MSG_MESSAGE_REJECT) {
708 /* MSG_REJECT for a MSG_REJECT !*/
709 if (xs)
710 scsipi_printaddr(xs->xs_periph);
711 else
712 printf("%s: ", device_xname(
713 sc->sc_c.sc_dev));
714 printf("our reject message was "
715 "rejected\n");
716 goto reset;
717 }
718 if (msg == MSG_EXTENDED &&
719 extmsg == MSG_EXT_WDTR) {
720 /* WDTR rejected, initiate sync */
721 if ((siop_target->target_c.flags &
722 TARF_SYNC) == 0) {
723 siop_target->target_c.status =
724 TARST_OK;
725 siop_update_xfer_mode(&sc->sc_c,
726 target);
727 /* no table to flush here */
728 CALL_SCRIPT(Ent_msgin_ack);
729 return 1;
730 }
731 siop_target->target_c.status =
732 TARST_SYNC_NEG;
733 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
734 sc->sc_c.st_minsync,
735 sc->sc_c.maxoff);
736 siop_table_sync(siop_cmd,
737 BUS_DMASYNC_PREREAD |
738 BUS_DMASYNC_PREWRITE);
739 CALL_SCRIPT(Ent_send_msgout);
740 return 1;
741 } else if (msg == MSG_EXTENDED &&
742 extmsg == MSG_EXT_SDTR) {
743 /* sync rejected */
744 siop_target->target_c.offset = 0;
745 siop_target->target_c.period = 0;
746 siop_target->target_c.status = TARST_OK;
747 siop_update_xfer_mode(&sc->sc_c,
748 target);
749 /* no table to flush here */
750 CALL_SCRIPT(Ent_msgin_ack);
751 return 1;
752 } else if (msg == MSG_SIMPLE_Q_TAG ||
753 msg == MSG_HEAD_OF_Q_TAG ||
754 msg == MSG_ORDERED_Q_TAG) {
755 if (siop_handle_qtag_reject(
756 siop_cmd) == -1)
757 goto reset;
758 CALL_SCRIPT(Ent_msgin_ack);
759 return 1;
760 }
761 if (xs)
762 scsipi_printaddr(xs->xs_periph);
763 else
764 printf("%s: ",
765 device_xname(sc->sc_c.sc_dev));
766 if (msg == MSG_EXTENDED) {
767 printf("scsi message reject, extended "
768 "message sent was 0x%x\n", extmsg);
769 } else {
770 printf("scsi message reject, message "
771 "sent was 0x%x\n", msg);
772 }
773 /* no table to flush here */
774 CALL_SCRIPT(Ent_msgin_ack);
775 return 1;
776 }
777 if (msgin == MSG_IGN_WIDE_RESIDUE) {
778 /* use the extmsgdata table to get the second byte */
779 siop_cmd->cmd_tables->t_extmsgdata.count =
780 siop_htoc32(&sc->sc_c, 1);
781 siop_table_sync(siop_cmd,
782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
783 CALL_SCRIPT(Ent_get_extmsgdata);
784 return 1;
785 }
786 if (xs)
787 scsipi_printaddr(xs->xs_periph);
788 else
789 printf("%s: ", device_xname(sc->sc_c.sc_dev));
790 printf("unhandled message 0x%x\n",
791 siop_cmd->cmd_tables->msg_in[0]);
792 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
793 siop_cmd->cmd_tables->t_msgout.count =
794 siop_htoc32(&sc->sc_c, 1);
795 siop_table_sync(siop_cmd,
796 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
797 CALL_SCRIPT(Ent_send_msgout);
798 return 1;
799 }
800 case A_int_extmsgin:
801 #ifdef SIOP_DEBUG_INTR
802 printf("extended message: msg 0x%x len %d\n",
803 siop_cmd->cmd_tables->msg_in[2],
804 siop_cmd->cmd_tables->msg_in[1]);
805 #endif
806 if (siop_cmd->cmd_tables->msg_in[1] >
807 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
808 aprint_error_dev(sc->sc_c.sc_dev,
809 "extended message too big (%d)\n",
810 siop_cmd->cmd_tables->msg_in[1]);
811 siop_cmd->cmd_tables->t_extmsgdata.count =
812 siop_htoc32(&sc->sc_c,
813 siop_cmd->cmd_tables->msg_in[1] - 1);
814 siop_table_sync(siop_cmd,
815 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
816 CALL_SCRIPT(Ent_get_extmsgdata);
817 return 1;
818 case A_int_extmsgdata:
819 #ifdef SIOP_DEBUG_INTR
820 {
821 int i;
822 printf("extended message: 0x%x, data:",
823 siop_cmd->cmd_tables->msg_in[2]);
824 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
825 i++)
826 printf(" 0x%x",
827 siop_cmd->cmd_tables->msg_in[i]);
828 printf("\n");
829 }
830 #endif
831 if (siop_cmd->cmd_tables->msg_in[0] ==
832 MSG_IGN_WIDE_RESIDUE) {
833 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
834 if (siop_cmd->cmd_tables->msg_in[3] != 1)
835 printf("MSG_IGN_WIDE_RESIDUE: "
836 "bad len %d\n",
837 siop_cmd->cmd_tables->msg_in[3]);
838 switch (siop_iwr(&siop_cmd->cmd_c)) {
839 case SIOP_NEG_MSGOUT:
840 siop_table_sync(siop_cmd,
841 BUS_DMASYNC_PREREAD |
842 BUS_DMASYNC_PREWRITE);
843 CALL_SCRIPT(Ent_send_msgout);
844 return(1);
845 case SIOP_NEG_ACK:
846 CALL_SCRIPT(Ent_msgin_ack);
847 return(1);
848 default:
849 panic("invalid retval from "
850 "siop_iwr()");
851 }
852 return(1);
853 }
854 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
855 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
856 case SIOP_NEG_MSGOUT:
857 siop_update_scntl3(sc,
858 siop_cmd->cmd_c.siop_target);
859 siop_table_sync(siop_cmd,
860 BUS_DMASYNC_PREREAD |
861 BUS_DMASYNC_PREWRITE);
862 CALL_SCRIPT(Ent_send_msgout);
863 return(1);
864 case SIOP_NEG_ACK:
865 siop_update_scntl3(sc,
866 siop_cmd->cmd_c.siop_target);
867 CALL_SCRIPT(Ent_msgin_ack);
868 return(1);
869 default:
870 panic("invalid retval from "
871 "siop_wdtr_neg()");
872 }
873 return(1);
874 }
875 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
876 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
877 case SIOP_NEG_MSGOUT:
878 siop_update_scntl3(sc,
879 siop_cmd->cmd_c.siop_target);
880 siop_table_sync(siop_cmd,
881 BUS_DMASYNC_PREREAD |
882 BUS_DMASYNC_PREWRITE);
883 CALL_SCRIPT(Ent_send_msgout);
884 return(1);
885 case SIOP_NEG_ACK:
886 siop_update_scntl3(sc,
887 siop_cmd->cmd_c.siop_target);
888 CALL_SCRIPT(Ent_msgin_ack);
889 return(1);
890 default:
891 panic("invalid retval from "
892 "siop_wdtr_neg()");
893 }
894 return(1);
895 }
896 /* send a message reject */
897 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
898 siop_cmd->cmd_tables->t_msgout.count =
899 siop_htoc32(&sc->sc_c, 1);
900 siop_table_sync(siop_cmd,
901 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
902 CALL_SCRIPT(Ent_send_msgout);
903 return 1;
904 case A_int_disc:
905 INCSTAT(siop_stat_intr_sdp);
906 offset = bus_space_read_1(sc->sc_c.sc_rt,
907 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
908 #ifdef SIOP_DEBUG_DR
909 printf("disconnect offset %d\n", offset);
910 #endif
911 siop_sdp(&siop_cmd->cmd_c, offset);
912 /* we start again with no offset */
913 siop_cmd->saved_offset = SIOP_NOOFFSET;
914 siop_table_sync(siop_cmd,
915 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
916 CALL_SCRIPT(Ent_script_sched);
917 return 1;
918 case A_int_saveoffset:
919 INCSTAT(siop_stat_intr_saveoffset);
920 offset = bus_space_read_1(sc->sc_c.sc_rt,
921 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
922 #ifdef SIOP_DEBUG_DR
923 printf("saveoffset offset %d\n", offset);
924 #endif
925 siop_cmd->saved_offset = offset;
926 CALL_SCRIPT(Ent_script_sched);
927 return 1;
928 case A_int_resfail:
929 printf("reselect failed\n");
930 CALL_SCRIPT(Ent_script_sched);
931 return 1;
932 case A_int_done:
933 if (xs == NULL) {
934 printf("%s: done without command, DSA=0x%lx\n",
935 device_xname(sc->sc_c.sc_dev),
936 (u_long)siop_cmd->cmd_c.dsa);
937 siop_cmd->cmd_c.status = CMDST_FREE;
938 CALL_SCRIPT(Ent_script_sched);
939 return 1;
940 }
941 #ifdef SIOP_DEBUG_INTR
942 printf("done, DSA=0x%lx target id 0x%x last msg "
943 "in=0x%x status=0x%x\n",
944 (u_long)siop_cmd->cmd_c.dsa,
945 siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id),
946 siop_cmd->cmd_tables->msg_in[0],
947 siop_ctoh32(&sc->sc_c,
948 siop_cmd->cmd_tables->status));
949 #endif
950 INCSTAT(siop_stat_intr_done);
951 /* update resid. */
952 offset = bus_space_read_1(sc->sc_c.sc_rt,
953 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
954 /*
955 * if we got a disconnect between the last data phase
956 * and the status phase, offset will be 0. In this
957 * case, siop_cmd->saved_offset will have the proper
958 * value if it got updated by the controller
959 */
960 if (offset == 0 &&
961 siop_cmd->saved_offset != SIOP_NOOFFSET)
962 offset = siop_cmd->saved_offset;
963 siop_update_resid(&siop_cmd->cmd_c, offset);
964 siop_cmd->cmd_c.status = CMDST_DONE;
965 goto end;
966 default:
967 printf("unknown irqcode %x\n", irqcode);
968 if (xs) {
969 xs->error = XS_SELTIMEOUT;
970 goto end;
971 }
972 goto reset;
973 }
974 return 1;
975 }
976 /* We just shouldn't get here */
977 panic("siop_intr: I shouldn't be there !");
978
979 end:
980 /*
981 * restart the script now if the command completed properly.
982 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
983 * the queue.
984 */
985 xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status);
986 if (xs->status == SCSI_OK)
987 CALL_SCRIPT(Ent_script_sched);
988 else
989 restart = 1;
990 siop_lun->siop_tag[tag].active = NULL;
991 siop_scsicmd_end(siop_cmd);
992 if (freetarget && siop_target->target_c.status == TARST_PROBING)
993 siop_del_dev(sc, target, lun);
994 if (restart)
995 CALL_SCRIPT(Ent_script_sched);
996 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
997 /* a command terminated, so we have free slots now */
998 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
999 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1000 }
1001
1002 return 1;
1003 }
1004
1005 void
1006 siop_scsicmd_end(struct siop_cmd *siop_cmd)
1007 {
1008 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1009 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1010
1011 switch(xs->status) {
1012 case SCSI_OK:
1013 xs->error = XS_NOERROR;
1014 break;
1015 case SCSI_BUSY:
1016 xs->error = XS_BUSY;
1017 break;
1018 case SCSI_CHECK:
1019 xs->error = XS_BUSY;
1020 /* remove commands in the queue and scheduler */
1021 siop_unqueue(sc, xs->xs_periph->periph_target,
1022 xs->xs_periph->periph_lun);
1023 break;
1024 case SCSI_QUEUE_FULL:
1025 INCSTAT(siop_stat_intr_qfull);
1026 #ifdef SIOP_DEBUG
1027 printf("%s:%d:%d: queue full (tag %d)\n",
1028 device_xname(sc->sc_c.sc_dev),
1029 xs->xs_periph->periph_target,
1030 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1031 #endif
1032 xs->error = XS_BUSY;
1033 break;
1034 case SCSI_SIOP_NOCHECK:
1035 /*
1036 * don't check status, xs->error is already valid
1037 */
1038 break;
1039 case SCSI_SIOP_NOSTATUS:
1040 /*
1041 * the status byte was not updated, cmd was
1042 * aborted
1043 */
1044 xs->error = XS_SELTIMEOUT;
1045 break;
1046 default:
1047 scsipi_printaddr(xs->xs_periph);
1048 printf("invalid status code %d\n", xs->status);
1049 xs->error = XS_DRIVER_STUFFUP;
1050 }
1051 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1052 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
1053 0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1054 (xs->xs_control & XS_CTL_DATA_IN) ?
1055 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1056 bus_dmamap_unload(sc->sc_c.sc_dmat,
1057 siop_cmd->cmd_c.dmamap_data);
1058 }
1059 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1060 if ((xs->xs_control & XS_CTL_POLL) == 0)
1061 callout_stop(&xs->xs_callout);
1062 siop_cmd->cmd_c.status = CMDST_FREE;
1063 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1064 #if 0
1065 if (xs->resid != 0)
1066 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1067 #endif
1068 scsipi_done(xs);
1069 }
1070
1071 void
1072 siop_unqueue(struct siop_softc *sc, int target, int lun)
1073 {
1074 int slot, tag;
1075 struct siop_cmd *siop_cmd;
1076 struct siop_lun *siop_lun =
1077 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1078
1079 /* first make sure to read valid data */
1080 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1081
1082 for (tag = 1; tag < SIOP_NTAG; tag++) {
1083 /* look for commands in the scheduler, not yet started */
1084 if (siop_lun->siop_tag[tag].active == NULL)
1085 continue;
1086 siop_cmd = siop_lun->siop_tag[tag].active;
1087 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1088 if (siop_script_read(sc,
1089 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1090 siop_cmd->cmd_c.dsa +
1091 sizeof(struct siop_common_xfer) +
1092 Ent_ldsa_select)
1093 break;
1094 }
1095 if (slot > sc->sc_currschedslot)
1096 continue; /* didn't find it */
1097 if (siop_script_read(sc,
1098 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1099 continue; /* already started */
1100 /* clear the slot */
1101 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1102 0x80000000);
1103 /* ask to requeue */
1104 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1105 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1106 siop_lun->siop_tag[tag].active = NULL;
1107 siop_scsicmd_end(siop_cmd);
1108 }
1109 /* update sc_currschedslot */
1110 sc->sc_currschedslot = 0;
1111 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1112 if (siop_script_read(sc,
1113 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1114 sc->sc_currschedslot = slot;
1115 }
1116 }
1117
1118 /*
1119 * handle a rejected queue tag message: the command will run untagged,
1120 * so we have to adjust the reselect script.
1121 */
1122 int
1123 siop_handle_qtag_reject(struct siop_cmd *siop_cmd)
1124 {
1125 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1126 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1127 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1128 int tag = siop_cmd->cmd_tables->msg_out[2];
1129 struct siop_lun *siop_lun =
1130 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1131
1132 #ifdef SIOP_DEBUG
1133 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1134 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1135 siop_cmd->cmd_c.tag,
1136 siop_cmd->cmd_c.status);
1137 #endif
1138
1139 if (siop_lun->siop_tag[0].active != NULL) {
1140 printf("%s: untagged command already running for target %d "
1141 "lun %d (status %d)\n", device_xname(sc->sc_c.sc_dev),
1142 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1143 return -1;
1144 }
1145 /* clear tag slot */
1146 siop_lun->siop_tag[tag].active = NULL;
1147 /* add command to non-tagged slot */
1148 siop_lun->siop_tag[0].active = siop_cmd;
1149 siop_cmd->cmd_c.tag = 0;
1150 /* adjust reselect script if there is one */
1151 if (siop_lun->siop_tag[0].reseloff > 0) {
1152 siop_script_write(sc,
1153 siop_lun->siop_tag[0].reseloff + 1,
1154 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1155 Ent_ldsa_reload_dsa);
1156 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1157 }
1158 return 0;
1159 }
1160
1161 /*
1162 * handle a bus reset: reset the chip, unqueue all active commands, free
1163 * all target structs and report the loss to the upper layer.
1164 * As the upper layer may requeue immediately we have to first store
1165 * all active commands in a temporary queue.
1166 */
1167 void
1168 siop_handle_reset(struct siop_softc *sc)
1169 {
1170 struct siop_cmd *siop_cmd;
1171 struct siop_lun *siop_lun;
1172 int target, lun, tag;
1173
1174 /*
1175 * scsi bus reset. reset the chip and restart
1176 * the queue. Need to clean up all active commands
1177 */
1178 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1179 /* stop, reset and restart the chip */
1180 siop_reset(sc);
1181 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1182 /* chip has been reset, all slots are free now */
1183 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1184 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1185 }
1186 /*
1187 * Process all commands: first commands being executed
1188 */
1189 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1190 target++) {
1191 if (sc->sc_c.targets[target] == NULL)
1192 continue;
1193 for (lun = 0; lun < 8; lun++) {
1194 struct siop_target *siop_target =
1195 (struct siop_target *)sc->sc_c.targets[target];
1196 siop_lun = siop_target->siop_lun[lun];
1197 if (siop_lun == NULL)
1198 continue;
1199 for (tag = 0; tag <
1200 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1201 SIOP_NTAG : 1);
1202 tag++) {
1203 siop_cmd = siop_lun->siop_tag[tag].active;
1204 if (siop_cmd == NULL)
1205 continue;
1206 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1207 printf("command with tag id %d reset\n", tag);
1208 siop_cmd->cmd_c.xs->error =
1209 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1210 XS_TIMEOUT : XS_RESET;
1211 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1212 siop_lun->siop_tag[tag].active = NULL;
1213 siop_cmd->cmd_c.status = CMDST_DONE;
1214 siop_scsicmd_end(siop_cmd);
1215 }
1216 }
1217 sc->sc_c.targets[target]->status = TARST_ASYNC;
1218 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1219 sc->sc_c.targets[target]->period =
1220 sc->sc_c.targets[target]->offset = 0;
1221 siop_update_xfer_mode(&sc->sc_c, target);
1222 }
1223
1224 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1225 }
1226
1227 void
1228 siop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1229 void *arg)
1230 {
1231 struct scsipi_xfer *xs;
1232 struct scsipi_periph *periph;
1233 struct siop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1234 struct siop_cmd *siop_cmd;
1235 struct siop_target *siop_target;
1236 int s, error, i;
1237 int target;
1238 int lun;
1239
1240 switch (req) {
1241 case ADAPTER_REQ_RUN_XFER:
1242 xs = arg;
1243 periph = xs->xs_periph;
1244 target = periph->periph_target;
1245 lun = periph->periph_lun;
1246
1247 s = splbio();
1248 #ifdef SIOP_DEBUG_SCHED
1249 printf("starting cmd for %d:%d\n", target, lun);
1250 #endif
1251 siop_cmd = TAILQ_FIRST(&sc->free_list);
1252 if (siop_cmd == NULL) {
1253 xs->error = XS_RESOURCE_SHORTAGE;
1254 scsipi_done(xs);
1255 splx(s);
1256 return;
1257 }
1258 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1259 #ifdef DIAGNOSTIC
1260 if (siop_cmd->cmd_c.status != CMDST_FREE)
1261 panic("siop_scsicmd: new cmd not free");
1262 #endif
1263 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1264 if (siop_target == NULL) {
1265 #ifdef SIOP_DEBUG
1266 printf("%s: alloc siop_target for target %d\n",
1267 device_xname(sc->sc_c.sc_dev), target);
1268 #endif
1269 sc->sc_c.targets[target] =
1270 malloc(sizeof(struct siop_target),
1271 M_DEVBUF, M_NOWAIT|M_ZERO);
1272 if (sc->sc_c.targets[target] == NULL) {
1273 aprint_error_dev(sc->sc_c.sc_dev,
1274 "can't malloc memory for "
1275 "target %d\n", target);
1276 xs->error = XS_RESOURCE_SHORTAGE;
1277 scsipi_done(xs);
1278 TAILQ_INSERT_TAIL(&sc->free_list,
1279 siop_cmd, next);
1280 splx(s);
1281 return;
1282 }
1283 siop_target =
1284 (struct siop_target *)sc->sc_c.targets[target];
1285 siop_target->target_c.status = TARST_PROBING;
1286 siop_target->target_c.flags = 0;
1287 siop_target->target_c.id =
1288 sc->sc_c.clock_div << 24; /* scntl3 */
1289 siop_target->target_c.id |= target << 16; /* id */
1290 /* siop_target->target_c.id |= 0x0 << 8; sxfer is 0 */
1291
1292 /* get a lun switch script */
1293 siop_target->lunsw = siop_get_lunsw(sc);
1294 if (siop_target->lunsw == NULL) {
1295 aprint_error_dev(sc->sc_c.sc_dev,
1296 "can't alloc lunsw for target %d\n",
1297 target);
1298 xs->error = XS_RESOURCE_SHORTAGE;
1299 scsipi_done(xs);
1300 TAILQ_INSERT_TAIL(&sc->free_list,
1301 siop_cmd, next);
1302 splx(s);
1303 return;
1304 }
1305 for (i=0; i < 8; i++)
1306 siop_target->siop_lun[i] = NULL;
1307 siop_add_reselsw(sc, target);
1308 }
1309 if (siop_target->siop_lun[lun] == NULL) {
1310 siop_target->siop_lun[lun] =
1311 malloc(sizeof(struct siop_lun), M_DEVBUF,
1312 M_NOWAIT|M_ZERO);
1313 if (siop_target->siop_lun[lun] == NULL) {
1314 aprint_error_dev(sc->sc_c.sc_dev,
1315 "can't alloc siop_lun for "
1316 "target %d lun %d\n",
1317 target, lun);
1318 xs->error = XS_RESOURCE_SHORTAGE;
1319 scsipi_done(xs);
1320 TAILQ_INSERT_TAIL(&sc->free_list,
1321 siop_cmd, next);
1322 splx(s);
1323 return;
1324 }
1325 }
1326 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1327 siop_cmd->cmd_c.xs = xs;
1328 siop_cmd->cmd_c.flags = 0;
1329 siop_cmd->cmd_c.status = CMDST_READY;
1330
1331 /* load the DMA maps */
1332 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1333 siop_cmd->cmd_c.dmamap_cmd,
1334 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1335 if (error) {
1336 aprint_error_dev(sc->sc_c.sc_dev,
1337 "unable to load cmd DMA map: %d\n",
1338 error);
1339 xs->error = (error == EAGAIN) ?
1340 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1341 scsipi_done(xs);
1342 siop_cmd->cmd_c.status = CMDST_FREE;
1343 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1344 splx(s);
1345 return;
1346 }
1347 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1348 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1349 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1350 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1351 ((xs->xs_control & XS_CTL_DATA_IN) ?
1352 BUS_DMA_READ : BUS_DMA_WRITE));
1353 if (error) {
1354 aprint_error_dev(sc->sc_c.sc_dev,
1355 "unable to load data DMA map: %d",
1356 error);
1357 xs->error = (error == EAGAIN) ?
1358 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1359 scsipi_done(xs);
1360 bus_dmamap_unload(sc->sc_c.sc_dmat,
1361 siop_cmd->cmd_c.dmamap_cmd);
1362 siop_cmd->cmd_c.status = CMDST_FREE;
1363 TAILQ_INSERT_TAIL(&sc->free_list,
1364 siop_cmd, next);
1365 splx(s);
1366 return;
1367 }
1368 bus_dmamap_sync(sc->sc_c.sc_dmat,
1369 siop_cmd->cmd_c.dmamap_data, 0,
1370 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1371 (xs->xs_control & XS_CTL_DATA_IN) ?
1372 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1373 }
1374 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1375 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1376 BUS_DMASYNC_PREWRITE);
1377
1378 if (xs->xs_tag_type) {
1379 /* use tag_id + 1, tag 0 is reserved for untagged cmds */
1380 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1381 } else {
1382 siop_cmd->cmd_c.tag = 0;
1383 }
1384 siop_setuptables(&siop_cmd->cmd_c);
1385 siop_cmd->saved_offset = SIOP_NOOFFSET;
1386 siop_table_sync(siop_cmd,
1387 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1388 siop_start(sc, siop_cmd);
1389 if (xs->xs_control & XS_CTL_POLL) {
1390 /* poll for command completion */
1391 while ((xs->xs_status & XS_STS_DONE) == 0) {
1392 delay(1000);
1393 siop_intr(sc);
1394 }
1395 }
1396 splx(s);
1397 return;
1398
1399 case ADAPTER_REQ_GROW_RESOURCES:
1400 #ifdef SIOP_DEBUG
1401 printf("%s grow resources (%d)\n",
1402 device_xname(sc->sc_c.sc_dev),
1403 sc->sc_c.sc_adapt.adapt_openings);
1404 #endif
1405 siop_morecbd(sc);
1406 return;
1407
1408 case ADAPTER_REQ_SET_XFER_MODE:
1409 {
1410 struct scsipi_xfer_mode *xm = arg;
1411 if (sc->sc_c.targets[xm->xm_target] == NULL)
1412 return;
1413 s = splbio();
1414 if (xm->xm_mode & PERIPH_CAP_TQING)
1415 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1416 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1417 (sc->sc_c.features & SF_BUS_WIDE))
1418 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1419 if (xm->xm_mode & PERIPH_CAP_SYNC)
1420 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1421 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1422 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1423 sc->sc_c.targets[xm->xm_target]->status =
1424 TARST_ASYNC;
1425
1426 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1427 if (scsipi_lookup_periph(chan,
1428 xm->xm_target, lun) != NULL) {
1429 /* allocate a lun sw entry for this device */
1430 siop_add_dev(sc, xm->xm_target, lun);
1431 }
1432 }
1433
1434 splx(s);
1435 }
1436 }
1437 }
1438
1439 static void
1440 siop_start(struct siop_softc *sc, struct siop_cmd *siop_cmd)
1441 {
1442 struct siop_lun *siop_lun;
1443 struct siop_xfer *siop_xfer;
1444 uint32_t dsa;
1445 int timeout;
1446 int target, lun, slot;
1447
1448 /*
1449 * first make sure to read valid data
1450 */
1451 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1452
1453 /*
1454 * The queue management here is a bit tricky: the script always looks
1455 * at the slots from first to last, so if we always use the first
1456 * free slot commands can stay at the tail of the queue ~forever.
1457 * The algorithm used here is to restart from the head when we know
1458 * that the queue is empty, and only add commands after the last one.
1459 * When we're at the end of the queue wait for the script to clear it.
1460 * The best thing to do here would be to implement a circular queue,
1461 * but using only 53c720 features this can be "interesting".
1462 * A mid-way solution could be to implement 2 queues and swap orders.
1463 */
1464 slot = sc->sc_currschedslot;
1465 /*
1466 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1467 * free. As this is the last used slot, all previous slots are free
1468 * too, so we can restart from 0.
1469 */
1470 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1471 0x80000000) {
1472 slot = sc->sc_currschedslot = 0;
1473 } else {
1474 slot++;
1475 }
1476 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1477 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1478 siop_lun =
1479 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1480 /* if non-tagged command active, panic: this shouldn't happen */
1481 if (siop_lun->siop_tag[0].active != NULL) {
1482 panic("siop_start: tagged cmd while untagged running");
1483 }
1484 #ifdef DIAGNOSTIC
1485 /* sanity check the tag if needed */
1486 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1487 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1488 panic("siop_start: tag not free");
1489 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1490 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1491 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1492 panic("siop_start: invalid tag id");
1493 }
1494 }
1495 #endif
1496 /*
1497 * find a free scheduler slot and load it.
1498 */
1499 for (; slot < SIOP_NSLOTS; slot++) {
1500 /*
1501 * If cmd is 0x80000000 the slot is free
1502 */
1503 if (siop_script_read(sc,
1504 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1505 0x80000000)
1506 break;
1507 }
1508 if (slot == SIOP_NSLOTS) {
1509 /*
1510 * no more free slots, no need to continue. Freeze the queue
1511 * and requeue this command.
1512 */
1513 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1514 sc->sc_flags |= SCF_CHAN_NOSLOT;
1515 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1516 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1517 siop_scsicmd_end(siop_cmd);
1518 return;
1519 }
1520 #ifdef SIOP_DEBUG_SCHED
1521 printf("using slot %d for DSA 0x%lx\n", slot,
1522 (u_long)siop_cmd->cmd_c.dsa);
1523 #endif
1524 /* mark command as active */
1525 if (siop_cmd->cmd_c.status == CMDST_READY)
1526 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1527 else
1528 panic("siop_start: bad status");
1529 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1530 /* patch scripts with DSA addr */
1531 dsa = siop_cmd->cmd_c.dsa;
1532 /* first reselect switch, if we have an entry */
1533 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1534 siop_script_write(sc,
1535 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1536 dsa + sizeof(struct siop_common_xfer) +
1537 Ent_ldsa_reload_dsa);
1538 /* CMD script: MOVE MEMORY addr */
1539 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1540 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1541 siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr +
1542 Ent_script_sched_slot0 + slot * 8);
1543 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1544 /* scheduler slot: JUMP ldsa_select */
1545 siop_script_write(sc,
1546 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1547 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1548 /* handle timeout */
1549 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1550 /* start expire timer */
1551 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1552 if (timeout == 0)
1553 timeout = 1;
1554 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1555 timeout, siop_timeout, siop_cmd);
1556 }
1557 /*
1558 * Change JUMP cmd so that this slot will be handled
1559 */
1560 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1561 0x80080000);
1562 sc->sc_currschedslot = slot;
1563
1564 /* make sure SCRIPT processor will read valid data */
1565 siop_script_sync(sc,BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1566 /* Signal script it has some work to do */
1567 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1568 SIOP_ISTAT, ISTAT_SIGP);
1569 /* and wait for IRQ */
1570 }
1571
1572 void
1573 siop_timeout(void *v)
1574 {
1575 struct siop_cmd *siop_cmd = v;
1576 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1577 int s;
1578
1579 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1580 printf("command timeout, CDB: ");
1581 scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1582 printf("\n");
1583
1584 s = splbio();
1585 /* reset the scsi bus */
1586 siop_resetbus(&sc->sc_c);
1587
1588 /* deactivate callout */
1589 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1590 /*
1591 * Mark the command as having timed out and just return;
1592 * the bus reset issued above will generate an interrupt,
1593 * and the timed-out command will be completed from
1594 * siop_intr().
1595 */
1596 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1597 splx(s);
1598 }
1599
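/*
 * Dump the host copy of the script, two 32-bit words per line; memory
 * to memory move instructions (opcode 0xc0000000) get a third word.
 */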
1600 void
1601 siop_dump_script(struct siop_softc *sc)
1602 {
1603 int i;
1604
1605 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1606 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1607 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i]),
1608 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i + 1]));
1609 if ((siop_ctoh32(&sc->sc_c,
1610 sc->sc_c.sc_script[i]) & 0xe0000000) == 0xc0000000) {
1611 i++;
1612 printf(" 0x%08x", siop_ctoh32(&sc->sc_c,
1613 sc->sc_c.sc_script[i + 1]));
1614 }
1615 printf("\n");
1616 }
1617 }
1618
1619 void
1620 siop_morecbd(struct siop_softc *sc)
1621 {
1622 int error, off, i, j, s;
1623 bus_dma_segment_t seg;
1624 int rseg;
1625 struct siop_cbd *newcbd;
1626 struct siop_xfer *xfer;
1627 bus_addr_t dsa;
1628 uint32_t *scr;
1629
1630 /* allocate a new list head */
1631 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1632 if (newcbd == NULL) {
1633 aprint_error_dev(sc->sc_c.sc_dev,
1634 "can't allocate memory for command descriptors head\n");
1635 return;
1636 }
1637
1638 /* allocate cmd list */
1639 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1640 M_DEVBUF, M_NOWAIT|M_ZERO);
1641 if (newcbd->cmds == NULL) {
1642 aprint_error_dev(sc->sc_c.sc_dev,
1643 "can't allocate memory for command descriptors\n");
1644 goto bad3;
1645 }
1646 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE,
1647 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1648 if (error) {
1649 aprint_error_dev(sc->sc_c.sc_dev,
1650 "unable to allocate cbd DMA memory, error = %d\n",
1651 error);
1652 goto bad2;
1653 }
1654 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1655 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1656 if (error) {
1657 aprint_error_dev(sc->sc_c.sc_dev,
1658 "unable to map cbd DMA memory, error = %d\n",
1659 error);
1660 goto bad2;
1661 }
1662 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1663 BUS_DMA_NOWAIT, &newcbd->xferdma);
1664 if (error) {
1665 aprint_error_dev(sc->sc_c.sc_dev,
1666 "unable to create cbd DMA map, error = %d\n",
1667 error);
1668 goto bad1;
1669 }
1670 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1671 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1672 if (error) {
1673 aprint_error_dev(sc->sc_c.sc_dev,
1674 "unable to load cbd DMA map, error = %d\n",
1675 error);
1676 goto bad0;
1677 }
1678 #ifdef DEBUG
1679 printf("%s: alloc newcbd at PHY addr 0x%lx\n",
1680 device_xname(sc->sc_c.sc_dev),
1681 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1682 #endif
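 /*
  * Byte offset of the status byte within the 32-bit status word:
  * 3 when SF_CHIP_BE is set, 0 otherwise; used below when setting
  * up t_status.addr.
  */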
1683 off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
1684 for (i = 0; i < SIOP_NCMDPB; i++) {
1685 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1686 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1687 &newcbd->cmds[i].cmd_c.dmamap_data);
1688 if (error) {
1689 aprint_error_dev(sc->sc_c.sc_dev,
1690 "unable to create data DMA map for cbd: "
1691 "error %d\n", error);
1692 goto bad0;
1693 }
1694 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1695 sizeof(struct scsipi_generic), 1,
1696 sizeof(struct scsipi_generic), 0,
1697 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1698 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1699 if (error) {
1700 aprint_error_dev(sc->sc_c.sc_dev,
1701 "unable to create cmd DMA map for cbd: error %d\n", error);
1702 goto bad0;
1703 }
1704 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1705 newcbd->cmds[i].siop_cbdp = newcbd;
1706 xfer = &newcbd->xfers[i];
1707 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1708 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1709 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1710 i * sizeof(struct siop_xfer);
1711 newcbd->cmds[i].cmd_c.dsa = dsa;
1712 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1713 xfer->siop_tables.t_msgout.count= siop_htoc32(&sc->sc_c, 1);
1714 xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
1715 xfer->siop_tables.t_msgin.count= siop_htoc32(&sc->sc_c, 1);
1716 xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
1717 dsa + offsetof(struct siop_common_xfer, msg_in));
1718 xfer->siop_tables.t_extmsgin.count= siop_htoc32(&sc->sc_c, 2);
1719 xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
1720 dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
1721 xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
1722 dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
1723 xfer->siop_tables.t_status.count= siop_htoc32(&sc->sc_c, 1);
1724 xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
1725 dsa + offsetof(struct siop_common_xfer, status) + off);
1726 /* The select/reselect script */
1727 scr = &xfer->resel[0];
1728 for (j = 0; j < __arraycount(load_dsa); j++)
1729 scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
1730 /*
1731 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1732 * octet, reg offset is the third.
1733 */
1734 scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
1735 0x78100000 | ((dsa & 0x000000ff) << 8));
1736 scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
1737 0x78110000 | ( dsa & 0x0000ff00 ));
1738 scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
1739 0x78120000 | ((dsa & 0x00ff0000) >> 8));
1740 scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
1741 0x78130000 | ((dsa & 0xff000000) >> 16));
1742 scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
1743 sc->sc_c.sc_scriptaddr + Ent_reselected);
1744 scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
1745 sc->sc_c.sc_scriptaddr + Ent_reselect);
1746 scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
1747 sc->sc_c.sc_scriptaddr + Ent_selected);
1748 scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
1749 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
1750 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1751 scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
1752 s = splbio();
1753 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1754 splx(s);
1755 #ifdef SIOP_DEBUG
1756 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1757 siop_ctoh32(&sc->sc_c,
1758 newcbd->cmds[i].cmd_tables->t_msgin.addr),
1759 siop_ctoh32(&sc->sc_c,
1760 newcbd->cmds[i].cmd_tables->t_msgout.addr),
1761 siop_ctoh32(&sc->sc_c,
1762 newcbd->cmds[i].cmd_tables->t_status.addr));
1763 #endif
1764 }
1765 s = splbio();
1766 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1767 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1768 splx(s);
1769 return;
1770 bad0:
1771 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1772 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1773 bad1:
1774 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1775 bad2:
1776 free(newcbd->cmds, M_DEVBUF);
1777 bad3:
1778 free(newcbd, M_DEVBUF);
1779 }
1780
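/*
 * Get a lun switch script fragment: reuse one from the free list if
 * available, otherwise copy lun_switch into the free script space and
 * patch its return address to the main script's lunsw_return entry.
 */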
1781 struct siop_lunsw *
1782 siop_get_lunsw(struct siop_softc *sc)
1783 {
1784 struct siop_lunsw *lunsw;
1785 int i;
1786
	if (sc->script_free_lo + __arraycount(lun_switch) >=
	    sc->script_free_hi)
		return NULL;
	lunsw = TAILQ_FIRST(&sc->lunsw_list);
	if (lunsw != NULL) {
#ifdef SIOP_DEBUG
		printf("siop_get_lunsw got lunsw at offset %d\n",
		    lunsw->lunsw_off);
#endif
		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
		return lunsw;
	}
	lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lunsw == NULL)
		return NULL;
#ifdef SIOP_DEBUG
	printf("allocating lunsw at offset %d\n", sc->script_free_lo);
#endif
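	/*
	 * Copy the lun_switch script template into free script space
	 * (on-chip RAM when the chip has some, otherwise the host-resident
	 * copy of the script) and patch its return jump with the absolute
	 * address of lunsw_return.
	 */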
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    sc->script_free_lo * 4, lun_switch,
		    __arraycount(lun_switch));
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
		    sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	} else {
		for (i = 0; i < __arraycount(lun_switch); i++)
			sc->sc_c.sc_script[sc->script_free_lo + i] =
			    siop_htoc32(&sc->sc_c, lun_switch[i]);
		sc->sc_c.sc_script[
		    sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
		    siop_htoc32(&sc->sc_c,
			sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
	}
	lunsw->lunsw_off = sc->script_free_lo;
	lunsw->lunsw_size = __arraycount(lun_switch);
	sc->script_free_lo += lunsw->lunsw_size;
	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return lunsw;
}

void
siop_add_reselsw(struct siop_softc *sc, int target)
{
	int i, j;
	struct siop_target *siop_target;
	struct siop_lun *siop_lun;

	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	/*
	 * add an entry to resel switch
	 */
	siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < 15; i++) {
		siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
		if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
		    == 0xff) { /* it's free */
#ifdef SIOP_DEBUG
			printf("siop: target %d slot %d offset %d\n",
			    target, i, siop_target->reseloff);
#endif
			/* JUMP abs_foo, IF target | 0x80; */
			siop_script_write(sc, siop_target->reseloff,
			    0x800c0080 | target);
			siop_script_write(sc, siop_target->reseloff + 1,
			    sc->sc_c.sc_scriptaddr +
			    siop_target->lunsw->lunsw_off * 4 +
			    Ent_lun_switch_entry);
			break;
		}
	}
	if (i == 15) /* no free slot, shouldn't happen */
		panic("siop: resel switch full");

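	/*
	 * For any LUN already set up on this target, drop the stale
	 * reselect offsets and let siop_add_dev() recreate the entries.
	 */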
	sc->sc_ntargets++;
	for (i = 0; i < 8; i++) {
		siop_lun = siop_target->siop_lun[i];
		if (siop_lun == NULL)
			continue;
		if (siop_lun->reseloff > 0) {
			siop_lun->reseloff = 0;
			for (j = 0; j < SIOP_NTAG; j++)
				siop_lun->siop_tag[j].reseloff = 0;
			siop_add_dev(sc, target, i);
		}
	}
	siop_update_scntl3(sc, sc->sc_c.targets[target]);
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_update_scntl3(struct siop_softc *sc,
    struct siop_common_target *_siop_target)
{
	struct siop_target *siop_target = (struct siop_target *)_siop_target;

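	/*
	 * The target's 'id' word is laid out like a SELECT FROM table entry:
	 * SCNTL3 in bits 24-31 and SXFER in bits 8-15.  The shifts below
	 * move each value into the data8 field (bits 8-15) of the two
	 * 'MOVE data8 TO reg' instructions patched here.
	 */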
	/* MOVE target->id >> 24 TO SCNTL3 */
	siop_script_write(sc,
	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
	    0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
	/* MOVE target->id >> 8 TO SXFER */
	siop_script_write(sc,
	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
	    0x78050000 | (siop_target->target_c.id & 0x0000ff00));
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_add_dev(struct siop_softc *sc, int target, int lun)
{
	struct siop_lunsw *lunsw;
	struct siop_target *siop_target =
	    (struct siop_target *)sc->sc_c.targets[target];
	struct siop_lun *siop_lun = siop_target->siop_lun[lun];
	int i, ntargets;

	if (siop_lun->reseloff > 0)
		return;
	lunsw = siop_target->lunsw;
	if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
		/*
		 * Can't extend this lun switch: something else has already
		 * been allocated right after it.  Probably not worth
		 * handling this case.
		 */
#ifdef DEBUG
		aprint_error_dev(sc->sc_c.sc_dev,
		    "%d:%d: can't allocate a lun sw slot\n", target, lun);
#endif
		return;
	}
	/* count how many free targets we still have to probe */
	ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;

	/*
	 * We need 2 script words (8 bytes) for the additional lun switch
	 * entry, and possibly sizeof(tag_switch) for the tag switch entry.
	 * Keep enough free space for the targets that may still be probed
	 * later.
	 */
	if (sc->script_free_lo + 2 +
	    (ntargets * __arraycount(lun_switch)) >=
	    ((siop_target->target_c.flags & TARF_TAG) ?
	    sc->script_free_hi - __arraycount(tag_switch) :
	    sc->script_free_hi)) {
		/*
		 * not enough space, probably not worth dealing with it.
		 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
		 */
#ifdef DEBUG
		aprint_error_dev(sc->sc_c.sc_dev,
		    "%d:%d: not enough memory for a lun sw slot\n",
		    target, lun);
#endif
		return;
	}
#ifdef SIOP_DEBUG
	printf("%s:%d:%d: allocate lun sw entry\n",
	    device_xname(sc->sc_c.sc_dev), target, lun);
#endif
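	/*
	 * A lun switch ends with an 'INT int_resellun' terminator.  Append
	 * a new terminator two script words further down, then overwrite
	 * the old one with this lun's 'JUMP ..., IF lun' entry.
	 */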
	/* INT int_resellun */
	siop_script_write(sc, sc->script_free_lo, 0x98080000);
	siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
	/* Now the slot entry: JUMP abs_foo, IF lun */
	siop_script_write(sc, sc->script_free_lo - 2,
	    0x800c0000 | lun);
	siop_script_write(sc, sc->script_free_lo - 1, 0);
	siop_lun->reseloff = sc->script_free_lo - 2;
	lunsw->lunsw_size += 2;
	sc->script_free_lo += 2;
	if (siop_target->target_c.flags & TARF_TAG) {
		/* we need a tag switch */
		sc->script_free_hi -= __arraycount(tag_switch);
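		/*
		 * The tag switch is carved from the high end of the script
		 * area: script_free_hi grows downward, while lun switch
		 * entries grow upward from script_free_lo.
		 */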
		if (sc->sc_c.features & SF_CHIP_RAM) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    sc->script_free_hi * 4, tag_switch,
			    __arraycount(tag_switch));
		} else {
			for (i = 0; i < __arraycount(tag_switch); i++) {
				sc->sc_c.sc_script[sc->script_free_hi + i] =
				    siop_htoc32(&sc->sc_c, tag_switch[i]);
			}
		}
		siop_script_write(sc,
		    siop_lun->reseloff + 1,
		    sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
		    Ent_tag_switch_entry);

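		/*
		 * Each tag owns a two-word 'JUMP ..., IF tag' slot in the
		 * tag switch, starting at resel_tag0; remember each slot's
		 * script offset so it can be patched later, when a tagged
		 * command is set up.
		 */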
		for (i = 0; i < SIOP_NTAG; i++) {
			siop_lun->siop_tag[i].reseloff =
			    sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
		}
	} else {
		/* non-tag case; just work with the lun switch */
		siop_lun->siop_tag[0].reseloff =
		    siop_target->siop_lun[lun]->reseloff;
	}
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
}

void
siop_del_dev(struct siop_softc *sc, int target, int lun)
{
	int i;
	struct siop_target *siop_target;

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: free lun sw entry\n",
	    device_xname(sc->sc_c.sc_dev), target, lun);
#endif
	if (sc->sc_c.targets[target] == NULL)
		return;
	siop_target = (struct siop_target *)sc->sc_c.targets[target];
	free(siop_target->siop_lun[lun], M_DEVBUF);
	siop_target->siop_lun[lun] = NULL;
	/* XXX compact sw entry too ? */
	/* check if we can free the whole target */
	for (i = 0; i < 8; i++) {
		if (siop_target->siop_lun[i] != NULL)
			return;
	}
#ifdef SIOP_DEBUG
	printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
	    device_xname(sc->sc_c.sc_dev), target, lun,
	    siop_target->lunsw->lunsw_off);
#endif
	/*
	 * nothing here, free the target struct and resel
	 * switch entry
	 */
	siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
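	/*
	 * Writing ID 0xff back into the reselect slot marks it free again:
	 * 0xff never matches a reselecting target, and it is what
	 * siop_add_reselsw() tests for when looking for a free slot.
	 */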
	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
	TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
	free(sc->sc_c.targets[target], M_DEVBUF);
	sc->sc_c.targets[target] = NULL;
	sc->sc_ntargets--;
}

#ifdef SIOP_STATS
void
siop_printstats(void)
{

	printf("siop_stat_intr %d\n", siop_stat_intr);
	printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
	printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
	printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
	printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
	printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
	printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
	printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
}
#endif
