/* $NetBSD: siop.c,v 1.87.4.2 2009/05/16 10:41:24 yamt Exp $ */
2
3 /*
4 * Copyright (c) 2000 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.87.4.2 2009/05/16 10:41:24 yamt Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <sys/bus.h>
49
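/*
 * siop.out is the assembled SCRIPTS microcode (generated from siop.ss);
 * it provides the siop_script/lun_switch/load_dsa/siop_led_* arrays and
 * the Ent_* entry offsets and E_*_Used relocation tables used below.
 */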
50 #include <dev/microcode/siop/siop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 /*
68 #define SIOP_DEBUG
69 #define SIOP_DEBUG_DR
70 #define SIOP_DEBUG_INTR
71 #define SIOP_DEBUG_SCHED
72 #define DUMP_SCRIPT
73 */
74
75 #define SIOP_STATS
76
77 #ifndef SIOP_DEFAULT_TARGET
78 #define SIOP_DEFAULT_TARGET 7
79 #endif
80
81 /* number of cmd descriptors per block */
82 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
83
/* Number of scheduler slots (needs to match the script) */
85 #define SIOP_NSLOTS 40
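/*
 * Note: each slot is one two-word (8-byte) SCRIPTS JUMP instruction in the
 * scheduler area starting at Ent_script_sched_slot0; see siop_start().
 */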
86
87 void siop_reset(struct siop_softc *);
88 void siop_handle_reset(struct siop_softc *);
89 int siop_handle_qtag_reject(struct siop_cmd *);
90 void siop_scsicmd_end(struct siop_cmd *);
91 void siop_unqueue(struct siop_softc *, int, int);
92 static void siop_start(struct siop_softc *, struct siop_cmd *);
93 void siop_timeout(void *);
94 int siop_scsicmd(struct scsipi_xfer *);
95 void siop_scsipi_request(struct scsipi_channel *,
96 scsipi_adapter_req_t, void *);
97 void siop_dump_script(struct siop_softc *);
98 void siop_morecbd(struct siop_softc *);
99 struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
100 void siop_add_reselsw(struct siop_softc *, int);
101 void siop_update_scntl3(struct siop_softc *,
102 struct siop_common_target *);
103
104 #ifdef SIOP_STATS
105 static int siop_stat_intr = 0;
106 static int siop_stat_intr_shortxfer = 0;
107 static int siop_stat_intr_sdp = 0;
108 static int siop_stat_intr_saveoffset = 0;
109 static int siop_stat_intr_done = 0;
110 static int siop_stat_intr_xferdisc = 0;
111 static int siop_stat_intr_lunresel = 0;
112 static int siop_stat_intr_qfull = 0;
113 void siop_printstats(void);
114 #define INCSTAT(x) x++
115 #else
116 #define INCSTAT(x)
117 #endif
118
119 static inline void siop_script_sync(struct siop_softc *, int);
120 static inline void
121 siop_script_sync(struct siop_softc *sc, int ops)
122 {
123
124 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
125 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
126 PAGE_SIZE, ops);
127 }
128
129 static inline uint32_t siop_script_read(struct siop_softc *, u_int);
130 static inline uint32_t
131 siop_script_read(struct siop_softc *sc, u_int offset)
132 {
133
134 if (sc->sc_c.features & SF_CHIP_RAM) {
135 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
136 offset * 4);
137 } else {
138 return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]);
139 }
140 }
141
142 static inline void siop_script_write(struct siop_softc *, u_int,
143 uint32_t);
144 static inline void
145 siop_script_write(struct siop_softc *sc, u_int offset, uint32_t val)
146 {
147
148 if (sc->sc_c.features & SF_CHIP_RAM) {
149 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
150 offset * 4, val);
151 } else {
152 sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val);
153 }
154 }
155
156 void
157 siop_attach(struct siop_softc *sc)
158 {
159
160 if (siop_common_attach(&sc->sc_c) != 0)
161 return;
162
163 TAILQ_INIT(&sc->free_list);
164 TAILQ_INIT(&sc->cmds);
165 TAILQ_INIT(&sc->lunsw_list);
166 sc->sc_currschedslot = 0;
167 #ifdef SIOP_DEBUG
168 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
169 device_xname(sc->sc_c.sc_dev), (int)sizeof(siop_script),
170 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
171 #endif
172
173 sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
174 sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
175
176 /* Do a bus reset, so that devices fall back to narrow/async */
177 siop_resetbus(&sc->sc_c);
178 /*
179 * siop_reset() will reset the chip, thus clearing pending interrupts
180 */
181 siop_reset(sc);
182 #ifdef DUMP_SCRIPT
183 siop_dump_script(sc);
184 #endif
185
186 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
187 }
188
189 void
190 siop_reset(struct siop_softc *sc)
191 {
192 int i, j;
193 struct siop_lunsw *lunsw;
194
195 siop_common_reset(&sc->sc_c);
196
197 /* copy and patch the script */
198 if (sc->sc_c.features & SF_CHIP_RAM) {
199 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
200 siop_script, __arraycount(siop_script));
201 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
202 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
203 E_abs_msgin_Used[j] * 4,
204 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
205 }
206 if (sc->sc_c.features & SF_CHIP_LED0) {
207 bus_space_write_region_4(sc->sc_c.sc_ramt,
208 sc->sc_c.sc_ramh,
209 Ent_led_on1, siop_led_on,
210 __arraycount(siop_led_on));
211 bus_space_write_region_4(sc->sc_c.sc_ramt,
212 sc->sc_c.sc_ramh,
213 Ent_led_on2, siop_led_on,
214 __arraycount(siop_led_on));
215 bus_space_write_region_4(sc->sc_c.sc_ramt,
216 sc->sc_c.sc_ramh,
217 Ent_led_off, siop_led_off,
218 __arraycount(siop_led_off));
219 }
220 } else {
221 for (j = 0; j < __arraycount(siop_script); j++) {
222 sc->sc_c.sc_script[j] =
223 siop_htoc32(&sc->sc_c, siop_script[j]);
224 }
225 for (j = 0; j < __arraycount(E_abs_msgin_Used); j++) {
226 sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
227 siop_htoc32(&sc->sc_c,
228 sc->sc_c.sc_scriptaddr + Ent_msgin_space);
229 }
230 if (sc->sc_c.features & SF_CHIP_LED0) {
231 for (j = 0; j < __arraycount(siop_led_on); j++)
232 sc->sc_c.sc_script[
233 Ent_led_on1 / sizeof(siop_led_on[0]) + j
234 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
235 for (j = 0; j < __arraycount(siop_led_on); j++)
236 sc->sc_c.sc_script[
237 Ent_led_on2 / sizeof(siop_led_on[0]) + j
238 ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
239 for (j = 0; j < __arraycount(siop_led_off); j++)
240 sc->sc_c.sc_script[
241 Ent_led_off / sizeof(siop_led_off[0]) + j
242 ] = siop_htoc32(&sc->sc_c, siop_led_off[j]);
243 }
244 }
245 sc->script_free_lo = __arraycount(siop_script);
246 sc->script_free_hi = sc->sc_c.ram_size / 4;
247 sc->sc_ntargets = 0;
248
249 /* free used and unused lun switches */
250 while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
251 #ifdef SIOP_DEBUG
252 printf("%s: free lunsw at offset %d\n",
253 device_xname(sc->sc_c.sc_dev), lunsw->lunsw_off);
254 #endif
255 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
256 free(lunsw, M_DEVBUF);
257 }
258 TAILQ_INIT(&sc->lunsw_list);
259 /* restore reselect switch */
260 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
261 struct siop_target *target;
262 if (sc->sc_c.targets[i] == NULL)
263 continue;
264 #ifdef SIOP_DEBUG
265 printf("%s: restore sw for target %d\n",
266 device_xname(sc->sc_c.sc_dev), i);
267 #endif
268 target = (struct siop_target *)sc->sc_c.targets[i];
269 free(target->lunsw, M_DEVBUF);
270 target->lunsw = siop_get_lunsw(sc);
271 if (target->lunsw == NULL) {
272 aprint_error_dev(sc->sc_c.sc_dev,
273 "can't alloc lunsw for target %d\n", i);
274 break;
275 }
276 siop_add_reselsw(sc, i);
277 }
278
279 /* start script */
280 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
281 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
282 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
283 }
284 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
285 sc->sc_c.sc_scriptaddr + Ent_reselect);
286 }
287
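/*
 * CALL_SCRIPT(ent) (re)starts the SCRIPTS processor at the given entry
 * point by writing its physical address to the DSP register; the #if 0
 * variant below additionally logs the DSA and the DSP value being loaded.
 */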
288 #if 0
289 #define CALL_SCRIPT(ent) do { \
290 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
291 siop_cmd->cmd_c.dsa, \
292 sc->sc_c.sc_scriptaddr + ent); \
293 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
294 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
295 } while (/* CONSTCOND */0)
296 #else
297 #define CALL_SCRIPT(ent) do { \
298 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
299 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
300 } while (/* CONSTCOND */0)
301 #endif
302
303 int
304 siop_intr(void *v)
305 {
306 struct siop_softc *sc = v;
307 struct siop_target *siop_target;
308 struct siop_cmd *siop_cmd;
309 struct siop_lun *siop_lun;
310 struct scsipi_xfer *xs;
311 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
312 uint32_t irqcode;
313 int need_reset = 0;
314 int offset, target, lun, tag;
315 bus_addr_t dsa;
316 struct siop_cbd *cbdp;
317 int freetarget = 0;
318 int restart = 0;
319
320 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
321 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
322 return 0;
323 INCSTAT(siop_stat_intr);
324 if (istat & ISTAT_INTF) {
325 printf("INTRF\n");
326 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
327 SIOP_ISTAT, ISTAT_INTF);
328 }
329 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
330 (ISTAT_DIP | ISTAT_ABRT)) {
331 /* clear abort */
332 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
333 SIOP_ISTAT, 0);
334 }
335 /* use DSA to find the current siop_cmd */
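	/*
	 * Each siop_cbd maps one PAGE_SIZE block of siop_xfer structures, so
	 * a DSA falling within a block's DMA segment identifies both the
	 * block and the command inside it.
	 */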
336 siop_cmd = NULL;
337 dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
338 TAILQ_FOREACH(cbdp, &sc->cmds, next) {
339 if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
340 dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
341 dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
342 siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
343 siop_table_sync(siop_cmd,
344 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
345 break;
346 }
347 }
348 if (siop_cmd) {
349 xs = siop_cmd->cmd_c.xs;
350 siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
351 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
352 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
353 tag = siop_cmd->cmd_c.tag;
354 siop_lun = siop_target->siop_lun[lun];
355 #ifdef DIAGNOSTIC
356 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
357 printf("siop_cmd (lun %d) for DSA 0x%x "
358 "not active (%d)\n", lun, (u_int)dsa,
359 siop_cmd->cmd_c.status);
360 xs = NULL;
361 siop_target = NULL;
362 target = -1;
363 lun = -1;
364 tag = -1;
365 siop_lun = NULL;
366 siop_cmd = NULL;
367 } else if (siop_lun->siop_tag[tag].active != siop_cmd) {
368 printf("siop_cmd (lun %d tag %d) not in siop_lun "
369 "active (%p != %p)\n", lun, tag, siop_cmd,
370 siop_lun->siop_tag[tag].active);
371 }
372 #endif
373 } else {
374 xs = NULL;
375 siop_target = NULL;
376 target = -1;
377 lun = -1;
378 tag = -1;
379 siop_lun = NULL;
380 }
381 if (istat & ISTAT_DIP) {
382 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
383 SIOP_DSTAT);
384 if (dstat & DSTAT_ABRT) {
385 /* was probably generated by a bus reset IOCTL */
386 if ((dstat & DSTAT_DFE) == 0)
387 siop_clearfifo(&sc->sc_c);
388 goto reset;
389 }
390 if (dstat & DSTAT_SSI) {
391 printf("single step dsp 0x%08x dsa 0x08%x\n",
392 (int)(bus_space_read_4(sc->sc_c.sc_rt,
393 sc->sc_c.sc_rh, SIOP_DSP) -
394 sc->sc_c.sc_scriptaddr),
395 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
396 SIOP_DSA));
397 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
398 (istat & ISTAT_SIP) == 0) {
399 bus_space_write_1(sc->sc_c.sc_rt,
400 sc->sc_c.sc_rh, SIOP_DCNTL,
401 bus_space_read_1(sc->sc_c.sc_rt,
402 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
403 }
404 return 1;
405 }
406
407 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
408 printf("DMA IRQ:");
409 if (dstat & DSTAT_IID)
410 printf(" Illegal instruction");
411 if (dstat & DSTAT_BF)
412 printf(" bus fault");
413 if (dstat & DSTAT_MDPE)
414 printf(" parity");
415 if (dstat & DSTAT_DFE)
416 printf(" DMA fifo empty");
417 else
418 siop_clearfifo(&sc->sc_c);
419 printf(", DSP=0x%x DSA=0x%x: ",
420 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
421 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
422 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
423 if (siop_cmd)
424 printf("last msg_in=0x%x status=0x%x\n",
425 siop_cmd->cmd_tables->msg_in[0],
426 siop_ctoh32(&sc->sc_c,
427 siop_cmd->cmd_tables->status));
428 else
429 aprint_error_dev(sc->sc_c.sc_dev,
430 "current DSA invalid\n");
431 need_reset = 1;
432 }
433 }
434 if (istat & ISTAT_SIP) {
435 if (istat & ISTAT_DIP)
436 delay(10);
437 /*
 * Can't read sist0 & sist1 independently, or we would have to
 * insert a delay between the two reads.
440 */
441 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
442 SIOP_SIST0);
443 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 SIOP_SSTAT1);
445 #ifdef SIOP_DEBUG_INTR
446 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
447 "DSA=0x%x DSP=0x%lx\n", sist,
448 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
449 SIOP_SSTAT1),
450 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
451 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
452 SIOP_DSP) -
453 sc->sc_c.sc_scriptaddr));
454 #endif
455 if (sist & SIST0_RST) {
456 siop_handle_reset(sc);
457 /* no table to flush here */
458 return 1;
459 }
460 if (sist & SIST0_SGE) {
461 if (siop_cmd)
462 scsipi_printaddr(xs->xs_periph);
463 else
464 printf("%s:", device_xname(sc->sc_c.sc_dev));
465 printf("scsi gross error\n");
466 goto reset;
467 }
468 if ((sist & SIST0_MA) && need_reset == 0) {
469 if (siop_cmd) {
470 int scratcha0;
471 dstat = bus_space_read_1(sc->sc_c.sc_rt,
472 sc->sc_c.sc_rh, SIOP_DSTAT);
473 /*
474 * first restore DSA, in case we were in a S/G
475 * operation.
476 */
477 bus_space_write_4(sc->sc_c.sc_rt,
478 sc->sc_c.sc_rh,
479 SIOP_DSA, siop_cmd->cmd_c.dsa);
480 scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
481 sc->sc_c.sc_rh, SIOP_SCRATCHA);
482 switch (sstat1 & SSTAT1_PHASE_MASK) {
483 case SSTAT1_PHASE_STATUS:
484 /*
485 * previous phase may be aborted for any reason
 * (for example, the target has less data to
487 * transfer than requested). Compute resid and
488 * just go to status, the command should
489 * terminate.
490 */
491 INCSTAT(siop_stat_intr_shortxfer);
492 if (scratcha0 & A_flag_data)
493 siop_ma(&siop_cmd->cmd_c);
494 else if ((dstat & DSTAT_DFE) == 0)
495 siop_clearfifo(&sc->sc_c);
496 CALL_SCRIPT(Ent_status);
497 return 1;
498 case SSTAT1_PHASE_MSGIN:
499 /*
500 * target may be ready to disconnect
501 * Compute resid which would be used later
502 * if a save data pointer is needed.
503 */
504 INCSTAT(siop_stat_intr_xferdisc);
505 if (scratcha0 & A_flag_data)
506 siop_ma(&siop_cmd->cmd_c);
507 else if ((dstat & DSTAT_DFE) == 0)
508 siop_clearfifo(&sc->sc_c);
509 bus_space_write_1(sc->sc_c.sc_rt,
510 sc->sc_c.sc_rh, SIOP_SCRATCHA,
511 scratcha0 & ~A_flag_data);
512 CALL_SCRIPT(Ent_msgin);
513 return 1;
514 }
515 aprint_error_dev(sc->sc_c.sc_dev,
516 "unexpected phase mismatch %d\n",
517 sstat1 & SSTAT1_PHASE_MASK);
518 } else {
519 aprint_error_dev(sc->sc_c.sc_dev,
520 "phase mismatch without command\n");
521 }
522 need_reset = 1;
523 }
524 if (sist & SIST0_PAR) {
525 /* parity error, reset */
526 if (siop_cmd)
527 scsipi_printaddr(xs->xs_periph);
528 else
529 printf("%s:", device_xname(sc->sc_c.sc_dev));
530 printf("parity error\n");
531 goto reset;
532 }
533 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
534 /* selection time out, assume there's no device here */
535 if (siop_cmd) {
536 siop_cmd->cmd_c.status = CMDST_DONE;
537 xs->error = XS_SELTIMEOUT;
538 freetarget = 1;
539 goto end;
540 } else {
541 aprint_error_dev(sc->sc_c.sc_dev,
542 "selection timeout without "
543 "command\n");
544 need_reset = 1;
545 }
546 }
547 if (sist & SIST0_UDC) {
548 /*
549 * unexpected disconnect. Usually the target signals
550 * a fatal condition this way. Attempt to get sense.
551 */
552 if (siop_cmd) {
553 siop_cmd->cmd_tables->status =
554 siop_htoc32(&sc->sc_c, SCSI_CHECK);
555 goto end;
556 }
557 aprint_error_dev(sc->sc_c.sc_dev,
558 "unexpected disconnect without "
559 "command\n");
560 goto reset;
561 }
562 if (sist & (SIST1_SBMC << 8)) {
563 /* SCSI bus mode change */
564 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
565 goto reset;
566 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
567 /*
568 * we have a script interrupt, it will
569 * restart the script.
570 */
571 goto scintr;
572 }
573 /*
 * else we have to restart it ourselves, at the
575 * interrupted instruction.
576 */
577 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
578 SIOP_DSP,
579 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
580 SIOP_DSP) - 8);
581 return 1;
582 }
583 /* Else it's an unhandled exception (for now). */
584 aprint_error_dev(sc->sc_c.sc_dev,
585 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
586 "DSA=0x%x DSP=0x%x\n", sist,
587 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
588 SIOP_SSTAT1),
589 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
590 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
591 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
592 if (siop_cmd) {
593 siop_cmd->cmd_c.status = CMDST_DONE;
594 xs->error = XS_SELTIMEOUT;
595 goto end;
596 }
597 need_reset = 1;
598 }
599 if (need_reset) {
600 reset:
601 /* fatal error, reset the bus */
602 siop_resetbus(&sc->sc_c);
603 /* no table to flush here */
604 return 1;
605 }
606
607 scintr:
608 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
609 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
610 SIOP_DSPS);
611 #ifdef SIOP_DEBUG_INTR
612 printf("script interrupt 0x%x\n", irqcode);
613 #endif
/*
 * Having no command, or an inactive command, is only valid for a
 * reselect interrupt.
 */
618 if ((irqcode & 0x80) == 0) {
619 if (siop_cmd == NULL) {
620 aprint_error_dev(sc->sc_c.sc_dev,
621 "script interrupt (0x%x) with "
622 "invalid DSA !!!\n",
623 irqcode);
624 goto reset;
625 }
626 if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
627 aprint_error_dev(sc->sc_c.sc_dev,
628 "command with invalid status "
629 "(IRQ code 0x%x current status %d) !\n",
630 irqcode, siop_cmd->cmd_c.status);
631 xs = NULL;
632 }
633 }
634 switch(irqcode) {
635 case A_int_err:
636 printf("error, DSP=0x%x\n",
637 (int)(bus_space_read_4(sc->sc_c.sc_rt,
638 sc->sc_c.sc_rh, SIOP_DSP) -
639 sc->sc_c.sc_scriptaddr));
640 if (xs) {
641 xs->error = XS_SELTIMEOUT;
642 goto end;
643 } else {
644 goto reset;
645 }
646 case A_int_reseltarg:
647 aprint_error_dev(sc->sc_c.sc_dev,
648 "reselect with invalid target\n");
649 goto reset;
650 case A_int_resellun:
651 INCSTAT(siop_stat_intr_lunresel);
652 target = bus_space_read_1(sc->sc_c.sc_rt,
653 sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
654 lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
655 SIOP_SCRATCHA + 1);
656 tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
657 SIOP_SCRATCHA + 2);
658 siop_target =
659 (struct siop_target *)sc->sc_c.targets[target];
660 if (siop_target == NULL) {
661 printf("%s: reselect with invalid target %d\n",
662 device_xname(sc->sc_c.sc_dev), target);
663 goto reset;
664 }
665 siop_lun = siop_target->siop_lun[lun];
666 if (siop_lun == NULL) {
667 printf("%s: target %d reselect with invalid "
668 "lun %d\n", device_xname(sc->sc_c.sc_dev),
669 target, lun);
670 goto reset;
671 }
672 if (siop_lun->siop_tag[tag].active == NULL) {
673 printf("%s: target %d lun %d tag %d reselect "
674 "without command\n",
675 device_xname(sc->sc_c.sc_dev),
676 target, lun, tag);
677 goto reset;
678 }
679 siop_cmd = siop_lun->siop_tag[tag].active;
680 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
681 SIOP_DSP, siop_cmd->cmd_c.dsa +
682 sizeof(struct siop_common_xfer) +
683 Ent_ldsa_reload_dsa);
684 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
685 return 1;
686 case A_int_reseltag:
687 printf("%s: reselect with invalid tag\n",
688 device_xname(sc->sc_c.sc_dev));
689 goto reset;
690 case A_int_msgin:
691 {
692 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
693 sc->sc_c.sc_rh, SIOP_SFBR);
694
695 if (msgin == MSG_MESSAGE_REJECT) {
696 int msg, extmsg;
697 if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
698 /*
 * message was part of an identify +
700 * something else. Identify shouldn't
701 * have been rejected.
702 */
703 msg =
704 siop_cmd->cmd_tables->msg_out[1];
705 extmsg =
706 siop_cmd->cmd_tables->msg_out[3];
707 } else {
708 msg = siop_cmd->cmd_tables->msg_out[0];
709 extmsg =
710 siop_cmd->cmd_tables->msg_out[2];
711 }
712 if (msg == MSG_MESSAGE_REJECT) {
713 /* MSG_REJECT for a MSG_REJECT !*/
714 if (xs)
715 scsipi_printaddr(xs->xs_periph);
716 else
717 printf("%s: ", device_xname(
718 sc->sc_c.sc_dev));
719 printf("our reject message was "
720 "rejected\n");
721 goto reset;
722 }
723 if (msg == MSG_EXTENDED &&
724 extmsg == MSG_EXT_WDTR) {
725 /* WDTR rejected, initiate sync */
726 if ((siop_target->target_c.flags &
727 TARF_SYNC) == 0) {
728 siop_target->target_c.status =
729 TARST_OK;
730 siop_update_xfer_mode(&sc->sc_c,
731 target);
732 /* no table to flush here */
733 CALL_SCRIPT(Ent_msgin_ack);
734 return 1;
735 }
736 siop_target->target_c.status =
737 TARST_SYNC_NEG;
738 siop_sdtr_msg(&siop_cmd->cmd_c, 0,
739 sc->sc_c.st_minsync,
740 sc->sc_c.maxoff);
741 siop_table_sync(siop_cmd,
742 BUS_DMASYNC_PREREAD |
743 BUS_DMASYNC_PREWRITE);
744 CALL_SCRIPT(Ent_send_msgout);
745 return 1;
746 } else if (msg == MSG_EXTENDED &&
747 extmsg == MSG_EXT_SDTR) {
748 /* sync rejected */
749 siop_target->target_c.offset = 0;
750 siop_target->target_c.period = 0;
751 siop_target->target_c.status = TARST_OK;
752 siop_update_xfer_mode(&sc->sc_c,
753 target);
754 /* no table to flush here */
755 CALL_SCRIPT(Ent_msgin_ack);
756 return 1;
757 } else if (msg == MSG_SIMPLE_Q_TAG ||
758 msg == MSG_HEAD_OF_Q_TAG ||
759 msg == MSG_ORDERED_Q_TAG) {
760 if (siop_handle_qtag_reject(
761 siop_cmd) == -1)
762 goto reset;
763 CALL_SCRIPT(Ent_msgin_ack);
764 return 1;
765 }
766 if (xs)
767 scsipi_printaddr(xs->xs_periph);
768 else
769 printf("%s: ",
770 device_xname(sc->sc_c.sc_dev));
771 if (msg == MSG_EXTENDED) {
772 printf("scsi message reject, extended "
773 "message sent was 0x%x\n", extmsg);
774 } else {
775 printf("scsi message reject, message "
776 "sent was 0x%x\n", msg);
777 }
778 /* no table to flush here */
779 CALL_SCRIPT(Ent_msgin_ack);
780 return 1;
781 }
782 if (msgin == MSG_IGN_WIDE_RESIDUE) {
783 /* use the extmsgdata table to get the second byte */
784 siop_cmd->cmd_tables->t_extmsgdata.count =
785 siop_htoc32(&sc->sc_c, 1);
786 siop_table_sync(siop_cmd,
787 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
788 CALL_SCRIPT(Ent_get_extmsgdata);
789 return 1;
790 }
791 if (xs)
792 scsipi_printaddr(xs->xs_periph);
793 else
794 printf("%s: ", device_xname(sc->sc_c.sc_dev));
795 printf("unhandled message 0x%x\n",
796 siop_cmd->cmd_tables->msg_in[0]);
797 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
798 siop_cmd->cmd_tables->t_msgout.count =
799 siop_htoc32(&sc->sc_c, 1);
800 siop_table_sync(siop_cmd,
801 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
802 CALL_SCRIPT(Ent_send_msgout);
803 return 1;
804 }
805 case A_int_extmsgin:
806 #ifdef SIOP_DEBUG_INTR
807 printf("extended message: msg 0x%x len %d\n",
808 siop_cmd->cmd_tables->msg_in[2],
809 siop_cmd->cmd_tables->msg_in[1]);
810 #endif
811 if (siop_cmd->cmd_tables->msg_in[1] >
812 sizeof(siop_cmd->cmd_tables->msg_in) - 2)
813 aprint_error_dev(sc->sc_c.sc_dev,
814 "extended message too big (%d)\n",
815 siop_cmd->cmd_tables->msg_in[1]);
816 siop_cmd->cmd_tables->t_extmsgdata.count =
817 siop_htoc32(&sc->sc_c,
818 siop_cmd->cmd_tables->msg_in[1] - 1);
819 siop_table_sync(siop_cmd,
820 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
821 CALL_SCRIPT(Ent_get_extmsgdata);
822 return 1;
823 case A_int_extmsgdata:
824 #ifdef SIOP_DEBUG_INTR
825 {
826 int i;
827 printf("extended message: 0x%x, data:",
828 siop_cmd->cmd_tables->msg_in[2]);
829 for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
830 i++)
831 printf(" 0x%x",
832 siop_cmd->cmd_tables->msg_in[i]);
833 printf("\n");
834 }
835 #endif
836 if (siop_cmd->cmd_tables->msg_in[0] ==
837 MSG_IGN_WIDE_RESIDUE) {
838 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
839 if (siop_cmd->cmd_tables->msg_in[3] != 1)
840 printf("MSG_IGN_WIDE_RESIDUE: "
841 "bad len %d\n",
842 siop_cmd->cmd_tables->msg_in[3]);
843 switch (siop_iwr(&siop_cmd->cmd_c)) {
844 case SIOP_NEG_MSGOUT:
845 siop_table_sync(siop_cmd,
846 BUS_DMASYNC_PREREAD |
847 BUS_DMASYNC_PREWRITE);
848 CALL_SCRIPT(Ent_send_msgout);
849 return(1);
850 case SIOP_NEG_ACK:
851 CALL_SCRIPT(Ent_msgin_ack);
852 return(1);
853 default:
854 panic("invalid retval from "
855 "siop_iwr()");
856 }
857 return(1);
858 }
859 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
860 switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
861 case SIOP_NEG_MSGOUT:
862 siop_update_scntl3(sc,
863 siop_cmd->cmd_c.siop_target);
864 siop_table_sync(siop_cmd,
865 BUS_DMASYNC_PREREAD |
866 BUS_DMASYNC_PREWRITE);
867 CALL_SCRIPT(Ent_send_msgout);
868 return(1);
869 case SIOP_NEG_ACK:
870 siop_update_scntl3(sc,
871 siop_cmd->cmd_c.siop_target);
872 CALL_SCRIPT(Ent_msgin_ack);
873 return(1);
874 default:
875 panic("invalid retval from "
876 "siop_wdtr_neg()");
877 }
878 return(1);
879 }
880 if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
881 switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
882 case SIOP_NEG_MSGOUT:
883 siop_update_scntl3(sc,
884 siop_cmd->cmd_c.siop_target);
885 siop_table_sync(siop_cmd,
886 BUS_DMASYNC_PREREAD |
887 BUS_DMASYNC_PREWRITE);
888 CALL_SCRIPT(Ent_send_msgout);
889 return(1);
890 case SIOP_NEG_ACK:
891 siop_update_scntl3(sc,
892 siop_cmd->cmd_c.siop_target);
893 CALL_SCRIPT(Ent_msgin_ack);
894 return(1);
895 default:
896 panic("invalid retval from "
897 "siop_wdtr_neg()");
898 }
899 return(1);
900 }
901 /* send a message reject */
902 siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
903 siop_cmd->cmd_tables->t_msgout.count =
904 siop_htoc32(&sc->sc_c, 1);
905 siop_table_sync(siop_cmd,
906 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
907 CALL_SCRIPT(Ent_send_msgout);
908 return 1;
909 case A_int_disc:
910 INCSTAT(siop_stat_intr_sdp);
911 offset = bus_space_read_1(sc->sc_c.sc_rt,
912 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
913 #ifdef SIOP_DEBUG_DR
914 printf("disconnect offset %d\n", offset);
915 #endif
916 siop_sdp(&siop_cmd->cmd_c, offset);
917 /* we start again with no offset */
918 siop_cmd->saved_offset = SIOP_NOOFFSET;
919 siop_table_sync(siop_cmd,
920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
921 CALL_SCRIPT(Ent_script_sched);
922 return 1;
923 case A_int_saveoffset:
924 INCSTAT(siop_stat_intr_saveoffset);
925 offset = bus_space_read_1(sc->sc_c.sc_rt,
926 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
927 #ifdef SIOP_DEBUG_DR
928 printf("saveoffset offset %d\n", offset);
929 #endif
930 siop_cmd->saved_offset = offset;
931 CALL_SCRIPT(Ent_script_sched);
932 return 1;
933 case A_int_resfail:
934 printf("reselect failed\n");
935 CALL_SCRIPT(Ent_script_sched);
936 return 1;
937 case A_int_done:
938 if (xs == NULL) {
939 printf("%s: done without command, DSA=0x%lx\n",
940 device_xname(sc->sc_c.sc_dev),
941 (u_long)siop_cmd->cmd_c.dsa);
942 siop_cmd->cmd_c.status = CMDST_FREE;
943 CALL_SCRIPT(Ent_script_sched);
944 return 1;
945 }
946 #ifdef SIOP_DEBUG_INTR
947 printf("done, DSA=0x%lx target id 0x%x last msg "
948 "in=0x%x status=0x%x\n",
949 (u_long)siop_cmd->cmd_c.dsa,
950 siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id),
951 siop_cmd->cmd_tables->msg_in[0],
952 siop_ctoh32(&sc->sc_c,
953 siop_cmd->cmd_tables->status));
954 #endif
955 INCSTAT(siop_stat_intr_done);
956 /* update resid. */
957 offset = bus_space_read_1(sc->sc_c.sc_rt,
958 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
959 /*
960 * if we got a disconnect between the last data phase
961 * and the status phase, offset will be 0. In this
962 * case, siop_cmd->saved_offset will have the proper
963 * value if it got updated by the controller
964 */
965 if (offset == 0 &&
966 siop_cmd->saved_offset != SIOP_NOOFFSET)
967 offset = siop_cmd->saved_offset;
968 siop_update_resid(&siop_cmd->cmd_c, offset);
969 siop_cmd->cmd_c.status = CMDST_DONE;
970 goto end;
971 default:
972 printf("unknown irqcode %x\n", irqcode);
973 if (xs) {
974 xs->error = XS_SELTIMEOUT;
975 goto end;
976 }
977 goto reset;
978 }
979 return 1;
980 }
/* We just shouldn't get here */
982 panic("siop_intr: I shouldn't be there !");
983
984 end:
/*
 * Restart the script now if the command completed properly.
 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
 * the queue.
 */
990 xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status);
991 if (xs->status == SCSI_OK)
992 CALL_SCRIPT(Ent_script_sched);
993 else
994 restart = 1;
995 siop_lun->siop_tag[tag].active = NULL;
996 siop_scsicmd_end(siop_cmd);
997 if (freetarget && siop_target->target_c.status == TARST_PROBING)
998 siop_del_dev(sc, target, lun);
999 if (restart)
1000 CALL_SCRIPT(Ent_script_sched);
1001 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1002 /* a command terminated, so we have free slots now */
1003 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1004 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1005 }
1006
1007 return 1;
1008 }
1009
1010 void
1011 siop_scsicmd_end(struct siop_cmd *siop_cmd)
1012 {
1013 struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1014 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1015
1016 switch(xs->status) {
1017 case SCSI_OK:
1018 xs->error = XS_NOERROR;
1019 break;
1020 case SCSI_BUSY:
1021 xs->error = XS_BUSY;
1022 break;
1023 case SCSI_CHECK:
1024 xs->error = XS_BUSY;
1025 /* remove commands in the queue and scheduler */
1026 siop_unqueue(sc, xs->xs_periph->periph_target,
1027 xs->xs_periph->periph_lun);
1028 break;
1029 case SCSI_QUEUE_FULL:
1030 INCSTAT(siop_stat_intr_qfull);
1031 #ifdef SIOP_DEBUG
1032 printf("%s:%d:%d: queue full (tag %d)\n",
1033 device_xname(sc->sc_c.sc_dev),
1034 xs->xs_periph->periph_target,
1035 xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1036 #endif
1037 xs->error = XS_BUSY;
1038 break;
1039 case SCSI_SIOP_NOCHECK:
1040 /*
1041 * don't check status, xs->error is already valid
1042 */
1043 break;
1044 case SCSI_SIOP_NOSTATUS:
1045 /*
1046 * the status byte was not updated, cmd was
1047 * aborted
1048 */
1049 xs->error = XS_SELTIMEOUT;
1050 break;
1051 default:
1052 scsipi_printaddr(xs->xs_periph);
1053 printf("invalid status code %d\n", xs->status);
1054 xs->error = XS_DRIVER_STUFFUP;
1055 }
1056 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1057 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data,
1058 0, siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1059 (xs->xs_control & XS_CTL_DATA_IN) ?
1060 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1061 bus_dmamap_unload(sc->sc_c.sc_dmat,
1062 siop_cmd->cmd_c.dmamap_data);
1063 }
1064 bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1065 if ((xs->xs_control & XS_CTL_POLL) == 0)
1066 callout_stop(&xs->xs_callout);
1067 siop_cmd->cmd_c.status = CMDST_FREE;
1068 TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1069 #if 0
1070 if (xs->resid != 0)
1071 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1072 #endif
1073 scsipi_done(xs);
1074 }
1075
1076 void
1077 siop_unqueue(struct siop_softc *sc, int target, int lun)
1078 {
1079 int slot, tag;
1080 struct siop_cmd *siop_cmd;
1081 struct siop_lun *siop_lun =
1082 ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1083
1084 /* first make sure to read valid data */
1085 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1086
1087 for (tag = 1; tag < SIOP_NTAG; tag++) {
1088 /* look for commands in the scheduler, not yet started */
1089 if (siop_lun->siop_tag[tag].active == NULL)
1090 continue;
1091 siop_cmd = siop_lun->siop_tag[tag].active;
1092 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1093 if (siop_script_read(sc,
1094 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1095 siop_cmd->cmd_c.dsa +
1096 sizeof(struct siop_common_xfer) +
1097 Ent_ldsa_select)
1098 break;
1099 }
1100 if (slot > sc->sc_currschedslot)
1101 continue; /* didn't find it */
1102 if (siop_script_read(sc,
1103 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1104 continue; /* already started */
1105 /* clear the slot */
1106 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1107 0x80000000);
1108 /* ask to requeue */
1109 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1110 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1111 siop_lun->siop_tag[tag].active = NULL;
1112 siop_scsicmd_end(siop_cmd);
1113 }
1114 /* update sc_currschedslot */
1115 sc->sc_currschedslot = 0;
1116 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1117 if (siop_script_read(sc,
1118 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1119 sc->sc_currschedslot = slot;
1120 }
1121 }
1122
1123 /*
1124 * handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
1126 */
1127 int
1128 siop_handle_qtag_reject(struct siop_cmd *siop_cmd)
1129 {
1130 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1131 int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1132 int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1133 int tag = siop_cmd->cmd_tables->msg_out[2];
1134 struct siop_lun *siop_lun =
1135 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1136
1137 #ifdef SIOP_DEBUG
1138 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1139 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1140 siop_cmd->cmd_c.tag,
1141 siop_cmd->cmd_c.status);
1142 #endif
1143
1144 if (siop_lun->siop_tag[0].active != NULL) {
1145 printf("%s: untagged command already running for target %d "
1146 "lun %d (status %d)\n", device_xname(sc->sc_c.sc_dev),
1147 target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1148 return -1;
1149 }
1150 /* clear tag slot */
1151 siop_lun->siop_tag[tag].active = NULL;
1152 /* add command to non-tagged slot */
1153 siop_lun->siop_tag[0].active = siop_cmd;
1154 siop_cmd->cmd_c.tag = 0;
1155 /* adjust reselect script if there is one */
1156 if (siop_lun->siop_tag[0].reseloff > 0) {
1157 siop_script_write(sc,
1158 siop_lun->siop_tag[0].reseloff + 1,
1159 siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1160 Ent_ldsa_reload_dsa);
1161 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1162 }
1163 return 0;
1164 }
1165
1166 /*
 * handle a bus reset: reset the chip, unqueue all active commands, free
 * all target structs and report lossage to the upper layer.
 * As the upper layer may requeue immediately, we have to first store
 * all active commands in a temporary queue.
1171 */
1172 void
1173 siop_handle_reset(struct siop_softc *sc)
1174 {
1175 struct siop_cmd *siop_cmd;
1176 struct siop_lun *siop_lun;
1177 int target, lun, tag;
1178
1179 /*
1180 * scsi bus reset. reset the chip and restart
1181 * the queue. Need to clean up all active commands
1182 */
1183 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1184 /* stop, reset and restart the chip */
1185 siop_reset(sc);
1186 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1187 /* chip has been reset, all slots are free now */
1188 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1189 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1190 }
1191 /*
 * Process all commands: first the commands being executed
1193 */
1194 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1195 target++) {
1196 if (sc->sc_c.targets[target] == NULL)
1197 continue;
1198 for (lun = 0; lun < 8; lun++) {
1199 struct siop_target *siop_target =
1200 (struct siop_target *)sc->sc_c.targets[target];
1201 siop_lun = siop_target->siop_lun[lun];
1202 if (siop_lun == NULL)
1203 continue;
1204 for (tag = 0; tag <
1205 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1206 SIOP_NTAG : 1);
1207 tag++) {
1208 siop_cmd = siop_lun->siop_tag[tag].active;
1209 if (siop_cmd == NULL)
1210 continue;
1211 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1212 printf("command with tag id %d reset\n", tag);
1213 siop_cmd->cmd_c.xs->error =
1214 (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1215 XS_TIMEOUT : XS_RESET;
1216 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1217 siop_lun->siop_tag[tag].active = NULL;
1218 siop_cmd->cmd_c.status = CMDST_DONE;
1219 siop_scsicmd_end(siop_cmd);
1220 }
1221 }
1222 sc->sc_c.targets[target]->status = TARST_ASYNC;
1223 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1224 sc->sc_c.targets[target]->period =
1225 sc->sc_c.targets[target]->offset = 0;
1226 siop_update_xfer_mode(&sc->sc_c, target);
1227 }
1228
1229 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1230 }
1231
1232 void
1233 siop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1234 void *arg)
1235 {
1236 struct scsipi_xfer *xs;
1237 struct scsipi_periph *periph;
1238 struct siop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1239 struct siop_cmd *siop_cmd;
1240 struct siop_target *siop_target;
1241 int s, error, i;
1242 int target;
1243 int lun;
1244
1245 switch (req) {
1246 case ADAPTER_REQ_RUN_XFER:
1247 xs = arg;
1248 periph = xs->xs_periph;
1249 target = periph->periph_target;
1250 lun = periph->periph_lun;
1251
1252 s = splbio();
1253 #ifdef SIOP_DEBUG_SCHED
1254 printf("starting cmd for %d:%d\n", target, lun);
1255 #endif
1256 siop_cmd = TAILQ_FIRST(&sc->free_list);
1257 if (siop_cmd == NULL) {
1258 xs->error = XS_RESOURCE_SHORTAGE;
1259 scsipi_done(xs);
1260 splx(s);
1261 return;
1262 }
1263 TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1264 #ifdef DIAGNOSTIC
1265 if (siop_cmd->cmd_c.status != CMDST_FREE)
1266 panic("siop_scsicmd: new cmd not free");
1267 #endif
1268 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1269 if (siop_target == NULL) {
1270 #ifdef SIOP_DEBUG
1271 printf("%s: alloc siop_target for target %d\n",
1272 device_xname(sc->sc_c.sc_dev), target);
1273 #endif
1274 sc->sc_c.targets[target] =
1275 malloc(sizeof(struct siop_target),
1276 M_DEVBUF, M_NOWAIT|M_ZERO);
1277 if (sc->sc_c.targets[target] == NULL) {
1278 aprint_error_dev(sc->sc_c.sc_dev,
1279 "can't malloc memory for "
1280 "target %d\n", target);
1281 xs->error = XS_RESOURCE_SHORTAGE;
1282 scsipi_done(xs);
1283 splx(s);
1284 return;
1285 }
1286 siop_target =
1287 (struct siop_target *)sc->sc_c.targets[target];
1288 siop_target->target_c.status = TARST_PROBING;
1289 siop_target->target_c.flags = 0;
1290 siop_target->target_c.id =
1291 sc->sc_c.clock_div << 24; /* scntl3 */
1292 siop_target->target_c.id |= target << 16; /* id */
1293 /* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1294
1295 /* get a lun switch script */
1296 siop_target->lunsw = siop_get_lunsw(sc);
1297 if (siop_target->lunsw == NULL) {
1298 aprint_error_dev(sc->sc_c.sc_dev,
1299 "can't alloc lunsw for target %d\n",
1300 target);
1301 xs->error = XS_RESOURCE_SHORTAGE;
1302 scsipi_done(xs);
1303 splx(s);
1304 return;
1305 }
1306 for (i=0; i < 8; i++)
1307 siop_target->siop_lun[i] = NULL;
1308 siop_add_reselsw(sc, target);
1309 }
1310 if (siop_target->siop_lun[lun] == NULL) {
1311 siop_target->siop_lun[lun] =
1312 malloc(sizeof(struct siop_lun), M_DEVBUF,
1313 M_NOWAIT|M_ZERO);
1314 if (siop_target->siop_lun[lun] == NULL) {
1315 aprint_error_dev(sc->sc_c.sc_dev,
1316 "can't alloc siop_lun for "
1317 "target %d lun %d\n",
1318 target, lun);
1319 xs->error = XS_RESOURCE_SHORTAGE;
1320 scsipi_done(xs);
1321 splx(s);
1322 return;
1323 }
1324 }
1325 siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1326 siop_cmd->cmd_c.xs = xs;
1327 siop_cmd->cmd_c.flags = 0;
1328 siop_cmd->cmd_c.status = CMDST_READY;
1329
1330 /* load the DMA maps */
1331 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1332 siop_cmd->cmd_c.dmamap_cmd,
1333 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1334 if (error) {
1335 aprint_error_dev(sc->sc_c.sc_dev,
1336 "unable to load cmd DMA map: %d\n",
1337 error);
1338 xs->error = XS_DRIVER_STUFFUP;
1339 scsipi_done(xs);
1340 splx(s);
1341 return;
1342 }
1343 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1344 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1345 siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1346 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1347 ((xs->xs_control & XS_CTL_DATA_IN) ?
1348 BUS_DMA_READ : BUS_DMA_WRITE));
1349 if (error) {
1350 aprint_error_dev(sc->sc_c.sc_dev,
1351 "unable to load cmd DMA map: %d",
1352 error);
1353 xs->error = XS_DRIVER_STUFFUP;
1354 scsipi_done(xs);
1355 bus_dmamap_unload(sc->sc_c.sc_dmat,
1356 siop_cmd->cmd_c.dmamap_cmd);
1357 splx(s);
1358 return;
1359 }
1360 bus_dmamap_sync(sc->sc_c.sc_dmat,
1361 siop_cmd->cmd_c.dmamap_data, 0,
1362 siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1363 (xs->xs_control & XS_CTL_DATA_IN) ?
1364 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1365 }
1366 bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1367 siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1368 BUS_DMASYNC_PREWRITE);
1369
1370 if (xs->xs_tag_type) {
1371 /* use tag_id + 1, tag 0 is reserved for untagged cmds*/
1372 siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1373 } else {
1374 siop_cmd->cmd_c.tag = 0;
1375 }
1376 siop_setuptables(&siop_cmd->cmd_c);
1377 siop_cmd->saved_offset = SIOP_NOOFFSET;
1378 siop_table_sync(siop_cmd,
1379 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1380 siop_start(sc, siop_cmd);
1381 if (xs->xs_control & XS_CTL_POLL) {
1382 /* poll for command completion */
1383 while ((xs->xs_status & XS_STS_DONE) == 0) {
1384 delay(1000);
1385 siop_intr(sc);
1386 }
1387 }
1388 splx(s);
1389 return;
1390
1391 case ADAPTER_REQ_GROW_RESOURCES:
1392 #ifdef SIOP_DEBUG
1393 printf("%s grow resources (%d)\n",
1394 device_xname(sc->sc_c.sc_dev),
1395 sc->sc_c.sc_adapt.adapt_openings);
1396 #endif
1397 siop_morecbd(sc);
1398 return;
1399
1400 case ADAPTER_REQ_SET_XFER_MODE:
1401 {
1402 struct scsipi_xfer_mode *xm = arg;
1403 if (sc->sc_c.targets[xm->xm_target] == NULL)
1404 return;
1405 s = splbio();
1406 if (xm->xm_mode & PERIPH_CAP_TQING)
1407 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1408 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1409 (sc->sc_c.features & SF_BUS_WIDE))
1410 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1411 if (xm->xm_mode & PERIPH_CAP_SYNC)
1412 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1413 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1414 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1415 sc->sc_c.targets[xm->xm_target]->status =
1416 TARST_ASYNC;
1417
1418 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1419 if (scsipi_lookup_periph(chan,
1420 xm->xm_target, lun) != NULL) {
1421 /* allocate a lun sw entry for this device */
1422 siop_add_dev(sc, xm->xm_target, lun);
1423 }
1424 }
1425
1426 splx(s);
1427 }
1428 }
1429 }
1430
1431 static void
1432 siop_start(struct siop_softc *sc, struct siop_cmd *siop_cmd)
1433 {
1434 struct siop_lun *siop_lun;
1435 struct siop_xfer *siop_xfer;
1436 uint32_t dsa;
1437 int timeout;
1438 int target, lun, slot;
1439
1440 /*
1441 * first make sure to read valid data
1442 */
1443 siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1444
/*
 * The queue management here is a bit tricky: the script always looks
 * at the slots from first to last, so if we always use the first
 * free slot, commands can stay at the tail of the queue ~forever.
 * The algorithm used here is to restart from the head when we know
 * that the queue is empty, and only add commands after the last one.
 * When we're at the end of the queue, wait for the script to clear it.
 * The best thing to do here would be to implement a circular queue,
 * but using only 53c720 features this can be "interesting".
 * A mid-way solution could be to implement 2 queues and swap orders.
 */
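/*
 * Each scheduler slot holds two script words: word 0 is the JUMP opcode,
 * left as 0x80000000 (JUMP IF FALSE, never taken) while the slot is free
 * and patched to 0x80080000 to arm it; word 1 is the jump address,
 * pointing at the ldsa_select entry of the command's per-command script.
 */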
1456 slot = sc->sc_currschedslot;
1457 /*
1458 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1459 * free. As this is the last used slot, all previous slots are free,
1460 * we can restart from 0.
1461 */
1462 if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1463 0x80000000) {
1464 slot = sc->sc_currschedslot = 0;
1465 } else {
1466 slot++;
1467 }
1468 target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1469 lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1470 siop_lun =
1471 ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1472 /* if non-tagged command active, panic: this shouldn't happen */
1473 if (siop_lun->siop_tag[0].active != NULL) {
1474 panic("siop_start: tagged cmd while untagged running");
1475 }
1476 #ifdef DIAGNOSTIC
1477 /* sanity check the tag if needed */
1478 if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1479 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1480 panic("siop_start: tag not free");
1481 if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1482 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1483 printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1484 panic("siop_start: invalid tag id");
1485 }
1486 }
1487 #endif
1488 /*
1489 * find a free scheduler slot and load it.
1490 */
1491 for (; slot < SIOP_NSLOTS; slot++) {
1492 /*
 * If cmd is 0x80000000 the slot is free
1494 */
1495 if (siop_script_read(sc,
1496 (Ent_script_sched_slot0 / 4) + slot * 2) ==
1497 0x80000000)
1498 break;
1499 }
1500 if (slot == SIOP_NSLOTS) {
1501 /*
 * no more free slots, no need to continue: freeze the queue
1503 * and requeue this command.
1504 */
1505 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1506 sc->sc_flags |= SCF_CHAN_NOSLOT;
1507 siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1508 siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1509 siop_scsicmd_end(siop_cmd);
1510 return;
1511 }
1512 #ifdef SIOP_DEBUG_SCHED
1513 printf("using slot %d for DSA 0x%lx\n", slot,
1514 (u_long)siop_cmd->cmd_c.dsa);
1515 #endif
1516 /* mark command as active */
1517 if (siop_cmd->cmd_c.status == CMDST_READY)
1518 siop_cmd->cmd_c.status = CMDST_ACTIVE;
1519 else
1520 panic("siop_start: bad status");
1521 siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1522 /* patch scripts with DSA addr */
1523 dsa = siop_cmd->cmd_c.dsa;
1524 /* first reselect switch, if we have an entry */
1525 if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1526 siop_script_write(sc,
1527 siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1528 dsa + sizeof(struct siop_common_xfer) +
1529 Ent_ldsa_reload_dsa);
1530 /* CMD script: MOVE MEMORY addr */
1531 siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1532 siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1533 siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr +
1534 Ent_script_sched_slot0 + slot * 8);
1535 siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1536 /* scheduler slot: JUMP ldsa_select */
1537 siop_script_write(sc,
1538 (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1539 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1540 /* handle timeout */
1541 if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
/* start expire timer */
1543 timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1544 if (timeout == 0)
1545 timeout = 1;
1546 callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1547 timeout, siop_timeout, siop_cmd);
1548 }
1549 /*
1550 * Change JUMP cmd so that this slot will be handled
1551 */
1552 siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1553 0x80080000);
1554 sc->sc_currschedslot = slot;
1555
1556 /* make sure SCRIPT processor will read valid data */
siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1558 /* Signal script it has some work to do */
1559 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1560 SIOP_ISTAT, ISTAT_SIGP);
1561 /* and wait for IRQ */
1562 }
1563
1564 void
1565 siop_timeout(void *v)
1566 {
1567 struct siop_cmd *siop_cmd = v;
1568 struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1569 int s;
1570
1571 scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1572 printf("command timeout, CDB: ");
1573 scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1574 printf("\n");
1575
1576 s = splbio();
1577 /* reset the scsi bus */
1578 siop_resetbus(&sc->sc_c);
1579
1580 /* deactivate callout */
1581 callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
/*
 * Mark the command as having timed out and just return;
 * the bus reset will generate an interrupt,
 * which will be handled in siop_intr().
 */
1588 siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1589 splx(s);
1590 }
1591
1592 void
1593 siop_dump_script(struct siop_softc *sc)
1594 {
1595 int i;
1596
1597 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1598 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1599 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i]),
1600 siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i + 1]));
1601 if ((siop_ctoh32(&sc->sc_c,
1602 sc->sc_c.sc_script[i]) & 0xe0000000) == 0xc0000000) {
1603 i++;
1604 printf(" 0x%08x", siop_ctoh32(&sc->sc_c,
1605 sc->sc_c.sc_script[i + 1]));
1606 }
1607 printf("\n");
1608 }
1609 }
1610
1611 void
1612 siop_morecbd(struct siop_softc *sc)
1613 {
1614 int error, off, i, j, s;
1615 bus_dma_segment_t seg;
1616 int rseg;
1617 struct siop_cbd *newcbd;
1618 struct siop_xfer *xfer;
1619 bus_addr_t dsa;
1620 uint32_t *scr;
1621
1622 /* allocate a new list head */
1623 newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1624 if (newcbd == NULL) {
1625 aprint_error_dev(sc->sc_c.sc_dev,
1626 "can't allocate memory for command descriptors head\n");
1627 return;
1628 }
1629
1630 /* allocate cmd list */
1631 newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1632 M_DEVBUF, M_NOWAIT|M_ZERO);
1633 if (newcbd->cmds == NULL) {
1634 aprint_error_dev(sc->sc_c.sc_dev,
1635 "can't allocate memory for command descriptors\n");
1636 goto bad3;
1637 }
1638 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE,
1639 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1640 if (error) {
1641 aprint_error_dev(sc->sc_c.sc_dev,
1642 "unable to allocate cbd DMA memory, error = %d\n",
1643 error);
1644 goto bad2;
1645 }
1646 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1647 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1648 if (error) {
1649 aprint_error_dev(sc->sc_c.sc_dev,
1650 "unable to map cbd DMA memory, error = %d\n",
1651 error);
1652 goto bad2;
1653 }
1654 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1655 BUS_DMA_NOWAIT, &newcbd->xferdma);
1656 if (error) {
1657 aprint_error_dev(sc->sc_c.sc_dev,
1658 "unable to create cbd DMA map, error = %d\n",
1659 error);
1660 goto bad1;
1661 }
1662 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1663 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1664 if (error) {
1665 aprint_error_dev(sc->sc_c.sc_dev,
1666 "unable to load cbd DMA map, error = %d\n",
1667 error);
1668 goto bad0;
1669 }
1670 #ifdef DEBUG
1671 printf("%s: alloc newcdb at PHY addr 0x%lx\n",
1672 device_xname(sc->sc_c.sc_dev),
1673 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1674 #endif
1675 off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
1676 for (i = 0; i < SIOP_NCMDPB; i++) {
1677 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1678 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1679 &newcbd->cmds[i].cmd_c.dmamap_data);
1680 if (error) {
1681 aprint_error_dev(sc->sc_c.sc_dev,
1682 "unable to create data DMA map for cbd: "
1683 "error %d\n", error);
1684 goto bad0;
1685 }
1686 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1687 sizeof(struct scsipi_generic), 1,
1688 sizeof(struct scsipi_generic), 0,
1689 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1690 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1691 if (error) {
1692 aprint_error_dev(sc->sc_c.sc_dev,
1693 "unable to create cmd DMA map for cbd %d\n", error);
1694 goto bad0;
1695 }
1696 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1697 newcbd->cmds[i].siop_cbdp = newcbd;
1698 xfer = &newcbd->xfers[i];
1699 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1700 memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1701 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1702 i * sizeof(struct siop_xfer);
1703 newcbd->cmds[i].cmd_c.dsa = dsa;
1704 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1705 xfer->siop_tables.t_msgout.count= siop_htoc32(&sc->sc_c, 1);
1706 xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
1707 xfer->siop_tables.t_msgin.count= siop_htoc32(&sc->sc_c, 1);
1708 xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
1709 dsa + offsetof(struct siop_common_xfer, msg_in));
1710 xfer->siop_tables.t_extmsgin.count= siop_htoc32(&sc->sc_c, 2);
1711 xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
1712 dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
1713 xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
1714 dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
1715 xfer->siop_tables.t_status.count= siop_htoc32(&sc->sc_c, 1);
1716 xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
1717 dsa + offsetof(struct siop_common_xfer, status) + off);
1718 /* The select/reselect script */
1719 scr = &xfer->resel[0];
1720 for (j = 0; j < __arraycount(load_dsa); j++)
1721 scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
1722 /*
1723 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1724 * octet, reg offset is the third.
1725 */
1726 scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
1727 0x78100000 | ((dsa & 0x000000ff) << 8));
1728 scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
1729 0x78110000 | ( dsa & 0x0000ff00 ));
1730 scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
1731 0x78120000 | ((dsa & 0x00ff0000) >> 8));
1732 scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
1733 0x78130000 | ((dsa & 0xff000000) >> 16));
1734 scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
1735 sc->sc_c.sc_scriptaddr + Ent_reselected);
1736 scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
1737 sc->sc_c.sc_scriptaddr + Ent_reselect);
1738 scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
1739 sc->sc_c.sc_scriptaddr + Ent_selected);
1740 scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
1741 dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
1742 /* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1743 scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
1744 s = splbio();
1745 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1746 splx(s);
1747 #ifdef SIOP_DEBUG
1748 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1749 siop_ctoh32(&sc->sc_c,
1750 newcbd->cmds[i].cmd_tables->t_msgin.addr),
1751 siop_ctoh32(&sc->sc_c,
1752 newcbd->cmds[i].cmd_tables->t_msgout.addr),
1753 siop_ctoh32(&sc->sc_c,
1754 newcbd->cmds[i].cmd_tables->t_status.addr));
1755 #endif
1756 }
1757 s = splbio();
1758 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1759 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1760 splx(s);
1761 return;
1762 bad0:
1763 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1764 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1765 bad1:
1766 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1767 bad2:
1768 free(newcbd->cmds, M_DEVBUF);
1769 bad3:
1770 free(newcbd, M_DEVBUF);
1771 }
1772
1773 struct siop_lunsw *
1774 siop_get_lunsw(struct siop_softc *sc)
1775 {
1776 struct siop_lunsw *lunsw;
1777 int i;
1778
1779 if (sc->script_free_lo + __arraycount(lun_switch) >= sc->script_free_hi)
1780 return NULL;
1781 lunsw = TAILQ_FIRST(&sc->lunsw_list);
1782 if (lunsw != NULL) {
1783 #ifdef SIOP_DEBUG
1784 printf("siop_get_lunsw got lunsw at offset %d\n",
1785 lunsw->lunsw_off);
1786 #endif
1787 TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1788 return lunsw;
1789 }
1790 lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1791 if (lunsw == NULL)
1792 return NULL;
1793 #ifdef SIOP_DEBUG
1794 printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1795 #endif
1796 if (sc->sc_c.features & SF_CHIP_RAM) {
1797 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1798 sc->script_free_lo * 4, lun_switch,
1799 __arraycount(lun_switch));
1800 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1801 (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1802 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1803 } else {
1804 for (i = 0; i < __arraycount(lun_switch); i++)
1805 sc->sc_c.sc_script[sc->script_free_lo + i] =
1806 siop_htoc32(&sc->sc_c, lun_switch[i]);
1807 sc->sc_c.sc_script[
1808 sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1809 siop_htoc32(&sc->sc_c,
1810 sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1811 }
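	/*
	 * Note on the two paths above: bus_space offsets are expressed in
	 * bytes (hence the "* 4" when writing to on-chip RAM), while
	 * sc_script[] is indexed directly in 32-bit words.
	 */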
1812 lunsw->lunsw_off = sc->script_free_lo;
1813 lunsw->lunsw_size = __arraycount(lun_switch);
1814 sc->script_free_lo += lunsw->lunsw_size;
1815 siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1816 return lunsw;
1817 }
1818
1819 void
1820 siop_add_reselsw(struct siop_softc *sc, int target)
1821 {
1822 int i, j;
1823 struct siop_target *siop_target;
1824 struct siop_lun *siop_lun;
1825
1826 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1827 /*
1828 * add an entry to resel switch
1829 */
1830 siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1831 for (i = 0; i < 15; i++) {
1832 siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1833 if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1834 == 0xff) { /* it's free */
1835 #ifdef SIOP_DEBUG
1836 printf("siop: target %d slot %d offset %d\n",
1837 target, i, siop_target->reseloff);
1838 #endif
1839 /* JUMP abs_foo, IF target | 0x80; */
1840 siop_script_write(sc, siop_target->reseloff,
1841 0x800c0080 | target);
1842 siop_script_write(sc, siop_target->reseloff + 1,
1843 sc->sc_c.sc_scriptaddr +
1844 siop_target->lunsw->lunsw_off * 4 +
1845 Ent_lun_switch_entry);
1846 break;
1847 }
1848 }
1849 if (i == 15) /* no free slot, shouldn't happen */
1850 panic("siop: resel switch full");
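	/*
	 * Illustrative sketch with hypothetical values: a slot claimed for
	 * target 3 ends up holding the instruction pair
	 *	0x800c0083		JUMP ..., IF (0x80 | 3)
	 *	<address>		sc_scriptaddr + lunsw_off * 4 +
	 *				Ent_lun_switch_entry
	 * A slot whose low byte is still 0xff is treated as free above.
	 */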
1851
1852 sc->sc_ntargets++;
1853 for (i = 0; i < 8; i++) {
1854 siop_lun = siop_target->siop_lun[i];
1855 if (siop_lun == NULL)
1856 continue;
1857 if (siop_lun->reseloff > 0) {
1858 siop_lun->reseloff = 0;
1859 for (j = 0; j < SIOP_NTAG; j++)
1860 siop_lun->siop_tag[j].reseloff = 0;
1861 siop_add_dev(sc, target, i);
1862 }
1863 }
1864 siop_update_scntl3(sc, sc->sc_c.targets[target]);
1865 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1866 }
1867
1868 void
1869 siop_update_scntl3(struct siop_softc *sc,
1870 struct siop_common_target *_siop_target)
1871 {
1872 struct siop_target *siop_target = (struct siop_target *)_siop_target;
1873
1874 /* MOVE target->id >> 24 TO SCNTL3 */
1875 siop_script_write(sc,
1876 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1877 0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1878 /* MOVE target->id >> 8 TO SXFER */
1879 siop_script_write(sc,
1880 siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1881 0x78050000 | (siop_target->target_c.id & 0x0000ff00));
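	/*
	 * Illustrative sketch with a hypothetical id of 0x0b035000
	 * (scntl3 in bits 31-24, sxfer in bits 15-8, as the shifts above
	 * imply): the two writes would patch the script to
	 *	0x78030b00	move 0x0b to SCNTL3 (reg 0x03)
	 *	0x78055000	move 0x50 to SXFER  (reg 0x05)
	 */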
1882 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1883 }
1884
1885 void
1886 siop_add_dev(struct siop_softc *sc, int target, int lun)
1887 {
1888 struct siop_lunsw *lunsw;
1889 struct siop_target *siop_target =
1890 (struct siop_target *)sc->sc_c.targets[target];
1891 struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1892 int i, ntargets;
1893
1894 if (siop_lun->reseloff > 0)
1895 return;
1896 lunsw = siop_target->lunsw;
1897 if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1898 /*
1899 * can't extend this slot. Probably not worth trying to deal
1900 * with this case
1901 */
1902 #ifdef DEBUG
1903 aprint_error_dev(sc->sc_c.sc_dev,
1904 "%d:%d: can't allocate a lun sw slot\n", target, lun);
1905 #endif
1906 return;
1907 }
1908 	/* count free targets still to probe; chan_ntargets includes the adapter itself, hence the - 1 */
1909 ntargets = sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1910
1911 /*
1912 	 * we need 8 bytes for the additional lun sw entry, and
1913 	 * possibly sizeof(tag_switch) for the tag switch entry.
1914 * Keep enough free space for the free targets that could be
1915 * probed later.
1916 */
1917 if (sc->script_free_lo + 2 +
1918 (ntargets * __arraycount(lun_switch)) >=
1919 ((siop_target->target_c.flags & TARF_TAG) ?
1920 sc->script_free_hi - __arraycount(tag_switch) :
1921 sc->script_free_hi)) {
1922 /*
1923 * not enough space, probably not worth dealing with it.
1924 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1925 */
1926 #ifdef DEBUG
1927 aprint_error_dev(sc->sc_c.sc_dev,
1928 "%d:%d: not enough memory for a lun sw slot\n",
1929 target, lun);
1930 #endif
1931 return;
1932 }
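	/*
	 * In other words, the check above keeps enough low script space for
	 * one lun_switch per target that may still be probed, and (for a
	 * tagged target) room for a tag_switch carved from the top of the
	 * script area below.
	 */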
1933 #ifdef SIOP_DEBUG
1934 printf("%s:%d:%d: allocate lun sw entry\n",
1935 device_xname(sc->sc_c.sc_dev), target, lun);
1936 #endif
1937 /* INT int_resellun */
1938 siop_script_write(sc, sc->script_free_lo, 0x98080000);
1939 siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1940 /* Now the slot entry: JUMP abs_foo, IF lun */
1941 siop_script_write(sc, sc->script_free_lo - 2,
1942 0x800c0000 | lun);
1943 siop_script_write(sc, sc->script_free_lo - 1, 0);
1944 siop_lun->reseloff = sc->script_free_lo - 2;
1945 lunsw->lunsw_size += 2;
1946 sc->script_free_lo += 2;
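	/*
	 * The writes above first append a fresh INT int_resellun past the
	 * current end of the lun switch, then turn the previous terminating
	 * instruction pair (two words back) into the JUMP entry for this
	 * lun, so the switch always ends in an INT.
	 */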
1947 if (siop_target->target_c.flags & TARF_TAG) {
1948 /* we need a tag switch */
1949 sc->script_free_hi -= __arraycount(tag_switch);
1950 if (sc->sc_c.features & SF_CHIP_RAM) {
1951 bus_space_write_region_4(sc->sc_c.sc_ramt,
1952 sc->sc_c.sc_ramh,
1953 sc->script_free_hi * 4, tag_switch,
1954 __arraycount(tag_switch));
1955 } else {
1956 for(i = 0; i < __arraycount(tag_switch); i++) {
1957 sc->sc_c.sc_script[sc->script_free_hi + i] =
1958 siop_htoc32(&sc->sc_c, tag_switch[i]);
1959 }
1960 }
1961 siop_script_write(sc,
1962 siop_lun->reseloff + 1,
1963 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1964 Ent_tag_switch_entry);
1965
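		/*
		 * Each tag gets its own JUMP/address slot in the copied
		 * tag_switch, mirroring the per-target resel switch layout:
		 * slot i sits at Ent_resel_tag0 / 4 + i * 2 words.
		 */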
1966 for (i = 0; i < SIOP_NTAG; i++) {
1967 siop_lun->siop_tag[i].reseloff =
1968 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1969 }
1970 } else {
1971 /* non-tag case; just work with the lun switch */
1972 siop_lun->siop_tag[0].reseloff =
1973 siop_target->siop_lun[lun]->reseloff;
1974 }
1975 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1976 }
1977
1978 void
1979 siop_del_dev(struct siop_softc *sc, int target, int lun)
1980 {
1981 int i;
1982 struct siop_target *siop_target;
1983
1984 #ifdef SIOP_DEBUG
1985 printf("%s:%d:%d: free lun sw entry\n",
1986 device_xname(sc->sc_c.sc_dev), target, lun);
1987 #endif
1988 if (sc->sc_c.targets[target] == NULL)
1989 return;
1990 siop_target = (struct siop_target *)sc->sc_c.targets[target];
1991 free(siop_target->siop_lun[lun], M_DEVBUF);
1992 siop_target->siop_lun[lun] = NULL;
1993 /* XXX compact sw entry too ? */
1994 /* check if we can free the whole target */
1995 for (i = 0; i < 8; i++) {
1996 if (siop_target->siop_lun[i] != NULL)
1997 return;
1998 }
1999 #ifdef SIOP_DEBUG
2000 printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
2001 device_xname(sc->sc_c.sc_dev), target, lun,
2002 siop_target->lunsw->lunsw_off);
2003 #endif
2004 /*
2005 * nothing here, free the target struct and resel
2006 * switch entry
2007 */
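	/*
	 * 0x800c00ff restores the "low byte == 0xff" pattern that
	 * siop_add_reselsw treats as a free resel switch slot.
	 */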
2008 siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
2009 siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
2010 TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
2011 free(sc->sc_c.targets[target], M_DEVBUF);
2012 sc->sc_c.targets[target] = NULL;
2013 sc->sc_ntargets--;
2014 }
2015
2016 #ifdef SIOP_STATS
2017 void
2018 siop_printstats(void)
2019 {
2020
2021 printf("siop_stat_intr %d\n", siop_stat_intr);
2022 printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
2023 printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
2024 printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
2025 printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
2026 printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
2027 printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
2028 printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2029 }
2030 #endif
2031