esiop.c revision 1.10 1 /* $NetBSD: esiop.c,v 1.10 2002/04/25 20:05:10 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.10 2002/04/25 20:05:10 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
98
99 void esiop_update_scntl3 __P((struct esiop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int esiop_stat_intr = 0;
104 static int esiop_stat_intr_shortxfer = 0;
105 static int esiop_stat_intr_sdp = 0;
106 static int esiop_stat_intr_done = 0;
107 static int esiop_stat_intr_xferdisc = 0;
108 static int esiop_stat_intr_lunresel = 0;
109 static int esiop_stat_intr_qfull = 0;
110 void esiop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
116 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
117 static __inline__ void
118 esiop_script_sync(sc, ops)
119 struct esiop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
128 static __inline__ u_int32_t
129 esiop_script_read(sc, offset)
130 struct esiop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 esiop_script_write(sc, offset, val)
145 struct esiop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
156
/*
 * Final, bus-independent part of controller attachment: set up the
 * state shared with the siop driver, initialize our command/tag free
 * lists, reset the SCSI bus and the chip, then attach the scsipi
 * channel so children (disks, tapes, ...) can be configured.
 */
void
esiop_attach(sc)
	struct esiop_softc *sc;
{
	if (siop_common_attach(&sc->sc_c) != 0 )
		return;

	/* per-controller free lists for command descriptors and tag tables */
	TAILQ_INIT(&sc->free_list);
	TAILQ_INIT(&sc->cmds);
	TAILQ_INIT(&sc->free_tagtbl);
	TAILQ_INIT(&sc->tag_tblblk);
	sc->sc_currschedslot = 0;
#ifdef SIOP_DEBUG
	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
#endif

	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;

	/* Do a bus reset, so that devices fall back to narrow/async */
	siop_resetbus(&sc->sc_c);
	/*
	 * esiop_reset() will reset the chip, thus clearing the pending
	 * interrupts raised by the bus reset above.
	 */
	esiop_reset(sc);
#ifdef DUMP_SCRIPT
	esiop_dump_script(sc);
#endif

	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
}
190
/*
 * Reset the chip and (re)build its run-time environment: copy the
 * SCRIPTS program into chip RAM or the host DMA buffer, patch the
 * absolute addresses the assembler could not know, lay out the
 * message-in buffer, the command scheduler ring and the per-target
 * DSA table after the script, and finally start the script at the
 * reselect wait loop.
 */
void
esiop_reset(sc)
	struct esiop_softc *sc;
{
	int i, j;
	u_int32_t addr;
	u_int32_t msgin_addr;

	siop_common_reset(&sc->sc_c);

	/*
	 * we copy the script at the beginning of RAM. Then there is 8 bytes
	 * for messages in.
	 */
	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
	msgin_addr =
	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 2;
	/* then we have the scheduler ring */
	sc->sc_shedoffset = sc->sc_free_offset;
	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
	/* then the targets DSA table */
	sc->sc_target_table_offset = sc->sc_free_offset;
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
	/* copy and patch the script */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    esiop_script,
		    sizeof(esiop_script) / sizeof(esiop_script[0]));
		/* patch the T/L/Q table offset into every site that uses it */
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_tlq_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer));
		}
		/* patch the absolute address of the message-in buffer */
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin2_Used[j] * 4, msgin_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			/* overlay the activity-LED on/off snippets */
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, esiop_led_off,
			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
		}
	} else {
		/* same patching as above, on the host-memory script copy */
		for (j = 0;
		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
		}
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer));
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
			    htole32(msgin_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_off) /
			    sizeof(esiop_led_off[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(esiop_led_off[0]) + j
				    ] = htole32(esiop_led_off[j]);
		}
	}
	/* get base of scheduler ring */
	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
	/* init scheduler: mark every command slot free */
	for (i = 0; i < A_ncmd_slots; i++) {
		esiop_script_write(sc,
		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
	}
	sc->sc_currschedslot = 0;
	/* SCRATCHE holds the current slot index, SCRATCHD its address */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
	/*
	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
	 * octet, reg offset is the third.  Patch the ring base address,
	 * one byte per instruction, into the cmdr0..cmdr3 script slots.
	 */
	esiop_script_write(sc, Ent_cmdr0 / 4,
	    0x78640000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_cmdr1 / 4,
	    0x78650000 | ((addr & 0x0000ff00) ));
	esiop_script_write(sc, Ent_cmdr2 / 4,
	    0x78660000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_cmdr3 / 4,
	    0x78670000 | ((addr & 0xff000000) >> 16));
	/* set flags */
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
	/*
	 * write pointer of base of target DSA table, again one byte per
	 * 'move data8 to reg' instruction (every other script word).
	 */
	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr;
	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
	    ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
	    ((addr & 0x0000ff00) ));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
	    ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
	    ((addr & 0xff000000) >> 16));
#ifdef SIOP_DEBUG
	printf("%s: target table offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
	    sc->sc_free_offset);
#endif

	/* register existing targets, if this is a reset after an attach */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		if (sc->sc_c.targets[i])
			esiop_target_register(sc, i);
	}
	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}
342
/*
 * Resume the SCRIPTS processor at entry point `ent' by loading DSP.
 * The "#if 0" variant additionally logs DSA/DSP for debugging.
 */
#if 0
#define CALL_SCRIPT(ent) do {\
	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
	    esiop_cmd->cmd_c.dsa, \
	    sc->sc_c.sc_scriptaddr + ent); \
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#else
#define CALL_SCRIPT(ent) do {\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#endif
355
356 int
357 esiop_intr(v)
358 void *v;
359 {
360 struct esiop_softc *sc = v;
361 struct esiop_target *esiop_target;
362 struct esiop_cmd *esiop_cmd;
363 struct esiop_lun *esiop_lun;
364 struct scsipi_xfer *xs;
365 int istat, sist, sstat1, dstat;
366 u_int32_t irqcode;
367 int need_reset = 0;
368 int offset, target, lun, tag;
369 u_int32_t tflags;
370 u_int32_t addr;
371 int freetarget = 0;
372 int slot;
373 int retval = 0;
374
375 again:
376 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
377 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
378 return retval;
379 }
380 retval = 1;
381 INCSTAT(esiop_stat_intr);
382 if (istat & ISTAT_INTF) {
383 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
384 SIOP_ISTAT, ISTAT_INTF);
385 esiop_checkdone(sc);
386 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
387 /*
388 * at last one command terminated,
389 * so we should have free slots now
390 */
391 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
392 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
393 }
394 goto again;
395 }
396
397 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
398 (ISTAT_DIP | ISTAT_ABRT)) {
399 /* clear abort */
400 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
401 SIOP_ISTAT, 0);
402 }
403
404 /* get CMD from T/L/Q */
405 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
406 SIOP_SCRATCHC);
407 #ifdef SIOP_DEBUG_INTR
408 printf("interrupt, istat=0x%x tflags=0x%x "
409 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
410 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
411 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
412 SIOP_DSP) -
413 sc->sc_c.sc_scriptaddr));
414 #endif
415 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
416 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
417 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
418 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
419 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
420
421 if (target >= 0 && lun >= 0) {
422 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
423 if (esiop_target == NULL) {
424 printf("esiop_target (target %d) not valid\n", target);
425 goto none;
426 }
427 esiop_lun = esiop_target->esiop_lun[lun];
428 if (esiop_lun == NULL) {
429 printf("esiop_lun (target %d lun %d) not valid\n",
430 target, lun);
431 goto none;
432 }
433 esiop_cmd =
434 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
435 if (esiop_cmd == NULL) {
436 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
437 target, lun, tag);
438 goto none;
439 }
440 xs = esiop_cmd->cmd_c.xs;
441 #ifdef DIAGNOSTIC
442 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
443 printf("esiop_cmd (target %d lun %d) "
444 "not active (%d)\n", target, lun,
445 esiop_cmd->cmd_c.status);
446 goto none;
447 }
448 #endif
449 esiop_table_sync(esiop_cmd,
450 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
451 } else {
452 none:
453 xs = NULL;
454 esiop_target = NULL;
455 esiop_lun = NULL;
456 esiop_cmd = NULL;
457 }
458 if (istat & ISTAT_DIP) {
459 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
460 SIOP_DSTAT);
461 if (dstat & DSTAT_ABRT) {
462 /* was probably generated by a bus reset IOCTL */
463 if ((dstat & DSTAT_DFE) == 0)
464 siop_clearfifo(&sc->sc_c);
465 goto reset;
466 }
467 if (dstat & DSTAT_SSI) {
468 printf("single step dsp 0x%08x dsa 0x08%x\n",
469 (int)(bus_space_read_4(sc->sc_c.sc_rt,
470 sc->sc_c.sc_rh, SIOP_DSP) -
471 sc->sc_c.sc_scriptaddr),
472 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
473 SIOP_DSA));
474 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
475 (istat & ISTAT_SIP) == 0) {
476 bus_space_write_1(sc->sc_c.sc_rt,
477 sc->sc_c.sc_rh, SIOP_DCNTL,
478 bus_space_read_1(sc->sc_c.sc_rt,
479 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
480 }
481 return 1;
482 }
483
484 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
485 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
486 if (dstat & DSTAT_IID)
487 printf(" Illegal instruction");
488 if (dstat & DSTAT_BF)
489 printf(" bus fault");
490 if (dstat & DSTAT_MDPE)
491 printf(" parity");
492 if (dstat & DSTAT_DFE)
493 printf(" dma fifo empty");
494 else
495 siop_clearfifo(&sc->sc_c);
496 printf(", DSP=0x%x DSA=0x%x: ",
497 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
498 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
499 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
500 if (esiop_cmd)
501 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
502 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
503 le32toh(esiop_cmd->cmd_tables->status));
504 else
505 printf(" current T/L/Q invalid\n");
506 need_reset = 1;
507 }
508 }
509 if (istat & ISTAT_SIP) {
510 if (istat & ISTAT_DIP)
511 delay(10);
512 /*
513 * Can't read sist0 & sist1 independantly, or we have to
514 * insert delay
515 */
516 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
517 SIOP_SIST0);
518 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
519 SIOP_SSTAT1);
520 #ifdef SIOP_DEBUG_INTR
521 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
522 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
523 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
524 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
525 SIOP_DSP) -
526 sc->sc_c.sc_scriptaddr));
527 #endif
528 if (sist & SIST0_RST) {
529 esiop_handle_reset(sc);
530 /* no table to flush here */
531 return 1;
532 }
533 if (sist & SIST0_SGE) {
534 if (esiop_cmd)
535 scsipi_printaddr(xs->xs_periph);
536 else
537 printf("%s:", sc->sc_c.sc_dev.dv_xname);
538 printf("scsi gross error\n");
539 if (esiop_target)
540 esiop_target->target_c.flags &= ~TARF_DT;
541 goto reset;
542 }
543 if ((sist & SIST0_MA) && need_reset == 0) {
544 if (esiop_cmd) {
545 int scratchc0;
546 dstat = bus_space_read_1(sc->sc_c.sc_rt,
547 sc->sc_c.sc_rh, SIOP_DSTAT);
548 /*
549 * first restore DSA, in case we were in a S/G
550 * operation.
551 */
552 bus_space_write_4(sc->sc_c.sc_rt,
553 sc->sc_c.sc_rh,
554 SIOP_DSA, esiop_cmd->cmd_c.dsa);
555 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
556 sc->sc_c.sc_rh, SIOP_SCRATCHC);
557 switch (sstat1 & SSTAT1_PHASE_MASK) {
558 case SSTAT1_PHASE_STATUS:
559 /*
560 * previous phase may be aborted for any reason
561 * ( for example, the target has less data to
562 * transfer than requested). Just go to status
563 * and the command should terminate.
564 */
565 INCSTAT(esiop_stat_intr_shortxfer);
566 if ((dstat & DSTAT_DFE) == 0)
567 siop_clearfifo(&sc->sc_c);
568 /* no table to flush here */
569 CALL_SCRIPT(Ent_status);
570 return 1;
571 case SSTAT1_PHASE_MSGIN:
572 /*
573 * target may be ready to disconnect
574 * Save data pointers just in case.
575 */
576 INCSTAT(esiop_stat_intr_xferdisc);
577 if (scratchc0 & A_f_c_data)
578 siop_sdp(&esiop_cmd->cmd_c);
579 else if ((dstat & DSTAT_DFE) == 0)
580 siop_clearfifo(&sc->sc_c);
581 bus_space_write_1(sc->sc_c.sc_rt,
582 sc->sc_c.sc_rh, SIOP_SCRATCHC,
583 scratchc0 & ~A_f_c_data);
584 esiop_table_sync(esiop_cmd,
585 BUS_DMASYNC_PREREAD |
586 BUS_DMASYNC_PREWRITE);
587 CALL_SCRIPT(Ent_msgin);
588 return 1;
589 }
590 printf("%s: unexpected phase mismatch %d\n",
591 sc->sc_c.sc_dev.dv_xname,
592 sstat1 & SSTAT1_PHASE_MASK);
593 } else {
594 printf("%s: phase mismatch without command\n",
595 sc->sc_c.sc_dev.dv_xname);
596 }
597 need_reset = 1;
598 }
599 if (sist & SIST0_PAR) {
600 /* parity error, reset */
601 if (esiop_cmd)
602 scsipi_printaddr(xs->xs_periph);
603 else
604 printf("%s:", sc->sc_c.sc_dev.dv_xname);
605 printf("parity error\n");
606 if (esiop_target)
607 esiop_target->target_c.flags &= ~TARF_DT;
608 goto reset;
609 }
610 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
611 /*
612 * selection time out, assume there's no device here
613 * We also have to update the ring pointer ourselve
614 */
615 slot = bus_space_read_1(sc->sc_c.sc_rt,
616 sc->sc_c.sc_rh, SIOP_SCRATCHE);
617 esiop_script_sync(sc,
618 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
619 #ifdef SIOP_DEBUG_SCHED
620 printf("sel timeout target %d, slot %d\n", target, slot);
621 #endif
622 /*
623 * mark this slot as free, and advance to next slot
624 */
625 esiop_script_write(sc,
626 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
627 A_f_cmd_free);
628 addr = bus_space_read_4(sc->sc_c.sc_rt,
629 sc->sc_c.sc_rh, SIOP_SCRATCHD);
630 if (slot < (A_ncmd_slots - 1)) {
631 bus_space_write_1(sc->sc_c.sc_rt,
632 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
633 addr = addr + sizeof(struct esiop_slot);
634 } else {
635 bus_space_write_1(sc->sc_c.sc_rt,
636 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
637 addr = sc->sc_c.sc_scriptaddr +
638 sc->sc_shedoffset * sizeof(u_int32_t);
639 }
640 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
641 SIOP_SCRATCHD, addr);
642 esiop_script_sync(sc,
643 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
644 if (esiop_cmd) {
645 esiop_cmd->cmd_c.status = CMDST_DONE;
646 xs->error = XS_SELTIMEOUT;
647 freetarget = 1;
648 goto end;
649 } else {
650 printf("%s: selection timeout without "
651 "command, target %d (sdid 0x%x), "
652 "slot %d\n",
653 sc->sc_c.sc_dev.dv_xname, target,
654 bus_space_read_1(sc->sc_c.sc_rt,
655 sc->sc_c.sc_rh, SIOP_SDID), slot);
656 need_reset = 1;
657 }
658 }
659 if (sist & SIST0_UDC) {
660 /*
661 * unexpected disconnect. Usually the target signals
662 * a fatal condition this way. Attempt to get sense.
663 */
664 if (esiop_cmd) {
665 esiop_cmd->cmd_tables->status =
666 htole32(SCSI_CHECK);
667 goto end;
668 }
669 printf("%s: unexpected disconnect without "
670 "command\n", sc->sc_c.sc_dev.dv_xname);
671 goto reset;
672 }
673 if (sist & (SIST1_SBMC << 8)) {
674 /* SCSI bus mode change */
675 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
676 goto reset;
677 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
678 /*
679 * we have a script interrupt, it will
680 * restart the script.
681 */
682 goto scintr;
683 }
684 /*
685 * else we have to restart it ourselve, at the
686 * interrupted instruction.
687 */
688 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
689 SIOP_DSP,
690 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
691 SIOP_DSP) - 8);
692 return 1;
693 }
694 /* Else it's an unhandled exeption (for now). */
695 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
696 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
697 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
698 SIOP_SSTAT1),
699 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
700 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
701 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
702 if (esiop_cmd) {
703 esiop_cmd->cmd_c.status = CMDST_DONE;
704 xs->error = XS_SELTIMEOUT;
705 goto end;
706 }
707 need_reset = 1;
708 }
709 if (need_reset) {
710 reset:
711 /* fatal error, reset the bus */
712 siop_resetbus(&sc->sc_c);
713 /* no table to flush here */
714 return 1;
715 }
716
717 scintr:
718 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
719 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
720 SIOP_DSPS);
721 #ifdef SIOP_DEBUG_INTR
722 printf("script interrupt 0x%x\n", irqcode);
723 #endif
724 /*
725 * no command, or an inactive command is only valid for a
726 * reselect interrupt
727 */
728 if ((irqcode & 0x80) == 0) {
729 if (esiop_cmd == NULL) {
730 printf(
731 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
732 sc->sc_c.sc_dev.dv_xname, irqcode);
733 goto reset;
734 }
735 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
736 printf("%s: command with invalid status "
737 "(IRQ code 0x%x current status %d) !\n",
738 sc->sc_c.sc_dev.dv_xname,
739 irqcode, esiop_cmd->cmd_c.status);
740 xs = NULL;
741 }
742 }
743 switch(irqcode) {
744 case A_int_err:
745 printf("error, DSP=0x%x\n",
746 (int)(bus_space_read_4(sc->sc_c.sc_rt,
747 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
748 if (xs) {
749 xs->error = XS_SELTIMEOUT;
750 goto end;
751 } else {
752 goto reset;
753 }
754 case A_int_msgin:
755 {
756 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
757 sc->sc_c.sc_rh, SIOP_SFBR);
758 if (msgin == MSG_MESSAGE_REJECT) {
759 int msg, extmsg;
760 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
761 /*
762 * message was part of a identify +
763 * something else. Identify shoudl't
764 * have been rejected.
765 */
766 msg =
767 esiop_cmd->cmd_tables->msg_out[1];
768 extmsg =
769 esiop_cmd->cmd_tables->msg_out[3];
770 } else {
771 msg =
772 esiop_cmd->cmd_tables->msg_out[0];
773 extmsg =
774 esiop_cmd->cmd_tables->msg_out[2];
775 }
776 if (msg == MSG_MESSAGE_REJECT) {
777 /* MSG_REJECT for a MSG_REJECT !*/
778 if (xs)
779 scsipi_printaddr(xs->xs_periph);
780 else
781 printf("%s: ",
782 sc->sc_c.sc_dev.dv_xname);
783 printf("our reject message was "
784 "rejected\n");
785 goto reset;
786 }
787 if (msg == MSG_EXTENDED &&
788 extmsg == MSG_EXT_WDTR) {
789 /* WDTR rejected, initiate sync */
790 if ((esiop_target->target_c.flags &
791 TARF_SYNC) == 0) {
792 esiop_target->target_c.status =
793 TARST_OK;
794 siop_update_xfer_mode(&sc->sc_c,
795 target);
796 /* no table to flush here */
797 CALL_SCRIPT(Ent_msgin_ack);
798 return 1;
799 }
800 esiop_target->target_c.status =
801 TARST_SYNC_NEG;
802 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
803 sc->sc_c.st_minsync,
804 sc->sc_c.maxoff);
805 esiop_table_sync(esiop_cmd,
806 BUS_DMASYNC_PREREAD |
807 BUS_DMASYNC_PREWRITE);
808 CALL_SCRIPT(Ent_send_msgout);
809 return 1;
810 } else if (msg == MSG_EXTENDED &&
811 extmsg == MSG_EXT_SDTR) {
812 /* sync rejected */
813 esiop_target->target_c.offset = 0;
814 esiop_target->target_c.period = 0;
815 esiop_target->target_c.status =
816 TARST_OK;
817 siop_update_xfer_mode(&sc->sc_c,
818 target);
819 /* no table to flush here */
820 CALL_SCRIPT(Ent_msgin_ack);
821 return 1;
822 } else if (msg == MSG_EXTENDED &&
823 extmsg == MSG_EXT_PPR) {
824 /* PPR rejected */
825 esiop_target->target_c.offset = 0;
826 esiop_target->target_c.period = 0;
827 esiop_target->target_c.status =
828 TARST_OK;
829 siop_update_xfer_mode(&sc->sc_c,
830 target);
831 /* no table to flush here */
832 CALL_SCRIPT(Ent_msgin_ack);
833 return 1;
834 } else if (msg == MSG_SIMPLE_Q_TAG ||
835 msg == MSG_HEAD_OF_Q_TAG ||
836 msg == MSG_ORDERED_Q_TAG) {
837 if (esiop_handle_qtag_reject(
838 esiop_cmd) == -1)
839 goto reset;
840 CALL_SCRIPT(Ent_msgin_ack);
841 return 1;
842 }
843 if (xs)
844 scsipi_printaddr(xs->xs_periph);
845 else
846 printf("%s: ",
847 sc->sc_c.sc_dev.dv_xname);
848 if (msg == MSG_EXTENDED) {
849 printf("scsi message reject, extended "
850 "message sent was 0x%x\n", extmsg);
851 } else {
852 printf("scsi message reject, message "
853 "sent was 0x%x\n", msg);
854 }
855 /* no table to flush here */
856 CALL_SCRIPT(Ent_msgin_ack);
857 return 1;
858 }
859 if (xs)
860 scsipi_printaddr(xs->xs_periph);
861 else
862 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
863 printf("unhandled message 0x%x\n",
864 esiop_cmd->cmd_tables->msg_in[0]);
865 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
866 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
867 esiop_table_sync(esiop_cmd,
868 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
869 CALL_SCRIPT(Ent_send_msgout);
870 return 1;
871 }
872 case A_int_extmsgin:
873 #ifdef SIOP_DEBUG_INTR
874 printf("extended message: msg 0x%x len %d\n",
875 esiop_cmd->cmd_tables->msg_in[2],
876 esiop_cmd->cmd_tables->msg_in[1]);
877 #endif
878 if (esiop_cmd->cmd_tables->msg_in[1] >
879 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
880 printf("%s: extended message too big (%d)\n",
881 sc->sc_c.sc_dev.dv_xname,
882 esiop_cmd->cmd_tables->msg_in[1]);
883 esiop_cmd->cmd_tables->t_extmsgdata.count =
884 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
885 esiop_table_sync(esiop_cmd,
886 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
887 CALL_SCRIPT(Ent_get_extmsgdata);
888 return 1;
889 case A_int_extmsgdata:
890 #ifdef SIOP_DEBUG_INTR
891 {
892 int i;
893 printf("extended message: 0x%x, data:",
894 esiop_cmd->cmd_tables->msg_in[2]);
895 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
896 i++)
897 printf(" 0x%x",
898 esiop_cmd->cmd_tables->msg_in[i]);
899 printf("\n");
900 }
901 #endif
902 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
903 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
904 case SIOP_NEG_MSGOUT:
905 esiop_update_scntl3(sc,
906 esiop_cmd->cmd_c.siop_target);
907 esiop_table_sync(esiop_cmd,
908 BUS_DMASYNC_PREREAD |
909 BUS_DMASYNC_PREWRITE);
910 CALL_SCRIPT(Ent_send_msgout);
911 return 1;
912 case SIOP_NEG_ACK:
913 esiop_update_scntl3(sc,
914 esiop_cmd->cmd_c.siop_target);
915 CALL_SCRIPT(Ent_msgin_ack);
916 return 1;
917 default:
918 panic("invalid retval from "
919 "siop_wdtr_neg()");
920 }
921 return 1;
922 }
923 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
924 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
925 case SIOP_NEG_MSGOUT:
926 esiop_update_scntl3(sc,
927 esiop_cmd->cmd_c.siop_target);
928 esiop_table_sync(esiop_cmd,
929 BUS_DMASYNC_PREREAD |
930 BUS_DMASYNC_PREWRITE);
931 CALL_SCRIPT(Ent_send_msgout);
932 return 1;
933 case SIOP_NEG_ACK:
934 esiop_update_scntl3(sc,
935 esiop_cmd->cmd_c.siop_target);
936 CALL_SCRIPT(Ent_msgin_ack);
937 return 1;
938 default:
939 panic("invalid retval from "
940 "siop_wdtr_neg()");
941 }
942 return 1;
943 }
944 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
945 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
946 case SIOP_NEG_MSGOUT:
947 esiop_update_scntl3(sc,
948 esiop_cmd->cmd_c.siop_target);
949 esiop_table_sync(esiop_cmd,
950 BUS_DMASYNC_PREREAD |
951 BUS_DMASYNC_PREWRITE);
952 CALL_SCRIPT(Ent_send_msgout);
953 return 1;
954 case SIOP_NEG_ACK:
955 esiop_update_scntl3(sc,
956 esiop_cmd->cmd_c.siop_target);
957 CALL_SCRIPT(Ent_msgin_ack);
958 return 1;
959 default:
960 panic("invalid retval from "
961 "siop_wdtr_neg()");
962 }
963 return 1;
964 }
965 /* send a message reject */
966 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
967 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
968 esiop_table_sync(esiop_cmd,
969 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
970 CALL_SCRIPT(Ent_send_msgout);
971 return 1;
972 case A_int_disc:
973 INCSTAT(esiop_stat_intr_sdp);
974 offset = bus_space_read_1(sc->sc_c.sc_rt,
975 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
976 #ifdef SIOP_DEBUG_DR
977 printf("disconnect offset %d\n", offset);
978 #endif
979 if (offset > SIOP_NSG) {
980 printf("%s: bad offset for disconnect (%d)\n",
981 sc->sc_c.sc_dev.dv_xname, offset);
982 goto reset;
983 }
984 /*
985 * offset == SIOP_NSG may be a valid condition if
986 * we get a sdp when the xfer is done.
987 * Don't call memmove in this case.
988 */
989 if (offset < SIOP_NSG) {
990 memmove(&esiop_cmd->cmd_tables->data[0],
991 &esiop_cmd->cmd_tables->data[offset],
992 (SIOP_NSG - offset) * sizeof(scr_table_t));
993 esiop_table_sync(esiop_cmd,
994 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
995 }
996 CALL_SCRIPT(Ent_script_sched);
997 return 1;
998 case A_int_resfail:
999 printf("reselect failed\n");
1000 CALL_SCRIPT(Ent_script_sched);
1001 return 1;
1002 case A_int_done:
1003 if (xs == NULL) {
1004 printf("%s: done without command\n",
1005 sc->sc_c.sc_dev.dv_xname);
1006 CALL_SCRIPT(Ent_script_sched);
1007 return 1;
1008 }
1009 #ifdef SIOP_DEBUG_INTR
1010 printf("done, DSA=0x%lx target id 0x%x last msg "
1011 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1012 le32toh(esiop_cmd->cmd_tables->id),
1013 esiop_cmd->cmd_tables->msg_in[0],
1014 le32toh(esiop_cmd->cmd_tables->status));
1015 #endif
1016 INCSTAT(esiop_stat_intr_done);
1017 esiop_cmd->cmd_c.status = CMDST_DONE;
1018 goto end;
1019 default:
1020 printf("unknown irqcode %x\n", irqcode);
1021 if (xs) {
1022 xs->error = XS_SELTIMEOUT;
1023 goto end;
1024 }
1025 goto reset;
1026 }
1027 return 1;
1028 }
1029 /* We just should't get there */
1030 panic("siop_intr: I shouldn't be there !");
1031
1032 end:
1033 /*
1034 * restart the script now if command completed properly
1035 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
1036 * queue
1037 */
1038 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1039 #ifdef SIOP_DEBUG_INTR
1040 printf("esiop_intr end: status %d\n", xs->status);
1041 #endif
1042 if (tag >= 0)
1043 esiop_lun->tactive[tag] = NULL;
1044 else
1045 esiop_lun->active = NULL;
1046 esiop_scsicmd_end(esiop_cmd);
1047 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1048 esiop_del_dev(sc, target, lun);
1049 CALL_SCRIPT(Ent_script_sched);
1050 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1051 /* a command terminated, so we have free slots now */
1052 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1053 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1054 }
1055
1056 return 1;
1057 }
1058
/*
 * Complete a command: map the SCSI status byte to a scsipi error code,
 * sync and unload the DMA maps, return the descriptor to the free list
 * and hand the xfer back to the upper layer via scsipi_done().
 * NOTE(review): callers visible in this file run at splbio() — confirm
 * for any new caller.
 */
void
esiop_scsicmd_end(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		/* report busy so the upper layer retries after sense */
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		esiop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(esiop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    sc->sc_c.sc_dev.dv_xname,
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
	}
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		/* give the data buffer back to the CPU before unloading */
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data, 0,
		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data);
	}
	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
	/* cancel the expiry timer armed in esiop_start() */
	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
	esiop_cmd->cmd_c.status = CMDST_FREE;
	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
	/* NOTE(review): resid is always reported as 0 — confirm intended */
	xs->resid = 0;
	scsipi_done (xs);
}
1120
/*
 * Scan every active command (untagged and tagged) of every lun of every
 * target, and complete those whose status byte in the shared xfer table
 * has been set to SCSI_OK by the SCRIPT processor.  Used to reap
 * completed commands without a per-command interrupt.
 */
void
esiop_checkdone(sc)
	struct esiop_softc *sc;
{
	int target, lun, tag;
	struct esiop_target *esiop_target;
	struct esiop_lun *esiop_lun;
	struct esiop_cmd *esiop_cmd;
	int status;

	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			/* first check the untagged (non-queued) command */
			esiop_cmd = esiop_lun->active;
			if (esiop_cmd) {
				/* sync table so we read up-to-date status */
				esiop_table_sync(esiop_cmd,
				    BUS_DMASYNC_POSTREAD |
				    BUS_DMASYNC_POSTWRITE);
				status = le32toh(esiop_cmd->cmd_tables->status);
				if (status == SCSI_OK) {
					/* Ok, this command has been handled */
					esiop_cmd->cmd_c.xs->status = status;
					esiop_lun->active = NULL;
					esiop_scsicmd_end(esiop_cmd);
				}
			}
			/* then all tagged commands of this lun */
			for (tag = 0; tag < ESIOP_NTAG; tag++) {
				esiop_cmd = esiop_lun->tactive[tag];
				if (esiop_cmd == NULL)
					continue;
				esiop_table_sync(esiop_cmd,
				    BUS_DMASYNC_POSTREAD |
				    BUS_DMASYNC_POSTWRITE);
				status = le32toh(esiop_cmd->cmd_tables->status);
				if (status == SCSI_OK) {
					/* Ok, this command has been handled */
					esiop_cmd->cmd_c.xs->status = status;
					esiop_lun->tactive[tag] = NULL;
					esiop_scsicmd_end(esiop_cmd);
				}
			}
		}
	}
}
1170
/*
 * Remove from the SCRIPT scheduler all not-yet-started tagged commands
 * for the given target/lun, marking their slots "ignore" and asking the
 * upper layer to requeue them.  Called e.g. on CHECK CONDITION so queued
 * commands don't run before sense is gathered.
 */
void
esiop_unqueue(sc, target, lun)
	struct esiop_softc *sc;
	int target;
	int lun;
{
	int slot, tag;
	u_int32_t slotdsa;
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun =
	    ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];

	/* first make sure to read valid data */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (tag = 0; tag < ESIOP_NTAG; tag++) {
		/* look for commands in the scheduler, not yet started */
		if (esiop_lun->tactive[tag] == NULL)
			continue;
		esiop_cmd = esiop_lun->tactive[tag];
		for (slot = 0; slot < A_ncmd_slots; slot++) {
			slotdsa = esiop_script_read(sc,
			    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
			/* if the slot has any flag, it won't match the DSA */
			if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
				/* Mark this slot as ignore */
				esiop_script_write(sc,
				    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
				    esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
				/* ask to requeue */
				esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				esiop_lun->tactive[tag] = NULL;
				esiop_scsicmd_end(esiop_cmd);
				break;
			}
		}
	}
	/* push our slot updates back for the SCRIPT processor */
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1211
1212 /*
1213 * handle a rejected queue tag message: the command will run untagged,
1214 * has to adjust the reselect script.
1215 */
1216
1217
int
esiop_handle_qtag_reject(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	/* the tag we sent is the third byte of the tag message we built */
	int tag = esiop_cmd->cmd_tables->msg_out[2];
	struct esiop_target *esiop_target =
	    (struct esiop_target*)sc->sc_c.targets[target];
	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
	    esiop_cmd->cmd_c.status);
#endif

	/* the untagged slot must be free to demote this command to it */
	if (esiop_lun->active != NULL) {
		printf("%s: untagged command already running for target %d "
		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
		    target, lun, esiop_lun->active->cmd_c.status);
		return -1;
	}
	/* clear tag slot */
	esiop_lun->tactive[tag] = NULL;
	/* add command to non-tagged slot */
	esiop_lun->active = esiop_cmd;
	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
	esiop_cmd->cmd_c.tag = -1;
	/* update DSA table */
	esiop_script_write(sc, esiop_target->lun_table_offset +
	    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
	    esiop_cmd->cmd_c.dsa);
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}
1255
1256 /*
1257 * handle a bus reset: reset chip, unqueue all active commands, free all
1258 * target struct and report loosage to upper layer.
1259 * As the upper layer may requeue immediatly we have to first store
1260 * all active commands in a temporary queue.
1261 */
void
esiop_handle_reset(sc)
	struct esiop_softc *sc;
{
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	int target, lun, tag;
	/*
	 * scsi bus reset. reset the chip and restart
	 * the queue. Need to clean up all active commands
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	esiop_reset(sc);

	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first completed commands, then commands
	 * being executed
	 */
	esiop_checkdone(sc);
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		struct esiop_target *esiop_target =
		    (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		/* NOTE(review): hardcoded 8 luns here, elsewhere the
		 * driver uses chan_nluns — confirm esiop_lun[] size */
		for (lun = 0; lun < 8; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			/* tag == -1 designates the untagged active slot */
			for (tag = -1; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    ESIOP_NTAG : 0);
			    tag++) {
				if (tag >= 0)
					esiop_cmd = esiop_lun->tactive[tag];
				else
					esiop_cmd = esiop_lun->active;
				if (esiop_cmd == NULL)
					continue;
				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				esiop_cmd->cmd_c.xs->error =
				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				if (tag >= 0)
					esiop_lun->tactive[tag] = NULL;
				else
					esiop_lun->active = NULL;
				esiop_cmd->cmd_c.status = CMDST_DONE;
				esiop_scsicmd_end(esiop_cmd);
			}
		}
		/* back to async/narrow until renegotiated */
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}
1330
1331 void
1332 esiop_scsipi_request(chan, req, arg)
1333 struct scsipi_channel *chan;
1334 scsipi_adapter_req_t req;
1335 void *arg;
1336 {
1337 struct scsipi_xfer *xs;
1338 struct scsipi_periph *periph;
1339 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1340 struct esiop_cmd *esiop_cmd;
1341 struct esiop_target *esiop_target;
1342 int s, error, i;
1343 int target;
1344 int lun;
1345
1346 switch (req) {
1347 case ADAPTER_REQ_RUN_XFER:
1348 xs = arg;
1349 periph = xs->xs_periph;
1350 target = periph->periph_target;
1351 lun = periph->periph_lun;
1352
1353 s = splbio();
1354 #ifdef SIOP_DEBUG_SCHED
1355 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1356 xs->xs_tag_type, xs->xs_tag_id);
1357 #endif
1358 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1359 if (esiop_cmd == NULL) {
1360 xs->error = XS_RESOURCE_SHORTAGE;
1361 scsipi_done(xs);
1362 splx(s);
1363 return;
1364 }
1365 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1366 #ifdef DIAGNOSTIC
1367 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1368 panic("siop_scsicmd: new cmd not free");
1369 #endif
1370 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1371 if (esiop_target == NULL) {
1372 #ifdef SIOP_DEBUG
1373 printf("%s: alloc siop_target for target %d\n",
1374 sc->sc_c.sc_dev.dv_xname, target);
1375 #endif
1376 sc->sc_c.targets[target] =
1377 malloc(sizeof(struct esiop_target),
1378 M_DEVBUF, M_NOWAIT | M_ZERO);
1379 if (sc->sc_c.targets[target] == NULL) {
1380 printf("%s: can't malloc memory for "
1381 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1382 target);
1383 xs->error = XS_RESOURCE_SHORTAGE;
1384 scsipi_done(xs);
1385 splx(s);
1386 return;
1387 }
1388 esiop_target =
1389 (struct esiop_target*)sc->sc_c.targets[target];
1390 esiop_target->target_c.status = TARST_PROBING;
1391 esiop_target->target_c.flags = 0;
1392 esiop_target->target_c.id =
1393 sc->sc_c.clock_div << 24; /* scntl3 */
1394 esiop_target->target_c.id |= target << 16; /* id */
1395 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1396
1397 for (i=0; i < 8; i++)
1398 esiop_target->esiop_lun[i] = NULL;
1399 esiop_target_register(sc, target);
1400 }
1401 if (esiop_target->esiop_lun[lun] == NULL) {
1402 esiop_target->esiop_lun[lun] =
1403 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1404 M_NOWAIT|M_ZERO);
1405 if (esiop_target->esiop_lun[lun] == NULL) {
1406 printf("%s: can't alloc esiop_lun for "
1407 "target %d lun %d\n",
1408 sc->sc_c.sc_dev.dv_xname, target, lun);
1409 xs->error = XS_RESOURCE_SHORTAGE;
1410 scsipi_done(xs);
1411 splx(s);
1412 return;
1413 }
1414 }
1415 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1416 esiop_cmd->cmd_c.xs = xs;
1417 esiop_cmd->cmd_c.flags = 0;
1418 esiop_cmd->cmd_c.status = CMDST_READY;
1419
1420 /* load the DMA maps */
1421 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1422 esiop_cmd->cmd_c.dmamap_cmd,
1423 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1424 if (error) {
1425 printf("%s: unable to load cmd DMA map: %d\n",
1426 sc->sc_c.sc_dev.dv_xname, error);
1427 xs->error = XS_DRIVER_STUFFUP;
1428 scsipi_done(xs);
1429 splx(s);
1430 return;
1431 }
1432 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1433 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1434 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1435 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1436 ((xs->xs_control & XS_CTL_DATA_IN) ?
1437 BUS_DMA_READ : BUS_DMA_WRITE));
1438 if (error) {
1439 printf("%s: unable to load cmd DMA map: %d",
1440 sc->sc_c.sc_dev.dv_xname, error);
1441 xs->error = XS_DRIVER_STUFFUP;
1442 scsipi_done(xs);
1443 bus_dmamap_unload(sc->sc_c.sc_dmat,
1444 esiop_cmd->cmd_c.dmamap_cmd);
1445 splx(s);
1446 return;
1447 }
1448 bus_dmamap_sync(sc->sc_c.sc_dmat,
1449 esiop_cmd->cmd_c.dmamap_data, 0,
1450 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1451 (xs->xs_control & XS_CTL_DATA_IN) ?
1452 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1453 }
1454 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1455 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1456 BUS_DMASYNC_PREWRITE);
1457
1458 if (xs->xs_tag_type)
1459 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1460 else
1461 esiop_cmd->cmd_c.tag = -1;
1462 siop_setuptables(&esiop_cmd->cmd_c);
1463 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1464 htole32(A_f_c_target | A_f_c_lun);
1465 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1466 htole32((target << 8) | (lun << 16));
1467 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1468 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1469 htole32(A_f_c_tag);
1470 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1471 htole32(esiop_cmd->cmd_c.tag << 24);
1472 }
1473
1474 esiop_table_sync(esiop_cmd,
1475 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1476 esiop_start(sc, esiop_cmd);
1477 if (xs->xs_control & XS_CTL_POLL) {
1478 /* poll for command completion */
1479 while ((xs->xs_status & XS_STS_DONE) == 0) {
1480 delay(1000);
1481 esiop_intr(sc);
1482 }
1483 }
1484 splx(s);
1485 return;
1486
1487 case ADAPTER_REQ_GROW_RESOURCES:
1488 #ifdef SIOP_DEBUG
1489 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1490 sc->sc_c.sc_adapt.adapt_openings);
1491 #endif
1492 esiop_morecbd(sc);
1493 return;
1494
1495 case ADAPTER_REQ_SET_XFER_MODE:
1496 {
1497 struct scsipi_xfer_mode *xm = arg;
1498 if (sc->sc_c.targets[xm->xm_target] == NULL)
1499 return;
1500 s = splbio();
1501 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1502 (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1503 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1504 /* allocate tag tables for this device */
1505 for (lun = 0;
1506 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1507 if (sc->sc_c.sc_chan.chan_periphs[
1508 xm->xm_target][lun])
1509 esiop_add_dev(sc, xm->xm_target, lun);
1510 }
1511 }
1512 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1513 (sc->sc_c.features & SF_BUS_WIDE))
1514 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1515 if (xm->xm_mode & PERIPH_CAP_SYNC)
1516 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1517 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1518 (sc->sc_c.features & SF_CHIP_DT))
1519 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1520 if ((xm->xm_mode &
1521 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1522 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1523 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1524
1525 splx(s);
1526 }
1527 }
1528 }
1529
/*
 * Queue a ready command into the SCRIPT scheduler: pick the current
 * scheduler slot, publish the command's DSA in the per-lun (or per-tag)
 * reselect table and in the slot, then signal the SCRIPT processor.
 * If no slot is free the channel is frozen and the command requeued.
 */
static void
esiop_start(sc, esiop_cmd)
	struct esiop_softc *sc;
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_lun *esiop_lun;
	struct esiop_target *esiop_target;
	int timeout;
	int target, lun, slot;

	/*
	 * first make sure to read valid data
	 */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * We use a circular queue here. sc->sc_currschedslot points to a
	 * free slot, unless we have filled the queue. Check this.
	 */
	slot = sc->sc_currschedslot;
	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
	    A_f_cmd_free) == 0) {
		/*
		 * no more free slot, no need to continue. freeze the queue
		 * and requeue this command.
		 */
		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
		sc->sc_flags |= SCF_CHAN_NOSLOT;
		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
		esiop_scsicmd_end(esiop_cmd);
		return;
	}
	/* OK, we can use this slot */

	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
	esiop_lun = esiop_target->esiop_lun[lun];
	/* if non-tagged command active, panic: this shouldn't happen */
	if (esiop_lun->active != NULL) {
		panic("esiop_start: tagged cmd while untagged running");
	}
#ifdef DIAGNOSTIC
	/* sanity check the tag if needed */
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
			panic("esiop_start: tag not free");
		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
		    esiop_cmd->cmd_c.tag < 0) {
			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
			panic("esiop_start: invalid tag id");
		}
	}
#endif
#ifdef SIOP_DEBUG_SCHED
	printf("using slot %d for DSA 0x%lx\n", slot,
	    (u_long)esiop_cmd->cmd_c.dsa);
#endif
	/* mark command as active */
	if (esiop_cmd->cmd_c.status == CMDST_READY)
		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
	else
		panic("esiop_start: bad status");
	/* DSA table for reselect */
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
		/* DSA table for reselect */
		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
		    htole32(esiop_cmd->cmd_c.dsa);
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_lun->lun_tagtbl->tblblk->blkmap,
		    esiop_lun->lun_tagtbl->tbl_offset,
		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
	} else {
		esiop_lun->active = esiop_cmd;
		esiop_script_write(sc,
		    esiop_target->lun_table_offset +
		    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
		    esiop_cmd->cmd_c.dsa);
	}
	/* scheduler slot: DSA */
	esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
	    esiop_cmd->cmd_c.dsa);
	/* make sure SCRIPT processor will read valid data */
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* handle timeout */
	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
		if (timeout == 0)
			timeout = 1;
		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
		    timeout, esiop_timeout, esiop_cmd);
	}
	/* Signal script it has some work to do */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_ISTAT, ISTAT_SIGP);
	/* update the current slot, and wait for IRQ */
	sc->sc_currschedslot++;
	if (sc->sc_currschedslot >= A_ncmd_slots)
		sc->sc_currschedslot = 0;
	return;
}
1635
/*
 * Command expiry timer handler: report the timeout, reset the SCSI bus
 * and mark the command CMDFL_TIMEOUT.  The command itself is completed
 * later by the bus-reset interrupt handling.
 */
void
esiop_timeout(v)
	void *v;
{
	struct esiop_cmd *esiop_cmd = v;
	struct esiop_softc *sc =
	    (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
	int s;
#ifdef SIOP_DEBUG
	int slot, slotdsa;
#endif

	s = splbio();
	esiop_table_sync(esiop_cmd,
	    BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
#ifdef SIOP_DEBUG
	printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));

	/* dump the scheduler slots and chip registers for debugging */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (slot = 0; slot < A_ncmd_slots; slot++) {
		slotdsa = esiop_script_read(sc,
		    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
		if ((slotdsa & 0x01) == 0)
			printf("slot %d not free (0x%x)\n", slot, slotdsa);
	}
	printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
	printf("DSP 0x%lx DSA 0x%x\n",
	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
	/* read CTEST2 for its side effect on ISTAT's SIGP bit */
	bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
	printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
#else
	printf("command timeout\n");
#endif
	/* reset the scsi bus */
	siop_resetbus(&sc->sc_c);

	/* deactivate callout */
	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
	/*
	 * mark command has being timed out and just return;
	 * the bus reset will generate an interrupt,
	 * it will be handled in siop_intr()
	 */
	esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
	splx(s);
	return;

}
1687
1688 void
1689 esiop_dump_script(sc)
1690 struct esiop_softc *sc;
1691 {
1692 int i;
1693 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1694 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1695 le32toh(sc->sc_c.sc_script[i]),
1696 le32toh(sc->sc_c.sc_script[i+1]));
1697 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1698 0xc0000000) {
1699 i++;
1700 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1701 }
1702 printf("\n");
1703 }
1704 }
1705
1706 void
1707 esiop_morecbd(sc)
1708 struct esiop_softc *sc;
1709 {
1710 int error, i, s;
1711 bus_dma_segment_t seg;
1712 int rseg;
1713 struct esiop_cbd *newcbd;
1714 struct esiop_xfer *xfer;
1715 bus_addr_t dsa;
1716
1717 /* allocate a new list head */
1718 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1719 if (newcbd == NULL) {
1720 printf("%s: can't allocate memory for command descriptors "
1721 "head\n", sc->sc_c.sc_dev.dv_xname);
1722 return;
1723 }
1724
1725 /* allocate cmd list */
1726 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1727 M_DEVBUF, M_NOWAIT|M_ZERO);
1728 if (newcbd->cmds == NULL) {
1729 printf("%s: can't allocate memory for command descriptors\n",
1730 sc->sc_c.sc_dev.dv_xname);
1731 goto bad3;
1732 }
1733 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1734 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1735 if (error) {
1736 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1737 sc->sc_c.sc_dev.dv_xname, error);
1738 goto bad2;
1739 }
1740 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1741 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1742 if (error) {
1743 printf("%s: unable to map cbd DMA memory, error = %d\n",
1744 sc->sc_c.sc_dev.dv_xname, error);
1745 goto bad2;
1746 }
1747 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1748 BUS_DMA_NOWAIT, &newcbd->xferdma);
1749 if (error) {
1750 printf("%s: unable to create cbd DMA map, error = %d\n",
1751 sc->sc_c.sc_dev.dv_xname, error);
1752 goto bad1;
1753 }
1754 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1755 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1756 if (error) {
1757 printf("%s: unable to load cbd DMA map, error = %d\n",
1758 sc->sc_c.sc_dev.dv_xname, error);
1759 goto bad0;
1760 }
1761 #ifdef DEBUG
1762 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1763 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1764 #endif
1765 for (i = 0; i < SIOP_NCMDPB; i++) {
1766 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1767 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1768 &newcbd->cmds[i].cmd_c.dmamap_data);
1769 if (error) {
1770 printf("%s: unable to create data DMA map for cbd: "
1771 "error %d\n",
1772 sc->sc_c.sc_dev.dv_xname, error);
1773 goto bad0;
1774 }
1775 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1776 sizeof(struct scsipi_generic), 1,
1777 sizeof(struct scsipi_generic), 0,
1778 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1779 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1780 if (error) {
1781 printf("%s: unable to create cmd DMA map for cbd %d\n",
1782 sc->sc_c.sc_dev.dv_xname, error);
1783 goto bad0;
1784 }
1785 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1786 newcbd->cmds[i].esiop_cbdp = newcbd;
1787 xfer = &newcbd->xfers[i];
1788 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1789 memset(newcbd->cmds[i].cmd_tables, 0,
1790 sizeof(struct esiop_xfer));
1791 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1792 i * sizeof(struct esiop_xfer);
1793 newcbd->cmds[i].cmd_c.dsa = dsa;
1794 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1795 xfer->siop_tables.t_msgout.count= htole32(1);
1796 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1797 xfer->siop_tables.t_msgin.count= htole32(1);
1798 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1799 offsetof(struct siop_common_xfer, msg_in));
1800 xfer->siop_tables.t_extmsgin.count= htole32(2);
1801 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1802 offsetof(struct siop_common_xfer, msg_in) + 1);
1803 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1804 offsetof(struct siop_common_xfer, msg_in) + 3);
1805 xfer->siop_tables.t_status.count= htole32(1);
1806 xfer->siop_tables.t_status.addr = htole32(dsa +
1807 offsetof(struct siop_common_xfer, status));
1808
1809 s = splbio();
1810 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1811 splx(s);
1812 #ifdef SIOP_DEBUG
1813 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1814 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1815 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1816 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1817 #endif
1818 }
1819 s = splbio();
1820 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1821 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1822 splx(s);
1823 return;
1824 bad0:
1825 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1826 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1827 bad1:
1828 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1829 bad2:
1830 free(newcbd->cmds, M_DEVBUF);
1831 bad3:
1832 free(newcbd, M_DEVBUF);
1833 return;
1834 }
1835
1836 void
1837 esiop_moretagtbl(sc)
1838 struct esiop_softc *sc;
1839 {
1840 int error, i, j, s;
1841 bus_dma_segment_t seg;
1842 int rseg;
1843 struct esiop_dsatblblk *newtblblk;
1844 struct esiop_dsatbl *newtbls;
1845 u_int32_t *tbls;
1846
1847 /* allocate a new list head */
1848 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1849 M_DEVBUF, M_NOWAIT|M_ZERO);
1850 if (newtblblk == NULL) {
1851 printf("%s: can't allocate memory for tag DSA table block\n",
1852 sc->sc_c.sc_dev.dv_xname);
1853 return;
1854 }
1855
1856 /* allocate tbl list */
1857 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1858 M_DEVBUF, M_NOWAIT|M_ZERO);
1859 if (newtbls == NULL) {
1860 printf("%s: can't allocate memory for command descriptors\n",
1861 sc->sc_c.sc_dev.dv_xname);
1862 goto bad3;
1863 }
1864 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1865 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1866 if (error) {
1867 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1868 sc->sc_c.sc_dev.dv_xname, error);
1869 goto bad2;
1870 }
1871 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1872 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1873 if (error) {
1874 printf("%s: unable to map tbls DMA memory, error = %d\n",
1875 sc->sc_c.sc_dev.dv_xname, error);
1876 goto bad2;
1877 }
1878 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1879 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1880 if (error) {
1881 printf("%s: unable to create tbl DMA map, error = %d\n",
1882 sc->sc_c.sc_dev.dv_xname, error);
1883 goto bad1;
1884 }
1885 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1886 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1887 if (error) {
1888 printf("%s: unable to load tbl DMA map, error = %d\n",
1889 sc->sc_c.sc_dev.dv_xname, error);
1890 goto bad0;
1891 }
1892 #ifdef DEBUG
1893 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1894 sc->sc_c.sc_dev.dv_xname,
1895 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1896 #endif
1897 for (i = 0; i < ESIOP_NTPB; i++) {
1898 newtbls[i].tblblk = newtblblk;
1899 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1900 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1901 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1902 newtbls[i].tbl_offset;
1903 for (j = 0; j < ESIOP_NTAG; j++)
1904 newtbls[i].tbl[j] = j;
1905 s = splbio();
1906 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1907 splx(s);
1908 }
1909 s = splbio();
1910 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1911 splx(s);
1912 return;
1913 bad0:
1914 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1915 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1916 bad1:
1917 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1918 bad2:
1919 free(newtbls, M_DEVBUF);
1920 bad3:
1921 free(newtblblk, M_DEVBUF);
1922 return;
1923 }
1924
1925 void
1926 esiop_update_scntl3(sc, _siop_target)
1927 struct esiop_softc *sc;
1928 struct siop_common_target *_siop_target;
1929 {
1930 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1931 esiop_script_write(sc, esiop_target->lun_table_offset,
1932 esiop_target->target_c.id);
1933 }
1934
1935 void
1936 esiop_add_dev(sc, target, lun)
1937 struct esiop_softc *sc;
1938 int target;
1939 int lun;
1940 {
1941 struct esiop_target *esiop_target =
1942 (struct esiop_target *)sc->sc_c.targets[target];
1943 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1944
1945 /* we need a tag DSA table */
1946 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1947 if (esiop_lun->lun_tagtbl == NULL) {
1948 esiop_moretagtbl(sc);
1949 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1950 if (esiop_lun->lun_tagtbl == NULL) {
1951 /* no resources, run untagged */
1952 esiop_target->target_c.flags &= ~TARF_TAG;
1953 return;
1954 }
1955 }
1956 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1957 /* Update LUN DSA table */
1958 esiop_script_write(sc, esiop_target->lun_table_offset +
1959 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
1960 esiop_lun->lun_tagtbl->tbl_dsa);
1961 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1962 }
1963
1964 void
1965 esiop_del_dev(sc, target, lun)
1966 struct esiop_softc *sc;
1967 int target;
1968 int lun;
1969 {
1970 struct esiop_target *esiop_target;
1971 #ifdef SIOP_DEBUG
1972 printf("%s:%d:%d: free lun sw entry\n",
1973 sc->sc_c.sc_dev.dv_xname, target, lun);
1974 #endif
1975 if (sc->sc_c.targets[target] == NULL)
1976 return;
1977 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1978 free(esiop_target->esiop_lun[lun], M_DEVBUF);
1979 esiop_target->esiop_lun[lun] = NULL;
1980 }
1981
/*
 * Carve a lun DSA table for a new target out of the SCRIPT memory,
 * initialize it with the target ID word, and record its address in the
 * global target DSA table so the reselect code can find it.
 */
void
esiop_target_register(sc, target)
	struct esiop_softc *sc;
	u_int32_t target;
{
	struct esiop_target *esiop_target =
	    (struct esiop_target *)sc->sc_c.targets[target];
	struct esiop_lun *esiop_lun;
	int lun;

	/* get a DSA table for this target */
	esiop_target->lun_table_offset = sc->sc_free_offset;
	/* 2 words per lun plus 2 header words, offsets in u_int32_t units */
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
#ifdef SIOP_DEBUG
	printf("%s: lun table for target %d offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
	    sc->sc_free_offset);
#endif
	/* first word of the table is the target ID (for select) */
	esiop_script_write(sc, esiop_target->lun_table_offset,
	    esiop_target->target_c.id);
	/* Record this table in the target DSA table */
	esiop_script_write(sc,
	    sc->sc_target_table_offset + target,
	    (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr);
	/* if we have a tag table, register it */
	for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
		esiop_lun = esiop_target->esiop_lun[lun];
		if (esiop_lun == NULL)
			continue;
		if (esiop_lun->lun_tagtbl)
			esiop_script_write(sc, esiop_target->lun_table_offset +
			    lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
			    esiop_lun->lun_tagtbl->tbl_dsa);
	}
	esiop_script_sync(sc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2021
2022 #ifdef SIOP_STATS
2023 void
2024 esiop_printstats()
2025 {
2026 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2027 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2028 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2029 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2030 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2031 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2032 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2033 }
2034 #endif
2035