/*	$NetBSD: esiop.c,v 1.6 2002/04/23 17:33:27 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.6 2002/04/23 17:33:27 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_update_scntl3 __P((struct esiop_softc *,
98 struct siop_common_target *));
99 struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
100 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
101
102 static int nintr = 0;
103
104 #ifdef SIOP_STATS
105 static int esiop_stat_intr = 0;
106 static int esiop_stat_intr_shortxfer = 0;
107 static int esiop_stat_intr_sdp = 0;
108 static int esiop_stat_intr_done = 0;
109 static int esiop_stat_intr_xferdisc = 0;
110 static int esiop_stat_intr_lunresel = 0;
111 static int esiop_stat_intr_qfull = 0;
112 void esiop_printstats __P((void));
113 #define INCSTAT(x) x++
114 #else
115 #define INCSTAT(x)
116 #endif
117
118 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
119 static __inline__ void
120 esiop_script_sync(sc, ops)
121 struct esiop_softc *sc;
122 int ops;
123 {
124 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
125 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
126 PAGE_SIZE, ops);
127 }
128
129 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
130 static __inline__ u_int32_t
131 esiop_script_read(sc, offset)
132 struct esiop_softc *sc;
133 u_int offset;
134 {
135 if (sc->sc_c.features & SF_CHIP_RAM) {
136 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
137 offset * 4);
138 } else {
139 return le32toh(sc->sc_c.sc_script[offset]);
140 }
141 }
142
143 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
144 u_int32_t));
145 static __inline__ void
146 esiop_script_write(sc, offset, val)
147 struct esiop_softc *sc;
148 u_int offset;
149 u_int32_t val;
150 {
151 if (sc->sc_c.features & SF_CHIP_RAM) {
152 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
153 offset * 4, val);
154 } else {
155 sc->sc_c.sc_script[offset] = htole32(val);
156 }
157 }
158
/*
 * Attach glue: finish the siop-common attachment, initialize the
 * per-adapter command/tag free lists, reset the SCSI bus and the chip,
 * then attach the scsipi channel.  Silently returns if the common
 * attachment fails (it prints its own diagnostics).
 */
void
esiop_attach(sc)
	struct esiop_softc *sc;
{
	if (siop_common_attach(&sc->sc_c) != 0 )
		return;

	/* empty command/tag-table lists; filled lazily by esiop_morecbd() etc. */
	TAILQ_INIT(&sc->free_list);
	TAILQ_INIT(&sc->cmds);
	TAILQ_INIT(&sc->free_tagtbl);
	TAILQ_INIT(&sc->tag_tblblk);
	sc->sc_currschedslot = 0;
#ifdef SIOP_DEBUG
	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
#endif

	/* tagged queueing: up to ESIOP_NTAG openings per periph */
	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;

	/* Do a bus reset, so that devices fall back to narrow/async */
	siop_resetbus(&sc->sc_c);
	/*
	 * esiop_reset() will reset the chip, thus clearing pending interrupts
	 */
	esiop_reset(sc);
#ifdef DUMP_SCRIPT
	esiop_dump_script(sc);
#endif

	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
}
192
/*
 * Reset the chip and (re)build the script image.  The layout, in 32-bit
 * words starting at sc_scriptaddr (either on-chip RAM or DMA memory), is:
 * script code, then a 2-word msg-in buffer, then the scheduler ring
 * (A_ncmd_slots slots of 2 words), then the per-target DSA table.
 * The script is then patched with the absolute addresses of these areas
 * and restarted at the reselect entry point.
 */
void
esiop_reset(sc)
	struct esiop_softc *sc;
{
	int i, j;
	u_int32_t addr;
	u_int32_t msgin_addr;

	siop_common_reset(&sc->sc_c);

	/*
	 * we copy the script at the beginning of RAM. Then there is 8 bytes
	 * for messages in.
	 */
	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
	msgin_addr =
	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 2;
	/* then we have the scheduler ring */
	sc->sc_shedoffset = sc->sc_free_offset;
	sc->sc_free_offset += A_ncmd_slots * 2;
	/* then the targets DSA table */
	sc->sc_target_table_offset = sc->sc_free_offset;
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
	/* copy and patch the script */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    esiop_script,
		    sizeof(esiop_script) / sizeof(esiop_script[0]));
		/* patch every site that needs sizeof(siop_common_xfer) */
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_tlq_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer));
		}
		/* patch every site that needs the msg-in buffer address */
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin2_Used[j] * 4, msgin_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			/* overlay the LED on/off script fragments */
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, esiop_led_off,
			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
		}
	} else {
		/* no chip RAM: same patching, on the DMA-memory copy */
		for (j = 0;
		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
		}
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer));
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
			    htole32(msgin_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			/* Ent_* are byte offsets; divide by word size */
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_off) /
			    sizeof(esiop_led_off[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(esiop_led_off[0]) + j
				    ] = htole32(esiop_led_off[j]);
		}
	}
	/* get base of scheduler ring */
	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
	/* init scheduler: mark every slot free */
	for (i = 0; i < A_ncmd_slots; i++) {
		esiop_script_write(sc, sc->sc_shedoffset + i * 2, A_f_cmd_free);
		esiop_script_write(sc, sc->sc_shedoffset + i * 2 + 1, 0);
	}
	sc->sc_currschedslot = 0;
	/* SCRATCHE = current slot index, SCRATCHD = current slot address */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
	/*
	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
	 * octet, reg offset is the third.  Patch the four cmdr
	 * instructions to load the ring base address byte by byte.
	 */
	esiop_script_write(sc, Ent_cmdr0 / 4,
	    0x78640000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_cmdr1 / 4,
	    0x78650000 | ((addr & 0x0000ff00) ));
	esiop_script_write(sc, Ent_cmdr2 / 4,
	    0x78660000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_cmdr3 / 4,
	    0x78670000 | ((addr & 0xff000000) >> 16));
	/* set flags */
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
	/* write pointer of base of target DSA table */
	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr;
	/* OR each address byte into the data8 field of the load insns */
	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
	    ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
	    ((addr & 0x0000ff00) ));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
	    ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
	    ((addr & 0xff000000) >> 16));
#ifdef SIOP_DEBUG
	printf("%s: target table offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
	    sc->sc_free_offset);
#endif

	/* register existing targets (after a mid-life reset) */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		if (sc->sc_c.targets[i])
			esiop_target_register(sc, i);
	}
	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}
344
/*
 * Start the script processor at entry point 'ent' by writing the entry's
 * bus address to DSP.  The disabled variant additionally prints the
 * current command's DSA and the target DSP for debugging.
 */
#if 0
#define CALL_SCRIPT(ent) do {\
	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
	    esiop_cmd->cmd_c.dsa, \
	    sc->sc_c.sc_scriptaddr + ent); \
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#else
#define CALL_SCRIPT(ent) do {\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#endif
357
/*
 * Interrupt handler.  Decodes ISTAT and services, in order:
 * the script's "commands done" doorbell (ISTAT_SEM / ISTAT_INTF),
 * DMA interrupts (ISTAT_DIP / DSTAT), SCSI interrupts (ISTAT_SIP /
 * SIST0-1), and script-generated interrupts (DSTAT_SIR / DSPS codes).
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
esiop_intr(v)
	void *v;
{
	struct esiop_softc *sc = v;
	struct esiop_target *esiop_target;
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	struct scsipi_xfer *xs;
	int istat, sist, sstat1, dstat;
	u_int32_t irqcode;
	int need_reset = 0;
	int offset, target, lun, tag;
	u_int32_t tflags;
	u_int32_t addr;
	int freetarget = 0;	/* deconfigure target on seltimeout at 'end' */
	int restart = 0;	/* defer script restart to esiop_scsicmd_end */
	int slot;
	int retval = 0;

again:
	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
		/* no interrupt pending; SEM alone means "commands done" */
		if (istat & ISTAT_SEM) {
			bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_ISTAT, (istat & ~ISTAT_SEM));
			esiop_checkdone(sc);
		}
		return retval;
	}
	retval = 1;
	/*
	 * NOTE(review): nintr is a static file-scope counter and is never
	 * reset anywhere in this chunk; unless it is cleared elsewhere this
	 * debug guard panics after 100 interrupts total, not 100 in a loop.
	 * Confirm against the rest of the file.
	 */
	nintr++;
	if (nintr > 100) {
		panic("esiop: intr loop");
	}
	INCSTAT(esiop_stat_intr);
	if (istat & ISTAT_INTF) {
		/* script signaled completions; ack and rescan ISTAT */
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, ISTAT_INTF);
		esiop_checkdone(sc);
		goto again;
	}
	/* get CMD from T/L/Q */
	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_SCRATCHC);
#ifdef SIOP_DEBUG_INTR
	printf("interrupt, istat=0x%x tflags=0x%x "
	    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		SIOP_DSP) -
	    sc->sc_c.sc_scriptaddr));
#endif
	/*
	 * NOTE(review): these range checks use '>' so an index equal to
	 * chan_ntargets/chan_nluns slips through; if targets[] is sized
	 * chan_ntargets this is an off-by-one — verify array sizing.
	 */
	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;

	if (target >= 0 && lun >= 0) {
		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL) {
			printf("esiop_target (target %d) not valid\n", target);
			goto none;
		}
		esiop_lun = esiop_target->esiop_lun[lun];
		if (esiop_lun == NULL) {
			printf("esiop_lun (target %d lun %d) not valid\n",
			    target, lun);
			goto none;
		}
		esiop_cmd =
		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
		if (esiop_cmd == NULL) {
			printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
			    target, lun, tag);
			goto none;
		}
		xs = esiop_cmd->cmd_c.xs;
#ifdef DIAGNOSTIC
		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
			printf("esiop_cmd (target %d lun %d) "
			    "not active (%d)\n", target, lun,
			    esiop_cmd->cmd_c.status);
			goto none;
		}
#endif
		esiop_table_sync(esiop_cmd,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} else {
none:
		xs = NULL;
		esiop_target = NULL;
		esiop_lun = NULL;
		esiop_cmd = NULL;
	}
	/* DMA interrupt handling */
	if (istat & ISTAT_DIP) {
		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSTAT);
		if (dstat & DSTAT_SSI) {
			/* single-step: print position, resume if clean */
			/* NOTE(review): "0x08%x" looks like a typo for "0x%08x" */
			printf("single step dsp 0x%08x dsa 0x08%x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSA));
			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
			    (istat & ISTAT_SIP) == 0) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
			}
			return 1;
		}
		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
			/* fatal DMA error; report and schedule a bus reset */
			printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
			if (dstat & DSTAT_IID)
				printf(" Illegal instruction");
			if (dstat & DSTAT_ABRT)
				printf(" abort");
			if (dstat & DSTAT_BF)
				printf(" bus fault");
			if (dstat & DSTAT_MDPE)
				printf(" parity");
			if (dstat & DSTAT_DFE)
				printf(" dma fifo empty");
			printf(", DSP=0x%x DSA=0x%x: ",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
				SIOP_DSP) - sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
			if (esiop_cmd)
				printf("last msg_in=0x%x status=0x%x\n",
				    esiop_cmd->cmd_tables->msg_in[0],
				    le32toh(esiop_cmd->cmd_tables->status));
			else
				printf(" current T/L/Q invalid\n");
			need_reset = 1;
		}
	}
	/* SCSI interrupt handling */
	if (istat & ISTAT_SIP) {
		if (istat & ISTAT_DIP)
			delay(10);
		/*
		 * Can't read sist0 & sist1 independently, or we have to
		 * insert delay
		 */
		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SIST0);
		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1);
#ifdef SIOP_DEBUG_INTR
		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			SIOP_DSP) -
		    sc->sc_c.sc_scriptaddr));
#endif
		if (sist & SIST0_RST) {
			/* somebody else reset the bus; reinitialize */
			esiop_handle_reset(sc);
			/* no table to flush here */
			return 1;
		}
		if (sist & SIST0_SGE) {
			if (esiop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", sc->sc_c.sc_dev.dv_xname);
			printf("scsi gross error\n");
			/* disable DT transfers for this target and reset */
			if (esiop_target)
				esiop_target->target_c.flags &= ~TARF_DT;
			goto reset;
		}
		if ((sist & SIST0_MA) && need_reset == 0) {
			/* phase mismatch: target changed phase mid-transfer */
			if (esiop_cmd) {
				int scratchc0;
				dstat = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DSTAT);
				/*
				 * first restore DSA, in case we were in a S/G
				 * operation.
				 */
				bus_space_write_4(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh,
				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
				switch (sstat1 & SSTAT1_PHASE_MASK) {
				case SSTAT1_PHASE_STATUS:
				/*
				 * previous phase may be aborted for any reason
				 * ( for example, the target has less data to
				 * transfer than requested). Just go to status
				 * and the command should terminate.
				 */
					INCSTAT(esiop_stat_intr_shortxfer);
					if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					/* no table to flush here */
					CALL_SCRIPT(Ent_status);
					return 1;
				case SSTAT1_PHASE_MSGIN:
				/*
				 * target may be ready to disconnect
				 * Save data pointers just in case.
				 */
					INCSTAT(esiop_stat_intr_xferdisc);
					if (scratchc0 & A_f_c_data)
						siop_sdp(&esiop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					bus_space_write_1(sc->sc_c.sc_rt,
					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
					    scratchc0 & ~A_f_c_data);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_msgin);
					return 1;
				}
				printf("%s: unexpected phase mismatch %d\n",
				    sc->sc_c.sc_dev.dv_xname,
				    sstat1 & SSTAT1_PHASE_MASK);
			} else {
				printf("%s: phase mismatch without command\n",
				    sc->sc_c.sc_dev.dv_xname);
			}
			need_reset = 1;
		}
		if (sist & SIST0_PAR) {
			/* parity error, reset */
			if (esiop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", sc->sc_c.sc_dev.dv_xname);
			printf("parity error\n");
			if (esiop_target)
				esiop_target->target_c.flags &= ~TARF_DT;
			goto reset;
		}
		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
			/* selection time out, assume there's no device here */
			/*
			 * SCRATCHC has not been loaded yet, we have to find
			 * params by ourselves. scratchE0 should point to
			 * the slot.
			 */
			slot = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
			esiop_script_sync(sc,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			target = esiop_script_read(sc,
			    sc->sc_shedoffset + slot * 2 + 1) & 0x00ff0000;
			target = (target >> 16) & 0xff;
			esiop_cmd = esiop_cmd_find(sc, target,
			    esiop_script_read(sc,
				sc->sc_shedoffset + slot * 2) & ~0x3);
			/*
			 * mark this slot as free, and advance to next slot
			 */
			esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
			    A_f_cmd_free);
			addr = bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHD);
			if (slot < (A_ncmd_slots - 1)) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
				addr = addr + 8;
			} else {
				/* ring wrapped: back to slot 0 */
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
				addr = sc->sc_c.sc_scriptaddr +
				    sc->sc_shedoffset * sizeof(u_int32_t);
			}
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCRATCHD, addr);
			esiop_script_sync(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (esiop_cmd) {
				xs = esiop_cmd->cmd_c.xs;
				esiop_target = (struct esiop_target *)
				    esiop_cmd->cmd_c.siop_target;
				lun = xs->xs_periph->periph_lun;
				tag = esiop_cmd->cmd_c.tag;
				esiop_lun = esiop_target->esiop_lun[lun];
				esiop_cmd->cmd_c.status = CMDST_DONE;
				xs->error = XS_SELTIMEOUT;
				freetarget = 1;
				goto end;
			} else {
				printf("%s: selection timeout without "
				    "command, target %d (sdid 0x%x), "
				    "slot %d\n",
				    sc->sc_c.sc_dev.dv_xname, target,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SDID), slot);
				need_reset = 1;
			}
		}
		if (sist & SIST0_UDC) {
			/*
			 * unexpected disconnect. Usually the target signals
			 * a fatal condition this way. Attempt to get sense.
			 */
			if (esiop_cmd) {
				esiop_cmd->cmd_tables->status =
				    htole32(SCSI_CHECK);
				goto end;
			}
			printf("%s: unexpected disconnect without "
			    "command\n", sc->sc_c.sc_dev.dv_xname);
			goto reset;
		}
		if (sist & (SIST1_SBMC << 8)) {
			/* SCSI bus mode change */
			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
				goto reset;
			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
				/*
				 * we have a script interrupt, it will
				 * restart the script.
				 */
				goto scintr;
			}
			/*
			 * else we have to restart it ourselves, at the
			 * interrupted instruction.
			 */
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP,
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) - 8);
			return 1;
		}
		/* Else it's an unhandled exception (for now). */
		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			SIOP_SSTAT1),
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
		if (esiop_cmd) {
			esiop_cmd->cmd_c.status = CMDST_DONE;
			xs->error = XS_SELTIMEOUT;
			goto end;
		}
		need_reset = 1;
	}
	if (need_reset) {
reset:
		/* fatal error, reset the bus */
		siop_resetbus(&sc->sc_c);
		/* no table to flush here */
		return 1;
	}

	/* script interrupt handling (DSPS holds the interrupt code) */
scintr:
	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSPS);
#ifdef SIOP_DEBUG_INTR
		printf("script interrupt 0x%x\n", irqcode);
#endif
		/*
		 * no command, or an inactive command is only valid for a
		 * reselect interrupt
		 */
		if ((irqcode & 0x80) == 0) {
			if (esiop_cmd == NULL) {
				printf(
			"%s: script interrupt (0x%x) with invalid DSA !!!\n",
				    sc->sc_c.sc_dev.dv_xname, irqcode);
				goto reset;
			}
			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
				printf("%s: command with invalid status "
				    "(IRQ code 0x%x current status %d) !\n",
				    sc->sc_c.sc_dev.dv_xname,
				    irqcode, esiop_cmd->cmd_c.status);
				xs = NULL;
			}
		}
		switch(irqcode) {
		case A_int_err:
			printf("error, DSP=0x%x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			} else {
				goto reset;
			}
		case A_int_msgin:
		{
			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SFBR);
			if (msgin == MSG_MESSAGE_REJECT) {
				int msg, extmsg;
				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
					/*
					 * message was part of a identify +
					 * something else. Identify shouldn't
					 * have been rejected.
					 */
					msg =
					    esiop_cmd->cmd_tables->msg_out[1];
					extmsg =
					    esiop_cmd->cmd_tables->msg_out[3];
				} else {
					msg =
					    esiop_cmd->cmd_tables->msg_out[0];
					extmsg =
					    esiop_cmd->cmd_tables->msg_out[2];
				}
				if (msg == MSG_MESSAGE_REJECT) {
					/* MSG_REJECT for a MSG_REJECT !*/
					if (xs)
						scsipi_printaddr(xs->xs_periph);
					else
						printf("%s: ",
						   sc->sc_c.sc_dev.dv_xname);
					printf("our reject message was "
					    "rejected\n");
					goto reset;
				}
				if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_WDTR) {
					/* WDTR rejected, initiate sync */
					if ((esiop_target->target_c.flags &
					   TARF_SYNC) == 0) {
						esiop_target->target_c.status =
						    TARST_OK;
						siop_update_xfer_mode(&sc->sc_c,
						    target);
						/* no table to flush here */
						CALL_SCRIPT(Ent_msgin_ack);
						return 1;
					}
					esiop_target->target_c.status =
					    TARST_SYNC_NEG;
					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
					    sc->sc_c.st_minsync,
					    sc->sc_c.maxoff);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_SDTR) {
					/* sync rejected */
					esiop_target->target_c.offset = 0;
					esiop_target->target_c.period = 0;
					esiop_target->target_c.status =
					    TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_PPR) {
					/* PPR rejected */
					esiop_target->target_c.offset = 0;
					esiop_target->target_c.period = 0;
					esiop_target->target_c.status =
					    TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_SIMPLE_Q_TAG ||
				    msg == MSG_HEAD_OF_Q_TAG ||
				    msg == MSG_ORDERED_Q_TAG) {
					if (esiop_handle_qtag_reject(
					    esiop_cmd) == -1)
						goto reset;
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				}
				if (xs)
					scsipi_printaddr(xs->xs_periph);
				else
					printf("%s: ",
					    sc->sc_c.sc_dev.dv_xname);
				if (msg == MSG_EXTENDED) {
					printf("scsi message reject, extended "
					    "message sent was 0x%x\n", extmsg);
				} else {
					printf("scsi message reject, message "
					    "sent was 0x%x\n", msg);
				}
				/* no table to flush here */
				CALL_SCRIPT(Ent_msgin_ack);
				return 1;
			}
			/* unknown message: reply with MSG_REJECT */
			if (xs)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
			printf("unhandled message 0x%x\n",
			    esiop_cmd->cmd_tables->msg_in[0]);
			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		}
		case A_int_extmsgin:
#ifdef SIOP_DEBUG_INTR
			printf("extended message: msg 0x%x len %d\n",
			    esiop_cmd->cmd_tables->msg_in[2],
			    esiop_cmd->cmd_tables->msg_in[1]);
#endif
			if (esiop_cmd->cmd_tables->msg_in[1] >
			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
				printf("%s: extended message too big (%d)\n",
				    sc->sc_c.sc_dev.dv_xname,
				    esiop_cmd->cmd_tables->msg_in[1]);
			esiop_cmd->cmd_tables->t_extmsgdata.count =
			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_get_extmsgdata);
			return 1;
		case A_int_extmsgdata:
#ifdef SIOP_DEBUG_INTR
			{
			int i;
			printf("extended message: 0x%x, data:",
			    esiop_cmd->cmd_tables->msg_in[2]);
			for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
			    i++)
				printf(" 0x%x",
				    esiop_cmd->cmd_tables->msg_in[i]);
			printf("\n");
			}
#endif
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return(1);
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return(1);
				default:
					panic("invalid retval from "
					    "siop_wdtr_neg()");
				}
				return(1);
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return(1);
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return(1);
				default:
					panic("invalid retval from "
					    "siop_wdtr_neg()");
				}
				return(1);
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return(1);
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return(1);
				default:
					panic("invalid retval from "
					    "siop_wdtr_neg()");
				}
				return(1);
			}
			/* send a message reject */
			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		case A_int_disc:
			/* target disconnected; save data pointer (offset
			 * = first still-valid S/G entry, from SCRATCHA1) */
			INCSTAT(esiop_stat_intr_sdp);
			offset = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
#ifdef SIOP_DEBUG_DR
			printf("disconnect offset %d\n", offset);
#endif
			if (offset > SIOP_NSG) {
				printf("%s: bad offset for disconnect (%d)\n",
				    sc->sc_c.sc_dev.dv_xname, offset);
				goto reset;
			}
			/*
			 * offset == SIOP_NSG may be a valid condition if
			 * we get a sdp when the xfer is done.
			 * Don't call memmove in this case.
			 */
			if (offset < SIOP_NSG) {
				memmove(&esiop_cmd->cmd_tables->data[0],
				    &esiop_cmd->cmd_tables->data[offset],
				    (SIOP_NSG - offset) * sizeof(scr_table_t));
				esiop_table_sync(esiop_cmd,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			}
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_resfail:
			printf("reselect failed\n");
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_done:
			if (xs == NULL) {
				printf("%s: done without command\n",
				    sc->sc_c.sc_dev.dv_xname);
				CALL_SCRIPT(Ent_script_sched);
				return 1;
			}
#ifdef SIOP_DEBUG_INTR
			printf("done, DSA=0x%lx target id 0x%x last msg "
			    "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
			    le32toh(esiop_cmd->cmd_tables->id),
			    esiop_cmd->cmd_tables->msg_in[0],
			    le32toh(esiop_cmd->cmd_tables->status));
#endif
			INCSTAT(esiop_stat_intr_done);
			esiop_cmd->cmd_c.status = CMDST_DONE;
			goto end;
		default:
			printf("unknown irqcode %x\n", irqcode);
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			}
			goto reset;
		}
		return 1;
	}
	/* We just shouldn't get there */
	panic("siop_intr: I shouldn't be there !");

end:
	/*
	 * restart the script now if command completed properly
	 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
	 * queue
	 */
	xs->status = le32toh(esiop_cmd->cmd_tables->status);
#ifdef SIOP_DEBUG_INTR
	printf("esiop_intr end: status %d\n", xs->status);
#endif
	if (xs->status == SCSI_OK)
		CALL_SCRIPT(Ent_script_sched);
	else
		restart = 1;
	/* detach the command from its lun before completing it */
	if (tag >= 0)
		esiop_lun->tactive[tag] = NULL;
	else
		esiop_lun->active = NULL;
	esiop_scsicmd_end(esiop_cmd);
	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
		esiop_del_dev(sc, target, lun);
	if (restart)
		CALL_SCRIPT(Ent_script_sched);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* a command terminated, so we have free slots now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}

	return retval;
}
1062
/*
 * Complete a command: translate the SCSI status byte into a scsipi
 * error code, tear down the DMA mappings, return the descriptor to the
 * free list and hand the xfer back to the scsipi layer.
 */
void
esiop_scsicmd_end(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		/* check condition: report busy so the upper layer retries */
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		esiop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(esiop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    sc->sc_c.sc_dev.dv_xname,
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 * (set by the caller, e.g. XS_REQUEUE or XS_RESET)
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
	}
	/* sync and unload the data buffer, if this xfer carried one */
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data, 0,
		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data);
	}
	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
	/* cancel the timeout before recycling the descriptor */
	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
	esiop_cmd->cmd_c.status = CMDST_FREE;
	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
	/* resid is unconditionally cleared here, even on error paths */
	xs->resid = 0;
	scsipi_done (xs);
}
1124
1125 void
1126 esiop_checkdone(sc)
1127 struct esiop_softc *sc;
1128 {
1129 int target, lun, tag;
1130 struct esiop_target *esiop_target;
1131 struct esiop_lun *esiop_lun;
1132 struct esiop_cmd *esiop_cmd;
1133 int status;
1134
1135 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1136 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1137 if (esiop_target == NULL)
1138 continue;
1139 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1140 esiop_lun = esiop_target->esiop_lun[lun];
1141 if (esiop_lun == NULL)
1142 continue;
1143 esiop_cmd = esiop_lun->active;
1144 if (esiop_cmd) {
1145 esiop_table_sync(esiop_cmd,
1146 BUS_DMASYNC_POSTREAD |
1147 BUS_DMASYNC_POSTWRITE);
1148 status = le32toh(esiop_cmd->cmd_tables->status);
1149 if (status == SCSI_OK) {
1150 /* Ok, this command has been handled */
1151 esiop_cmd->cmd_c.xs->status = status;
1152 esiop_lun->active = NULL;
1153 esiop_scsicmd_end(esiop_cmd);
1154 }
1155 }
1156 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1157 esiop_cmd = esiop_lun->tactive[tag];
1158 if (esiop_cmd == NULL)
1159 continue;
1160 esiop_table_sync(esiop_cmd,
1161 BUS_DMASYNC_POSTREAD |
1162 BUS_DMASYNC_POSTWRITE);
1163 status = le32toh(esiop_cmd->cmd_tables->status);
1164 if (status == SCSI_OK) {
1165 /* Ok, this command has been handled */
1166 esiop_cmd->cmd_c.xs->status = status;
1167 esiop_lun->tactive[tag] = NULL;
1168 esiop_scsicmd_end(esiop_cmd);
1169 }
1170 }
1171 }
1172 }
1173 }
1174
1175 void
1176 esiop_unqueue(sc, target, lun)
1177 struct esiop_softc *sc;
1178 int target;
1179 int lun;
1180 {
1181 int slot, tag;
1182 u_int32_t slotdsa;
1183 struct esiop_cmd *esiop_cmd;
1184 struct esiop_lun *esiop_lun =
1185 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1186
1187 /* first make sure to read valid data */
1188 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1189
1190 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1191 /* look for commands in the scheduler, not yet started */
1192 if (esiop_lun->tactive[tag] == NULL)
1193 continue;
1194 esiop_cmd = esiop_lun->tactive[tag];
1195 for (slot = 0; slot < A_ncmd_slots; slot++) {
1196 slotdsa = esiop_script_read(sc,
1197 sc->sc_shedoffset + slot * 2);
1198 if (slotdsa & A_f_cmd_free)
1199 continue;
1200 if ((slotdsa & ~A_f_cmd_free) == esiop_cmd->cmd_c.dsa)
1201 break;
1202 }
1203 if (slot > ESIOP_NTAG)
1204 continue; /* didn't find it */
1205 /* Mark this slot as ignore */
1206 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1207 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1208 /* ask to requeue */
1209 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1210 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1211 esiop_lun->tactive[tag] = NULL;
1212 esiop_scsicmd_end(esiop_cmd);
1213 }
1214 }
1215
/*
 * Handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script accordingly.
 */
1220
1221
/*
 * Move a command whose queue tag message was rejected from its tag slot
 * to the lun's untagged slot, and point the reselect DSA table entry at
 * the command directly.  Returns 0 on success, -1 if an untagged
 * command is already running on this lun.
 */
int
esiop_handle_qtag_reject(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	/* tag byte of the queue tag message we sent (msg_out[2]) */
	int tag = esiop_cmd->cmd_tables->msg_out[2];
	struct esiop_target *esiop_target =
	    (struct esiop_target*)sc->sc_c.targets[target];
	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
	    esiop_cmd->cmd_c.status);
#endif

	if (esiop_lun->active != NULL) {
		printf("%s: untagged command already running for target %d "
		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
		    target, lun, esiop_lun->active->cmd_c.status);
		return -1;
	}
	/* clear tag slot */
	esiop_lun->tactive[tag] = NULL;
	/* add command to non-tagged slot */
	esiop_lun->active = esiop_cmd;
	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
	esiop_cmd->cmd_c.tag = -1;
	/* update DSA table: reselect finds the command, not a tag table */
	esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
	    esiop_cmd->cmd_c.dsa);
	esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}
1259
/*
 * Handle a bus reset: reset the chip, unqueue all active commands, free
 * all target structs and report the loss to the upper layer.
 * As the upper layer may requeue immediately, we first have to store
 * all active commands in a temporary queue.
 */
/*
 * Recover from a SCSI bus reset: reset the chip, complete already-done
 * commands, fail every remaining active command with XS_RESET (or
 * XS_TIMEOUT if that command triggered the reset), reset per-target
 * negotiation state, and notify the scsipi layer.
 */
void
esiop_handle_reset(sc)
	struct esiop_softc *sc;
{
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	int target, lun, tag;
	/*
	 * scsi bus reset. reset the chip and restart
	 * the queue. Need to clean up all active commands
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	esiop_reset(sc);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first completed commands, then commands
	 * being executed
	 */
	esiop_checkdone(sc);
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		struct esiop_target *esiop_target =
		    (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		/* the esiop_lun array holds 8 entries, one per lun */
		for (lun = 0; lun < 8; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			/*
			 * tag == -1 is the untagged slot; tagged slots
			 * are only scanned when tagged queuing is on.
			 */
			for (tag = -1; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    ESIOP_NTAG : 0);
			    tag++) {
				if (tag >= 0)
					esiop_cmd = esiop_lun->tactive[tag];
				else
					esiop_cmd = esiop_lun->active;
				if (esiop_cmd == NULL)
					continue;
				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				esiop_cmd->cmd_c.xs->error =
				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				/* error already set: don't map status */
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				if (tag >= 0)
					esiop_lun->tactive[tag] = NULL;
				else
					esiop_lun->active = NULL;
				esiop_cmd->cmd_c.status = CMDST_DONE;
				esiop_scsicmd_end(esiop_cmd);
			}
		}
		/* forget negotiated parameters; renegotiate later */
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}
1333
1334 void
1335 esiop_scsipi_request(chan, req, arg)
1336 struct scsipi_channel *chan;
1337 scsipi_adapter_req_t req;
1338 void *arg;
1339 {
1340 struct scsipi_xfer *xs;
1341 struct scsipi_periph *periph;
1342 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1343 struct esiop_cmd *esiop_cmd;
1344 struct esiop_target *esiop_target;
1345 int s, error, i;
1346 int target;
1347 int lun;
1348
1349 switch (req) {
1350 case ADAPTER_REQ_RUN_XFER:
1351 xs = arg;
1352 periph = xs->xs_periph;
1353 target = periph->periph_target;
1354 lun = periph->periph_lun;
1355
1356 s = splbio();
1357 #ifdef SIOP_DEBUG_SCHED
1358 printf("starting cmd for %d:%d\n", target, lun);
1359 #endif
1360 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1361 if (esiop_cmd == NULL) {
1362 xs->error = XS_RESOURCE_SHORTAGE;
1363 scsipi_done(xs);
1364 splx(s);
1365 return;
1366 }
1367 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1368 #ifdef DIAGNOSTIC
1369 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1370 panic("siop_scsicmd: new cmd not free");
1371 #endif
1372 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1373 if (esiop_target == NULL) {
1374 #ifdef SIOP_DEBUG
1375 printf("%s: alloc siop_target for target %d\n",
1376 sc->sc_c.sc_dev.dv_xname, target);
1377 #endif
1378 sc->sc_c.targets[target] =
1379 malloc(sizeof(struct esiop_target),
1380 M_DEVBUF, M_NOWAIT | M_ZERO);
1381 if (sc->sc_c.targets[target] == NULL) {
1382 printf("%s: can't malloc memory for "
1383 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1384 target);
1385 xs->error = XS_RESOURCE_SHORTAGE;
1386 scsipi_done(xs);
1387 splx(s);
1388 return;
1389 }
1390 esiop_target =
1391 (struct esiop_target*)sc->sc_c.targets[target];
1392 esiop_target->target_c.status = TARST_PROBING;
1393 esiop_target->target_c.flags = 0;
1394 esiop_target->target_c.id =
1395 sc->sc_c.clock_div << 24; /* scntl3 */
1396 esiop_target->target_c.id |= target << 16; /* id */
1397 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1398
1399 for (i=0; i < 8; i++)
1400 esiop_target->esiop_lun[i] = NULL;
1401 esiop_target_register(sc, target);
1402 }
1403 if (esiop_target->esiop_lun[lun] == NULL) {
1404 esiop_target->esiop_lun[lun] =
1405 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1406 M_NOWAIT|M_ZERO);
1407 if (esiop_target->esiop_lun[lun] == NULL) {
1408 printf("%s: can't alloc esiop_lun for "
1409 "target %d lun %d\n",
1410 sc->sc_c.sc_dev.dv_xname, target, lun);
1411 xs->error = XS_RESOURCE_SHORTAGE;
1412 scsipi_done(xs);
1413 splx(s);
1414 return;
1415 }
1416 }
1417 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1418 esiop_cmd->cmd_c.xs = xs;
1419 esiop_cmd->cmd_c.flags = 0;
1420 esiop_cmd->cmd_c.status = CMDST_READY;
1421
1422 /* load the DMA maps */
1423 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1424 esiop_cmd->cmd_c.dmamap_cmd,
1425 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1426 if (error) {
1427 printf("%s: unable to load cmd DMA map: %d\n",
1428 sc->sc_c.sc_dev.dv_xname, error);
1429 xs->error = XS_DRIVER_STUFFUP;
1430 scsipi_done(xs);
1431 splx(s);
1432 return;
1433 }
1434 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1435 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1436 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1437 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1438 ((xs->xs_control & XS_CTL_DATA_IN) ?
1439 BUS_DMA_READ : BUS_DMA_WRITE));
1440 if (error) {
1441 printf("%s: unable to load cmd DMA map: %d",
1442 sc->sc_c.sc_dev.dv_xname, error);
1443 xs->error = XS_DRIVER_STUFFUP;
1444 scsipi_done(xs);
1445 bus_dmamap_unload(sc->sc_c.sc_dmat,
1446 esiop_cmd->cmd_c.dmamap_cmd);
1447 splx(s);
1448 return;
1449 }
1450 bus_dmamap_sync(sc->sc_c.sc_dmat,
1451 esiop_cmd->cmd_c.dmamap_data, 0,
1452 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1453 (xs->xs_control & XS_CTL_DATA_IN) ?
1454 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1455 }
1456 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1457 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1458 BUS_DMASYNC_PREWRITE);
1459
1460 if (xs->xs_tag_type)
1461 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1462 else
1463 esiop_cmd->cmd_c.tag = -1;
1464 siop_setuptables(&esiop_cmd->cmd_c);
1465 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1466 htole32(A_f_c_target | A_f_c_lun);
1467 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1468 htole32((target << 8) | (lun << 16));
1469 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1470 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1471 htole32(A_f_c_tag);
1472 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1473 htole32(esiop_cmd->cmd_c.tag << 24);
1474 }
1475
1476 esiop_table_sync(esiop_cmd,
1477 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1478 esiop_start(sc, esiop_cmd);
1479 if (xs->xs_control & XS_CTL_POLL) {
1480 /* poll for command completion */
1481 while ((xs->xs_status & XS_STS_DONE) == 0) {
1482 delay(1000);
1483 esiop_intr(sc);
1484 }
1485 }
1486 splx(s);
1487 return;
1488
1489 case ADAPTER_REQ_GROW_RESOURCES:
1490 #ifdef SIOP_DEBUG
1491 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1492 sc->sc_c.sc_adapt.adapt_openings);
1493 #endif
1494 esiop_morecbd(sc);
1495 return;
1496
1497 case ADAPTER_REQ_SET_XFER_MODE:
1498 {
1499 struct scsipi_xfer_mode *xm = arg;
1500 if (sc->sc_c.targets[xm->xm_target] == NULL)
1501 return;
1502 s = splbio();
1503 if (xm->xm_mode & PERIPH_CAP_TQING)
1504 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1505 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1506 (sc->sc_c.features & SF_BUS_WIDE))
1507 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1508 if (xm->xm_mode & PERIPH_CAP_SYNC)
1509 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1510 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1511 (sc->sc_c.features & SF_CHIP_DT))
1512 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1513 if ((xm->xm_mode &
1514 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1515 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1516 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1517
1518 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1519 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1520 /* allocate a lun sw entry for this device */
1521 esiop_add_dev(sc, xm->xm_target, lun);
1522 }
1523 splx(s);
1524 }
1525 }
1526 }
1527
/*
 * Post a ready command into the script's circular scheduler queue and
 * signal the SCRIPTS processor.  If the scheduler is full, freeze the
 * channel and requeue the command.
 */
static void
esiop_start(sc, esiop_cmd)
	struct esiop_softc *sc;
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_lun *esiop_lun;
	struct esiop_target *esiop_target;
	int timeout;
	int target, lun, slot;

	/*
	 * NOTE(review): nintr is defined outside this chunk; it looks
	 * like a debug interrupt counter being reset here — confirm
	 * against the head of the file.
	 */
	nintr = 0;

	/*
	 * first make sure to read valid data
	 */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * We use a circular queue here. sc->sc_currschedslot points to a
	 * free slot, unless we have filled the queue. Check this.
	 */
	slot = sc->sc_currschedslot;
	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * 2) &
	    A_f_cmd_free) == 0) {
		/*
		 * no more free slot, no need to continue. freeze the queue
		 * and requeue this command.
		 */
		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
		sc->sc_flags |= SCF_CHAN_NOSLOT;
		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
		esiop_scsicmd_end(esiop_cmd);
		return;
	}
	/* OK, we can use this slot */

	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
	esiop_lun = esiop_target->esiop_lun[lun];
	/* if non-tagged command active, panic: this shouldn't happen */
	if (esiop_lun->active != NULL) {
		panic("esiop_start: tagged cmd while untagged running");
	}
#ifdef DIAGNOSTIC
	/* sanity check the tag if needed */
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
			panic("esiop_start: tag not free");
		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
		    esiop_cmd->cmd_c.tag < 0) {
			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
			panic("esiop_start: invalid tag id");
		}
	}
#endif
#ifdef SIOP_DEBUG_SCHED
	printf("using slot %d for DSA 0x%lx\n", slot,
	    (u_long)esiop_cmd->cmd_c.dsa);
#endif
	/* mark command as active */
	if (esiop_cmd->cmd_c.status == CMDST_READY)
		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
	else
		panic("esiop_start: bad status");
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
		/* DSA table for reselect: point at the lun's tag table */
		if ((esiop_lun->lun_flags & LUNF_TAGTABLE) == 0) {
			esiop_script_write(sc,
			    esiop_target->lun_table_offset + lun + 2,
			    esiop_lun->lun_tagtbl->tbl_dsa);
			esiop_lun->lun_flags |= LUNF_TAGTABLE;
		}
		/* enter the command's DSA in its tag table slot */
		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
		    htole32(esiop_cmd->cmd_c.dsa);
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_lun->lun_tagtbl->tblblk->blkmap,
		    esiop_lun->lun_tagtbl->tbl_offset,
		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
	} else {
		esiop_lun->active = esiop_cmd;
		/* DSA table for reselect: point at the command itself */
		esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
		    esiop_cmd->cmd_c.dsa);
		esiop_lun->lun_flags &= ~LUNF_TAGTABLE;

	}
	/* scheduler slot: ID, then DSA */
	esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
	    sc->sc_c.targets[target]->id);
	esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
	    esiop_cmd->cmd_c.dsa);
	/* handle timeout */
	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
		if (timeout == 0)
			timeout = 1;	/* at least one tick */
		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
		    timeout, esiop_timeout, esiop_cmd);
	}
	/* make sure SCRIPT processor will read valid data */
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Signal script it has some work to do */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_ISTAT, ISTAT_SIGP);
	/* update the current slot, and wait for IRQ */
	sc->sc_currschedslot++;
	if (sc->sc_currschedslot >= A_ncmd_slots)
		sc->sc_currschedslot = 0;
	return;
}
1643
1644 void
1645 esiop_timeout(v)
1646 void *v;
1647 {
1648 struct esiop_cmd *esiop_cmd = v;
1649 struct esiop_softc *sc =
1650 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1651 int s;
1652
1653 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1654 printf("command timeout\n");
1655
1656 s = splbio();
1657 /* reset the scsi bus */
1658 siop_resetbus(&sc->sc_c);
1659
1660 /* deactivate callout */
1661 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1662 /*
1663 * mark command has being timed out and just return;
1664 * the bus reset will generate an interrupt,
1665 * it will be handled in siop_intr()
1666 */
1667 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1668 splx(s);
1669 return;
1670
1671 }
1672
1673 void
1674 esiop_dump_script(sc)
1675 struct esiop_softc *sc;
1676 {
1677 int i;
1678 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1679 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1680 le32toh(sc->sc_c.sc_script[i]),
1681 le32toh(sc->sc_c.sc_script[i+1]));
1682 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1683 0xc0000000) {
1684 i++;
1685 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1686 }
1687 printf("\n");
1688 }
1689 }
1690
1691 void
1692 esiop_morecbd(sc)
1693 struct esiop_softc *sc;
1694 {
1695 int error, i, s;
1696 bus_dma_segment_t seg;
1697 int rseg;
1698 struct esiop_cbd *newcbd;
1699 struct esiop_xfer *xfer;
1700 bus_addr_t dsa;
1701
1702 /* allocate a new list head */
1703 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1704 if (newcbd == NULL) {
1705 printf("%s: can't allocate memory for command descriptors "
1706 "head\n", sc->sc_c.sc_dev.dv_xname);
1707 return;
1708 }
1709
1710 /* allocate cmd list */
1711 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1712 M_DEVBUF, M_NOWAIT|M_ZERO);
1713 if (newcbd->cmds == NULL) {
1714 printf("%s: can't allocate memory for command descriptors\n",
1715 sc->sc_c.sc_dev.dv_xname);
1716 goto bad3;
1717 }
1718 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1719 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1720 if (error) {
1721 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1722 sc->sc_c.sc_dev.dv_xname, error);
1723 goto bad2;
1724 }
1725 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1726 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1727 if (error) {
1728 printf("%s: unable to map cbd DMA memory, error = %d\n",
1729 sc->sc_c.sc_dev.dv_xname, error);
1730 goto bad2;
1731 }
1732 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1733 BUS_DMA_NOWAIT, &newcbd->xferdma);
1734 if (error) {
1735 printf("%s: unable to create cbd DMA map, error = %d\n",
1736 sc->sc_c.sc_dev.dv_xname, error);
1737 goto bad1;
1738 }
1739 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1740 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1741 if (error) {
1742 printf("%s: unable to load cbd DMA map, error = %d\n",
1743 sc->sc_c.sc_dev.dv_xname, error);
1744 goto bad0;
1745 }
1746 #ifdef DEBUG
1747 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1748 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1749 #endif
1750 for (i = 0; i < SIOP_NCMDPB; i++) {
1751 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1752 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1753 &newcbd->cmds[i].cmd_c.dmamap_data);
1754 if (error) {
1755 printf("%s: unable to create data DMA map for cbd: "
1756 "error %d\n",
1757 sc->sc_c.sc_dev.dv_xname, error);
1758 goto bad0;
1759 }
1760 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1761 sizeof(struct scsipi_generic), 1,
1762 sizeof(struct scsipi_generic), 0,
1763 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1764 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1765 if (error) {
1766 printf("%s: unable to create cmd DMA map for cbd %d\n",
1767 sc->sc_c.sc_dev.dv_xname, error);
1768 goto bad0;
1769 }
1770 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1771 newcbd->cmds[i].esiop_cbdp = newcbd;
1772 xfer = &newcbd->xfers[i];
1773 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1774 memset(newcbd->cmds[i].cmd_tables, 0,
1775 sizeof(struct esiop_xfer));
1776 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1777 i * sizeof(struct esiop_xfer);
1778 newcbd->cmds[i].cmd_c.dsa = dsa;
1779 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1780 xfer->siop_tables.t_msgout.count= htole32(1);
1781 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1782 xfer->siop_tables.t_msgin.count= htole32(1);
1783 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1784 offsetof(struct siop_common_xfer, msg_in));
1785 xfer->siop_tables.t_extmsgin.count= htole32(2);
1786 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1787 offsetof(struct siop_common_xfer, msg_in) + 1);
1788 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1789 offsetof(struct siop_common_xfer, msg_in) + 3);
1790 xfer->siop_tables.t_status.count= htole32(1);
1791 xfer->siop_tables.t_status.addr = htole32(dsa +
1792 offsetof(struct siop_common_xfer, status));
1793
1794 s = splbio();
1795 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1796 splx(s);
1797 #ifdef SIOP_DEBUG
1798 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1799 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1800 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1801 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1802 #endif
1803 }
1804 s = splbio();
1805 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1806 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1807 splx(s);
1808 return;
1809 bad0:
1810 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1811 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1812 bad1:
1813 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1814 bad2:
1815 free(newcbd->cmds, M_DEVBUF);
1816 bad3:
1817 free(newcbd, M_DEVBUF);
1818 return;
1819 }
1820
1821 void
1822 esiop_moretagtbl(sc)
1823 struct esiop_softc *sc;
1824 {
1825 int error, i, j, s;
1826 bus_dma_segment_t seg;
1827 int rseg;
1828 struct esiop_dsatblblk *newtblblk;
1829 struct esiop_dsatbl *newtbls;
1830 u_int32_t *tbls;
1831
1832 /* allocate a new list head */
1833 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1834 M_DEVBUF, M_NOWAIT|M_ZERO);
1835 if (newtblblk == NULL) {
1836 printf("%s: can't allocate memory for tag DSA table block\n",
1837 sc->sc_c.sc_dev.dv_xname);
1838 return;
1839 }
1840
1841 /* allocate tbl list */
1842 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1843 M_DEVBUF, M_NOWAIT|M_ZERO);
1844 if (newtbls == NULL) {
1845 printf("%s: can't allocate memory for command descriptors\n",
1846 sc->sc_c.sc_dev.dv_xname);
1847 goto bad3;
1848 }
1849 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1850 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1851 if (error) {
1852 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1853 sc->sc_c.sc_dev.dv_xname, error);
1854 goto bad2;
1855 }
1856 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1857 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1858 if (error) {
1859 printf("%s: unable to map tbls DMA memory, error = %d\n",
1860 sc->sc_c.sc_dev.dv_xname, error);
1861 goto bad2;
1862 }
1863 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1864 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1865 if (error) {
1866 printf("%s: unable to create tbl DMA map, error = %d\n",
1867 sc->sc_c.sc_dev.dv_xname, error);
1868 goto bad1;
1869 }
1870 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1871 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1872 if (error) {
1873 printf("%s: unable to load tbl DMA map, error = %d\n",
1874 sc->sc_c.sc_dev.dv_xname, error);
1875 goto bad0;
1876 }
1877 #ifdef DEBUG
1878 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1879 sc->sc_c.sc_dev.dv_xname,
1880 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1881 #endif
1882 for (i = 0; i < ESIOP_NTPB; i++) {
1883 newtbls[i].tblblk = newtblblk;
1884 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1885 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1886 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1887 newtbls[i].tbl_offset;
1888 for (j = 0; j < ESIOP_NTAG; j++)
1889 newtbls[i].tbl[j] = j;
1890 s = splbio();
1891 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1892 splx(s);
1893 }
1894 s = splbio();
1895 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1896 splx(s);
1897 return;
1898 bad0:
1899 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1900 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1901 bad1:
1902 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1903 bad2:
1904 free(newtbls, M_DEVBUF);
1905 bad3:
1906 free(newtblblk, M_DEVBUF);
1907 return;
1908 }
1909
1910 void
1911 esiop_update_scntl3(sc, _siop_target)
1912 struct esiop_softc *sc;
1913 struct siop_common_target *_siop_target;
1914 {
1915 int slot;
1916 u_int32_t slotid, id;
1917
1918 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1919 esiop_script_write(sc, esiop_target->lun_table_offset,
1920 esiop_target->target_c.id);
1921 id = esiop_target->target_c.id & 0x00ff0000;
1922 /* There may be other commands waiting in the scheduler. handle them */
1923 for (slot = 0; slot < A_ncmd_slots; slot++) {
1924 slotid =
1925 esiop_script_read(sc, sc->sc_shedoffset + slot * 2 + 1);
1926 if ((slotid & 0x00ff0000) == id)
1927 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1928 esiop_target->target_c.id);
1929 }
1930 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1931 }
1932
1933 void
1934 esiop_add_dev(sc, target, lun)
1935 struct esiop_softc *sc;
1936 int target;
1937 int lun;
1938 {
1939 struct esiop_target *esiop_target =
1940 (struct esiop_target *)sc->sc_c.targets[target];
1941 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1942
1943 if (esiop_target->target_c.flags & TARF_TAG) {
1944 /* we need a tag DSA table */
1945 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1946 if (esiop_lun->lun_tagtbl == NULL) {
1947 esiop_moretagtbl(sc);
1948 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1949 if (esiop_lun->lun_tagtbl == NULL) {
1950 /* no resources, run untagged */
1951 esiop_target->target_c.flags &= ~TARF_TAG;
1952 return;
1953 }
1954 }
1955 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1956
1957 }
1958 }
1959
1960 void
1961 esiop_del_dev(sc, target, lun)
1962 struct esiop_softc *sc;
1963 int target;
1964 int lun;
1965 {
1966 struct esiop_target *esiop_target;
1967 #ifdef SIOP_DEBUG
1968 printf("%s:%d:%d: free lun sw entry\n",
1969 sc->sc_c.sc_dev.dv_xname, target, lun);
1970 #endif
1971 if (sc->sc_c.targets[target] == NULL)
1972 return;
1973 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1974 free(esiop_target->esiop_lun[lun], M_DEVBUF);
1975 esiop_target->esiop_lun[lun] = NULL;
1976 }
1977
1978 struct esiop_cmd *
1979 esiop_cmd_find(sc, target, dsa)
1980 struct esiop_softc *sc;
1981 int target;
1982 u_int32_t dsa;
1983 {
1984 int lun, tag;
1985 struct esiop_cmd *cmd;
1986 struct esiop_lun *esiop_lun;
1987 struct esiop_target *esiop_target =
1988 (struct esiop_target *)sc->sc_c.targets[target];
1989
1990 if (esiop_target == NULL)
1991 return NULL;
1992
1993 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1994 esiop_lun = esiop_target->esiop_lun[lun];
1995 if (esiop_lun == NULL)
1996 continue;
1997 cmd = esiop_lun->active;
1998 if (cmd && cmd->cmd_c.dsa == dsa)
1999 return cmd;
2000 if (esiop_target->target_c.flags & TARF_TAG) {
2001 for (tag = 0; tag < ESIOP_NTAG; tag++) {
2002 cmd = esiop_lun->tactive[tag];
2003 if (cmd && cmd->cmd_c.dsa == dsa)
2004 return cmd;
2005 }
2006 }
2007 }
2008 return NULL;
2009 }
2010
2011 void
2012 esiop_target_register(sc, target)
2013 struct esiop_softc *sc;
2014 u_int32_t target;
2015 {
2016 struct esiop_target *esiop_target =
2017 (struct esiop_target *)sc->sc_c.targets[target];
2018
2019 /* get a DSA table for this target */
2020 esiop_target->lun_table_offset = sc->sc_free_offset;
2021 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
2022 #ifdef SIOP_DEBUG
2023 printf("%s: lun table for target %d offset %d free offset %d\n",
2024 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2025 sc->sc_free_offset);
2026 #endif
2027 /* first 32 bytes are ID (for select) */
2028 esiop_script_write(sc, esiop_target->lun_table_offset,
2029 esiop_target->target_c.id);
2030 /* Record this table in the target DSA table */
2031 esiop_script_write(sc,
2032 sc->sc_target_table_offset + target,
2033 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2034 sc->sc_c.sc_scriptaddr);
2035 esiop_script_sync(sc,
2036 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2037 }
2038
#ifdef SIOP_STATS
/*
 * Dump the interrupt statistics counters (incremented through the
 * INCSTAT() calls scattered across the driver).  Debug-only.
 */
void
esiop_printstats()
{
	printf("esiop_stat_intr %d\n", esiop_stat_intr);
	printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
	printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
	printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
	printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
	printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
	printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
}
#endif
2052