1 /*	$NetBSD: esiop.c,v 1.1 2002/04/21 22:52:05 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.1 2002/04/21 22:52:05 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
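/*
 * Each esiop_morecbd() call allocates one PAGE_SIZE chunk of DMA-safe
 * memory and carves it into SIOP_NCMDPB struct esiop_xfer descriptors,
 * so adapt_openings grows by SIOP_NCMDPB per allocation.
 */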
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
88 void esiop_timeout __P((void *));
89 int esiop_scsicmd __P((struct scsipi_xfer *));
90 void esiop_scsipi_request __P((struct scsipi_channel *,
91 scsipi_adapter_req_t, void *));
92 void esiop_dump_script __P((struct esiop_softc *));
93 void esiop_morecbd __P((struct esiop_softc *));
94 void siop_add_reselsw __P((struct esiop_softc *, int));
95 void esiop_update_scntl3 __P((struct esiop_softc *,
96 struct siop_common_target *));
97 struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
98 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
99
100 static int nintr = 0;
101
102 #ifdef SIOP_STATS
103 static int esiop_stat_intr = 0;
104 static int esiop_stat_intr_shortxfer = 0;
105 static int esiop_stat_intr_sdp = 0;
106 static int esiop_stat_intr_done = 0;
107 static int esiop_stat_intr_xferdisc = 0;
108 static int esiop_stat_intr_lunresel = 0;
109 static int esiop_stat_intr_qfull = 0;
110 void esiop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
116 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
117 static __inline__ void
118 esiop_script_sync(sc, ops)
119 struct esiop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
128 static __inline__ u_int32_t
129 esiop_script_read(sc, offset)
130 struct esiop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 esiop_script_write(sc, offset, val)
145 struct esiop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
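/*
 * Note that esiop_script_read()/esiop_script_write() take offsets in
 * 32-bit words.  With on-chip RAM (SF_CHIP_RAM) the script is accessed
 * through bus_space and needs no DMA sync; otherwise it lives in the
 * host page mapped at sc_script and callers must pair accesses with
 * esiop_script_sync().
 */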
156
157 void
158 esiop_attach(sc)
159 struct esiop_softc *sc;
160 {
161 int error, i;
162 bus_dma_segment_t seg;
163 int rseg;
164
165 /*
166 * Allocate DMA-safe memory for the script and map it.
167 */
168 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
169 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
170 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
171 if (error) {
172 printf("%s: unable to allocate script DMA memory, "
173 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
174 return;
175 }
176 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
177 (caddr_t *)&sc->sc_c.sc_script,
178 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
179 if (error) {
180 printf("%s: unable to map script DMA memory, "
181 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
182 return;
183 }
184 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
185 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
186 if (error) {
187 printf("%s: unable to create script DMA map, "
188 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
189 return;
190 }
191 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
192 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
193 if (error) {
194 printf("%s: unable to load script DMA map, "
195 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
196 return;
197 }
198 sc->sc_c.sc_scriptaddr =
199 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
200 sc->sc_c.ram_size = PAGE_SIZE;
201 }
202 TAILQ_INIT(&sc->free_list);
203 TAILQ_INIT(&sc->cmds);
204 sc->sc_currschedslot = 0;
205 #ifdef SIOP_DEBUG
206 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
207 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
208 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
209 #endif
210
211 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
212 sc->sc_c.sc_adapt.adapt_nchannels = 1;
213 sc->sc_c.sc_adapt.adapt_openings = 0;
214 sc->sc_c.sc_adapt.adapt_max_periph = 1 /* XXX ESIOP_NTAG - 1 */ ;
215 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
216 sc->sc_c.sc_adapt.adapt_minphys = minphys;
217 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
218
219 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
220 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
221 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
222 sc->sc_c.sc_chan.chan_channel = 0;
223 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
224 sc->sc_c.sc_chan.chan_ntargets =
225 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
226 sc->sc_c.sc_chan.chan_nluns = 8;
227 sc->sc_c.sc_chan.chan_id =
228 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
229 if (sc->sc_c.sc_chan.chan_id == 0 ||
230 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
231 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
232
233 for (i = 0; i < 16; i++)
234 sc->sc_c.targets[i] = NULL;
235
236 /* find min/max sync period for this chip */
237 sc->sc_c.maxsync = 0;
238 sc->sc_c.minsync = 255;
239 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
240 if (sc->sc_c.clock_period != scf_period[i].clock)
241 continue;
242 if (sc->sc_c.maxsync < scf_period[i].period)
243 sc->sc_c.maxsync = scf_period[i].period;
244 if (sc->sc_c.minsync > scf_period[i].period)
245 sc->sc_c.minsync = scf_period[i].period;
246 }
247 	if (sc->sc_c.maxsync == 0 || sc->sc_c.minsync == 255)
248 	panic("siop: can't find my sync parameters\n");
249 /* Do a bus reset, so that devices fall back to narrow/async */
250 siop_resetbus(&sc->sc_c);
251 /*
252 	 * esiop_reset() will reset the chip, thus clearing pending interrupts
253 */
254 esiop_reset(sc);
255 #ifdef DUMP_SCRIPT
256 esiop_dump_script(sc);
257 #endif
258
259 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
260 }
261
262 void
263 esiop_reset(sc)
264 struct esiop_softc *sc;
265 {
266 int i, j;
267 u_int32_t addr;
268 u_int32_t msgin_addr;
269
270 siop_common_reset(&sc->sc_c);
271
272 /*
273 	 * we copy the script at the beginning of RAM. Then there are 8 bytes
274 	 * for messages in.
275 */
276 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
277 msgin_addr =
278 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
279 sc->sc_free_offset += 2;
280 /* then we have the scheduler ring */
281 sc->sc_shedoffset = sc->sc_free_offset;
282 sc->sc_free_offset += A_ncmd_slots * 2;
283 /* then the targets DSA table */
284 sc->sc_target_table_offset = sc->sc_free_offset;
285 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
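	/*
	 * Resulting layout (offsets in 32-bit words from sc_scriptaddr):
	 * the script itself, then 2 words of msg-in buffer (msgin_addr),
	 * then the scheduler ring (A_ncmd_slots slots of 2 words each),
	 * then the per-target DSA table (chan_ntargets words).
	 * Per-target lun tables are allocated later from sc_free_offset
	 * by esiop_target_register().
	 */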
286 /* copy and patch the script */
287 if (sc->sc_c.features & SF_CHIP_RAM) {
288 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
289 esiop_script,
290 sizeof(esiop_script) / sizeof(esiop_script[0]));
291 for (j = 0; j <
292 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
293 j++) {
294 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
295 E_tlq_offset_Used[j] * 4,
296 sizeof(struct siop_common_xfer));
297 }
298 for (j = 0; j <
299 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
300 j++) {
301 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
302 E_abs_msgin2_Used[j] * 4, msgin_addr);
303 }
304
305 #ifdef SIOP_SYMLED
306 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
307 Ent_led_on1, siop_led_on,
308 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
309 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
310 Ent_led_on2, siop_led_on,
311 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
312 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
313 Ent_led_off, siop_led_off,
314 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
315 #endif
316 } else {
317 for (j = 0;
318 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
319 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
320 }
321 for (j = 0; j <
322 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
323 j++) {
324 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
325 htole32(sizeof(struct siop_common_xfer));
326 }
327 for (j = 0; j <
328 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
329 j++) {
330 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
331 htole32(msgin_addr);
332 }
333
334 #ifdef SIOP_SYMLED
335 for (j = 0;
336 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
337 sc->sc_c.sc_script[
338 Ent_led_on1 / sizeof(siop_led_on[0]) + j
339 ] = htole32(siop_led_on[j]);
340 for (j = 0;
341 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
342 sc->sc_c.sc_script[
343 Ent_led_on2 / sizeof(siop_led_on[0]) + j
344 ] = htole32(siop_led_on[j]);
345 for (j = 0;
346 j < (sizeof(siop_led_off) / sizeof(siop_led_off[0])); j++)
347 sc->sc_c.sc_script[
348 Ent_led_off / sizeof(siop_led_off[0]) + j
349 ] = htole32(siop_led_off[j]);
350 #endif
351 }
352 /* get base of scheduler ring */
353 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
354 /* init scheduler */
355 for (i = 0; i < A_ncmd_slots; i++) {
356 esiop_script_write(sc, sc->sc_shedoffset + i * 2, A_f_cmd_free);
357 esiop_script_write(sc, sc->sc_shedoffset + i * 2 + 1, 0);
358 }
359 sc->sc_currschedslot = 0;
360 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
361 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
362 /*
363 * 0x78000000 is a 'move data8 to reg'. data8 is the second
364 * octet, reg offset is the third.
365 */
366 esiop_script_write(sc, Ent_cmdr0 / 4,
367 0x78640000 | ((addr & 0x000000ff) << 8));
368 esiop_script_write(sc, Ent_cmdr1 / 4,
369 0x78650000 | ((addr & 0x0000ff00) ));
370 esiop_script_write(sc, Ent_cmdr2 / 4,
371 0x78660000 | ((addr & 0x00ff0000) >> 8));
372 esiop_script_write(sc, Ent_cmdr3 / 4,
373 0x78670000 | ((addr & 0xff000000) >> 16));
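	/*
	 * The four patched instructions above load the scheduler ring base
	 * address one byte at a time into chip registers 0x64..0x67
	 * (scratch registers which, presumably, the script uses as its ring
	 * pointer); the same byte-at-a-time patching is done below for the
	 * target DSA table base.
	 */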
374 /* set flags */
375 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
376 /* write pointer of base of target DSA table */
377 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
378 sc->sc_c.sc_scriptaddr;
379 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
380 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
381 ((addr & 0x000000ff) << 8));
382 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
383 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
384 ((addr & 0x0000ff00) ));
385 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
386 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
387 ((addr & 0x00ff0000) >> 8));
388 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
389 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
390 ((addr & 0xff000000) >> 16));
391 #ifdef SIOP_DEBUG
392 printf("%s: target table offset %d free offset %d\n",
393 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
394 sc->sc_free_offset);
395 #endif
396
397 /* register existing targets */
398 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
399 if (sc->sc_c.targets[i])
400 esiop_target_register(sc, i);
401 }
402 /* start script */
403 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
404 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
405 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
406 }
407 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
408 sc->sc_c.sc_scriptaddr + Ent_reselect);
409 }
410
411 #if 0
412 #define CALL_SCRIPT(ent) do {\
413 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
414 esiop_cmd->cmd_c.dsa, \
415 sc->sc_c.sc_scriptaddr + ent); \
416 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
417 } while (0)
418 #else
419 #define CALL_SCRIPT(ent) do {\
420 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
421 } while (0)
422 #endif
423
424 int
425 esiop_intr(v)
426 void *v;
427 {
428 struct esiop_softc *sc = v;
429 struct esiop_target *esiop_target;
430 struct esiop_cmd *esiop_cmd;
431 struct esiop_lun *esiop_lun;
432 struct scsipi_xfer *xs;
433 int istat, sist, sstat1, dstat;
434 u_int32_t irqcode;
435 int need_reset = 0;
436 int offset, target, lun, tag;
437 u_int32_t tflags;
438 int freetarget = 0;
439 int restart = 0;
440 int slot;
441 int retval = 0;
442
443 again:
444 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
445 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
446 if (istat & ISTAT_SEM) {
447 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
448 SIOP_ISTAT, (istat & ~ISTAT_SEM));
449 esiop_checkdone(sc);
450 }
451 return retval;
452 }
453 retval = 1;
454 nintr++;
455 if (nintr > 100) {
456 panic("esiop: intr loop");
457 }
458 INCSTAT(esiop_stat_intr);
459 if (istat & ISTAT_INTF) {
460 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
461 SIOP_ISTAT, ISTAT_INTF);
462 esiop_checkdone(sc);
463 goto again;
464 }
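	/*
	 * Both the ISTAT_SEM and ISTAT_INTF paths above are lightweight
	 * "commands have completed" notifications (presumably raised by the
	 * script): we just clear the bit and let esiop_checkdone() scan the
	 * active commands for a SCSI_OK status.
	 */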
465 /* get CMD from T/L/Q */
466 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
467 SIOP_SCRATCHC);
468 #ifdef SIOP_DEBUG_INTR
469 printf("interrupt, istat=0x%x tflags=0x%x "
470 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
471 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
472 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
473 SIOP_DSP) -
474 sc->sc_c.sc_scriptaddr));
475 #endif
476 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
477 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
478 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
479 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
480 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
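	/*
	 * SCRATCHC/tflags layout as used here: the low byte holds the
	 * A_f_c_* flags, bits 8-15 the target, bits 16-23 the lun and bits
	 * 24-31 the tag; a field is only meaningful when the corresponding
	 * A_f_c_target/A_f_c_lun/A_f_c_tag flag is set.
	 */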
481
482 if (target >= 0 && lun >= 0) {
483 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
484 if (esiop_target == NULL) {
485 printf("esiop_target (target %d) not valid\n", target);
486 goto none;
487 }
488 esiop_lun = esiop_target->esiop_lun[lun];
489 if (esiop_lun == NULL) {
490 printf("esiop_lun (target %d lun %d) not valid\n",
491 target, lun);
492 goto none;
493 }
494 esiop_cmd = esiop_lun->active;
495 if (esiop_cmd == NULL) {
496 printf("esiop_cmd (target %d lun %d) not valid\n",
497 target, lun);
498 goto none;
499 }
500 xs = esiop_cmd->cmd_c.xs;
501 #ifdef DIAGNOSTIC
502 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
503 printf("esiop_cmd (target %d lun %d) "
504 "not active (%d)\n", target, lun,
505 esiop_cmd->cmd_c.status);
506 goto none;
507 }
508 #endif
509 } else {
510 none:
511 xs = NULL;
512 esiop_target = NULL;
513 esiop_lun = NULL;
514 esiop_cmd = NULL;
515 }
516 if (istat & ISTAT_DIP) {
517 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
518 SIOP_DSTAT);
519 if (dstat & DSTAT_SSI) {
520 printf("single step dsp 0x%08x dsa 0x08%x\n",
521 (int)(bus_space_read_4(sc->sc_c.sc_rt,
522 sc->sc_c.sc_rh, SIOP_DSP) -
523 sc->sc_c.sc_scriptaddr),
524 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
525 SIOP_DSA));
526 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
527 (istat & ISTAT_SIP) == 0) {
528 bus_space_write_1(sc->sc_c.sc_rt,
529 sc->sc_c.sc_rh, SIOP_DCNTL,
530 bus_space_read_1(sc->sc_c.sc_rt,
531 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
532 }
533 return 1;
534 }
535 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
536 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
537 if (dstat & DSTAT_IID)
538 printf(" Illegal instruction");
539 if (dstat & DSTAT_ABRT)
540 printf(" abort");
541 if (dstat & DSTAT_BF)
542 printf(" bus fault");
543 if (dstat & DSTAT_MDPE)
544 printf(" parity");
545 if (dstat & DSTAT_DFE)
546 printf(" dma fifo empty");
547 printf(", DSP=0x%x DSA=0x%x: ",
548 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
549 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
550 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
551 if (esiop_cmd)
552 printf("last msg_in=0x%x status=0x%x\n",
553 esiop_cmd->cmd_tables->msg_in[0],
554 le32toh(esiop_cmd->cmd_tables->status));
555 else
556 printf(" current T/L/Q invalid\n");
557 need_reset = 1;
558 }
559 }
560 if (istat & ISTAT_SIP) {
561 if (istat & ISTAT_DIP)
562 delay(10);
563 /*
564 	 * Can't read sist0 & sist1 independently, or we have to
565 	 * insert a delay between the reads
566 */
567 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
568 SIOP_SIST0);
569 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
570 SIOP_SSTAT1);
571 #ifdef SIOP_DEBUG_INTR
572 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
573 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
574 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
575 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
576 SIOP_DSP) -
577 sc->sc_c.sc_scriptaddr));
578 #endif
579 if (sist & SIST0_RST) {
580 esiop_handle_reset(sc);
581 /* no table to flush here */
582 return 1;
583 }
584 if (sist & SIST0_SGE) {
585 if (esiop_cmd)
586 scsipi_printaddr(xs->xs_periph);
587 else
588 printf("%s:", sc->sc_c.sc_dev.dv_xname);
589 printf("scsi gross error\n");
590 goto reset;
591 }
592 if ((sist & SIST0_MA) && need_reset == 0) {
593 if (esiop_cmd) {
594 int scratchc0;
595 dstat = bus_space_read_1(sc->sc_c.sc_rt,
596 sc->sc_c.sc_rh, SIOP_DSTAT);
597 /*
598 * first restore DSA, in case we were in a S/G
599 * operation.
600 */
601 bus_space_write_4(sc->sc_c.sc_rt,
602 sc->sc_c.sc_rh,
603 SIOP_DSA, esiop_cmd->cmd_c.dsa);
604 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
605 sc->sc_c.sc_rh, SIOP_SCRATCHC);
606 switch (sstat1 & SSTAT1_PHASE_MASK) {
607 case SSTAT1_PHASE_STATUS:
608 /*
609 * previous phase may be aborted for any reason
610 * ( for example, the target has less data to
611 * transfer than requested). Just go to status
612 * and the command should terminate.
613 */
614 INCSTAT(esiop_stat_intr_shortxfer);
615 if ((dstat & DSTAT_DFE) == 0)
616 siop_clearfifo(&sc->sc_c);
617 /* no table to flush here */
618 CALL_SCRIPT(Ent_status);
619 return 1;
620 case SSTAT1_PHASE_MSGIN:
621 /*
622 	 * target may be ready to disconnect.
623 * Save data pointers just in case.
624 */
625 INCSTAT(esiop_stat_intr_xferdisc);
626 if (scratchc0 & A_f_c_data)
627 siop_sdp(&esiop_cmd->cmd_c);
628 else if ((dstat & DSTAT_DFE) == 0)
629 siop_clearfifo(&sc->sc_c);
630 bus_space_write_1(sc->sc_c.sc_rt,
631 sc->sc_c.sc_rh, SIOP_SCRATCHC,
632 scratchc0 & ~A_f_c_data);
633 esiop_table_sync(esiop_cmd,
634 BUS_DMASYNC_PREREAD |
635 BUS_DMASYNC_PREWRITE);
636 CALL_SCRIPT(Ent_msgin);
637 return 1;
638 }
639 printf("%s: unexpected phase mismatch %d\n",
640 sc->sc_c.sc_dev.dv_xname,
641 sstat1 & SSTAT1_PHASE_MASK);
642 } else {
643 printf("%s: phase mismatch without command\n",
644 sc->sc_c.sc_dev.dv_xname);
645 }
646 need_reset = 1;
647 }
648 if (sist & SIST0_PAR) {
649 /* parity error, reset */
650 if (esiop_cmd)
651 scsipi_printaddr(xs->xs_periph);
652 else
653 printf("%s:", sc->sc_c.sc_dev.dv_xname);
654 printf("parity error\n");
655 goto reset;
656 }
657 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
658 /* selection time out, assume there's no device here */
659 /*
660 * SCRATCHC has not been loaded yet, we have to find
661 	 * params by ourselves. scratchE0 should point to
662 * the next slot.
663 */
664 slot = bus_space_read_1(sc->sc_c.sc_rt,
665 sc->sc_c.sc_rh, SIOP_SCRATCHE);
666 slot = (slot == 0) ? A_ncmd_slots : slot - 1;
667 esiop_script_sync(sc,
668 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
669 target = esiop_script_read(sc,
670 sc->sc_shedoffset + slot * 2 + 1) & 0x00ff0000;
671 target = (target >> 16) & 0xff;
672 esiop_cmd = esiop_cmd_find(sc, target,
673 esiop_script_read(sc,
674 sc->sc_shedoffset + slot * 2) & ~0x3);
675 if (esiop_cmd) {
676 xs = esiop_cmd->cmd_c.xs;
677 esiop_target = (struct esiop_target *)
678 esiop_cmd->cmd_c.siop_target;
679 lun = xs->xs_periph->periph_lun;
680 #if 0 /* XXX TAG */
681 tag = esiop_cmd->cmd_c.tag;
682 #endif
683 tag = -1;
684 esiop_lun = esiop_target->esiop_lun[lun];
685 esiop_cmd->cmd_c.status = CMDST_DONE;
686 xs->error = XS_SELTIMEOUT;
687 freetarget = 1;
688 goto end;
689 } else {
690 printf("%s: selection timeout without "
691 "command\n", sc->sc_c.sc_dev.dv_xname);
692 need_reset = 1;
693 }
694 }
695 if (sist & SIST0_UDC) {
696 /*
697 * unexpected disconnect. Usually the target signals
698 * a fatal condition this way. Attempt to get sense.
699 */
700 if (esiop_cmd) {
701 esiop_cmd->cmd_tables->status =
702 htole32(SCSI_CHECK);
703 goto end;
704 }
705 printf("%s: unexpected disconnect without "
706 "command\n", sc->sc_c.sc_dev.dv_xname);
707 goto reset;
708 }
709 if (sist & (SIST1_SBMC << 8)) {
710 /* SCSI bus mode change */
711 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
712 goto reset;
713 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
714 /*
715 * we have a script interrupt, it will
716 * restart the script.
717 */
718 goto scintr;
719 }
720 /*
721 	 * else we have to restart it ourselves, at the
722 * interrupted instruction.
723 */
724 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
725 SIOP_DSP,
726 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
727 SIOP_DSP) - 8);
728 return 1;
729 }
730 	/* Else it's an unhandled exception (for now). */
731 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
732 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
733 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
734 SIOP_SSTAT1),
735 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
736 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
737 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
738 if (esiop_cmd) {
739 esiop_cmd->cmd_c.status = CMDST_DONE;
740 xs->error = XS_SELTIMEOUT;
741 goto end;
742 }
743 need_reset = 1;
744 }
745 if (need_reset) {
746 reset:
747 /* fatal error, reset the bus */
748 siop_resetbus(&sc->sc_c);
749 /* no table to flush here */
750 return 1;
751 }
752
753 scintr:
754 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
755 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
756 SIOP_DSPS);
757 #ifdef SIOP_DEBUG_INTR
758 printf("script interrupt 0x%x\n", irqcode);
759 #endif
760 /*
761 	 * a script interrupt with no command, or with an inactive command,
762 	 * is only valid for a reselect interrupt
763 */
764 if ((irqcode & 0x80) == 0) {
765 if (esiop_cmd == NULL) {
766 printf(
767 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
768 sc->sc_c.sc_dev.dv_xname, irqcode);
769 goto reset;
770 }
771 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
772 printf("%s: command with invalid status "
773 "(IRQ code 0x%x current status %d) !\n",
774 sc->sc_c.sc_dev.dv_xname,
775 irqcode, esiop_cmd->cmd_c.status);
776 xs = NULL;
777 }
778 }
779 switch(irqcode) {
780 case A_int_err:
781 printf("error, DSP=0x%x\n",
782 (int)(bus_space_read_4(sc->sc_c.sc_rt,
783 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
784 if (xs) {
785 xs->error = XS_SELTIMEOUT;
786 goto end;
787 } else {
788 goto reset;
789 }
790 case A_int_msgin:
791 {
792 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
793 sc->sc_c.sc_rh, SIOP_SFBR);
794 if (msgin == MSG_MESSAGE_REJECT) {
795 int msg, extmsg;
796 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
797 /*
798 	 * message was part of an identify +
799 	 * something else. Identify shouldn't
800 	 * have been rejected.
801 */
802 msg =
803 esiop_cmd->cmd_tables->msg_out[1];
804 extmsg =
805 esiop_cmd->cmd_tables->msg_out[3];
806 } else {
807 msg =
808 esiop_cmd->cmd_tables->msg_out[0];
809 extmsg =
810 esiop_cmd->cmd_tables->msg_out[2];
811 }
812 if (msg == MSG_MESSAGE_REJECT) {
813 /* MSG_REJECT for a MSG_REJECT !*/
814 if (xs)
815 scsipi_printaddr(xs->xs_periph);
816 else
817 printf("%s: ",
818 sc->sc_c.sc_dev.dv_xname);
819 printf("our reject message was "
820 "rejected\n");
821 goto reset;
822 }
823 if (msg == MSG_EXTENDED &&
824 extmsg == MSG_EXT_WDTR) {
825 /* WDTR rejected, initiate sync */
826 if ((esiop_target->target_c.flags &
827 TARF_SYNC) == 0) {
828 esiop_target->target_c.status =
829 TARST_OK;
830 siop_update_xfer_mode(&sc->sc_c,
831 target);
832 /* no table to flush here */
833 CALL_SCRIPT(Ent_msgin_ack);
834 return 1;
835 }
836 esiop_target->target_c.status =
837 TARST_SYNC_NEG;
838 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
839 sc->sc_c.minsync, sc->sc_c.maxoff);
840 esiop_table_sync(esiop_cmd,
841 BUS_DMASYNC_PREREAD |
842 BUS_DMASYNC_PREWRITE);
843 CALL_SCRIPT(Ent_send_msgout);
844 return 1;
845 } else if (msg == MSG_EXTENDED &&
846 extmsg == MSG_EXT_SDTR) {
847 /* sync rejected */
848 esiop_target->target_c.offset = 0;
849 esiop_target->target_c.period = 0;
850 esiop_target->target_c.status =
851 TARST_OK;
852 siop_update_xfer_mode(&sc->sc_c,
853 target);
854 /* no table to flush here */
855 CALL_SCRIPT(Ent_msgin_ack);
856 return 1;
857 }
858 #if 0 /* XXX TAG */
859 else if (msg == MSG_SIMPLE_Q_TAG ||
860 msg == MSG_HEAD_OF_Q_TAG ||
861 msg == MSG_ORDERED_Q_TAG) {
862 if (siop_handle_qtag_reject(
863 esiop_cmd) == -1)
864 goto reset;
865 CALL_SCRIPT(Ent_msgin_ack);
866 return 1;
867 }
868 #endif /* XXX TAG */
869 if (xs)
870 scsipi_printaddr(xs->xs_periph);
871 else
872 printf("%s: ",
873 sc->sc_c.sc_dev.dv_xname);
874 if (msg == MSG_EXTENDED) {
875 printf("scsi message reject, extended "
876 "message sent was 0x%x\n", extmsg);
877 } else {
878 printf("scsi message reject, message "
879 "sent was 0x%x\n", msg);
880 }
881 /* no table to flush here */
882 CALL_SCRIPT(Ent_msgin_ack);
883 return 1;
884 }
885 if (xs)
886 scsipi_printaddr(xs->xs_periph);
887 else
888 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
889 printf("unhandled message 0x%x\n",
890 esiop_cmd->cmd_tables->msg_in[0]);
891 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
892 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
893 esiop_table_sync(esiop_cmd,
894 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
895 CALL_SCRIPT(Ent_send_msgout);
896 return 1;
897 }
898 case A_int_extmsgin:
899 #ifdef SIOP_DEBUG_INTR
900 printf("extended message: msg 0x%x len %d\n",
901 esiop_cmd->cmd_tables->msg_in[2],
902 esiop_cmd->cmd_tables->msg_in[1]);
903 #endif
904 if (esiop_cmd->cmd_tables->msg_in[1] > 6)
905 printf("%s: extended message too big (%d)\n",
906 sc->sc_c.sc_dev.dv_xname,
907 esiop_cmd->cmd_tables->msg_in[1]);
908 esiop_cmd->cmd_tables->t_extmsgdata.count =
909 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
910 esiop_table_sync(esiop_cmd,
911 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
912 CALL_SCRIPT(Ent_get_extmsgdata);
913 return 1;
914 case A_int_extmsgdata:
915 #ifdef SIOP_DEBUG_INTR
916 {
917 int i;
918 printf("extended message: 0x%x, data:",
919 esiop_cmd->cmd_tables->msg_in[2]);
920 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
921 i++)
922 printf(" 0x%x",
923 esiop_cmd->cmd_tables->msg_in[i]);
924 printf("\n");
925 }
926 #endif
927 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
928 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
929 case SIOP_NEG_MSGOUT:
930 esiop_update_scntl3(sc,
931 esiop_cmd->cmd_c.siop_target);
932 esiop_table_sync(esiop_cmd,
933 BUS_DMASYNC_PREREAD |
934 BUS_DMASYNC_PREWRITE);
935 CALL_SCRIPT(Ent_send_msgout);
936 return(1);
937 case SIOP_NEG_ACK:
938 esiop_update_scntl3(sc,
939 esiop_cmd->cmd_c.siop_target);
940 CALL_SCRIPT(Ent_msgin_ack);
941 return(1);
942 default:
943 panic("invalid retval from "
944 "siop_wdtr_neg()");
945 }
946 return(1);
947 }
948 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
949 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
950 case SIOP_NEG_MSGOUT:
951 esiop_update_scntl3(sc,
952 esiop_cmd->cmd_c.siop_target);
953 esiop_table_sync(esiop_cmd,
954 BUS_DMASYNC_PREREAD |
955 BUS_DMASYNC_PREWRITE);
956 CALL_SCRIPT(Ent_send_msgout);
957 return(1);
958 case SIOP_NEG_ACK:
959 esiop_update_scntl3(sc,
960 esiop_cmd->cmd_c.siop_target);
961 CALL_SCRIPT(Ent_msgin_ack);
962 return(1);
963 default:
964 panic("invalid retval from "
965 "siop_wdtr_neg()");
966 }
967 return(1);
968 }
969 /* send a message reject */
970 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
971 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
972 esiop_table_sync(esiop_cmd,
973 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
974 CALL_SCRIPT(Ent_send_msgout);
975 return 1;
976 case A_int_disc:
977 INCSTAT(esiop_stat_intr_sdp);
978 offset = bus_space_read_1(sc->sc_c.sc_rt,
979 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
980 #ifdef SIOP_DEBUG_DR
981 printf("disconnect offset %d\n", offset);
982 #endif
983 if (offset > SIOP_NSG) {
984 printf("%s: bad offset for disconnect (%d)\n",
985 sc->sc_c.sc_dev.dv_xname, offset);
986 goto reset;
987 }
988 /*
989 * offset == SIOP_NSG may be a valid condition if
990 	 * we get an sdp when the xfer is done.
991 * Don't call memmove in this case.
992 */
993 if (offset < SIOP_NSG) {
994 memmove(&esiop_cmd->cmd_tables->data[0],
995 &esiop_cmd->cmd_tables->data[offset],
996 (SIOP_NSG - offset) * sizeof(scr_table_t));
997 esiop_table_sync(esiop_cmd,
998 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
999 }
1000 CALL_SCRIPT(Ent_script_sched);
1001 return 1;
1002 case A_int_resfail:
1003 printf("reselect failed\n");
1004 CALL_SCRIPT(Ent_script_sched);
1005 return 1;
1006 case A_int_done:
1007 if (xs == NULL) {
1008 printf("%s: done without command\n",
1009 sc->sc_c.sc_dev.dv_xname);
1010 CALL_SCRIPT(Ent_script_sched);
1011 return 1;
1012 }
1013 #ifdef SIOP_DEBUG_INTR
1014 printf("done, DSA=0x%lx target id 0x%x last msg "
1015 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1016 le32toh(esiop_cmd->cmd_tables->id),
1017 esiop_cmd->cmd_tables->msg_in[0],
1018 le32toh(esiop_cmd->cmd_tables->status));
1019 #endif
1020 INCSTAT(esiop_stat_intr_done);
1021 esiop_cmd->cmd_c.status = CMDST_DONE;
1022 goto end;
1023 default:
1024 printf("unknown irqcode %x\n", irqcode);
1025 if (xs) {
1026 xs->error = XS_SELTIMEOUT;
1027 goto end;
1028 }
1029 goto reset;
1030 }
1031 return 1;
1032 }
1033 	/* We just shouldn't get here */
1034 	panic("esiop_intr: I shouldn't be here!");
1035
1036 end:
1037 /*
1038 	 * restart the script now if the command completed properly.
1039 	 * Otherwise wait for esiop_scsicmd_end(), as we may need to clean up
1040 	 * the queue
1041 */
1042 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1043 #ifdef SIOP_DEBUG_INTR
1044 printf("esiop_intr end: status %d\n", xs->status);
1045 #endif
1046 if (xs->status == SCSI_OK)
1047 CALL_SCRIPT(Ent_script_sched);
1048 else
1049 restart = 1;
1050 #if 0 /* XXX TAG */
1051 esiop_lun->esiop_tag[tag].active = NULL;
1052 #endif
1053 esiop_lun->active = NULL;
1054 esiop_scsicmd_end(esiop_cmd);
1055 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1056 esiop_del_dev(sc, target, lun);
1057 if (restart)
1058 CALL_SCRIPT(Ent_script_sched);
1059 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1060 /* a command terminated, so we have free slots now */
1061 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1062 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1063 }
1064
1065 goto again;
1066 }
1067
1068 void
1069 esiop_scsicmd_end(esiop_cmd)
1070 struct esiop_cmd *esiop_cmd;
1071 {
1072 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1073 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1074
1075 switch(xs->status) {
1076 case SCSI_OK:
1077 xs->error = XS_NOERROR;
1078 break;
1079 case SCSI_BUSY:
1080 xs->error = XS_BUSY;
1081 break;
1082 case SCSI_CHECK:
1083 xs->error = XS_BUSY;
1084 /* remove commands in the queue and scheduler */
1085 esiop_unqueue(sc, xs->xs_periph->periph_target,
1086 xs->xs_periph->periph_lun);
1087 break;
1088 case SCSI_QUEUE_FULL:
1089 INCSTAT(esiop_stat_intr_qfull);
1090 #ifdef SIOP_DEBUG
1091 printf("%s:%d:%d: queue full (tag %d)\n",
1092 sc->sc_c.sc_dev.dv_xname,
1093 xs->xs_periph->periph_target,
1094 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1095 #endif
1096 xs->error = XS_BUSY;
1097 break;
1098 case SCSI_SIOP_NOCHECK:
1099 /*
1100 * don't check status, xs->error is already valid
1101 */
1102 break;
1103 case SCSI_SIOP_NOSTATUS:
1104 /*
1105 * the status byte was not updated, cmd was
1106 * aborted
1107 */
1108 xs->error = XS_SELTIMEOUT;
1109 break;
1110 default:
1111 xs->error = XS_DRIVER_STUFFUP;
1112 }
1113 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1114 bus_dmamap_sync(sc->sc_c.sc_dmat,
1115 esiop_cmd->cmd_c.dmamap_data, 0,
1116 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1117 (xs->xs_control & XS_CTL_DATA_IN) ?
1118 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1119 bus_dmamap_unload(sc->sc_c.sc_dmat,
1120 esiop_cmd->cmd_c.dmamap_data);
1121 }
1122 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1123 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1124 esiop_cmd->cmd_c.status = CMDST_FREE;
1125 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1126 xs->resid = 0;
1127 scsipi_done (xs);
1128 }
1129
1130 void
1131 esiop_checkdone(sc)
1132 struct esiop_softc *sc;
1133 {
1134 int target, lun;
1135 struct esiop_target *esiop_target;
1136 struct esiop_lun *esiop_lun;
1137 struct esiop_cmd *esiop_cmd;
1138 int status;
1139
1140 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1141 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1142 if (esiop_target == NULL)
1143 continue;
1144 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1145 esiop_lun = esiop_target->esiop_lun[lun];
1146 if (esiop_lun == NULL)
1147 continue;
1148 esiop_cmd = esiop_lun->active;
1149 if (esiop_cmd == NULL)
1150 continue;
1151 status = le32toh(esiop_cmd->cmd_tables->status);
1152 if (status != SCSI_OK)
1153 continue;
1154 /* Ok, this command has been handled */
1155 esiop_cmd->cmd_c.xs->status = status;
1156 esiop_lun->active = NULL;
1157 esiop_scsicmd_end(esiop_cmd);
1158 }
1159 }
1160 }
1161
1162 void
1163 esiop_unqueue(sc, target, lun)
1164 struct esiop_softc *sc;
1165 int target;
1166 int lun;
1167 {
1168 #if 0 /* XXX TAG */
1169 int slot, tag;
1170 struct esiop_cmd *esiop_cmd;
1171 struct esiop_lun *esiop_lun =
1172 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1173
1174 /* first make sure to read valid data */
1175 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1176
1177 for (tag = 1; tag < ESIOP_NTAG; tag++) {
1178 /* look for commands in the scheduler, not yet started */
1179 if (siop_lun->siop_tag[tag].active == NULL)
1180 continue;
1181 esiop_cmd = siop_lun->siop_tag[tag].active;
1182 for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1183 if (esiop_script_read(sc,
1184 (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1185 esiop_cmd->cmd_c.dsa +
1186 sizeof(struct siop_common_xfer) +
1187 Ent_ldsa_select)
1188 break;
1189 }
1190 if (slot > sc->sc_currschedslot)
1191 continue; /* didn't find it */
1192 if (esiop_script_read(sc,
1193 (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1194 continue; /* already started */
1195 /* clear the slot */
1196 esiop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1197 0x80000000);
1198 /* ask to requeue */
1199 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1200 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1201 siop_lun->siop_tag[tag].active = NULL;
1202 siop_scsicmd_end(esiop_cmd);
1203 }
1204 /* update sc_currschedslot */
1205 sc->sc_currschedslot = 0;
1206 for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1207 if (esiop_script_read(sc,
1208 (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1209 sc->sc_currschedslot = slot;
1210 }
1211 #endif /* XXX TAG */
1212 }
1213
1214 /*
1215 * handle a rejected queue tag message: the command will run untagged,
1216  * so we have to adjust the reselect script.
1217 */
1218
1219 #if 0 /* XXX TAG */
1220 int
1221 esiop_handle_qtag_reject(esiop_cmd)
1222 struct esiop_cmd *esiop_cmd;
1223 {
1224 	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1225 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1226 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1227 int tag = esiop_cmd->cmd_tables->msg_out[2];
1228 struct esiop_lun *esiop_lun =
1229 ((struct esiop_target*)sc->sc_c.targets[target])->esiop_lun[lun];
1230
1231 #ifdef SIOP_DEBUG
1232 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1233 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1234 esiop_cmd->cmd_c.status);
1235 #endif
1236
1237 if (esiop_lun->siop_tag[0].active != NULL) {
1238 printf("%s: untagged command already running for target %d "
1239 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1240 target, lun, esiop_lun->siop_tag[0].active->cmd_c.status);
1241 return -1;
1242 }
1243 /* clear tag slot */
1244 esiop_lun->siop_tag[tag].active = NULL;
1245 /* add command to non-tagged slot */
1246 esiop_lun->siop_tag[0].active = esiop_cmd;
1247 esiop_cmd->cmd_c.tag = 0;
1248 /* adjust reselect script if there is one */
1249 if (esiop_lun->siop_tag[0].reseloff > 0) {
1250 esiop_script_write(sc,
1251 esiop_lun->siop_tag[0].reseloff + 1,
1252 esiop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1253 Ent_ldsa_reload_dsa);
1254 esiop_table_sync(esiop_cmd, BUS_DMASYNC_PREWRITE);
1255 }
1256 return 0;
1257 }
1258
1259 #endif /* XXX TAG */
1260
1261 /*
1262 * handle a bus reset: reset chip, unqueue all active commands, free all
1263  * target structs and report loosage to the upper layer.
1264  * As the upper layer may requeue immediately we have to first store
1265 * all active commands in a temporary queue.
1266 */
1267 void
1268 esiop_handle_reset(sc)
1269 struct esiop_softc *sc;
1270 {
1271 struct esiop_cmd *esiop_cmd;
1272 struct esiop_lun *esiop_lun;
1273 int target, lun, tag;
1274 /*
1275 * scsi bus reset. reset the chip and restart
1276 * the queue. Need to clean up all active commands
1277 */
1278 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1279 /* stop, reset and restart the chip */
1280 esiop_reset(sc);
1281 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1282 /* chip has been reset, all slots are free now */
1283 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1284 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1285 }
1286 /*
1287 	 * Process all commands: first the completed commands, then commands
1288 * being executed
1289 */
1290 esiop_checkdone(sc);
1291 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1292 target++) {
1293 struct esiop_target *esiop_target =
1294 (struct esiop_target *)sc->sc_c.targets[target];
1295 if (esiop_target == NULL)
1296 continue;
1297 for (lun = 0; lun < 8; lun++) {
1298 esiop_lun = esiop_target->esiop_lun[lun];
1299 if (esiop_lun == NULL)
1300 continue;
1301 #if 0 /* XXX TAG */
1302 for (tag = 0; tag <
1303 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1304 ESIOP_NTAG : 1);
1305 tag++) {
1306 esiop_cmd = esiop_lun->siop_tag[tag].active;
1307 #else
1308 {
1309 tag = -1;
1310 esiop_cmd = esiop_lun->active;
1311 #endif /* XXX TAG */
1312 if (esiop_cmd == NULL)
1313 continue;
1314 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1315 printf("command with tag id %d reset\n", tag);
1316 esiop_cmd->cmd_c.xs->error =
1317 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1318 XS_TIMEOUT : XS_RESET;
1319 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1320 #if 0 /* XXX TAG */
1321 esiop_lun->siop_tag[tag].active = NULL;
1322 #endif
1323 esiop_lun->active = NULL;
1324 esiop_cmd->cmd_c.status = CMDST_DONE;
1325 esiop_scsicmd_end(esiop_cmd);
1326 }
1327 }
1328 sc->sc_c.targets[target]->status = TARST_ASYNC;
1329 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1330 sc->sc_c.targets[target]->period =
1331 sc->sc_c.targets[target]->offset = 0;
1332 siop_update_xfer_mode(&sc->sc_c, target);
1333 }
1334
1335 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1336 }
1337
1338 void
1339 esiop_scsipi_request(chan, req, arg)
1340 struct scsipi_channel *chan;
1341 scsipi_adapter_req_t req;
1342 void *arg;
1343 {
1344 struct scsipi_xfer *xs;
1345 struct scsipi_periph *periph;
1346 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1347 struct esiop_cmd *esiop_cmd;
1348 struct esiop_target *esiop_target;
1349 int s, error, i;
1350 int target;
1351 int lun;
1352
1353 switch (req) {
1354 case ADAPTER_REQ_RUN_XFER:
1355 xs = arg;
1356 periph = xs->xs_periph;
1357 target = periph->periph_target;
1358 lun = periph->periph_lun;
1359
1360 s = splbio();
1361 #ifdef SIOP_DEBUG_SCHED
1362 printf("starting cmd for %d:%d\n", target, lun);
1363 #endif
1364 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1365 if (esiop_cmd == NULL) {
1366 xs->error = XS_RESOURCE_SHORTAGE;
1367 scsipi_done(xs);
1368 splx(s);
1369 return;
1370 }
1371 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1372 #ifdef DIAGNOSTIC
1373 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1374 panic("siop_scsicmd: new cmd not free");
1375 #endif
1376 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1377 if (esiop_target == NULL) {
1378 #ifdef SIOP_DEBUG
1379 printf("%s: alloc siop_target for target %d\n",
1380 sc->sc_c.sc_dev.dv_xname, target);
1381 #endif
1382 sc->sc_c.targets[target] =
1383 malloc(sizeof(struct esiop_target),
1384 M_DEVBUF, M_NOWAIT | M_ZERO);
1385 if (sc->sc_c.targets[target] == NULL) {
1386 printf("%s: can't malloc memory for "
1387 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1388 target);
1389 xs->error = XS_RESOURCE_SHORTAGE;
1390 scsipi_done(xs);
1391 splx(s);
1392 return;
1393 }
1394 esiop_target =
1395 (struct esiop_target*)sc->sc_c.targets[target];
1396 esiop_target->target_c.status = TARST_PROBING;
1397 esiop_target->target_c.flags = 0;
1398 esiop_target->target_c.id =
1399 sc->sc_c.clock_div << 24; /* scntl3 */
1400 esiop_target->target_c.id |= target << 16; /* id */
1401 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1402
1403 for (i=0; i < 8; i++)
1404 esiop_target->esiop_lun[i] = NULL;
1405 esiop_target_register(sc, target);
1406 }
1407 if (esiop_target->esiop_lun[lun] == NULL) {
1408 esiop_target->esiop_lun[lun] =
1409 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1410 M_NOWAIT|M_ZERO);
1411 if (esiop_target->esiop_lun[lun] == NULL) {
1412 printf("%s: can't alloc esiop_lun for "
1413 "target %d lun %d\n",
1414 sc->sc_c.sc_dev.dv_xname, target, lun);
1415 xs->error = XS_RESOURCE_SHORTAGE;
1416 scsipi_done(xs);
1417 splx(s);
1418 return;
1419 }
1420 }
1421 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1422 esiop_cmd->cmd_c.xs = xs;
1423 esiop_cmd->cmd_c.flags = 0;
1424 esiop_cmd->cmd_c.status = CMDST_READY;
1425
1426 /* load the DMA maps */
1427 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1428 esiop_cmd->cmd_c.dmamap_cmd,
1429 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1430 if (error) {
1431 printf("%s: unable to load cmd DMA map: %d\n",
1432 sc->sc_c.sc_dev.dv_xname, error);
1433 xs->error = XS_DRIVER_STUFFUP;
1434 scsipi_done(xs);
1435 splx(s);
1436 return;
1437 }
1438 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1439 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1440 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1441 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1442 ((xs->xs_control & XS_CTL_DATA_IN) ?
1443 BUS_DMA_READ : BUS_DMA_WRITE));
1444 if (error) {
1445 printf("%s: unable to load cmd DMA map: %d",
1446 sc->sc_c.sc_dev.dv_xname, error);
1447 xs->error = XS_DRIVER_STUFFUP;
1448 scsipi_done(xs);
1449 bus_dmamap_unload(sc->sc_c.sc_dmat,
1450 esiop_cmd->cmd_c.dmamap_cmd);
1451 splx(s);
1452 return;
1453 }
1454 bus_dmamap_sync(sc->sc_c.sc_dmat,
1455 esiop_cmd->cmd_c.dmamap_data, 0,
1456 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1457 (xs->xs_control & XS_CTL_DATA_IN) ?
1458 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1459 }
1460 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1461 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1462 BUS_DMASYNC_PREWRITE);
1463
1464 siop_setuptables(&esiop_cmd->cmd_c);
1465 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1466 htole32(A_f_c_target | A_f_c_lun); /* XXX TAG */
1467 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1468 htole32((target << 8) | (lun << 16)); /* XXX TAG */
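		/*
		 * The tlq word mirrors the SCRATCHC layout decoded in
		 * esiop_intr() (flags in the low byte, target in bits 8-15,
		 * lun in bits 16-23); the script presumably copies it into
		 * SCRATCHC when it starts the command so the interrupt
		 * handler can locate the active command again.
		 */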
1469
1470 esiop_table_sync(esiop_cmd,
1471 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1472 esiop_start(sc, esiop_cmd);
1473 if (xs->xs_control & XS_CTL_POLL) {
1474 /* poll for command completion */
1475 while ((xs->xs_status & XS_STS_DONE) == 0) {
1476 delay(1000);
1477 esiop_intr(sc);
1478 }
1479 }
1480 splx(s);
1481 return;
1482
1483 case ADAPTER_REQ_GROW_RESOURCES:
1484 #ifdef SIOP_DEBUG
1485 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1486 sc->sc_c.sc_adapt.adapt_openings);
1487 #endif
1488 esiop_morecbd(sc);
1489 return;
1490
1491 case ADAPTER_REQ_SET_XFER_MODE:
1492 {
1493 struct scsipi_xfer_mode *xm = arg;
1494 if (sc->sc_c.targets[xm->xm_target] == NULL)
1495 return;
1496 s = splbio();
1497 #if 0
1498 if (xm->xm_mode & PERIPH_CAP_TQING)
1499 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1500 #endif
1501 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1502 (sc->sc_c.features & SF_BUS_WIDE))
1503 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1504 if (xm->xm_mode & PERIPH_CAP_SYNC)
1505 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1506 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1507 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1508 sc->sc_c.targets[xm->xm_target]->status =
1509 TARST_ASYNC;
1510
1511 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1512 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1513 /* allocate a lun sw entry for this device */
1514 esiop_add_dev(sc, xm->xm_target, lun);
1515 }
1516
1517 splx(s);
1518 }
1519 }
1520 }
1521
1522 static void
1523 esiop_start(sc, esiop_cmd)
1524 struct esiop_softc *sc;
1525 struct esiop_cmd *esiop_cmd;
1526 {
1527 struct esiop_lun *esiop_lun;
1528 struct esiop_target *esiop_target;
1529 int timeout;
1530 int target, lun, slot;
1531
1532 nintr = 0;
1533
1534 /*
1535 * first make sure to read valid data
1536 */
1537 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1538
1539 /*
1540 * We use a circular queue here. sc->sc_currschedslot points to a
1541 * free slot, unless we have filled the queue. Check this.
1542 */
1543 slot = sc->sc_currschedslot;
1544 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * 2) &
1545 A_f_cmd_free) == 0) {
1546 /*
1547 	 * no more free slots, no need to continue. Freeze the queue
1548 * and requeue this command.
1549 */
1550 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1551 sc->sc_flags |= SCF_CHAN_NOSLOT;
1552 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1553 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1554 esiop_scsicmd_end(esiop_cmd);
1555 return;
1556 }
1557 /* OK, we can use this slot */
1558
1559 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1560 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1561 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1562 esiop_lun = esiop_target->esiop_lun[lun];
1563 /* if non-tagged command active, panic: this shouldn't happen */
1564 if (esiop_lun->active != NULL) {
1565 panic("esiop_start: tagged cmd while untagged running");
1566 }
1567 #ifdef DIAGNOSTIC
1568 /* sanity check the tag if needed */
1569 #if 0 /* XXX TAG */
1570 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1571 if (esiop_lun->esiop_tag[esiop_cmd->cmd_c.tag].active != NULL)
1572 panic("esiop_start: tag not free");
1573 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG) {
1574 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1575 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1576 panic("esiop_start: invalid tag id");
1577 }
1578 }
1579 #endif /* XXX TAG */
1580 #endif
1581 #ifdef SIOP_DEBUG_SCHED
1582 printf("using slot %d for DSA 0x%lx\n", slot,
1583 (u_long)esiop_cmd->cmd_c.dsa);
1584 #endif
1585 /* mark command as active */
1586 if (esiop_cmd->cmd_c.status == CMDST_READY)
1587 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1588 else
1589 panic("esiop_start: bad status");
1590 #if 0 /* XXX TAG */
1591 esiop_lun->esiop_tag[esiop_cmd->cmd_c.tag].active = esiop_cmd;
1592 #endif
1593 esiop_lun->active = esiop_cmd;
1594 /* DSA table for reselect */
1595 esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1596 esiop_cmd->cmd_c.dsa);
1597 /* scheduler slot: ID, then DSA */
1598 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1599 sc->sc_c.targets[target]->id);
1600 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1601 esiop_cmd->cmd_c.dsa);
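	/*
	 * Word 0 of the slot doubles as its state: esiop_reset() initializes
	 * it to A_f_cmd_free, and storing the command's DSA here marks the
	 * slot busy for the script (the low two bits apparently carry flags,
	 * hence the ~0x3 mask in esiop_cmd_find()).
	 */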
1602 /* handle timeout */
1603 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1604 	/* start expire timer */
1605 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1606 if (timeout == 0)
1607 timeout = 1;
1608 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1609 timeout, esiop_timeout, esiop_cmd);
1610 }
1611 /* make sure SCRIPT processor will read valid data */
1612 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1613 /* Signal script it has some work to do */
1614 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1615 SIOP_ISTAT, ISTAT_SIGP);
1616 /* update the current slot, and wait for IRQ */
1617 sc->sc_currschedslot++;
1618 if (sc->sc_currschedslot >= A_ncmd_slots)
1619 sc->sc_currschedslot = 0;
1620 return;
1621 }
1622
1623 void
1624 esiop_timeout(v)
1625 void *v;
1626 {
1627 struct esiop_cmd *esiop_cmd = v;
1628 struct esiop_softc *sc =
1629 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1630 int s;
1631
1632 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1633 printf("command timeout\n");
1634
1635 s = splbio();
1636 /* reset the scsi bus */
1637 siop_resetbus(&sc->sc_c);
1638
1639 /* deactivate callout */
1640 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1641 /*
1642 	 * mark the command as having timed out and just return;
1643 	 * the bus reset will generate an interrupt,
1644 	 * which will be handled in esiop_intr()
1645 */
1646 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1647 splx(s);
1648 return;
1649
1650 }
1651
1652 void
1653 esiop_dump_script(sc)
1654 struct esiop_softc *sc;
1655 {
1656 int i;
1657 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1658 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1659 le32toh(sc->sc_c.sc_script[i]),
1660 le32toh(sc->sc_c.sc_script[i+1]));
1661 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1662 0xc0000000) {
1663 i++;
1664 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1665 }
1666 printf("\n");
1667 }
1668 }
1669
1670 void
1671 esiop_morecbd(sc)
1672 struct esiop_softc *sc;
1673 {
1674 int error, i, s;
1675 bus_dma_segment_t seg;
1676 int rseg;
1677 struct esiop_cbd *newcbd;
1678 struct esiop_xfer *xfer;
1679 bus_addr_t dsa;
1680
1681 /* allocate a new list head */
1682 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1683 if (newcbd == NULL) {
1684 printf("%s: can't allocate memory for command descriptors "
1685 "head\n", sc->sc_c.sc_dev.dv_xname);
1686 return;
1687 }
1688
1689 /* allocate cmd list */
1690 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1691 M_DEVBUF, M_NOWAIT|M_ZERO);
1692 if (newcbd->cmds == NULL) {
1693 printf("%s: can't allocate memory for command descriptors\n",
1694 sc->sc_c.sc_dev.dv_xname);
1695 goto bad3;
1696 }
1697 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1698 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1699 if (error) {
1700 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1701 sc->sc_c.sc_dev.dv_xname, error);
1702 goto bad2;
1703 }
1704 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1705 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1706 if (error) {
1707 printf("%s: unable to map cbd DMA memory, error = %d\n",
1708 sc->sc_c.sc_dev.dv_xname, error);
1709 goto bad2;
1710 }
1711 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1712 BUS_DMA_NOWAIT, &newcbd->xferdma);
1713 if (error) {
1714 printf("%s: unable to create cbd DMA map, error = %d\n",
1715 sc->sc_c.sc_dev.dv_xname, error);
1716 goto bad1;
1717 }
1718 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1719 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1720 if (error) {
1721 printf("%s: unable to load cbd DMA map, error = %d\n",
1722 sc->sc_c.sc_dev.dv_xname, error);
1723 goto bad0;
1724 }
1725 #ifdef DEBUG
1726 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1727 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1728 #endif
1729 for (i = 0; i < SIOP_NCMDPB; i++) {
1730 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1731 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1732 &newcbd->cmds[i].cmd_c.dmamap_data);
1733 if (error) {
1734 printf("%s: unable to create data DMA map for cbd: "
1735 "error %d\n",
1736 sc->sc_c.sc_dev.dv_xname, error);
1737 goto bad0;
1738 }
1739 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1740 sizeof(struct scsipi_generic), 1,
1741 sizeof(struct scsipi_generic), 0,
1742 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1743 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1744 if (error) {
1745 printf("%s: unable to create cmd DMA map for cbd %d\n",
1746 sc->sc_c.sc_dev.dv_xname, error);
1747 goto bad0;
1748 }
1749 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1750 newcbd->cmds[i].esiop_cbdp = newcbd;
1751 xfer = &newcbd->xfers[i];
1752 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1753 memset(newcbd->cmds[i].cmd_tables, 0,
1754 sizeof(struct esiop_xfer));
1755 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1756 i * sizeof(struct esiop_xfer);
1757 newcbd->cmds[i].cmd_c.dsa = dsa;
1758 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1759 xfer->siop_tables.t_msgout.count= htole32(1);
1760 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1761 xfer->siop_tables.t_msgin.count= htole32(1);
1762 xfer->siop_tables.t_msgin.addr = htole32(dsa + 8);
1763 xfer->siop_tables.t_extmsgin.count= htole32(2);
1764 xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1765 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 11);
1766 xfer->siop_tables.t_status.count= htole32(1);
1767 xfer->siop_tables.t_status.addr = htole32(dsa + 16);
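		/*
		 * The message and status buffers are (presumably) the first
		 * fields of struct siop_common_xfer, so the table addresses
		 * are fixed offsets from the DSA: msg_out at dsa+0, msg_in at
		 * dsa+8, status at dsa+16; extended messages are read into
		 * msg_in[1..2] (dsa+9) and their data into msg_in[3..]
		 * (dsa+11).
		 */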
1768
1769 s = splbio();
1770 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1771 splx(s);
1772 #ifdef SIOP_DEBUG
1773 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1774 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1775 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1776 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1777 #endif
1778 }
1779 s = splbio();
1780 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1781 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1782 splx(s);
1783 return;
1784 bad0:
1785 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1786 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1787 bad1:
1788 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1789 bad2:
1790 free(newcbd->cmds, M_DEVBUF);
1791 bad3:
1792 free(newcbd, M_DEVBUF);
1793 return;
1794 }
1795
1796 void
1797 esiop_update_scntl3(sc, _siop_target)
1798 struct esiop_softc *sc;
1799 struct siop_common_target *_siop_target;
1800 {
1801 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1802 esiop_script_write(sc, esiop_target->lun_table_offset,
1803 esiop_target->target_c.id);
1804 /* XXX TAG */
1805 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1806 }
1807
1808 void
1809 esiop_add_dev(sc, target, lun)
1810 struct esiop_softc *sc;
1811 int target;
1812 int lun;
1813 {
1814 #if 0 /* XXX TAG */
1815 struct esiop_target *esiop_target =
1816 (struct esiop_target *)sc->sc_c.targets[target];
1817 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1818 int i, ntargets;
1819 if (siop_target->target_c.flags & TARF_TAG) {
1820 /* we need a tag switch */
1821 sc->script_free_hi -=
1822 sizeof(tag_switch) / sizeof(tag_switch[0]);
1823 if (sc->sc_c.features & SF_CHIP_RAM) {
1824 bus_space_write_region_4(sc->sc_c.sc_ramt,
1825 sc->sc_c.sc_ramh,
1826 sc->script_free_hi * 4, tag_switch,
1827 sizeof(tag_switch) / sizeof(tag_switch[0]));
1828 } else {
1829 for(i = 0;
1830 i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1831 i++) {
1832 sc->sc_c.sc_script[sc->script_free_hi + i] =
1833 htole32(tag_switch[i]);
1834 }
1835 }
1836 esiop_script_write(sc,
1837 siop_lun->reseloff + 1,
1838 sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1839 Ent_tag_switch_entry);
1840
1841 for (i = 0; i < ESIOP_NTAG; i++) {
1842 siop_lun->siop_tag[i].reseloff =
1843 sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1844 }
1845 } else {
1846 /* non-tag case; just work with the lun switch */
1847 siop_lun->siop_tag[0].reseloff =
1848 siop_target->siop_lun[lun]->reseloff;
1849 }
1850 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1851 #endif /* XXX TAG */
1852 }
1853
1854 void
1855 esiop_del_dev(sc, target, lun)
1856 struct esiop_softc *sc;
1857 int target;
1858 int lun;
1859 {
1860 struct esiop_target *esiop_target;
1861 #ifdef SIOP_DEBUG
1862 printf("%s:%d:%d: free lun sw entry\n",
1863 sc->sc_c.sc_dev.dv_xname, target, lun);
1864 #endif
1865 if (sc->sc_c.targets[target] == NULL)
1866 return;
1867 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1868 free(esiop_target->esiop_lun[lun], M_DEVBUF);
1869 esiop_target->esiop_lun[lun] = NULL;
1870 }
1871
1872 struct esiop_cmd *
1873 esiop_cmd_find(sc, target, dsa)
1874 struct esiop_softc *sc;
1875 int target;
1876 u_int32_t dsa;
1877 {
1878 int lun;
1879 struct esiop_cmd *cmd;
1880 struct esiop_lun *esiop_lun;
1881 struct esiop_target *esiop_target =
1882 (struct esiop_target *)sc->sc_c.targets[target];
1883
1884 if (esiop_target == NULL)
1885 return NULL;
1886
1887 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1888 esiop_lun = esiop_target->esiop_lun[lun];
1889 if (esiop_lun == NULL)
1890 continue;
1891 cmd = esiop_lun->active;
1892 if (cmd && cmd->cmd_c.dsa == dsa)
1893 return cmd;
1894 /* XXX TAG */
1895 }
1896 return NULL;
1897 }
1898
1899 void
1900 esiop_target_register(sc, target)
1901 struct esiop_softc *sc;
1902 u_int32_t target;
1903 {
1904 struct esiop_target *esiop_target =
1905 (struct esiop_target *)sc->sc_c.targets[target];
1906
1907 /* get a DSA table for this target */
1908 esiop_target->lun_table_offset = sc->sc_free_offset;
1909 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
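	/*
	 * Per-target lun table layout (in 32-bit words): word 0 is the
	 * scntl3/id/sxfer word used for (re)selection (also updated by
	 * esiop_update_scntl3()), and the per-lun DSA entries start at
	 * word 2 (see esiop_start(), which writes at
	 * lun_table_offset + lun + 2).
	 */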
1910 #ifdef SIOP_DEBUG
1911 printf("%s: lun table for target %d offset %d free offset %d\n",
1912 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
1913 sc->sc_free_offset);
1914 #endif
1915 	/* the first 32-bit word is the target ID (for select) */
1916 esiop_script_write(sc, esiop_target->lun_table_offset,
1917 esiop_target->target_c.id);
1918 /* Record this table in the target DSA table */
1919 esiop_script_write(sc,
1920 sc->sc_target_table_offset + target,
1921 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
1922 sc->sc_c.sc_scriptaddr);
1923 esiop_script_sync(sc,
1924 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1925 }
1926
1927 #ifdef SIOP_STATS
1928 void
1929 esiop_printstats()
1930 {
1931 printf("esiop_stat_intr %d\n", esiop_stat_intr);
1932 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
1933 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
1934 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
1935 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
1936 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
1937 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
1938 }
1939 #endif
1940