/*	$NetBSD: esiop.c,v 1.4 2002/04/23 10:38:37 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.4 2002/04/23 10:38:37 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #define DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_update_scntl3 __P((struct esiop_softc *,
98 struct siop_common_target *));
99 struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
100 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
101
102 static int nintr = 0;
103
104 #ifdef SIOP_STATS
105 static int esiop_stat_intr = 0;
106 static int esiop_stat_intr_shortxfer = 0;
107 static int esiop_stat_intr_sdp = 0;
108 static int esiop_stat_intr_done = 0;
109 static int esiop_stat_intr_xferdisc = 0;
110 static int esiop_stat_intr_lunresel = 0;
111 static int esiop_stat_intr_qfull = 0;
112 void esiop_printstats __P((void));
113 #define INCSTAT(x) x++
114 #else
115 #define INCSTAT(x)
116 #endif
117
118 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
119 static __inline__ void
120 esiop_script_sync(sc, ops)
121 struct esiop_softc *sc;
122 int ops;
123 {
124 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
125 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
126 PAGE_SIZE, ops);
127 }
128
129 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
130 static __inline__ u_int32_t
131 esiop_script_read(sc, offset)
132 struct esiop_softc *sc;
133 u_int offset;
134 {
135 if (sc->sc_c.features & SF_CHIP_RAM) {
136 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
137 offset * 4);
138 } else {
139 return le32toh(sc->sc_c.sc_script[offset]);
140 }
141 }
142
143 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
144 u_int32_t));
145 static __inline__ void
146 esiop_script_write(sc, offset, val)
147 struct esiop_softc *sc;
148 u_int offset;
149 u_int32_t val;
150 {
151 if (sc->sc_c.features & SF_CHIP_RAM) {
152 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
153 offset * 4, val);
154 } else {
155 sc->sc_c.sc_script[offset] = htole32(val);
156 }
157 }
158
159 void
160 esiop_attach(sc)
161 struct esiop_softc *sc;
162 {
163 int error, i;
164 bus_dma_segment_t seg;
165 int rseg;
166
167 /*
168 * Allocate DMA-safe memory for the script and map it.
169 */
170 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
171 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
172 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
173 if (error) {
174 printf("%s: unable to allocate script DMA memory, "
175 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
176 return;
177 }
178 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
179 (caddr_t *)&sc->sc_c.sc_script,
180 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
181 if (error) {
182 printf("%s: unable to map script DMA memory, "
183 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
184 return;
185 }
186 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
187 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
188 if (error) {
189 printf("%s: unable to create script DMA map, "
190 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
191 return;
192 }
193 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
194 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
195 if (error) {
196 printf("%s: unable to load script DMA map, "
197 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
198 return;
199 }
200 sc->sc_c.sc_scriptaddr =
201 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
202 sc->sc_c.ram_size = PAGE_SIZE;
203 }
204 TAILQ_INIT(&sc->free_list);
205 TAILQ_INIT(&sc->cmds);
206 TAILQ_INIT(&sc->free_tagtbl);
207 TAILQ_INIT(&sc->tag_tblblk);
208 sc->sc_currschedslot = 0;
209 #ifdef SIOP_DEBUG
210 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
211 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
212 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
213 #endif
214
215 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
216 sc->sc_c.sc_adapt.adapt_nchannels = 1;
217 sc->sc_c.sc_adapt.adapt_openings = 0;
218 sc->sc_c.sc_adapt.adapt_max_periph = 1 /* XXX ESIOP_NTAG - 1 */ ;
219 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
220 sc->sc_c.sc_adapt.adapt_minphys = minphys;
221 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
222
223 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
224 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
225 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
226 sc->sc_c.sc_chan.chan_channel = 0;
227 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
228 sc->sc_c.sc_chan.chan_ntargets =
229 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
230 sc->sc_c.sc_chan.chan_nluns = 8;
231 sc->sc_c.sc_chan.chan_id =
232 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
233 if (sc->sc_c.sc_chan.chan_id == 0 ||
234 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
235 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
236
237 for (i = 0; i < 16; i++)
238 sc->sc_c.targets[i] = NULL;
239
240 /* find min/max sync period for this chip */
241 sc->sc_c.maxsync = 0;
242 sc->sc_c.minsync = 255;
243 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
244 if (sc->sc_c.clock_period != scf_period[i].clock)
245 continue;
246 if (sc->sc_c.maxsync < scf_period[i].period)
247 sc->sc_c.maxsync = scf_period[i].period;
248 if (sc->sc_c.minsync > scf_period[i].period)
249 sc->sc_c.minsync = scf_period[i].period;
250 }
251 if (sc->sc_c.maxsync == 255 || sc->sc_c.minsync == 0)
252 panic("siop: can't find my sync parameters\n");
253 /* Do a bus reset, so that devices fall back to narrow/async */
254 siop_resetbus(&sc->sc_c);
255 /*
256 * siop_reset() will reset the chip, thus clearing pending interrupts
257 */
258 esiop_reset(sc);
259 #ifdef DUMP_SCRIPT
260 esiop_dump_script(sc);
261 #endif
262
263 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
264 }
265
/*
 * esiop_reset: reset the chip and (re)build the script image.
 *
 * Script memory layout, in 32-bit words from sc_scriptaddr:
 *   [0 .. script)          the SCRIPTS program itself
 *   [+2 words]             8-byte buffer for incoming messages
 *   [A_ncmd_slots * 2]     command scheduler ring (2 words per slot)
 *   [chan_ntargets]        per-target DSA table
 * sc_free_offset tracks the first unused word past this layout.
 *
 * After copying/patching the script, the scheduler ring is marked all
 * free, SCRATCHE/SCRATCHD are seeded with the current slot index and
 * address, the script's self-modifying "load scheduler base" and
 * "load target table base" instructions are patched with the physical
 * addresses, existing targets are re-registered, and the script is
 * started at the reselect entry point.
 */
void
esiop_reset(sc)
	struct esiop_softc *sc;
{
	int i, j;
	u_int32_t addr;
	u_int32_t msgin_addr;

	siop_common_reset(&sc->sc_c);

	/*
	 * we copy the script at the beginning of RAM. Then there are 8 bytes
	 * for messages in.
	 */
	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
	msgin_addr =
	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 2;
	/* then we have the scheduler ring */
	sc->sc_shedoffset = sc->sc_free_offset;
	sc->sc_free_offset += A_ncmd_slots * 2;
	/* then the targets DSA table */
	sc->sc_target_table_offset = sc->sc_free_offset;
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
	/* copy and patch the script */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		/* script lives in on-chip RAM: copy/patch via bus_space */
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    esiop_script,
		    sizeof(esiop_script) / sizeof(esiop_script[0]));
		/* patch in the size of the T/L/Q table header */
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_tlq_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer));
		}
		/* patch in the physical address of the msg-in buffer */
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin2_Used[j] * 4, msgin_addr);
		}

		/* activate the LED on/off fragments if the chip drives LED0 */
		if (sc->sc_c.features & SF_CHIP_LED0) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, esiop_led_off,
			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
		}
	} else {
		/* script lives in host DMA memory: store little-endian */
		for (j = 0;
		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
		}
		/* patch in the size of the T/L/Q table header */
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer));
		}
		/* patch in the physical address of the msg-in buffer */
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
			    htole32(msgin_addr);
		}

		/* activate the LED on/off fragments if the chip drives LED0 */
		if (sc->sc_c.features & SF_CHIP_LED0) {
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_off) /
			    sizeof(esiop_led_off[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(esiop_led_off[0]) + j
				    ] = htole32(esiop_led_off[j]);
		}
	}
	/* get base of scheduler ring */
	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
	/* init scheduler: mark every slot free */
	for (i = 0; i < A_ncmd_slots; i++) {
		esiop_script_write(sc, sc->sc_shedoffset + i * 2, A_f_cmd_free);
		esiop_script_write(sc, sc->sc_shedoffset + i * 2 + 1, 0);
	}
	sc->sc_currschedslot = 0;
	/* SCRATCHE holds the current slot index, SCRATCHD its address */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
	/*
	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
	 * octet, reg offset is the third.  Patch the four byte-wise
	 * "load scheduler ring base" instructions with `addr'.
	 */
	esiop_script_write(sc, Ent_cmdr0 / 4,
	    0x78640000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_cmdr1 / 4,
	    0x78650000 | ((addr & 0x0000ff00) ));
	esiop_script_write(sc, Ent_cmdr2 / 4,
	    0x78660000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_cmdr3 / 4,
	    0x78670000 | ((addr & 0xff000000) >> 16));
	/* set flags */
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
	/*
	 * write pointer of base of target DSA table: OR each address
	 * byte into the data8 field of the corresponding instruction
	 * (the instructions are 2 words apart, hence the stride of 2).
	 */
	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr;
	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
	    ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
	    ((addr & 0x0000ff00) ));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
	    ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
	    ((addr & 0xff000000) >> 16));
#ifdef SIOP_DEBUG
	printf("%s: target table offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
	    sc->sc_free_offset);
#endif

	/* register existing targets */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		if (sc->sc_c.targets[i])
			esiop_target_register(sc, i);
	}
	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}
417
/*
 * CALL_SCRIPT(ent): start the script processor at entry point `ent'
 * by writing DSP.  Relies on a local `sc' being in scope (and, for
 * the "#if 0" debug variant, a local `esiop_cmd' as well).
 */
#if 0
#define CALL_SCRIPT(ent) do {\
	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
	    esiop_cmd->cmd_c.dsa, \
	    sc->sc_c.sc_scriptaddr + ent); \
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#else
#define CALL_SCRIPT(ent) do {\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#endif
430
431 int
432 esiop_intr(v)
433 void *v;
434 {
435 struct esiop_softc *sc = v;
436 struct esiop_target *esiop_target;
437 struct esiop_cmd *esiop_cmd;
438 struct esiop_lun *esiop_lun;
439 struct scsipi_xfer *xs;
440 int istat, sist, sstat1, dstat;
441 u_int32_t irqcode;
442 int need_reset = 0;
443 int offset, target, lun, tag;
444 u_int32_t tflags;
445 u_int32_t addr;
446 int freetarget = 0;
447 int restart = 0;
448 int slot;
449 int retval = 0;
450
451 again:
452 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
453 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
454 if (istat & ISTAT_SEM) {
455 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
456 SIOP_ISTAT, (istat & ~ISTAT_SEM));
457 esiop_checkdone(sc);
458 }
459 return retval;
460 }
461 retval = 1;
462 nintr++;
463 if (nintr > 100) {
464 panic("esiop: intr loop");
465 }
466 INCSTAT(esiop_stat_intr);
467 if (istat & ISTAT_INTF) {
468 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
469 SIOP_ISTAT, ISTAT_INTF);
470 esiop_checkdone(sc);
471 goto again;
472 }
473 /* get CMD from T/L/Q */
474 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
475 SIOP_SCRATCHC);
476 #ifdef SIOP_DEBUG_INTR
477 printf("interrupt, istat=0x%x tflags=0x%x "
478 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
479 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
480 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
481 SIOP_DSP) -
482 sc->sc_c.sc_scriptaddr));
483 #endif
484 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
485 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
486 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
487 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
488 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
489
490 if (target >= 0 && lun >= 0) {
491 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
492 if (esiop_target == NULL) {
493 printf("esiop_target (target %d) not valid\n", target);
494 goto none;
495 }
496 esiop_lun = esiop_target->esiop_lun[lun];
497 if (esiop_lun == NULL) {
498 printf("esiop_lun (target %d lun %d) not valid\n",
499 target, lun);
500 goto none;
501 }
502 esiop_cmd =
503 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
504 if (esiop_cmd == NULL) {
505 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
506 target, lun, tag);
507 goto none;
508 }
509 xs = esiop_cmd->cmd_c.xs;
510 #ifdef DIAGNOSTIC
511 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
512 printf("esiop_cmd (target %d lun %d) "
513 "not active (%d)\n", target, lun,
514 esiop_cmd->cmd_c.status);
515 goto none;
516 }
517 #endif
518 esiop_table_sync(esiop_cmd,
519 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
520 } else {
521 none:
522 xs = NULL;
523 esiop_target = NULL;
524 esiop_lun = NULL;
525 esiop_cmd = NULL;
526 }
527 if (istat & ISTAT_DIP) {
528 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
529 SIOP_DSTAT);
530 if (dstat & DSTAT_SSI) {
531 printf("single step dsp 0x%08x dsa 0x08%x\n",
532 (int)(bus_space_read_4(sc->sc_c.sc_rt,
533 sc->sc_c.sc_rh, SIOP_DSP) -
534 sc->sc_c.sc_scriptaddr),
535 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
536 SIOP_DSA));
537 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
538 (istat & ISTAT_SIP) == 0) {
539 bus_space_write_1(sc->sc_c.sc_rt,
540 sc->sc_c.sc_rh, SIOP_DCNTL,
541 bus_space_read_1(sc->sc_c.sc_rt,
542 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
543 }
544 return 1;
545 }
546 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
547 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
548 if (dstat & DSTAT_IID)
549 printf(" Illegal instruction");
550 if (dstat & DSTAT_ABRT)
551 printf(" abort");
552 if (dstat & DSTAT_BF)
553 printf(" bus fault");
554 if (dstat & DSTAT_MDPE)
555 printf(" parity");
556 if (dstat & DSTAT_DFE)
557 printf(" dma fifo empty");
558 printf(", DSP=0x%x DSA=0x%x: ",
559 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
560 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
561 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
562 if (esiop_cmd)
563 printf("last msg_in=0x%x status=0x%x\n",
564 esiop_cmd->cmd_tables->msg_in[0],
565 le32toh(esiop_cmd->cmd_tables->status));
566 else
567 printf(" current T/L/Q invalid\n");
568 need_reset = 1;
569 }
570 }
571 if (istat & ISTAT_SIP) {
572 if (istat & ISTAT_DIP)
573 delay(10);
574 /*
575 * Can't read sist0 & sist1 independantly, or we have to
576 * insert delay
577 */
578 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
579 SIOP_SIST0);
580 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
581 SIOP_SSTAT1);
582 #ifdef SIOP_DEBUG_INTR
583 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
584 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
585 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
586 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
587 SIOP_DSP) -
588 sc->sc_c.sc_scriptaddr));
589 #endif
590 if (sist & SIST0_RST) {
591 esiop_handle_reset(sc);
592 /* no table to flush here */
593 return 1;
594 }
595 if (sist & SIST0_SGE) {
596 if (esiop_cmd)
597 scsipi_printaddr(xs->xs_periph);
598 else
599 printf("%s:", sc->sc_c.sc_dev.dv_xname);
600 printf("scsi gross error\n");
601 goto reset;
602 }
603 if ((sist & SIST0_MA) && need_reset == 0) {
604 if (esiop_cmd) {
605 int scratchc0;
606 dstat = bus_space_read_1(sc->sc_c.sc_rt,
607 sc->sc_c.sc_rh, SIOP_DSTAT);
608 /*
609 * first restore DSA, in case we were in a S/G
610 * operation.
611 */
612 bus_space_write_4(sc->sc_c.sc_rt,
613 sc->sc_c.sc_rh,
614 SIOP_DSA, esiop_cmd->cmd_c.dsa);
615 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
616 sc->sc_c.sc_rh, SIOP_SCRATCHC);
617 switch (sstat1 & SSTAT1_PHASE_MASK) {
618 case SSTAT1_PHASE_STATUS:
619 /*
620 * previous phase may be aborted for any reason
621 * ( for example, the target has less data to
622 * transfer than requested). Just go to status
623 * and the command should terminate.
624 */
625 INCSTAT(esiop_stat_intr_shortxfer);
626 if ((dstat & DSTAT_DFE) == 0)
627 siop_clearfifo(&sc->sc_c);
628 /* no table to flush here */
629 CALL_SCRIPT(Ent_status);
630 return 1;
631 case SSTAT1_PHASE_MSGIN:
632 /*
633 * target may be ready to disconnect
634 * Save data pointers just in case.
635 */
636 INCSTAT(esiop_stat_intr_xferdisc);
637 if (scratchc0 & A_f_c_data)
638 siop_sdp(&esiop_cmd->cmd_c);
639 else if ((dstat & DSTAT_DFE) == 0)
640 siop_clearfifo(&sc->sc_c);
641 bus_space_write_1(sc->sc_c.sc_rt,
642 sc->sc_c.sc_rh, SIOP_SCRATCHC,
643 scratchc0 & ~A_f_c_data);
644 esiop_table_sync(esiop_cmd,
645 BUS_DMASYNC_PREREAD |
646 BUS_DMASYNC_PREWRITE);
647 CALL_SCRIPT(Ent_msgin);
648 return 1;
649 }
650 printf("%s: unexpected phase mismatch %d\n",
651 sc->sc_c.sc_dev.dv_xname,
652 sstat1 & SSTAT1_PHASE_MASK);
653 } else {
654 printf("%s: phase mismatch without command\n",
655 sc->sc_c.sc_dev.dv_xname);
656 }
657 need_reset = 1;
658 }
659 if (sist & SIST0_PAR) {
660 /* parity error, reset */
661 if (esiop_cmd)
662 scsipi_printaddr(xs->xs_periph);
663 else
664 printf("%s:", sc->sc_c.sc_dev.dv_xname);
665 printf("parity error\n");
666 goto reset;
667 }
668 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
669 /* selection time out, assume there's no device here */
670 /*
671 * SCRATCHC has not been loaded yet, we have to find
672 * params by ourselve. scratchE0 should point to
673 * the slot.
674 */
675 slot = bus_space_read_1(sc->sc_c.sc_rt,
676 sc->sc_c.sc_rh, SIOP_SCRATCHE);
677 esiop_script_sync(sc,
678 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
679 target = esiop_script_read(sc,
680 sc->sc_shedoffset + slot * 2 + 1) & 0x00ff0000;
681 target = (target >> 16) & 0xff;
682 esiop_cmd = esiop_cmd_find(sc, target,
683 esiop_script_read(sc,
684 sc->sc_shedoffset + slot * 2) & ~0x3);
685 /*
686 * mark this slot as free, and advance to next slot
687 */
688 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
689 A_f_cmd_free);
690 addr = bus_space_read_4(sc->sc_c.sc_rt,
691 sc->sc_c.sc_rh, SIOP_SCRATCHD);
692 if (slot < (A_ncmd_slots - 1)) {
693 bus_space_write_1(sc->sc_c.sc_rt,
694 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
695 addr = addr + 8;
696 } else {
697 bus_space_write_1(sc->sc_c.sc_rt,
698 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
699 addr = sc->sc_c.sc_scriptaddr +
700 sc->sc_shedoffset * sizeof(u_int32_t);
701 }
702 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
703 SIOP_SCRATCHD, addr);
704 esiop_script_sync(sc,
705 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
706 if (esiop_cmd) {
707 xs = esiop_cmd->cmd_c.xs;
708 esiop_target = (struct esiop_target *)
709 esiop_cmd->cmd_c.siop_target;
710 lun = xs->xs_periph->periph_lun;
711 tag = esiop_cmd->cmd_c.tag;
712 esiop_lun = esiop_target->esiop_lun[lun];
713 esiop_cmd->cmd_c.status = CMDST_DONE;
714 xs->error = XS_SELTIMEOUT;
715 freetarget = 1;
716 goto end;
717 } else {
718 printf("%s: selection timeout without "
719 "command, target %d (sdid 0x%x), "
720 "slot %d\n",
721 sc->sc_c.sc_dev.dv_xname, target,
722 bus_space_read_1(sc->sc_c.sc_rt,
723 sc->sc_c.sc_rh, SIOP_SDID), slot);
724 need_reset = 1;
725 }
726 }
727 if (sist & SIST0_UDC) {
728 /*
729 * unexpected disconnect. Usually the target signals
730 * a fatal condition this way. Attempt to get sense.
731 */
732 if (esiop_cmd) {
733 esiop_cmd->cmd_tables->status =
734 htole32(SCSI_CHECK);
735 goto end;
736 }
737 printf("%s: unexpected disconnect without "
738 "command\n", sc->sc_c.sc_dev.dv_xname);
739 goto reset;
740 }
741 if (sist & (SIST1_SBMC << 8)) {
742 /* SCSI bus mode change */
743 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
744 goto reset;
745 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
746 /*
747 * we have a script interrupt, it will
748 * restart the script.
749 */
750 goto scintr;
751 }
752 /*
753 * else we have to restart it ourselve, at the
754 * interrupted instruction.
755 */
756 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
757 SIOP_DSP,
758 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
759 SIOP_DSP) - 8);
760 return 1;
761 }
762 /* Else it's an unhandled exeption (for now). */
763 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
764 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
765 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
766 SIOP_SSTAT1),
767 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
768 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
769 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
770 if (esiop_cmd) {
771 esiop_cmd->cmd_c.status = CMDST_DONE;
772 xs->error = XS_SELTIMEOUT;
773 goto end;
774 }
775 need_reset = 1;
776 }
777 if (need_reset) {
778 reset:
779 /* fatal error, reset the bus */
780 siop_resetbus(&sc->sc_c);
781 /* no table to flush here */
782 return 1;
783 }
784
785 scintr:
786 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
787 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
788 SIOP_DSPS);
789 #ifdef SIOP_DEBUG_INTR
790 printf("script interrupt 0x%x\n", irqcode);
791 #endif
792 /*
793 * no command, or an inactive command is only valid for a
794 * reselect interrupt
795 */
796 if ((irqcode & 0x80) == 0) {
797 if (esiop_cmd == NULL) {
798 printf(
799 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
800 sc->sc_c.sc_dev.dv_xname, irqcode);
801 goto reset;
802 }
803 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
804 printf("%s: command with invalid status "
805 "(IRQ code 0x%x current status %d) !\n",
806 sc->sc_c.sc_dev.dv_xname,
807 irqcode, esiop_cmd->cmd_c.status);
808 xs = NULL;
809 }
810 }
811 switch(irqcode) {
812 case A_int_err:
813 printf("error, DSP=0x%x\n",
814 (int)(bus_space_read_4(sc->sc_c.sc_rt,
815 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
816 if (xs) {
817 xs->error = XS_SELTIMEOUT;
818 goto end;
819 } else {
820 goto reset;
821 }
822 case A_int_msgin:
823 {
824 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
825 sc->sc_c.sc_rh, SIOP_SFBR);
826 if (msgin == MSG_MESSAGE_REJECT) {
827 int msg, extmsg;
828 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
829 /*
830 * message was part of a identify +
831 * something else. Identify shoudl't
832 * have been rejected.
833 */
834 msg =
835 esiop_cmd->cmd_tables->msg_out[1];
836 extmsg =
837 esiop_cmd->cmd_tables->msg_out[3];
838 } else {
839 msg =
840 esiop_cmd->cmd_tables->msg_out[0];
841 extmsg =
842 esiop_cmd->cmd_tables->msg_out[2];
843 }
844 if (msg == MSG_MESSAGE_REJECT) {
845 /* MSG_REJECT for a MSG_REJECT !*/
846 if (xs)
847 scsipi_printaddr(xs->xs_periph);
848 else
849 printf("%s: ",
850 sc->sc_c.sc_dev.dv_xname);
851 printf("our reject message was "
852 "rejected\n");
853 goto reset;
854 }
855 if (msg == MSG_EXTENDED &&
856 extmsg == MSG_EXT_WDTR) {
857 /* WDTR rejected, initiate sync */
858 if ((esiop_target->target_c.flags &
859 TARF_SYNC) == 0) {
860 esiop_target->target_c.status =
861 TARST_OK;
862 siop_update_xfer_mode(&sc->sc_c,
863 target);
864 /* no table to flush here */
865 CALL_SCRIPT(Ent_msgin_ack);
866 return 1;
867 }
868 esiop_target->target_c.status =
869 TARST_SYNC_NEG;
870 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
871 sc->sc_c.minsync, sc->sc_c.maxoff);
872 esiop_table_sync(esiop_cmd,
873 BUS_DMASYNC_PREREAD |
874 BUS_DMASYNC_PREWRITE);
875 CALL_SCRIPT(Ent_send_msgout);
876 return 1;
877 } else if (msg == MSG_EXTENDED &&
878 extmsg == MSG_EXT_SDTR) {
879 /* sync rejected */
880 esiop_target->target_c.offset = 0;
881 esiop_target->target_c.period = 0;
882 esiop_target->target_c.status =
883 TARST_OK;
884 siop_update_xfer_mode(&sc->sc_c,
885 target);
886 /* no table to flush here */
887 CALL_SCRIPT(Ent_msgin_ack);
888 return 1;
889 } else if (msg == MSG_SIMPLE_Q_TAG ||
890 msg == MSG_HEAD_OF_Q_TAG ||
891 msg == MSG_ORDERED_Q_TAG) {
892 if (esiop_handle_qtag_reject(
893 esiop_cmd) == -1)
894 goto reset;
895 CALL_SCRIPT(Ent_msgin_ack);
896 return 1;
897 }
898 if (xs)
899 scsipi_printaddr(xs->xs_periph);
900 else
901 printf("%s: ",
902 sc->sc_c.sc_dev.dv_xname);
903 if (msg == MSG_EXTENDED) {
904 printf("scsi message reject, extended "
905 "message sent was 0x%x\n", extmsg);
906 } else {
907 printf("scsi message reject, message "
908 "sent was 0x%x\n", msg);
909 }
910 /* no table to flush here */
911 CALL_SCRIPT(Ent_msgin_ack);
912 return 1;
913 }
914 if (xs)
915 scsipi_printaddr(xs->xs_periph);
916 else
917 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
918 printf("unhandled message 0x%x\n",
919 esiop_cmd->cmd_tables->msg_in[0]);
920 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
921 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
922 esiop_table_sync(esiop_cmd,
923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
924 CALL_SCRIPT(Ent_send_msgout);
925 return 1;
926 }
927 case A_int_extmsgin:
928 #ifdef SIOP_DEBUG_INTR
929 printf("extended message: msg 0x%x len %d\n",
930 esiop_cmd->cmd_tables->msg_in[2],
931 esiop_cmd->cmd_tables->msg_in[1]);
932 #endif
933 if (esiop_cmd->cmd_tables->msg_in[1] > 6)
934 printf("%s: extended message too big (%d)\n",
935 sc->sc_c.sc_dev.dv_xname,
936 esiop_cmd->cmd_tables->msg_in[1]);
937 esiop_cmd->cmd_tables->t_extmsgdata.count =
938 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
939 esiop_table_sync(esiop_cmd,
940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
941 CALL_SCRIPT(Ent_get_extmsgdata);
942 return 1;
943 case A_int_extmsgdata:
944 #ifdef SIOP_DEBUG_INTR
945 {
946 int i;
947 printf("extended message: 0x%x, data:",
948 esiop_cmd->cmd_tables->msg_in[2]);
949 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
950 i++)
951 printf(" 0x%x",
952 esiop_cmd->cmd_tables->msg_in[i]);
953 printf("\n");
954 }
955 #endif
956 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
957 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
958 case SIOP_NEG_MSGOUT:
959 esiop_update_scntl3(sc,
960 esiop_cmd->cmd_c.siop_target);
961 esiop_table_sync(esiop_cmd,
962 BUS_DMASYNC_PREREAD |
963 BUS_DMASYNC_PREWRITE);
964 CALL_SCRIPT(Ent_send_msgout);
965 return(1);
966 case SIOP_NEG_ACK:
967 esiop_update_scntl3(sc,
968 esiop_cmd->cmd_c.siop_target);
969 CALL_SCRIPT(Ent_msgin_ack);
970 return(1);
971 default:
972 panic("invalid retval from "
973 "siop_wdtr_neg()");
974 }
975 return(1);
976 }
977 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
978 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
979 case SIOP_NEG_MSGOUT:
980 esiop_update_scntl3(sc,
981 esiop_cmd->cmd_c.siop_target);
982 esiop_table_sync(esiop_cmd,
983 BUS_DMASYNC_PREREAD |
984 BUS_DMASYNC_PREWRITE);
985 CALL_SCRIPT(Ent_send_msgout);
986 return(1);
987 case SIOP_NEG_ACK:
988 esiop_update_scntl3(sc,
989 esiop_cmd->cmd_c.siop_target);
990 CALL_SCRIPT(Ent_msgin_ack);
991 return(1);
992 default:
993 panic("invalid retval from "
994 "siop_wdtr_neg()");
995 }
996 return(1);
997 }
998 /* send a message reject */
999 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1000 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1001 esiop_table_sync(esiop_cmd,
1002 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1003 CALL_SCRIPT(Ent_send_msgout);
1004 return 1;
1005 case A_int_disc:
1006 INCSTAT(esiop_stat_intr_sdp);
1007 offset = bus_space_read_1(sc->sc_c.sc_rt,
1008 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1009 #ifdef SIOP_DEBUG_DR
1010 printf("disconnect offset %d\n", offset);
1011 #endif
1012 if (offset > SIOP_NSG) {
1013 printf("%s: bad offset for disconnect (%d)\n",
1014 sc->sc_c.sc_dev.dv_xname, offset);
1015 goto reset;
1016 }
1017 /*
1018 * offset == SIOP_NSG may be a valid condition if
1019 * we get a sdp when the xfer is done.
1020 * Don't call memmove in this case.
1021 */
1022 if (offset < SIOP_NSG) {
1023 memmove(&esiop_cmd->cmd_tables->data[0],
1024 &esiop_cmd->cmd_tables->data[offset],
1025 (SIOP_NSG - offset) * sizeof(scr_table_t));
1026 esiop_table_sync(esiop_cmd,
1027 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1028 }
1029 CALL_SCRIPT(Ent_script_sched);
1030 return 1;
1031 case A_int_resfail:
1032 printf("reselect failed\n");
1033 CALL_SCRIPT(Ent_script_sched);
1034 return 1;
1035 case A_int_done:
1036 if (xs == NULL) {
1037 printf("%s: done without command\n",
1038 sc->sc_c.sc_dev.dv_xname);
1039 CALL_SCRIPT(Ent_script_sched);
1040 return 1;
1041 }
1042 #ifdef SIOP_DEBUG_INTR
1043 printf("done, DSA=0x%lx target id 0x%x last msg "
1044 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1045 le32toh(esiop_cmd->cmd_tables->id),
1046 esiop_cmd->cmd_tables->msg_in[0],
1047 le32toh(esiop_cmd->cmd_tables->status));
1048 #endif
1049 INCSTAT(esiop_stat_intr_done);
1050 esiop_cmd->cmd_c.status = CMDST_DONE;
1051 goto end;
1052 default:
1053 printf("unknown irqcode %x\n", irqcode);
1054 if (xs) {
1055 xs->error = XS_SELTIMEOUT;
1056 goto end;
1057 }
1058 goto reset;
1059 }
1060 return 1;
1061 }
	/* We just shouldn't get here */
1063 panic("siop_intr: I shouldn't be there !");
1064
1065 end:
1066 /*
1067 * restart the script now if command completed properly
1068 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
1069 * queue
1070 */
1071 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1072 #ifdef SIOP_DEBUG_INTR
1073 printf("esiop_intr end: status %d\n", xs->status);
1074 #endif
1075 if (xs->status == SCSI_OK)
1076 CALL_SCRIPT(Ent_script_sched);
1077 else
1078 restart = 1;
1079 if (tag >= 0)
1080 esiop_lun->tactive[tag] = NULL;
1081 else
1082 esiop_lun->active = NULL;
1083 esiop_scsicmd_end(esiop_cmd);
1084 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1085 esiop_del_dev(sc, target, lun);
1086 if (restart)
1087 CALL_SCRIPT(Ent_script_sched);
1088 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1089 /* a command terminated, so we have free slots now */
1090 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1091 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1092 }
1093
1094 return retval;
1095 }
1096
/*
 * Complete a SCSI command: translate the SCSI status byte into a
 * scsipi error code, tear down the DMA maps, put the command back on
 * the adapter's free list and hand the xfer back to the upper layer.
 * Called at splbio().
 */
void
esiop_scsicmd_end(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		esiop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(esiop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    sc->sc_c.sc_dev.dv_xname,
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
	}
	/* sync and unload the data map before giving the buffer back */
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data, 0,
		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data);
	}
	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
	/* stop the watchdog started by esiop_start() */
	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
	esiop_cmd->cmd_c.status = CMDST_FREE;
	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
	xs->resid = 0;
	scsipi_done (xs);
}
1158
1159 void
1160 esiop_checkdone(sc)
1161 struct esiop_softc *sc;
1162 {
1163 int target, lun, tag;
1164 struct esiop_target *esiop_target;
1165 struct esiop_lun *esiop_lun;
1166 struct esiop_cmd *esiop_cmd;
1167 int status;
1168
1169 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1170 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1171 if (esiop_target == NULL)
1172 continue;
1173 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1174 esiop_lun = esiop_target->esiop_lun[lun];
1175 if (esiop_lun == NULL)
1176 continue;
1177 esiop_cmd = esiop_lun->active;
1178 if (esiop_cmd) {
1179 esiop_table_sync(esiop_cmd,
1180 BUS_DMASYNC_POSTREAD |
1181 BUS_DMASYNC_POSTWRITE);
1182 status = le32toh(esiop_cmd->cmd_tables->status);
1183 if (status == SCSI_OK) {
1184 /* Ok, this command has been handled */
1185 esiop_cmd->cmd_c.xs->status = status;
1186 esiop_lun->active = NULL;
1187 esiop_scsicmd_end(esiop_cmd);
1188 }
1189 }
1190 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1191 esiop_cmd = esiop_lun->tactive[tag];
1192 if (esiop_cmd == NULL)
1193 continue;
1194 esiop_table_sync(esiop_cmd,
1195 BUS_DMASYNC_POSTREAD |
1196 BUS_DMASYNC_POSTWRITE);
1197 status = le32toh(esiop_cmd->cmd_tables->status);
1198 if (status == SCSI_OK) {
1199 /* Ok, this command has been handled */
1200 esiop_cmd->cmd_c.xs->status = status;
1201 esiop_lun->tactive[tag] = NULL;
1202 esiop_scsicmd_end(esiop_cmd);
1203 }
1204 }
1205 }
1206 }
1207 }
1208
1209 void
1210 esiop_unqueue(sc, target, lun)
1211 struct esiop_softc *sc;
1212 int target;
1213 int lun;
1214 {
1215 int slot, tag;
1216 u_int32_t slotdsa;
1217 struct esiop_cmd *esiop_cmd;
1218 struct esiop_lun *esiop_lun =
1219 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1220
1221 /* first make sure to read valid data */
1222 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1223
1224 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1225 /* look for commands in the scheduler, not yet started */
1226 if (esiop_lun->tactive[tag] == NULL)
1227 continue;
1228 esiop_cmd = esiop_lun->tactive[tag];
1229 for (slot = 0; slot < A_ncmd_slots; slot++) {
1230 slotdsa = esiop_script_read(sc,
1231 sc->sc_shedoffset + slot * 2);
1232 if (slotdsa & A_f_cmd_free)
1233 continue;
1234 if ((slotdsa & ~A_f_cmd_free) == esiop_cmd->cmd_c.dsa)
1235 break;
1236 }
1237 if (slot > ESIOP_NTAG)
1238 continue; /* didn't find it */
1239 /* Mark this slot as ignore */
1240 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1241 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1242 /* ask to requeue */
1243 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1244 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1245 esiop_lun->tactive[tag] = NULL;
1246 esiop_scsicmd_end(esiop_cmd);
1247 }
1248 }
1249
1250 /*
1251 * handle a rejected queue tag message: the command will run untagged,
1252 * has to adjust the reselect script.
1253 */
1254
1255
int
esiop_handle_qtag_reject(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	/* the tag we sent is in the third byte of the outgoing message */
	int tag = esiop_cmd->cmd_tables->msg_out[2];
	struct esiop_target *esiop_target =
	    (struct esiop_target*)sc->sc_c.targets[target];
	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
	    esiop_cmd->cmd_c.status);
#endif

	if (esiop_lun->active != NULL) {
		/*
		 * can't move this command to the untagged slot: another
		 * untagged command is already running.  Return -1 so the
		 * caller knows the command could not be converted.
		 */
		printf("%s: untagged command already running for target %d "
		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
		    target, lun, esiop_lun->active->cmd_c.status);
		return -1;
	}
	/* clear tag slot */
	esiop_lun->tactive[tag] = NULL;
	/* add command to non-tagged slot */
	esiop_lun->active = esiop_cmd;
	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
	esiop_cmd->cmd_c.tag = -1;
	/*
	 * update DSA table: point the per-lun reselect entry directly at
	 * this command's DSA instead of the tag table
	 */
	esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
	    esiop_cmd->cmd_c.dsa);
	esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}
1293
1294 /*
1295 * handle a bus reset: reset chip, unqueue all active commands, free all
1296 * target struct and report loosage to upper layer.
1297 * As the upper layer may requeue immediatly we have to first store
1298 * all active commands in a temporary queue.
1299 */
void
esiop_handle_reset(sc)
	struct esiop_softc *sc;
{
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	int target, lun, tag;
	/*
	 * scsi bus reset. reset the chip and restart
	 * the queue. Need to clean up all active commands
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	esiop_reset(sc);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first completed commands, then commands
	 * being executed
	 */
	esiop_checkdone(sc);
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		struct esiop_target *esiop_target =
		    (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		/*
		 * NOTE(review): 8 luns hardcoded here while other loops in
		 * this file use sc_chan.chan_nluns — confirm they agree.
		 */
		for (lun = 0; lun < 8; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			/*
			 * tag == -1 stands for the untagged (active) slot;
			 * tags 0..ESIOP_NTAG-1 are only scanned when the
			 * target does tagged queuing.
			 */
			for (tag = -1; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    ESIOP_NTAG : 0);
			    tag++) {
				if (tag >= 0)
					esiop_cmd = esiop_lun->tactive[tag];
				else
					esiop_cmd = esiop_lun->active;
				if (esiop_cmd == NULL)
					continue;
				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				/* timed-out commands report XS_TIMEOUT */
				esiop_cmd->cmd_c.xs->error =
				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				if (tag >= 0)
					esiop_lun->tactive[tag] = NULL;
				else
					esiop_lun->active = NULL;
				esiop_cmd->cmd_c.status = CMDST_DONE;
				esiop_scsicmd_end(esiop_cmd);
			}
		}
		/* forget all negotiated parameters; renegotiate from async */
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}
1367
1368 void
1369 esiop_scsipi_request(chan, req, arg)
1370 struct scsipi_channel *chan;
1371 scsipi_adapter_req_t req;
1372 void *arg;
1373 {
1374 struct scsipi_xfer *xs;
1375 struct scsipi_periph *periph;
1376 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1377 struct esiop_cmd *esiop_cmd;
1378 struct esiop_target *esiop_target;
1379 int s, error, i;
1380 int target;
1381 int lun;
1382
1383 switch (req) {
1384 case ADAPTER_REQ_RUN_XFER:
1385 xs = arg;
1386 periph = xs->xs_periph;
1387 target = periph->periph_target;
1388 lun = periph->periph_lun;
1389
1390 s = splbio();
1391 #ifdef SIOP_DEBUG_SCHED
1392 printf("starting cmd for %d:%d\n", target, lun);
1393 #endif
1394 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1395 if (esiop_cmd == NULL) {
1396 xs->error = XS_RESOURCE_SHORTAGE;
1397 scsipi_done(xs);
1398 splx(s);
1399 return;
1400 }
1401 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1402 #ifdef DIAGNOSTIC
1403 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1404 panic("siop_scsicmd: new cmd not free");
1405 #endif
1406 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1407 if (esiop_target == NULL) {
1408 #ifdef SIOP_DEBUG
1409 printf("%s: alloc siop_target for target %d\n",
1410 sc->sc_c.sc_dev.dv_xname, target);
1411 #endif
1412 sc->sc_c.targets[target] =
1413 malloc(sizeof(struct esiop_target),
1414 M_DEVBUF, M_NOWAIT | M_ZERO);
1415 if (sc->sc_c.targets[target] == NULL) {
1416 printf("%s: can't malloc memory for "
1417 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1418 target);
1419 xs->error = XS_RESOURCE_SHORTAGE;
1420 scsipi_done(xs);
1421 splx(s);
1422 return;
1423 }
1424 esiop_target =
1425 (struct esiop_target*)sc->sc_c.targets[target];
1426 esiop_target->target_c.status = TARST_PROBING;
1427 esiop_target->target_c.flags = 0;
1428 esiop_target->target_c.id =
1429 sc->sc_c.clock_div << 24; /* scntl3 */
1430 esiop_target->target_c.id |= target << 16; /* id */
1431 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1432
1433 for (i=0; i < 8; i++)
1434 esiop_target->esiop_lun[i] = NULL;
1435 esiop_target_register(sc, target);
1436 }
1437 if (esiop_target->esiop_lun[lun] == NULL) {
1438 esiop_target->esiop_lun[lun] =
1439 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1440 M_NOWAIT|M_ZERO);
1441 if (esiop_target->esiop_lun[lun] == NULL) {
1442 printf("%s: can't alloc esiop_lun for "
1443 "target %d lun %d\n",
1444 sc->sc_c.sc_dev.dv_xname, target, lun);
1445 xs->error = XS_RESOURCE_SHORTAGE;
1446 scsipi_done(xs);
1447 splx(s);
1448 return;
1449 }
1450 }
1451 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1452 esiop_cmd->cmd_c.xs = xs;
1453 esiop_cmd->cmd_c.flags = 0;
1454 esiop_cmd->cmd_c.status = CMDST_READY;
1455
1456 /* load the DMA maps */
1457 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1458 esiop_cmd->cmd_c.dmamap_cmd,
1459 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1460 if (error) {
1461 printf("%s: unable to load cmd DMA map: %d\n",
1462 sc->sc_c.sc_dev.dv_xname, error);
1463 xs->error = XS_DRIVER_STUFFUP;
1464 scsipi_done(xs);
1465 splx(s);
1466 return;
1467 }
1468 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1469 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1470 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1471 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1472 ((xs->xs_control & XS_CTL_DATA_IN) ?
1473 BUS_DMA_READ : BUS_DMA_WRITE));
1474 if (error) {
1475 printf("%s: unable to load cmd DMA map: %d",
1476 sc->sc_c.sc_dev.dv_xname, error);
1477 xs->error = XS_DRIVER_STUFFUP;
1478 scsipi_done(xs);
1479 bus_dmamap_unload(sc->sc_c.sc_dmat,
1480 esiop_cmd->cmd_c.dmamap_cmd);
1481 splx(s);
1482 return;
1483 }
1484 bus_dmamap_sync(sc->sc_c.sc_dmat,
1485 esiop_cmd->cmd_c.dmamap_data, 0,
1486 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1487 (xs->xs_control & XS_CTL_DATA_IN) ?
1488 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1489 }
1490 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1491 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1492 BUS_DMASYNC_PREWRITE);
1493
1494 if (xs->xs_tag_type)
1495 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1496 else
1497 esiop_cmd->cmd_c.tag = -1;
1498 siop_setuptables(&esiop_cmd->cmd_c);
1499 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1500 htole32(A_f_c_target | A_f_c_lun);
1501 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1502 htole32((target << 8) | (lun << 16));
1503 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1504 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1505 htole32(A_f_c_tag);
1506 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1507 htole32(esiop_cmd->cmd_c.tag << 24);
1508 }
1509
1510 esiop_table_sync(esiop_cmd,
1511 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1512 esiop_start(sc, esiop_cmd);
1513 if (xs->xs_control & XS_CTL_POLL) {
1514 /* poll for command completion */
1515 while ((xs->xs_status & XS_STS_DONE) == 0) {
1516 delay(1000);
1517 esiop_intr(sc);
1518 }
1519 }
1520 splx(s);
1521 return;
1522
1523 case ADAPTER_REQ_GROW_RESOURCES:
1524 #ifdef SIOP_DEBUG
1525 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1526 sc->sc_c.sc_adapt.adapt_openings);
1527 #endif
1528 esiop_morecbd(sc);
1529 return;
1530
1531 case ADAPTER_REQ_SET_XFER_MODE:
1532 {
1533 struct scsipi_xfer_mode *xm = arg;
1534 if (sc->sc_c.targets[xm->xm_target] == NULL)
1535 return;
1536 s = splbio();
1537 if (xm->xm_mode & PERIPH_CAP_TQING)
1538 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1539 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1540 (sc->sc_c.features & SF_BUS_WIDE))
1541 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1542 if (xm->xm_mode & PERIPH_CAP_SYNC)
1543 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1544 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1545 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1546 sc->sc_c.targets[xm->xm_target]->status =
1547 TARST_ASYNC;
1548
1549 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1550 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1551 /* allocate a lun sw entry for this device */
1552 esiop_add_dev(sc, xm->xm_target, lun);
1553 }
1554
1555 splx(s);
1556 }
1557 }
1558 }
1559
/*
 * Queue a ready command into the SCRIPT scheduler: pick the current
 * scheduler slot, record the command in the per-lun software state and
 * the on-chip DSA tables, write the slot entry and kick the chip with
 * SIGP.  Called at splbio().
 */
static void
esiop_start(sc, esiop_cmd)
	struct esiop_softc *sc;
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_lun *esiop_lun;
	struct esiop_target *esiop_target;
	int timeout;
	int target, lun, slot;

	/*
	 * NOTE(review): 'nintr' is not declared anywhere in this file's
	 * visible scope; presumably a debug interrupt counter defined
	 * elsewhere — confirm it is still needed.
	 */
	nintr = 0;

	/*
	 * first make sure to read valid data
	 */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * We use a circular queue here. sc->sc_currschedslot points to a
	 * free slot, unless we have filled the queue. Check this.
	 */
	slot = sc->sc_currschedslot;
	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * 2) &
	    A_f_cmd_free) == 0) {
		/*
		 * no more free slot, no need to continue. freeze the queue
		 * and requeue this command.
		 */
		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
		sc->sc_flags |= SCF_CHAN_NOSLOT;
		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
		esiop_scsicmd_end(esiop_cmd);
		return;
	}
	/* OK, we can use this slot */

	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
	esiop_lun = esiop_target->esiop_lun[lun];
	/* if non-tagged command active, panic: this shouldn't happen */
	if (esiop_lun->active != NULL) {
		panic("esiop_start: tagged cmd while untagged running");
	}
#ifdef DIAGNOSTIC
	/* sanity check the tag if needed */
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
			panic("esiop_start: tag not free");
		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
		    esiop_cmd->cmd_c.tag < 0) {
			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
			panic("esiop_start: invalid tag id");
		}
	}
#endif
#ifdef SIOP_DEBUG_SCHED
	printf("using slot %d for DSA 0x%lx\n", slot,
	    (u_long)esiop_cmd->cmd_c.dsa);
#endif
	/* mark command as active */
	if (esiop_cmd->cmd_c.status == CMDST_READY)
		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
	else
		panic("esiop_start: bad status");
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
		/* DSA table for reselect */
		if ((esiop_lun->lun_flags & LUNF_TAGTABLE) == 0) {
			/* point the lun entry at the tag DSA table */
			esiop_script_write(sc,
			    esiop_target->lun_table_offset + lun + 2,
			    esiop_lun->lun_tagtbl->tbl_dsa);
			esiop_lun->lun_flags |= LUNF_TAGTABLE;
		}
		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
		    htole32(esiop_cmd->cmd_c.dsa);
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_lun->lun_tagtbl->tblblk->blkmap,
		    esiop_lun->lun_tagtbl->tbl_offset,
		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
	} else {
		esiop_lun->active = esiop_cmd;
		/* DSA table for reselect */
		esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
		    esiop_cmd->cmd_c.dsa);
		esiop_lun->lun_flags &= ~LUNF_TAGTABLE;

	}
	/*
	 * scheduler slot: ID, then DSA.  The DSA word is written last;
	 * it carries the free/ignore flags the SCRIPT tests.
	 */
	esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
	    sc->sc_c.targets[target]->id);
	esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
	    esiop_cmd->cmd_c.dsa);
	/* handle timeout */
	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
		if (timeout == 0)
			timeout = 1;
		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
		    timeout, esiop_timeout, esiop_cmd);
	}
	/* make sure SCRIPT processor will read valid data */
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Signal script it has some work to do */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_ISTAT, ISTAT_SIGP);
	/* update the current slot, and wait for IRQ */
	sc->sc_currschedslot++;
	if (sc->sc_currschedslot >= A_ncmd_slots)
		sc->sc_currschedslot = 0;
	return;
}
1675
1676 void
1677 esiop_timeout(v)
1678 void *v;
1679 {
1680 struct esiop_cmd *esiop_cmd = v;
1681 struct esiop_softc *sc =
1682 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1683 int s;
1684
1685 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1686 printf("command timeout\n");
1687
1688 s = splbio();
1689 /* reset the scsi bus */
1690 siop_resetbus(&sc->sc_c);
1691
1692 /* deactivate callout */
1693 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1694 /*
1695 * mark command has being timed out and just return;
1696 * the bus reset will generate an interrupt,
1697 * it will be handled in siop_intr()
1698 */
1699 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1700 splx(s);
1701 return;
1702
1703 }
1704
1705 void
1706 esiop_dump_script(sc)
1707 struct esiop_softc *sc;
1708 {
1709 int i;
1710 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1711 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1712 le32toh(sc->sc_c.sc_script[i]),
1713 le32toh(sc->sc_c.sc_script[i+1]));
1714 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1715 0xc0000000) {
1716 i++;
1717 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1718 }
1719 printf("\n");
1720 }
1721 }
1722
1723 void
1724 esiop_morecbd(sc)
1725 struct esiop_softc *sc;
1726 {
1727 int error, i, s;
1728 bus_dma_segment_t seg;
1729 int rseg;
1730 struct esiop_cbd *newcbd;
1731 struct esiop_xfer *xfer;
1732 bus_addr_t dsa;
1733
1734 /* allocate a new list head */
1735 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1736 if (newcbd == NULL) {
1737 printf("%s: can't allocate memory for command descriptors "
1738 "head\n", sc->sc_c.sc_dev.dv_xname);
1739 return;
1740 }
1741
1742 /* allocate cmd list */
1743 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1744 M_DEVBUF, M_NOWAIT|M_ZERO);
1745 if (newcbd->cmds == NULL) {
1746 printf("%s: can't allocate memory for command descriptors\n",
1747 sc->sc_c.sc_dev.dv_xname);
1748 goto bad3;
1749 }
1750 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1751 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1752 if (error) {
1753 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1754 sc->sc_c.sc_dev.dv_xname, error);
1755 goto bad2;
1756 }
1757 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1758 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1759 if (error) {
1760 printf("%s: unable to map cbd DMA memory, error = %d\n",
1761 sc->sc_c.sc_dev.dv_xname, error);
1762 goto bad2;
1763 }
1764 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1765 BUS_DMA_NOWAIT, &newcbd->xferdma);
1766 if (error) {
1767 printf("%s: unable to create cbd DMA map, error = %d\n",
1768 sc->sc_c.sc_dev.dv_xname, error);
1769 goto bad1;
1770 }
1771 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1772 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1773 if (error) {
1774 printf("%s: unable to load cbd DMA map, error = %d\n",
1775 sc->sc_c.sc_dev.dv_xname, error);
1776 goto bad0;
1777 }
1778 #ifdef DEBUG
1779 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1780 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1781 #endif
1782 for (i = 0; i < SIOP_NCMDPB; i++) {
1783 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1784 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1785 &newcbd->cmds[i].cmd_c.dmamap_data);
1786 if (error) {
1787 printf("%s: unable to create data DMA map for cbd: "
1788 "error %d\n",
1789 sc->sc_c.sc_dev.dv_xname, error);
1790 goto bad0;
1791 }
1792 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1793 sizeof(struct scsipi_generic), 1,
1794 sizeof(struct scsipi_generic), 0,
1795 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1796 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1797 if (error) {
1798 printf("%s: unable to create cmd DMA map for cbd %d\n",
1799 sc->sc_c.sc_dev.dv_xname, error);
1800 goto bad0;
1801 }
1802 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1803 newcbd->cmds[i].esiop_cbdp = newcbd;
1804 xfer = &newcbd->xfers[i];
1805 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1806 memset(newcbd->cmds[i].cmd_tables, 0,
1807 sizeof(struct esiop_xfer));
1808 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1809 i * sizeof(struct esiop_xfer);
1810 newcbd->cmds[i].cmd_c.dsa = dsa;
1811 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1812 xfer->siop_tables.t_msgout.count= htole32(1);
1813 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1814 xfer->siop_tables.t_msgin.count= htole32(1);
1815 xfer->siop_tables.t_msgin.addr = htole32(dsa + 8);
1816 xfer->siop_tables.t_extmsgin.count= htole32(2);
1817 xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1818 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 11);
1819 xfer->siop_tables.t_status.count= htole32(1);
1820 xfer->siop_tables.t_status.addr = htole32(dsa + 16);
1821
1822 s = splbio();
1823 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1824 splx(s);
1825 #ifdef SIOP_DEBUG
1826 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1827 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1828 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1829 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1830 #endif
1831 }
1832 s = splbio();
1833 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1834 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1835 splx(s);
1836 return;
1837 bad0:
1838 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1839 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1840 bad1:
1841 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1842 bad2:
1843 free(newcbd->cmds, M_DEVBUF);
1844 bad3:
1845 free(newcbd, M_DEVBUF);
1846 return;
1847 }
1848
1849 void
1850 esiop_moretagtbl(sc)
1851 struct esiop_softc *sc;
1852 {
1853 int error, i, j, s;
1854 bus_dma_segment_t seg;
1855 int rseg;
1856 struct esiop_dsatblblk *newtblblk;
1857 struct esiop_dsatbl *newtbls;
1858 u_int32_t *tbls;
1859
1860 /* allocate a new list head */
1861 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1862 M_DEVBUF, M_NOWAIT|M_ZERO);
1863 if (newtblblk == NULL) {
1864 printf("%s: can't allocate memory for tag DSA table block\n",
1865 sc->sc_c.sc_dev.dv_xname);
1866 return;
1867 }
1868
1869 /* allocate tbl list */
1870 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1871 M_DEVBUF, M_NOWAIT|M_ZERO);
1872 if (newtbls == NULL) {
1873 printf("%s: can't allocate memory for command descriptors\n",
1874 sc->sc_c.sc_dev.dv_xname);
1875 goto bad3;
1876 }
1877 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1878 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1879 if (error) {
1880 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1881 sc->sc_c.sc_dev.dv_xname, error);
1882 goto bad2;
1883 }
1884 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1885 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1886 if (error) {
1887 printf("%s: unable to map tbls DMA memory, error = %d\n",
1888 sc->sc_c.sc_dev.dv_xname, error);
1889 goto bad2;
1890 }
1891 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1892 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1893 if (error) {
1894 printf("%s: unable to create tbl DMA map, error = %d\n",
1895 sc->sc_c.sc_dev.dv_xname, error);
1896 goto bad1;
1897 }
1898 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1899 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1900 if (error) {
1901 printf("%s: unable to load tbl DMA map, error = %d\n",
1902 sc->sc_c.sc_dev.dv_xname, error);
1903 goto bad0;
1904 }
1905 #ifdef DEBUG
1906 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1907 sc->sc_c.sc_dev.dv_xname,
1908 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1909 #endif
1910 for (i = 0; i < ESIOP_NTPB; i++) {
1911 newtbls[i].tblblk = newtblblk;
1912 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1913 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1914 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1915 newtbls[i].tbl_offset;
1916 for (j = 0; j < ESIOP_NTAG; j++)
1917 newtbls[i].tbl[j] = j;
1918 s = splbio();
1919 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1920 splx(s);
1921 }
1922 s = splbio();
1923 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1924 splx(s);
1925 return;
1926 bad0:
1927 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1928 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1929 bad1:
1930 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1931 bad2:
1932 free(newtbls, M_DEVBUF);
1933 bad3:
1934 free(newtblblk, M_DEVBUF);
1935 return;
1936 }
1937
1938 void
1939 esiop_update_scntl3(sc, _siop_target)
1940 struct esiop_softc *sc;
1941 struct siop_common_target *_siop_target;
1942 {
1943 int slot;
1944 u_int32_t slotid, id;
1945
1946 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1947 esiop_script_write(sc, esiop_target->lun_table_offset,
1948 esiop_target->target_c.id);
1949 id = esiop_target->target_c.id & 0x00ff0000;
1950 /* There may be other commands waiting in the scheduler. handle them */
1951 for (slot = 0; slot < A_ncmd_slots; slot++) {
1952 slotid =
1953 esiop_script_read(sc, sc->sc_shedoffset + slot * 2 + 1);
1954 if ((slotid & 0x00ff0000) == id)
1955 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1956 esiop_target->target_c.id);
1957 }
1958 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1959 }
1960
1961 void
1962 esiop_add_dev(sc, target, lun)
1963 struct esiop_softc *sc;
1964 int target;
1965 int lun;
1966 {
1967 struct esiop_target *esiop_target =
1968 (struct esiop_target *)sc->sc_c.targets[target];
1969 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1970
1971 if (esiop_target->target_c.flags & TARF_TAG) {
1972 /* we need a tag DSA table */
1973 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1974 if (esiop_lun->lun_tagtbl == NULL) {
1975 esiop_moretagtbl(sc);
1976 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1977 if (esiop_lun->lun_tagtbl == NULL) {
1978 /* no resources, run untagged */
1979 esiop_target->target_c.flags &= ~TARF_TAG;
1980 return;
1981 }
1982 }
1983 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1984
1985 }
1986 }
1987
1988 void
1989 esiop_del_dev(sc, target, lun)
1990 struct esiop_softc *sc;
1991 int target;
1992 int lun;
1993 {
1994 struct esiop_target *esiop_target;
1995 #ifdef SIOP_DEBUG
1996 printf("%s:%d:%d: free lun sw entry\n",
1997 sc->sc_c.sc_dev.dv_xname, target, lun);
1998 #endif
1999 if (sc->sc_c.targets[target] == NULL)
2000 return;
2001 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2002 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2003 esiop_target->esiop_lun[lun] = NULL;
2004 }
2005
2006 struct esiop_cmd *
2007 esiop_cmd_find(sc, target, dsa)
2008 struct esiop_softc *sc;
2009 int target;
2010 u_int32_t dsa;
2011 {
2012 int lun, tag;
2013 struct esiop_cmd *cmd;
2014 struct esiop_lun *esiop_lun;
2015 struct esiop_target *esiop_target =
2016 (struct esiop_target *)sc->sc_c.targets[target];
2017
2018 if (esiop_target == NULL)
2019 return NULL;
2020
2021 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2022 esiop_lun = esiop_target->esiop_lun[lun];
2023 if (esiop_lun == NULL)
2024 continue;
2025 cmd = esiop_lun->active;
2026 if (cmd && cmd->cmd_c.dsa == dsa)
2027 return cmd;
2028 if (esiop_target->target_c.flags & TARF_TAG) {
2029 for (tag = 0; tag < ESIOP_NTAG; tag++) {
2030 cmd = esiop_lun->tactive[tag];
2031 if (cmd && cmd->cmd_c.dsa == dsa)
2032 return cmd;
2033 }
2034 }
2035 }
2036 return NULL;
2037 }
2038
2039 void
2040 esiop_target_register(sc, target)
2041 struct esiop_softc *sc;
2042 u_int32_t target;
2043 {
2044 struct esiop_target *esiop_target =
2045 (struct esiop_target *)sc->sc_c.targets[target];
2046
2047 /* get a DSA table for this target */
2048 esiop_target->lun_table_offset = sc->sc_free_offset;
2049 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
2050 #ifdef SIOP_DEBUG
2051 printf("%s: lun table for target %d offset %d free offset %d\n",
2052 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2053 sc->sc_free_offset);
2054 #endif
2055 /* first 32 bytes are ID (for select) */
2056 esiop_script_write(sc, esiop_target->lun_table_offset,
2057 esiop_target->target_c.id);
2058 /* Record this table in the target DSA table */
2059 esiop_script_write(sc,
2060 sc->sc_target_table_offset + target,
2061 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2062 sc->sc_c.sc_scriptaddr);
2063 esiop_script_sync(sc,
2064 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2065 }
2066
2067 #ifdef SIOP_STATS
2068 void
2069 esiop_printstats()
2070 {
2071 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2072 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2073 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2074 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2075 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2076 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2077 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2078 }
2079 #endif
2080