esiop.c revision 1.5 1 /* $NetBSD: esiop.c,v 1.5 2002/04/23 12:55:27 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.5 2002/04/23 12:55:27 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #define DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_update_scntl3 __P((struct esiop_softc *,
98 struct siop_common_target *));
99 struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
100 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
101
102 static int nintr = 0;
103
104 #ifdef SIOP_STATS
105 static int esiop_stat_intr = 0;
106 static int esiop_stat_intr_shortxfer = 0;
107 static int esiop_stat_intr_sdp = 0;
108 static int esiop_stat_intr_done = 0;
109 static int esiop_stat_intr_xferdisc = 0;
110 static int esiop_stat_intr_lunresel = 0;
111 static int esiop_stat_intr_qfull = 0;
112 void esiop_printstats __P((void));
113 #define INCSTAT(x) x++
114 #else
115 #define INCSTAT(x)
116 #endif
117
118 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
119 static __inline__ void
120 esiop_script_sync(sc, ops)
121 struct esiop_softc *sc;
122 int ops;
123 {
124 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
125 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
126 PAGE_SIZE, ops);
127 }
128
129 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
130 static __inline__ u_int32_t
131 esiop_script_read(sc, offset)
132 struct esiop_softc *sc;
133 u_int offset;
134 {
135 if (sc->sc_c.features & SF_CHIP_RAM) {
136 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
137 offset * 4);
138 } else {
139 return le32toh(sc->sc_c.sc_script[offset]);
140 }
141 }
142
143 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
144 u_int32_t));
145 static __inline__ void
146 esiop_script_write(sc, offset, val)
147 struct esiop_softc *sc;
148 u_int offset;
149 u_int32_t val;
150 {
151 if (sc->sc_c.features & SF_CHIP_RAM) {
152 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
153 offset * 4, val);
154 } else {
155 sc->sc_c.sc_script[offset] = htole32(val);
156 }
157 }
158
159 void
160 esiop_attach(sc)
161 struct esiop_softc *sc;
162 {
163 int error, i;
164 bus_dma_segment_t seg;
165 int rseg;
166
167 /*
168 * Allocate DMA-safe memory for the script and map it.
169 */
170 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
171 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
172 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
173 if (error) {
174 printf("%s: unable to allocate script DMA memory, "
175 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
176 return;
177 }
178 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
179 (caddr_t *)&sc->sc_c.sc_script,
180 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
181 if (error) {
182 printf("%s: unable to map script DMA memory, "
183 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
184 return;
185 }
186 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
187 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
188 if (error) {
189 printf("%s: unable to create script DMA map, "
190 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
191 return;
192 }
193 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
194 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
195 if (error) {
196 printf("%s: unable to load script DMA map, "
197 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
198 return;
199 }
200 sc->sc_c.sc_scriptaddr =
201 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
202 sc->sc_c.ram_size = PAGE_SIZE;
203 }
204 TAILQ_INIT(&sc->free_list);
205 TAILQ_INIT(&sc->cmds);
206 TAILQ_INIT(&sc->free_tagtbl);
207 TAILQ_INIT(&sc->tag_tblblk);
208 sc->sc_currschedslot = 0;
209 #ifdef SIOP_DEBUG
210 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
211 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
212 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
213 #endif
214
215 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
216 sc->sc_c.sc_adapt.adapt_nchannels = 1;
217 sc->sc_c.sc_adapt.adapt_openings = 0;
218 sc->sc_c.sc_adapt.adapt_max_periph = 1 /* XXX ESIOP_NTAG - 1 */ ;
219 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
220 sc->sc_c.sc_adapt.adapt_minphys = minphys;
221 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
222
223 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
224 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
225 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
226 sc->sc_c.sc_chan.chan_channel = 0;
227 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
228 sc->sc_c.sc_chan.chan_ntargets =
229 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
230 sc->sc_c.sc_chan.chan_nluns = 8;
231 sc->sc_c.sc_chan.chan_id =
232 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
233 if (sc->sc_c.sc_chan.chan_id == 0 ||
234 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
235 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
236
237 for (i = 0; i < 16; i++)
238 sc->sc_c.targets[i] = NULL;
239
240 /* find min/max sync period for this chip */
241 sc->sc_c.maxsync = 0;
242 sc->sc_c.minsync = 255;
243 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
244 if (sc->sc_c.clock_period != scf_period[i].clock)
245 continue;
246 if (sc->sc_c.maxsync < scf_period[i].period)
247 sc->sc_c.maxsync = scf_period[i].period;
248 if (sc->sc_c.minsync > scf_period[i].period)
249 sc->sc_c.minsync = scf_period[i].period;
250 }
251 if (sc->sc_c.maxsync == 255 || sc->sc_c.minsync == 0)
252 panic("siop: can't find my sync parameters\n");
253 /* Do a bus reset, so that devices fall back to narrow/async */
254 siop_resetbus(&sc->sc_c);
255 /*
256 * siop_reset() will reset the chip, thus clearing pending interrupts
257 */
258 esiop_reset(sc);
259 #ifdef DUMP_SCRIPT
260 esiop_dump_script(sc);
261 #endif
262
263 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
264 }
265
/*
 * Reset the chip and rebuild the whole script area from scratch:
 * copy and patch the microcode, lay out the msg-in buffer, the
 * command scheduler ring and the per-target DSA table behind it,
 * then restart the script at the reselect entry point.
 */
void
esiop_reset(sc)
	struct esiop_softc *sc;
{
	int i, j;
	u_int32_t addr;
	u_int32_t msgin_addr;

	siop_common_reset(&sc->sc_c);

	/*
	 * we copy the script at the beginning of RAM. Then there are 8 bytes
	 * (2 script words) for messages in.
	 */
	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
	msgin_addr =
	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 2;
	/* then we have the scheduler ring, two words per command slot */
	sc->sc_shedoffset = sc->sc_free_offset;
	sc->sc_free_offset += A_ncmd_slots * 2;
	/* then the targets DSA table, one word per target */
	sc->sc_target_table_offset = sc->sc_free_offset;
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
	/*
	 * copy and patch the script.  Both branches apply the same two
	 * patches: the per-command xfer table size (tlq_offset) and the
	 * physical address of the msg-in buffer (abs_msgin2); only the
	 * destination (on-chip RAM vs host memory) differs.
	 */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    esiop_script,
		    sizeof(esiop_script) / sizeof(esiop_script[0]));
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_tlq_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer));
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin2_Used[j] * 4, msgin_addr);
		}

		/* overlay the LED on/off snippets for chips with a LED */
		if (sc->sc_c.features & SF_CHIP_LED0) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, esiop_led_off,
			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
		}
	} else {
		/* host-memory copy: store little-endian for the chip */
		for (j = 0;
		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
		}
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer));
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
			    htole32(msgin_addr);
		}

		/* overlay the LED on/off snippets for chips with a LED */
		if (sc->sc_c.features & SF_CHIP_LED0) {
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_off) /
			    sizeof(esiop_led_off[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(esiop_led_off[0]) + j
				    ] = htole32(esiop_led_off[j]);
		}
	}
	/* get base of scheduler ring */
	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
	/* init scheduler: mark every slot free */
	for (i = 0; i < A_ncmd_slots; i++) {
		esiop_script_write(sc, sc->sc_shedoffset + i * 2, A_f_cmd_free);
		esiop_script_write(sc, sc->sc_shedoffset + i * 2 + 1, 0);
	}
	sc->sc_currschedslot = 0;
	/* SCRATCHE holds the current slot index, SCRATCHD its bus address */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
	/*
	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
	 * octet, reg offset is the third.  Patch the ring base address
	 * into the script one byte per instruction (cmdr0..cmdr3).
	 */
	esiop_script_write(sc, Ent_cmdr0 / 4,
	    0x78640000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_cmdr1 / 4,
	    0x78650000 | ((addr & 0x0000ff00)      ));
	esiop_script_write(sc, Ent_cmdr2 / 4,
	    0x78660000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_cmdr3 / 4,
	    0x78670000 | ((addr & 0xff000000) >> 16));
	/* set flags */
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
	/*
	 * write pointer of base of target DSA table, again one byte per
	 * 'move data8 to reg' instruction (OR-ed into the existing opcodes)
	 */
	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr;
	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
	    ((addr & 0x000000ff) <<  8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
	    ((addr & 0x0000ff00)      ));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
	    ((addr & 0x00ff0000) >>  8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
	    ((addr & 0xff000000) >> 16));
#ifdef SIOP_DEBUG
	printf("%s: target table offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
	    sc->sc_free_offset);
#endif

	/* register existing targets */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		if (sc->sc_c.targets[i])
			esiop_target_register(sc, i);
	}
	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}
417
418 #if 0
419 #define CALL_SCRIPT(ent) do {\
420 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
421 esiop_cmd->cmd_c.dsa, \
422 sc->sc_c.sc_scriptaddr + ent); \
423 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
424 } while (0)
425 #else
426 #define CALL_SCRIPT(ent) do {\
427 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
428 } while (0)
429 #endif
430
431 int
432 esiop_intr(v)
433 void *v;
434 {
435 struct esiop_softc *sc = v;
436 struct esiop_target *esiop_target;
437 struct esiop_cmd *esiop_cmd;
438 struct esiop_lun *esiop_lun;
439 struct scsipi_xfer *xs;
440 int istat, sist, sstat1, dstat;
441 u_int32_t irqcode;
442 int need_reset = 0;
443 int offset, target, lun, tag;
444 u_int32_t tflags;
445 u_int32_t addr;
446 int freetarget = 0;
447 int restart = 0;
448 int slot;
449 int retval = 0;
450
451 again:
452 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
453 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
454 if (istat & ISTAT_SEM) {
455 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
456 SIOP_ISTAT, (istat & ~ISTAT_SEM));
457 esiop_checkdone(sc);
458 }
459 return retval;
460 }
461 retval = 1;
462 nintr++;
463 if (nintr > 100) {
464 panic("esiop: intr loop");
465 }
466 INCSTAT(esiop_stat_intr);
467 if (istat & ISTAT_INTF) {
468 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
469 SIOP_ISTAT, ISTAT_INTF);
470 esiop_checkdone(sc);
471 goto again;
472 }
473 /* get CMD from T/L/Q */
474 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
475 SIOP_SCRATCHC);
476 #ifdef SIOP_DEBUG_INTR
477 printf("interrupt, istat=0x%x tflags=0x%x "
478 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
479 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
480 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
481 SIOP_DSP) -
482 sc->sc_c.sc_scriptaddr));
483 #endif
484 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
485 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
486 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
487 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
488 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
489
490 if (target >= 0 && lun >= 0) {
491 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
492 if (esiop_target == NULL) {
493 printf("esiop_target (target %d) not valid\n", target);
494 goto none;
495 }
496 esiop_lun = esiop_target->esiop_lun[lun];
497 if (esiop_lun == NULL) {
498 printf("esiop_lun (target %d lun %d) not valid\n",
499 target, lun);
500 goto none;
501 }
502 esiop_cmd =
503 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
504 if (esiop_cmd == NULL) {
505 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
506 target, lun, tag);
507 goto none;
508 }
509 xs = esiop_cmd->cmd_c.xs;
510 #ifdef DIAGNOSTIC
511 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
512 printf("esiop_cmd (target %d lun %d) "
513 "not active (%d)\n", target, lun,
514 esiop_cmd->cmd_c.status);
515 goto none;
516 }
517 #endif
518 esiop_table_sync(esiop_cmd,
519 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
520 } else {
521 none:
522 xs = NULL;
523 esiop_target = NULL;
524 esiop_lun = NULL;
525 esiop_cmd = NULL;
526 }
527 if (istat & ISTAT_DIP) {
528 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
529 SIOP_DSTAT);
530 if (dstat & DSTAT_SSI) {
531 printf("single step dsp 0x%08x dsa 0x08%x\n",
532 (int)(bus_space_read_4(sc->sc_c.sc_rt,
533 sc->sc_c.sc_rh, SIOP_DSP) -
534 sc->sc_c.sc_scriptaddr),
535 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
536 SIOP_DSA));
537 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
538 (istat & ISTAT_SIP) == 0) {
539 bus_space_write_1(sc->sc_c.sc_rt,
540 sc->sc_c.sc_rh, SIOP_DCNTL,
541 bus_space_read_1(sc->sc_c.sc_rt,
542 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
543 }
544 return 1;
545 }
546 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
547 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
548 if (dstat & DSTAT_IID)
549 printf(" Illegal instruction");
550 if (dstat & DSTAT_ABRT)
551 printf(" abort");
552 if (dstat & DSTAT_BF)
553 printf(" bus fault");
554 if (dstat & DSTAT_MDPE)
555 printf(" parity");
556 if (dstat & DSTAT_DFE)
557 printf(" dma fifo empty");
558 printf(", DSP=0x%x DSA=0x%x: ",
559 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
560 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
561 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
562 if (esiop_cmd)
563 printf("last msg_in=0x%x status=0x%x\n",
564 esiop_cmd->cmd_tables->msg_in[0],
565 le32toh(esiop_cmd->cmd_tables->status));
566 else
567 printf(" current T/L/Q invalid\n");
568 need_reset = 1;
569 }
570 }
571 if (istat & ISTAT_SIP) {
572 if (istat & ISTAT_DIP)
573 delay(10);
		/*
		 * Can't read sist0 & sist1 independently, or we have to
		 * insert a delay between the two reads.
		 */
578 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
579 SIOP_SIST0);
580 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
581 SIOP_SSTAT1);
582 #ifdef SIOP_DEBUG_INTR
583 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
584 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
585 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
586 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
587 SIOP_DSP) -
588 sc->sc_c.sc_scriptaddr));
589 #endif
590 if (sist & SIST0_RST) {
591 esiop_handle_reset(sc);
592 /* no table to flush here */
593 return 1;
594 }
595 if (sist & SIST0_SGE) {
596 if (esiop_cmd)
597 scsipi_printaddr(xs->xs_periph);
598 else
599 printf("%s:", sc->sc_c.sc_dev.dv_xname);
600 printf("scsi gross error\n");
601 goto reset;
602 }
603 if ((sist & SIST0_MA) && need_reset == 0) {
604 if (esiop_cmd) {
605 int scratchc0;
606 dstat = bus_space_read_1(sc->sc_c.sc_rt,
607 sc->sc_c.sc_rh, SIOP_DSTAT);
608 /*
609 * first restore DSA, in case we were in a S/G
610 * operation.
611 */
612 bus_space_write_4(sc->sc_c.sc_rt,
613 sc->sc_c.sc_rh,
614 SIOP_DSA, esiop_cmd->cmd_c.dsa);
615 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
616 sc->sc_c.sc_rh, SIOP_SCRATCHC);
617 switch (sstat1 & SSTAT1_PHASE_MASK) {
618 case SSTAT1_PHASE_STATUS:
619 /*
620 * previous phase may be aborted for any reason
621 * ( for example, the target has less data to
622 * transfer than requested). Just go to status
623 * and the command should terminate.
624 */
625 INCSTAT(esiop_stat_intr_shortxfer);
626 if ((dstat & DSTAT_DFE) == 0)
627 siop_clearfifo(&sc->sc_c);
628 /* no table to flush here */
629 CALL_SCRIPT(Ent_status);
630 return 1;
631 case SSTAT1_PHASE_MSGIN:
632 /*
633 * target may be ready to disconnect
634 * Save data pointers just in case.
635 */
636 INCSTAT(esiop_stat_intr_xferdisc);
637 if (scratchc0 & A_f_c_data)
638 siop_sdp(&esiop_cmd->cmd_c);
639 else if ((dstat & DSTAT_DFE) == 0)
640 siop_clearfifo(&sc->sc_c);
641 bus_space_write_1(sc->sc_c.sc_rt,
642 sc->sc_c.sc_rh, SIOP_SCRATCHC,
643 scratchc0 & ~A_f_c_data);
644 esiop_table_sync(esiop_cmd,
645 BUS_DMASYNC_PREREAD |
646 BUS_DMASYNC_PREWRITE);
647 CALL_SCRIPT(Ent_msgin);
648 return 1;
649 }
650 printf("%s: unexpected phase mismatch %d\n",
651 sc->sc_c.sc_dev.dv_xname,
652 sstat1 & SSTAT1_PHASE_MASK);
653 } else {
654 printf("%s: phase mismatch without command\n",
655 sc->sc_c.sc_dev.dv_xname);
656 }
657 need_reset = 1;
658 }
659 if (sist & SIST0_PAR) {
660 /* parity error, reset */
661 if (esiop_cmd)
662 scsipi_printaddr(xs->xs_periph);
663 else
664 printf("%s:", sc->sc_c.sc_dev.dv_xname);
665 printf("parity error\n");
666 goto reset;
667 }
668 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
669 /* selection time out, assume there's no device here */
670 /*
671 * SCRATCHC has not been loaded yet, we have to find
672 * params by ourselve. scratchE0 should point to
673 * the slot.
674 */
675 slot = bus_space_read_1(sc->sc_c.sc_rt,
676 sc->sc_c.sc_rh, SIOP_SCRATCHE);
677 esiop_script_sync(sc,
678 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
679 target = esiop_script_read(sc,
680 sc->sc_shedoffset + slot * 2 + 1) & 0x00ff0000;
681 target = (target >> 16) & 0xff;
682 esiop_cmd = esiop_cmd_find(sc, target,
683 esiop_script_read(sc,
684 sc->sc_shedoffset + slot * 2) & ~0x3);
685 /*
686 * mark this slot as free, and advance to next slot
687 */
688 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
689 A_f_cmd_free);
690 addr = bus_space_read_4(sc->sc_c.sc_rt,
691 sc->sc_c.sc_rh, SIOP_SCRATCHD);
692 if (slot < (A_ncmd_slots - 1)) {
693 bus_space_write_1(sc->sc_c.sc_rt,
694 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
695 addr = addr + 8;
696 } else {
697 bus_space_write_1(sc->sc_c.sc_rt,
698 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
699 addr = sc->sc_c.sc_scriptaddr +
700 sc->sc_shedoffset * sizeof(u_int32_t);
701 }
702 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
703 SIOP_SCRATCHD, addr);
704 esiop_script_sync(sc,
705 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
706 if (esiop_cmd) {
707 xs = esiop_cmd->cmd_c.xs;
708 esiop_target = (struct esiop_target *)
709 esiop_cmd->cmd_c.siop_target;
710 lun = xs->xs_periph->periph_lun;
711 tag = esiop_cmd->cmd_c.tag;
712 esiop_lun = esiop_target->esiop_lun[lun];
713 esiop_cmd->cmd_c.status = CMDST_DONE;
714 xs->error = XS_SELTIMEOUT;
715 freetarget = 1;
716 goto end;
717 } else {
718 printf("%s: selection timeout without "
719 "command, target %d (sdid 0x%x), "
720 "slot %d\n",
721 sc->sc_c.sc_dev.dv_xname, target,
722 bus_space_read_1(sc->sc_c.sc_rt,
723 sc->sc_c.sc_rh, SIOP_SDID), slot);
724 need_reset = 1;
725 }
726 }
727 if (sist & SIST0_UDC) {
728 /*
729 * unexpected disconnect. Usually the target signals
730 * a fatal condition this way. Attempt to get sense.
731 */
732 if (esiop_cmd) {
733 esiop_cmd->cmd_tables->status =
734 htole32(SCSI_CHECK);
735 goto end;
736 }
737 printf("%s: unexpected disconnect without "
738 "command\n", sc->sc_c.sc_dev.dv_xname);
739 goto reset;
740 }
741 if (sist & (SIST1_SBMC << 8)) {
742 /* SCSI bus mode change */
743 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
744 goto reset;
745 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
746 /*
747 * we have a script interrupt, it will
748 * restart the script.
749 */
750 goto scintr;
751 }
752 /*
753 * else we have to restart it ourselve, at the
754 * interrupted instruction.
755 */
756 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
757 SIOP_DSP,
758 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
759 SIOP_DSP) - 8);
760 return 1;
761 }
		/* Else it's an unhandled exception (for now). */
763 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
764 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
765 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
766 SIOP_SSTAT1),
767 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
768 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
769 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
770 if (esiop_cmd) {
771 esiop_cmd->cmd_c.status = CMDST_DONE;
772 xs->error = XS_SELTIMEOUT;
773 goto end;
774 }
775 need_reset = 1;
776 }
777 if (need_reset) {
778 reset:
779 /* fatal error, reset the bus */
780 siop_resetbus(&sc->sc_c);
781 /* no table to flush here */
782 return 1;
783 }
784
785 scintr:
786 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
787 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
788 SIOP_DSPS);
789 #ifdef SIOP_DEBUG_INTR
790 printf("script interrupt 0x%x\n", irqcode);
791 #endif
792 /*
793 * no command, or an inactive command is only valid for a
794 * reselect interrupt
795 */
796 if ((irqcode & 0x80) == 0) {
797 if (esiop_cmd == NULL) {
798 printf(
799 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
800 sc->sc_c.sc_dev.dv_xname, irqcode);
801 goto reset;
802 }
803 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
804 printf("%s: command with invalid status "
805 "(IRQ code 0x%x current status %d) !\n",
806 sc->sc_c.sc_dev.dv_xname,
807 irqcode, esiop_cmd->cmd_c.status);
808 xs = NULL;
809 }
810 }
811 switch(irqcode) {
812 case A_int_err:
813 printf("error, DSP=0x%x\n",
814 (int)(bus_space_read_4(sc->sc_c.sc_rt,
815 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
816 if (xs) {
817 xs->error = XS_SELTIMEOUT;
818 goto end;
819 } else {
820 goto reset;
821 }
822 case A_int_msgin:
823 {
824 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
825 sc->sc_c.sc_rh, SIOP_SFBR);
826 if (msgin == MSG_MESSAGE_REJECT) {
827 int msg, extmsg;
828 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
829 /*
830 * message was part of a identify +
831 * something else. Identify shoudl't
832 * have been rejected.
833 */
834 msg =
835 esiop_cmd->cmd_tables->msg_out[1];
836 extmsg =
837 esiop_cmd->cmd_tables->msg_out[3];
838 } else {
839 msg =
840 esiop_cmd->cmd_tables->msg_out[0];
841 extmsg =
842 esiop_cmd->cmd_tables->msg_out[2];
843 }
844 if (msg == MSG_MESSAGE_REJECT) {
845 /* MSG_REJECT for a MSG_REJECT !*/
846 if (xs)
847 scsipi_printaddr(xs->xs_periph);
848 else
849 printf("%s: ",
850 sc->sc_c.sc_dev.dv_xname);
851 printf("our reject message was "
852 "rejected\n");
853 goto reset;
854 }
855 if (msg == MSG_EXTENDED &&
856 extmsg == MSG_EXT_WDTR) {
857 /* WDTR rejected, initiate sync */
858 if ((esiop_target->target_c.flags &
859 TARF_SYNC) == 0) {
860 esiop_target->target_c.status =
861 TARST_OK;
862 siop_update_xfer_mode(&sc->sc_c,
863 target);
864 /* no table to flush here */
865 CALL_SCRIPT(Ent_msgin_ack);
866 return 1;
867 }
868 esiop_target->target_c.status =
869 TARST_SYNC_NEG;
870 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
871 sc->sc_c.minsync, sc->sc_c.maxoff);
872 esiop_table_sync(esiop_cmd,
873 BUS_DMASYNC_PREREAD |
874 BUS_DMASYNC_PREWRITE);
875 CALL_SCRIPT(Ent_send_msgout);
876 return 1;
877 } else if (msg == MSG_EXTENDED &&
878 extmsg == MSG_EXT_SDTR) {
879 /* sync rejected */
880 esiop_target->target_c.offset = 0;
881 esiop_target->target_c.period = 0;
882 esiop_target->target_c.status =
883 TARST_OK;
884 siop_update_xfer_mode(&sc->sc_c,
885 target);
886 /* no table to flush here */
887 CALL_SCRIPT(Ent_msgin_ack);
888 return 1;
889 } else if (msg == MSG_SIMPLE_Q_TAG ||
890 msg == MSG_HEAD_OF_Q_TAG ||
891 msg == MSG_ORDERED_Q_TAG) {
892 if (esiop_handle_qtag_reject(
893 esiop_cmd) == -1)
894 goto reset;
895 CALL_SCRIPT(Ent_msgin_ack);
896 return 1;
897 }
898 if (xs)
899 scsipi_printaddr(xs->xs_periph);
900 else
901 printf("%s: ",
902 sc->sc_c.sc_dev.dv_xname);
903 if (msg == MSG_EXTENDED) {
904 printf("scsi message reject, extended "
905 "message sent was 0x%x\n", extmsg);
906 } else {
907 printf("scsi message reject, message "
908 "sent was 0x%x\n", msg);
909 }
910 /* no table to flush here */
911 CALL_SCRIPT(Ent_msgin_ack);
912 return 1;
913 }
914 if (xs)
915 scsipi_printaddr(xs->xs_periph);
916 else
917 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
918 printf("unhandled message 0x%x\n",
919 esiop_cmd->cmd_tables->msg_in[0]);
920 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
921 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
922 esiop_table_sync(esiop_cmd,
923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
924 CALL_SCRIPT(Ent_send_msgout);
925 return 1;
926 }
927 case A_int_extmsgin:
928 #ifdef SIOP_DEBUG_INTR
929 printf("extended message: msg 0x%x len %d\n",
930 esiop_cmd->cmd_tables->msg_in[2],
931 esiop_cmd->cmd_tables->msg_in[1]);
932 #endif
933 if (esiop_cmd->cmd_tables->msg_in[1] >
934 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
935 printf("%s: extended message too big (%d)\n",
936 sc->sc_c.sc_dev.dv_xname,
937 esiop_cmd->cmd_tables->msg_in[1]);
938 esiop_cmd->cmd_tables->t_extmsgdata.count =
939 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
940 esiop_table_sync(esiop_cmd,
941 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
942 CALL_SCRIPT(Ent_get_extmsgdata);
943 return 1;
944 case A_int_extmsgdata:
945 #ifdef SIOP_DEBUG_INTR
946 {
947 int i;
948 printf("extended message: 0x%x, data:",
949 esiop_cmd->cmd_tables->msg_in[2]);
950 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
951 i++)
952 printf(" 0x%x",
953 esiop_cmd->cmd_tables->msg_in[i]);
954 printf("\n");
955 }
956 #endif
957 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
958 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
959 case SIOP_NEG_MSGOUT:
960 esiop_update_scntl3(sc,
961 esiop_cmd->cmd_c.siop_target);
962 esiop_table_sync(esiop_cmd,
963 BUS_DMASYNC_PREREAD |
964 BUS_DMASYNC_PREWRITE);
965 CALL_SCRIPT(Ent_send_msgout);
966 return(1);
967 case SIOP_NEG_ACK:
968 esiop_update_scntl3(sc,
969 esiop_cmd->cmd_c.siop_target);
970 CALL_SCRIPT(Ent_msgin_ack);
971 return(1);
972 default:
973 panic("invalid retval from "
974 "siop_wdtr_neg()");
975 }
976 return(1);
977 }
978 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
979 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
980 case SIOP_NEG_MSGOUT:
981 esiop_update_scntl3(sc,
982 esiop_cmd->cmd_c.siop_target);
983 esiop_table_sync(esiop_cmd,
984 BUS_DMASYNC_PREREAD |
985 BUS_DMASYNC_PREWRITE);
986 CALL_SCRIPT(Ent_send_msgout);
987 return(1);
988 case SIOP_NEG_ACK:
989 esiop_update_scntl3(sc,
990 esiop_cmd->cmd_c.siop_target);
991 CALL_SCRIPT(Ent_msgin_ack);
992 return(1);
993 default:
994 panic("invalid retval from "
995 "siop_wdtr_neg()");
996 }
997 return(1);
998 }
999 /* send a message reject */
1000 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1001 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1002 esiop_table_sync(esiop_cmd,
1003 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1004 CALL_SCRIPT(Ent_send_msgout);
1005 return 1;
1006 case A_int_disc:
1007 INCSTAT(esiop_stat_intr_sdp);
1008 offset = bus_space_read_1(sc->sc_c.sc_rt,
1009 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1010 #ifdef SIOP_DEBUG_DR
1011 printf("disconnect offset %d\n", offset);
1012 #endif
1013 if (offset > SIOP_NSG) {
1014 printf("%s: bad offset for disconnect (%d)\n",
1015 sc->sc_c.sc_dev.dv_xname, offset);
1016 goto reset;
1017 }
1018 /*
1019 * offset == SIOP_NSG may be a valid condition if
1020 * we get a sdp when the xfer is done.
1021 * Don't call memmove in this case.
1022 */
1023 if (offset < SIOP_NSG) {
1024 memmove(&esiop_cmd->cmd_tables->data[0],
1025 &esiop_cmd->cmd_tables->data[offset],
1026 (SIOP_NSG - offset) * sizeof(scr_table_t));
1027 esiop_table_sync(esiop_cmd,
1028 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1029 }
1030 CALL_SCRIPT(Ent_script_sched);
1031 return 1;
1032 case A_int_resfail:
1033 printf("reselect failed\n");
1034 CALL_SCRIPT(Ent_script_sched);
1035 return 1;
1036 case A_int_done:
1037 if (xs == NULL) {
1038 printf("%s: done without command\n",
1039 sc->sc_c.sc_dev.dv_xname);
1040 CALL_SCRIPT(Ent_script_sched);
1041 return 1;
1042 }
1043 #ifdef SIOP_DEBUG_INTR
1044 printf("done, DSA=0x%lx target id 0x%x last msg "
1045 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1046 le32toh(esiop_cmd->cmd_tables->id),
1047 esiop_cmd->cmd_tables->msg_in[0],
1048 le32toh(esiop_cmd->cmd_tables->status));
1049 #endif
1050 INCSTAT(esiop_stat_intr_done);
1051 esiop_cmd->cmd_c.status = CMDST_DONE;
1052 goto end;
1053 default:
1054 printf("unknown irqcode %x\n", irqcode);
1055 if (xs) {
1056 xs->error = XS_SELTIMEOUT;
1057 goto end;
1058 }
1059 goto reset;
1060 }
1061 return 1;
1062 }
1063 /* We just should't get there */
1064 panic("siop_intr: I shouldn't be there !");
1065
1066 end:
1067 /*
1068 * restart the script now if command completed properly
1069 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
1070 * queue
1071 */
1072 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1073 #ifdef SIOP_DEBUG_INTR
1074 printf("esiop_intr end: status %d\n", xs->status);
1075 #endif
1076 if (xs->status == SCSI_OK)
1077 CALL_SCRIPT(Ent_script_sched);
1078 else
1079 restart = 1;
1080 if (tag >= 0)
1081 esiop_lun->tactive[tag] = NULL;
1082 else
1083 esiop_lun->active = NULL;
1084 esiop_scsicmd_end(esiop_cmd);
1085 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1086 esiop_del_dev(sc, target, lun);
1087 if (restart)
1088 CALL_SCRIPT(Ent_script_sched);
1089 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1090 /* a command terminated, so we have free slots now */
1091 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1092 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1093 }
1094
1095 return retval;
1096 }
1097
1098 void
1099 esiop_scsicmd_end(esiop_cmd)
1100 struct esiop_cmd *esiop_cmd;
1101 {
1102 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1103 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1104
1105 switch(xs->status) {
1106 case SCSI_OK:
1107 xs->error = XS_NOERROR;
1108 break;
1109 case SCSI_BUSY:
1110 xs->error = XS_BUSY;
1111 break;
1112 case SCSI_CHECK:
1113 xs->error = XS_BUSY;
1114 /* remove commands in the queue and scheduler */
1115 esiop_unqueue(sc, xs->xs_periph->periph_target,
1116 xs->xs_periph->periph_lun);
1117 break;
1118 case SCSI_QUEUE_FULL:
1119 INCSTAT(esiop_stat_intr_qfull);
1120 #ifdef SIOP_DEBUG
1121 printf("%s:%d:%d: queue full (tag %d)\n",
1122 sc->sc_c.sc_dev.dv_xname,
1123 xs->xs_periph->periph_target,
1124 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1125 #endif
1126 xs->error = XS_BUSY;
1127 break;
1128 case SCSI_SIOP_NOCHECK:
1129 /*
1130 * don't check status, xs->error is already valid
1131 */
1132 break;
1133 case SCSI_SIOP_NOSTATUS:
1134 /*
1135 * the status byte was not updated, cmd was
1136 * aborted
1137 */
1138 xs->error = XS_SELTIMEOUT;
1139 break;
1140 default:
1141 xs->error = XS_DRIVER_STUFFUP;
1142 }
1143 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1144 bus_dmamap_sync(sc->sc_c.sc_dmat,
1145 esiop_cmd->cmd_c.dmamap_data, 0,
1146 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1147 (xs->xs_control & XS_CTL_DATA_IN) ?
1148 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1149 bus_dmamap_unload(sc->sc_c.sc_dmat,
1150 esiop_cmd->cmd_c.dmamap_data);
1151 }
1152 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1153 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1154 esiop_cmd->cmd_c.status = CMDST_FREE;
1155 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1156 xs->resid = 0;
1157 scsipi_done (xs);
1158 }
1159
1160 void
1161 esiop_checkdone(sc)
1162 struct esiop_softc *sc;
1163 {
1164 int target, lun, tag;
1165 struct esiop_target *esiop_target;
1166 struct esiop_lun *esiop_lun;
1167 struct esiop_cmd *esiop_cmd;
1168 int status;
1169
1170 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1171 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1172 if (esiop_target == NULL)
1173 continue;
1174 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1175 esiop_lun = esiop_target->esiop_lun[lun];
1176 if (esiop_lun == NULL)
1177 continue;
1178 esiop_cmd = esiop_lun->active;
1179 if (esiop_cmd) {
1180 esiop_table_sync(esiop_cmd,
1181 BUS_DMASYNC_POSTREAD |
1182 BUS_DMASYNC_POSTWRITE);
1183 status = le32toh(esiop_cmd->cmd_tables->status);
1184 if (status == SCSI_OK) {
1185 /* Ok, this command has been handled */
1186 esiop_cmd->cmd_c.xs->status = status;
1187 esiop_lun->active = NULL;
1188 esiop_scsicmd_end(esiop_cmd);
1189 }
1190 }
1191 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1192 esiop_cmd = esiop_lun->tactive[tag];
1193 if (esiop_cmd == NULL)
1194 continue;
1195 esiop_table_sync(esiop_cmd,
1196 BUS_DMASYNC_POSTREAD |
1197 BUS_DMASYNC_POSTWRITE);
1198 status = le32toh(esiop_cmd->cmd_tables->status);
1199 if (status == SCSI_OK) {
1200 /* Ok, this command has been handled */
1201 esiop_cmd->cmd_c.xs->status = status;
1202 esiop_lun->tactive[tag] = NULL;
1203 esiop_scsicmd_end(esiop_cmd);
1204 }
1205 }
1206 }
1207 }
1208 }
1209
1210 void
1211 esiop_unqueue(sc, target, lun)
1212 struct esiop_softc *sc;
1213 int target;
1214 int lun;
1215 {
1216 int slot, tag;
1217 u_int32_t slotdsa;
1218 struct esiop_cmd *esiop_cmd;
1219 struct esiop_lun *esiop_lun =
1220 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1221
1222 /* first make sure to read valid data */
1223 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1224
1225 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1226 /* look for commands in the scheduler, not yet started */
1227 if (esiop_lun->tactive[tag] == NULL)
1228 continue;
1229 esiop_cmd = esiop_lun->tactive[tag];
1230 for (slot = 0; slot < A_ncmd_slots; slot++) {
1231 slotdsa = esiop_script_read(sc,
1232 sc->sc_shedoffset + slot * 2);
1233 if (slotdsa & A_f_cmd_free)
1234 continue;
1235 if ((slotdsa & ~A_f_cmd_free) == esiop_cmd->cmd_c.dsa)
1236 break;
1237 }
1238 if (slot > ESIOP_NTAG)
1239 continue; /* didn't find it */
1240 /* Mark this slot as ignore */
1241 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1242 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1243 /* ask to requeue */
1244 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1245 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1246 esiop_lun->tactive[tag] = NULL;
1247 esiop_scsicmd_end(esiop_cmd);
1248 }
1249 }
1250
/*
 * Handle a rejected queue tag message: the command will run untagged,
 * so we have to adjust the reselect script.
 */
1255
1256
1257 int
1258 esiop_handle_qtag_reject(esiop_cmd)
1259 struct esiop_cmd *esiop_cmd;
1260 {
1261 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1262 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1263 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1264 int tag = esiop_cmd->cmd_tables->msg_out[2];
1265 struct esiop_target *esiop_target =
1266 (struct esiop_target*)sc->sc_c.targets[target];
1267 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1268
1269 #ifdef SIOP_DEBUG
1270 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1271 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1272 esiop_cmd->cmd_c.status);
1273 #endif
1274
1275 if (esiop_lun->active != NULL) {
1276 printf("%s: untagged command already running for target %d "
1277 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1278 target, lun, esiop_lun->active->cmd_c.status);
1279 return -1;
1280 }
1281 /* clear tag slot */
1282 esiop_lun->tactive[tag] = NULL;
1283 /* add command to non-tagged slot */
1284 esiop_lun->active = esiop_cmd;
1285 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1286 esiop_cmd->cmd_c.tag = -1;
1287 /* update DSA table */
1288 esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1289 esiop_cmd->cmd_c.dsa);
1290 esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
1291 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1292 return 0;
1293 }
1294
/*
 * Handle a bus reset: reset the chip, unqueue all active commands, free all
 * target structs and report the loss to the upper layer.
 * As the upper layer may requeue immediately we have to first store
 * all active commands in a temporary queue.
 */
void
esiop_handle_reset(sc)
	struct esiop_softc *sc;
{
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	int target, lun, tag;
	/*
	 * SCSI bus reset: reset the chip and restart the queue.
	 * Need to clean up all active commands.
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	esiop_reset(sc);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first completed commands, then commands
	 * still being executed.
	 */
	esiop_checkdone(sc);
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		struct esiop_target *esiop_target =
		    (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		for (lun = 0; lun < 8; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			/*
			 * tag == -1 is the untagged slot; tags 0..NTAG-1
			 * are only scanned when the target does tagged
			 * queuing (TARF_TAG).
			 */
			for (tag = -1; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    ESIOP_NTAG : 0);
			    tag++) {
				if (tag >= 0)
					esiop_cmd = esiop_lun->tactive[tag];
				else
					esiop_cmd = esiop_lun->active;
				if (esiop_cmd == NULL)
					continue;
				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				/* a timed-out command caused this reset */
				esiop_cmd->cmd_c.xs->error =
				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				if (tag >= 0)
					esiop_lun->tactive[tag] = NULL;
				else
					esiop_lun->active = NULL;
				esiop_cmd->cmd_c.status = CMDST_DONE;
				esiop_scsicmd_end(esiop_cmd);
			}
		}
		/* the target loses all negotiated parameters */
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}
1368
1369 void
1370 esiop_scsipi_request(chan, req, arg)
1371 struct scsipi_channel *chan;
1372 scsipi_adapter_req_t req;
1373 void *arg;
1374 {
1375 struct scsipi_xfer *xs;
1376 struct scsipi_periph *periph;
1377 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1378 struct esiop_cmd *esiop_cmd;
1379 struct esiop_target *esiop_target;
1380 int s, error, i;
1381 int target;
1382 int lun;
1383
1384 switch (req) {
1385 case ADAPTER_REQ_RUN_XFER:
1386 xs = arg;
1387 periph = xs->xs_periph;
1388 target = periph->periph_target;
1389 lun = periph->periph_lun;
1390
1391 s = splbio();
1392 #ifdef SIOP_DEBUG_SCHED
1393 printf("starting cmd for %d:%d\n", target, lun);
1394 #endif
1395 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1396 if (esiop_cmd == NULL) {
1397 xs->error = XS_RESOURCE_SHORTAGE;
1398 scsipi_done(xs);
1399 splx(s);
1400 return;
1401 }
1402 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1403 #ifdef DIAGNOSTIC
1404 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1405 panic("siop_scsicmd: new cmd not free");
1406 #endif
1407 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1408 if (esiop_target == NULL) {
1409 #ifdef SIOP_DEBUG
1410 printf("%s: alloc siop_target for target %d\n",
1411 sc->sc_c.sc_dev.dv_xname, target);
1412 #endif
1413 sc->sc_c.targets[target] =
1414 malloc(sizeof(struct esiop_target),
1415 M_DEVBUF, M_NOWAIT | M_ZERO);
1416 if (sc->sc_c.targets[target] == NULL) {
1417 printf("%s: can't malloc memory for "
1418 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1419 target);
1420 xs->error = XS_RESOURCE_SHORTAGE;
1421 scsipi_done(xs);
1422 splx(s);
1423 return;
1424 }
1425 esiop_target =
1426 (struct esiop_target*)sc->sc_c.targets[target];
1427 esiop_target->target_c.status = TARST_PROBING;
1428 esiop_target->target_c.flags = 0;
1429 esiop_target->target_c.id =
1430 sc->sc_c.clock_div << 24; /* scntl3 */
1431 esiop_target->target_c.id |= target << 16; /* id */
1432 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1433
1434 for (i=0; i < 8; i++)
1435 esiop_target->esiop_lun[i] = NULL;
1436 esiop_target_register(sc, target);
1437 }
1438 if (esiop_target->esiop_lun[lun] == NULL) {
1439 esiop_target->esiop_lun[lun] =
1440 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1441 M_NOWAIT|M_ZERO);
1442 if (esiop_target->esiop_lun[lun] == NULL) {
1443 printf("%s: can't alloc esiop_lun for "
1444 "target %d lun %d\n",
1445 sc->sc_c.sc_dev.dv_xname, target, lun);
1446 xs->error = XS_RESOURCE_SHORTAGE;
1447 scsipi_done(xs);
1448 splx(s);
1449 return;
1450 }
1451 }
1452 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1453 esiop_cmd->cmd_c.xs = xs;
1454 esiop_cmd->cmd_c.flags = 0;
1455 esiop_cmd->cmd_c.status = CMDST_READY;
1456
1457 /* load the DMA maps */
1458 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1459 esiop_cmd->cmd_c.dmamap_cmd,
1460 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1461 if (error) {
1462 printf("%s: unable to load cmd DMA map: %d\n",
1463 sc->sc_c.sc_dev.dv_xname, error);
1464 xs->error = XS_DRIVER_STUFFUP;
1465 scsipi_done(xs);
1466 splx(s);
1467 return;
1468 }
1469 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1470 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1471 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1472 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1473 ((xs->xs_control & XS_CTL_DATA_IN) ?
1474 BUS_DMA_READ : BUS_DMA_WRITE));
1475 if (error) {
1476 printf("%s: unable to load cmd DMA map: %d",
1477 sc->sc_c.sc_dev.dv_xname, error);
1478 xs->error = XS_DRIVER_STUFFUP;
1479 scsipi_done(xs);
1480 bus_dmamap_unload(sc->sc_c.sc_dmat,
1481 esiop_cmd->cmd_c.dmamap_cmd);
1482 splx(s);
1483 return;
1484 }
1485 bus_dmamap_sync(sc->sc_c.sc_dmat,
1486 esiop_cmd->cmd_c.dmamap_data, 0,
1487 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1488 (xs->xs_control & XS_CTL_DATA_IN) ?
1489 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1490 }
1491 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1492 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1493 BUS_DMASYNC_PREWRITE);
1494
1495 if (xs->xs_tag_type)
1496 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1497 else
1498 esiop_cmd->cmd_c.tag = -1;
1499 siop_setuptables(&esiop_cmd->cmd_c);
1500 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1501 htole32(A_f_c_target | A_f_c_lun);
1502 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1503 htole32((target << 8) | (lun << 16));
1504 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1505 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1506 htole32(A_f_c_tag);
1507 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1508 htole32(esiop_cmd->cmd_c.tag << 24);
1509 }
1510
1511 esiop_table_sync(esiop_cmd,
1512 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1513 esiop_start(sc, esiop_cmd);
1514 if (xs->xs_control & XS_CTL_POLL) {
1515 /* poll for command completion */
1516 while ((xs->xs_status & XS_STS_DONE) == 0) {
1517 delay(1000);
1518 esiop_intr(sc);
1519 }
1520 }
1521 splx(s);
1522 return;
1523
1524 case ADAPTER_REQ_GROW_RESOURCES:
1525 #ifdef SIOP_DEBUG
1526 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1527 sc->sc_c.sc_adapt.adapt_openings);
1528 #endif
1529 esiop_morecbd(sc);
1530 return;
1531
1532 case ADAPTER_REQ_SET_XFER_MODE:
1533 {
1534 struct scsipi_xfer_mode *xm = arg;
1535 if (sc->sc_c.targets[xm->xm_target] == NULL)
1536 return;
1537 s = splbio();
1538 if (xm->xm_mode & PERIPH_CAP_TQING)
1539 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1540 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1541 (sc->sc_c.features & SF_BUS_WIDE))
1542 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1543 if (xm->xm_mode & PERIPH_CAP_SYNC)
1544 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1545 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1546 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1547 sc->sc_c.targets[xm->xm_target]->status =
1548 TARST_ASYNC;
1549
1550 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1551 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1552 /* allocate a lun sw entry for this device */
1553 esiop_add_dev(sc, xm->xm_target, lun);
1554 }
1555
1556 splx(s);
1557 }
1558 }
1559 }
1560
1561 static void
1562 esiop_start(sc, esiop_cmd)
1563 struct esiop_softc *sc;
1564 struct esiop_cmd *esiop_cmd;
1565 {
1566 struct esiop_lun *esiop_lun;
1567 struct esiop_target *esiop_target;
1568 int timeout;
1569 int target, lun, slot;
1570
1571 nintr = 0;
1572
1573 /*
1574 * first make sure to read valid data
1575 */
1576 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1577
1578 /*
1579 * We use a circular queue here. sc->sc_currschedslot points to a
1580 * free slot, unless we have filled the queue. Check this.
1581 */
1582 slot = sc->sc_currschedslot;
1583 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * 2) &
1584 A_f_cmd_free) == 0) {
1585 /*
1586 * no more free slot, no need to continue. freeze the queue
1587 * and requeue this command.
1588 */
1589 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1590 sc->sc_flags |= SCF_CHAN_NOSLOT;
1591 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1592 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1593 esiop_scsicmd_end(esiop_cmd);
1594 return;
1595 }
1596 /* OK, we can use this slot */
1597
1598 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1599 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1600 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1601 esiop_lun = esiop_target->esiop_lun[lun];
1602 /* if non-tagged command active, panic: this shouldn't happen */
1603 if (esiop_lun->active != NULL) {
1604 panic("esiop_start: tagged cmd while untagged running");
1605 }
1606 #ifdef DIAGNOSTIC
1607 /* sanity check the tag if needed */
1608 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1609 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1610 panic("esiop_start: tag not free");
1611 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1612 esiop_cmd->cmd_c.tag < 0) {
1613 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1614 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1615 panic("esiop_start: invalid tag id");
1616 }
1617 }
1618 #endif
1619 #ifdef SIOP_DEBUG_SCHED
1620 printf("using slot %d for DSA 0x%lx\n", slot,
1621 (u_long)esiop_cmd->cmd_c.dsa);
1622 #endif
1623 /* mark command as active */
1624 if (esiop_cmd->cmd_c.status == CMDST_READY)
1625 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1626 else
1627 panic("esiop_start: bad status");
1628 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1629 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1630 /* DSA table for reselect */
1631 if ((esiop_lun->lun_flags & LUNF_TAGTABLE) == 0) {
1632 esiop_script_write(sc,
1633 esiop_target->lun_table_offset + lun + 2,
1634 esiop_lun->lun_tagtbl->tbl_dsa);
1635 esiop_lun->lun_flags |= LUNF_TAGTABLE;
1636 }
1637 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1638 htole32(esiop_cmd->cmd_c.dsa);
1639 bus_dmamap_sync(sc->sc_c.sc_dmat,
1640 esiop_lun->lun_tagtbl->tblblk->blkmap,
1641 esiop_lun->lun_tagtbl->tbl_offset,
1642 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1643 } else {
1644 esiop_lun->active = esiop_cmd;
1645 /* DSA table for reselect */
1646 esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1647 esiop_cmd->cmd_c.dsa);
1648 esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
1649
1650 }
1651 /* scheduler slot: ID, then DSA */
1652 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1653 sc->sc_c.targets[target]->id);
1654 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1655 esiop_cmd->cmd_c.dsa);
1656 /* handle timeout */
1657 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1658 /* start exire timer */
1659 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1660 if (timeout == 0)
1661 timeout = 1;
1662 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1663 timeout, esiop_timeout, esiop_cmd);
1664 }
1665 /* make sure SCRIPT processor will read valid data */
1666 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1667 /* Signal script it has some work to do */
1668 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1669 SIOP_ISTAT, ISTAT_SIGP);
1670 /* update the current slot, and wait for IRQ */
1671 sc->sc_currschedslot++;
1672 if (sc->sc_currschedslot >= A_ncmd_slots)
1673 sc->sc_currschedslot = 0;
1674 return;
1675 }
1676
1677 void
1678 esiop_timeout(v)
1679 void *v;
1680 {
1681 struct esiop_cmd *esiop_cmd = v;
1682 struct esiop_softc *sc =
1683 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1684 int s;
1685
1686 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1687 printf("command timeout\n");
1688
1689 s = splbio();
1690 /* reset the scsi bus */
1691 siop_resetbus(&sc->sc_c);
1692
1693 /* deactivate callout */
1694 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1695 /*
1696 * mark command has being timed out and just return;
1697 * the bus reset will generate an interrupt,
1698 * it will be handled in siop_intr()
1699 */
1700 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1701 splx(s);
1702 return;
1703
1704 }
1705
1706 void
1707 esiop_dump_script(sc)
1708 struct esiop_softc *sc;
1709 {
1710 int i;
1711 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1712 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1713 le32toh(sc->sc_c.sc_script[i]),
1714 le32toh(sc->sc_c.sc_script[i+1]));
1715 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1716 0xc0000000) {
1717 i++;
1718 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1719 }
1720 printf("\n");
1721 }
1722 }
1723
1724 void
1725 esiop_morecbd(sc)
1726 struct esiop_softc *sc;
1727 {
1728 int error, i, s;
1729 bus_dma_segment_t seg;
1730 int rseg;
1731 struct esiop_cbd *newcbd;
1732 struct esiop_xfer *xfer;
1733 bus_addr_t dsa;
1734
1735 /* allocate a new list head */
1736 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1737 if (newcbd == NULL) {
1738 printf("%s: can't allocate memory for command descriptors "
1739 "head\n", sc->sc_c.sc_dev.dv_xname);
1740 return;
1741 }
1742
1743 /* allocate cmd list */
1744 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1745 M_DEVBUF, M_NOWAIT|M_ZERO);
1746 if (newcbd->cmds == NULL) {
1747 printf("%s: can't allocate memory for command descriptors\n",
1748 sc->sc_c.sc_dev.dv_xname);
1749 goto bad3;
1750 }
1751 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1752 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1753 if (error) {
1754 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1755 sc->sc_c.sc_dev.dv_xname, error);
1756 goto bad2;
1757 }
1758 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1759 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1760 if (error) {
1761 printf("%s: unable to map cbd DMA memory, error = %d\n",
1762 sc->sc_c.sc_dev.dv_xname, error);
1763 goto bad2;
1764 }
1765 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1766 BUS_DMA_NOWAIT, &newcbd->xferdma);
1767 if (error) {
1768 printf("%s: unable to create cbd DMA map, error = %d\n",
1769 sc->sc_c.sc_dev.dv_xname, error);
1770 goto bad1;
1771 }
1772 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1773 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1774 if (error) {
1775 printf("%s: unable to load cbd DMA map, error = %d\n",
1776 sc->sc_c.sc_dev.dv_xname, error);
1777 goto bad0;
1778 }
1779 #ifdef DEBUG
1780 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1781 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1782 #endif
1783 for (i = 0; i < SIOP_NCMDPB; i++) {
1784 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1785 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1786 &newcbd->cmds[i].cmd_c.dmamap_data);
1787 if (error) {
1788 printf("%s: unable to create data DMA map for cbd: "
1789 "error %d\n",
1790 sc->sc_c.sc_dev.dv_xname, error);
1791 goto bad0;
1792 }
1793 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1794 sizeof(struct scsipi_generic), 1,
1795 sizeof(struct scsipi_generic), 0,
1796 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1797 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1798 if (error) {
1799 printf("%s: unable to create cmd DMA map for cbd %d\n",
1800 sc->sc_c.sc_dev.dv_xname, error);
1801 goto bad0;
1802 }
1803 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1804 newcbd->cmds[i].esiop_cbdp = newcbd;
1805 xfer = &newcbd->xfers[i];
1806 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1807 memset(newcbd->cmds[i].cmd_tables, 0,
1808 sizeof(struct esiop_xfer));
1809 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1810 i * sizeof(struct esiop_xfer);
1811 newcbd->cmds[i].cmd_c.dsa = dsa;
1812 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1813 xfer->siop_tables.t_msgout.count= htole32(1);
1814 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1815 xfer->siop_tables.t_msgin.count= htole32(1);
1816 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1817 offsetof(struct siop_common_xfer, msg_in));
1818 xfer->siop_tables.t_extmsgin.count= htole32(2);
1819 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1820 offsetof(struct siop_common_xfer, msg_in) + 1);
1821 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1822 offsetof(struct siop_common_xfer, msg_in) + 3);
1823 xfer->siop_tables.t_status.count= htole32(1);
1824 xfer->siop_tables.t_status.addr = htole32(dsa +
1825 offsetof(struct siop_common_xfer, status));
1826
1827 s = splbio();
1828 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1829 splx(s);
1830 #ifdef SIOP_DEBUG
1831 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1832 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1833 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1834 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1835 #endif
1836 }
1837 s = splbio();
1838 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1839 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1840 splx(s);
1841 return;
1842 bad0:
1843 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1844 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1845 bad1:
1846 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1847 bad2:
1848 free(newcbd->cmds, M_DEVBUF);
1849 bad3:
1850 free(newcbd, M_DEVBUF);
1851 return;
1852 }
1853
1854 void
1855 esiop_moretagtbl(sc)
1856 struct esiop_softc *sc;
1857 {
1858 int error, i, j, s;
1859 bus_dma_segment_t seg;
1860 int rseg;
1861 struct esiop_dsatblblk *newtblblk;
1862 struct esiop_dsatbl *newtbls;
1863 u_int32_t *tbls;
1864
1865 /* allocate a new list head */
1866 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1867 M_DEVBUF, M_NOWAIT|M_ZERO);
1868 if (newtblblk == NULL) {
1869 printf("%s: can't allocate memory for tag DSA table block\n",
1870 sc->sc_c.sc_dev.dv_xname);
1871 return;
1872 }
1873
1874 /* allocate tbl list */
1875 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1876 M_DEVBUF, M_NOWAIT|M_ZERO);
1877 if (newtbls == NULL) {
1878 printf("%s: can't allocate memory for command descriptors\n",
1879 sc->sc_c.sc_dev.dv_xname);
1880 goto bad3;
1881 }
1882 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1883 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1884 if (error) {
1885 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1886 sc->sc_c.sc_dev.dv_xname, error);
1887 goto bad2;
1888 }
1889 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1890 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1891 if (error) {
1892 printf("%s: unable to map tbls DMA memory, error = %d\n",
1893 sc->sc_c.sc_dev.dv_xname, error);
1894 goto bad2;
1895 }
1896 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1897 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1898 if (error) {
1899 printf("%s: unable to create tbl DMA map, error = %d\n",
1900 sc->sc_c.sc_dev.dv_xname, error);
1901 goto bad1;
1902 }
1903 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1904 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1905 if (error) {
1906 printf("%s: unable to load tbl DMA map, error = %d\n",
1907 sc->sc_c.sc_dev.dv_xname, error);
1908 goto bad0;
1909 }
1910 #ifdef DEBUG
1911 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1912 sc->sc_c.sc_dev.dv_xname,
1913 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1914 #endif
1915 for (i = 0; i < ESIOP_NTPB; i++) {
1916 newtbls[i].tblblk = newtblblk;
1917 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1918 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1919 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1920 newtbls[i].tbl_offset;
1921 for (j = 0; j < ESIOP_NTAG; j++)
1922 newtbls[i].tbl[j] = j;
1923 s = splbio();
1924 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1925 splx(s);
1926 }
1927 s = splbio();
1928 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1929 splx(s);
1930 return;
1931 bad0:
1932 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1933 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1934 bad1:
1935 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1936 bad2:
1937 free(newtbls, M_DEVBUF);
1938 bad3:
1939 free(newtblblk, M_DEVBUF);
1940 return;
1941 }
1942
1943 void
1944 esiop_update_scntl3(sc, _siop_target)
1945 struct esiop_softc *sc;
1946 struct siop_common_target *_siop_target;
1947 {
1948 int slot;
1949 u_int32_t slotid, id;
1950
1951 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1952 esiop_script_write(sc, esiop_target->lun_table_offset,
1953 esiop_target->target_c.id);
1954 id = esiop_target->target_c.id & 0x00ff0000;
1955 /* There may be other commands waiting in the scheduler. handle them */
1956 for (slot = 0; slot < A_ncmd_slots; slot++) {
1957 slotid =
1958 esiop_script_read(sc, sc->sc_shedoffset + slot * 2 + 1);
1959 if ((slotid & 0x00ff0000) == id)
1960 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1961 esiop_target->target_c.id);
1962 }
1963 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1964 }
1965
1966 void
1967 esiop_add_dev(sc, target, lun)
1968 struct esiop_softc *sc;
1969 int target;
1970 int lun;
1971 {
1972 struct esiop_target *esiop_target =
1973 (struct esiop_target *)sc->sc_c.targets[target];
1974 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1975
1976 if (esiop_target->target_c.flags & TARF_TAG) {
1977 /* we need a tag DSA table */
1978 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1979 if (esiop_lun->lun_tagtbl == NULL) {
1980 esiop_moretagtbl(sc);
1981 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1982 if (esiop_lun->lun_tagtbl == NULL) {
1983 /* no resources, run untagged */
1984 esiop_target->target_c.flags &= ~TARF_TAG;
1985 return;
1986 }
1987 }
1988 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1989
1990 }
1991 }
1992
1993 void
1994 esiop_del_dev(sc, target, lun)
1995 struct esiop_softc *sc;
1996 int target;
1997 int lun;
1998 {
1999 struct esiop_target *esiop_target;
2000 #ifdef SIOP_DEBUG
2001 printf("%s:%d:%d: free lun sw entry\n",
2002 sc->sc_c.sc_dev.dv_xname, target, lun);
2003 #endif
2004 if (sc->sc_c.targets[target] == NULL)
2005 return;
2006 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2007 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2008 esiop_target->esiop_lun[lun] = NULL;
2009 }
2010
2011 struct esiop_cmd *
2012 esiop_cmd_find(sc, target, dsa)
2013 struct esiop_softc *sc;
2014 int target;
2015 u_int32_t dsa;
2016 {
2017 int lun, tag;
2018 struct esiop_cmd *cmd;
2019 struct esiop_lun *esiop_lun;
2020 struct esiop_target *esiop_target =
2021 (struct esiop_target *)sc->sc_c.targets[target];
2022
2023 if (esiop_target == NULL)
2024 return NULL;
2025
2026 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2027 esiop_lun = esiop_target->esiop_lun[lun];
2028 if (esiop_lun == NULL)
2029 continue;
2030 cmd = esiop_lun->active;
2031 if (cmd && cmd->cmd_c.dsa == dsa)
2032 return cmd;
2033 if (esiop_target->target_c.flags & TARF_TAG) {
2034 for (tag = 0; tag < ESIOP_NTAG; tag++) {
2035 cmd = esiop_lun->tactive[tag];
2036 if (cmd && cmd->cmd_c.dsa == dsa)
2037 return cmd;
2038 }
2039 }
2040 }
2041 return NULL;
2042 }
2043
/*
 * Carve a per-target lun DSA table out of the script memory area and
 * register its DMA address in the global target table, so the script
 * can locate this target's luns on (re)selection.
 */
void
esiop_target_register(sc, target)
	struct esiop_softc *sc;
	u_int32_t target;
{
	struct esiop_target *esiop_target =
	    (struct esiop_target *)sc->sc_c.targets[target];

	/* get a DSA table for this target */
	esiop_target->lun_table_offset = sc->sc_free_offset;
	/* one word per lun plus 2 extra slots — NOTE(review): confirm use */
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
#ifdef SIOP_DEBUG
	printf("%s: lun table for target %d offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
	    sc->sc_free_offset);
#endif
	/* first 32-bit word holds the target ID (used for select) */
	esiop_script_write(sc, esiop_target->lun_table_offset,
	    esiop_target->target_c.id);
	/* Record this table's bus address in the target DSA table */
	esiop_script_write(sc,
	    sc->sc_target_table_offset + target,
	    (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr);
	esiop_script_sync(sc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2071
#ifdef SIOP_STATS
/* Dump the driver's interrupt statistics counters to the console. */
void
esiop_printstats()
{
	static const struct {
		const char *name;
		int *counter;
	} stats[] = {
		{ "esiop_stat_intr", &esiop_stat_intr },
		{ "esiop_stat_intr_shortxfer", &esiop_stat_intr_shortxfer },
		{ "esiop_stat_intr_xferdisc", &esiop_stat_intr_xferdisc },
		{ "esiop_stat_intr_sdp", &esiop_stat_intr_sdp },
		{ "esiop_stat_intr_done", &esiop_stat_intr_done },
		{ "esiop_stat_intr_lunresel", &esiop_stat_intr_lunresel },
		{ "esiop_stat_intr_qfull", &esiop_stat_intr_qfull },
	};
	int i;

	for (i = 0; i < (int)(sizeof stats / sizeof stats[0]); i++)
		printf("%s %d\n", stats[i].name, *stats[i].counter);
}
#endif
2085