esiop.c revision 1.3 1 /* $NetBSD: esiop.c,v 1.3 2002/04/22 20:47:20 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.3 2002/04/22 20:47:20 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
/*
 * Compile-time debug knobs.
 * NOTE(review): the #ifndef/#define pair below forces DEBUG on
 * unconditionally -- presumably a development leftover, confirm.
 */
64 #ifndef DEBUG
65 #define DEBUG
66 #endif
/* Per-area debug printf switches, all disabled here. */
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72 
/* Collect interrupt statistics (see the esiop_stat_* counters below). */
73 #define SIOP_STATS
74 
/* SCSI id used for the adapter when the chip's SCID register is unusable. */
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78 
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81 
/* Forward declarations of the driver's internal entry points. */
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_update_scntl3 __P((struct esiop_softc *,
98 struct siop_common_target *));
99 struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
100 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
101 
/*
 * Interrupt loop guard, incremented on each hardware interrupt taken in
 * esiop_intr() and used to panic after 100 iterations.
 * NOTE(review): it is never reset anywhere visible here -- verify.
 */
102 static int nintr = 0;
103 
/* Interrupt statistics counters, bumped via INCSTAT() in esiop_intr(). */
104 #ifdef SIOP_STATS
105 static int esiop_stat_intr = 0;
106 static int esiop_stat_intr_shortxfer = 0;
107 static int esiop_stat_intr_sdp = 0;
108 static int esiop_stat_intr_done = 0;
109 static int esiop_stat_intr_xferdisc = 0;
110 static int esiop_stat_intr_lunresel = 0;
111 static int esiop_stat_intr_qfull = 0;
112 void esiop_printstats __P((void));
113 #define INCSTAT(x) x++
114 #else
115 #define INCSTAT(x)
116 #endif
117
/*
 * Synchronize the host-memory copy of the SCRIPTS program with the device
 * (bus_dmamap_sync over the whole script page).  A no-op when the chip
 * runs the script from its on-board RAM (SF_CHIP_RAM), since there is no
 * DMA map in that case.
 */
118 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
119 static __inline__ void
120 esiop_script_sync(sc, ops)
121 struct esiop_softc *sc;
122 int ops;
123 {
124 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
125 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
126 PAGE_SIZE, ops);
127 }
128
/*
 * Read one 32-bit word of the SCRIPTS image.  "offset" is in 32-bit
 * words.  Reads from the chip's on-board RAM when SF_CHIP_RAM is set,
 * otherwise from the host-memory copy, which is stored little-endian
 * (hence the le32toh conversion).
 */
129 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
130 static __inline__ u_int32_t
131 esiop_script_read(sc, offset)
132 struct esiop_softc *sc;
133 u_int offset;
134 {
135 if (sc->sc_c.features & SF_CHIP_RAM) {
136 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
137 offset * 4);
138 } else {
139 return le32toh(sc->sc_c.sc_script[offset]);
140 }
141 }
142
/*
 * Write one 32-bit word of the SCRIPTS image.  "offset" is in 32-bit
 * words.  Mirrors esiop_script_read(): goes to on-board RAM when
 * SF_CHIP_RAM is set, otherwise to the little-endian host-memory copy
 * (caller is responsible for any needed esiop_script_sync()).
 */
143 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
144 u_int32_t));
145 static __inline__ void
146 esiop_script_write(sc, offset, val)
147 struct esiop_softc *sc;
148 u_int offset;
149 u_int32_t val;
150 {
151 if (sc->sc_c.features & SF_CHIP_RAM) {
152 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
153 offset * 4, val);
154 } else {
155 sc->sc_c.sc_script[offset] = htole32(val);
156 }
157 }
158
/*
 * Attach the controller: allocate and map the DMA-safe script page (unless
 * the chip executes from on-board RAM), initialize the command/tag-table
 * queues, fill in the scsipi adapter and channel structures, compute the
 * chip's sync period limits, reset the bus and chip, and attach the
 * SCSI bus.  Returns early (device left unconfigured) on DMA setup errors.
 */
159 void
160 esiop_attach(sc)
161 struct esiop_softc *sc;
162 {
163 int error, i;
164 bus_dma_segment_t seg;
165 int rseg;
166 
167 /*
168 * Allocate DMA-safe memory for the script and map it.
169 */
170 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
171 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
172 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
173 if (error) {
174 printf("%s: unable to allocate script DMA memory, "
175 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
176 return;
177 }
178 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
179 (caddr_t *)&sc->sc_c.sc_script,
180 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
181 if (error) {
182 printf("%s: unable to map script DMA memory, "
183 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
184 return;
185 }
186 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
187 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
188 if (error) {
189 printf("%s: unable to create script DMA map, "
190 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
191 return;
192 }
193 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
194 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
195 if (error) {
196 printf("%s: unable to load script DMA map, "
197 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
198 return;
199 }
200 sc->sc_c.sc_scriptaddr =
201 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
202 sc->sc_c.ram_size = PAGE_SIZE;
203 }
/* Command, tag-table and free-list queues start out empty. */
204 TAILQ_INIT(&sc->free_list);
205 TAILQ_INIT(&sc->cmds);
206 TAILQ_INIT(&sc->free_tagtbl);
207 TAILQ_INIT(&sc->tag_tblblk);
208 sc->sc_currschedslot = 0;
209 #ifdef SIOP_DEBUG
210 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
211 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
212 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
213 #endif
214 
/* Fill in the scsipi adapter description. */
215 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
216 sc->sc_c.sc_adapt.adapt_nchannels = 1;
217 sc->sc_c.sc_adapt.adapt_openings = 0;
218 sc->sc_c.sc_adapt.adapt_max_periph = 1 /* XXX ESIOP_NTAG - 1 */ ;
219 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
220 sc->sc_c.sc_adapt.adapt_minphys = minphys;
221 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
222 
/* Fill in the scsipi channel description; our own id comes from SCID. */
223 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
224 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
225 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
226 sc->sc_c.sc_chan.chan_channel = 0;
227 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
228 sc->sc_c.sc_chan.chan_ntargets =
229 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
230 sc->sc_c.sc_chan.chan_nluns = 8;
231 sc->sc_c.sc_chan.chan_id =
232 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
233 if (sc->sc_c.sc_chan.chan_id == 0 ||
234 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
235 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
236 
237 for (i = 0; i < 16; i++)
238 sc->sc_c.targets[i] = NULL;
239 
240 /* find min/max sync period for this chip */
241 sc->sc_c.maxsync = 0;
242 sc->sc_c.minsync = 255;
243 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
244 if (sc->sc_c.clock_period != scf_period[i].clock)
245 continue;
246 if (sc->sc_c.maxsync < scf_period[i].period)
247 sc->sc_c.maxsync = scf_period[i].period;
248 if (sc->sc_c.minsync > scf_period[i].period)
249 sc->sc_c.minsync = scf_period[i].period;
250 }
/*
 * If no scf_period entry matched our clock_period, maxsync is still 0
 * and minsync still 255 (their initial values above).  The previous
 * test (maxsync == 255 || minsync == 0) checked the opposite values
 * and could never fire, silently accepting bogus sync parameters.
 */
251 if (sc->sc_c.maxsync == 0 || sc->sc_c.minsync == 255)
252 panic("siop: can't find my sync parameters\n");
253 /* Do a bus reset, so that devices fall back to narrow/async */
254 siop_resetbus(&sc->sc_c);
255 /*
256 * siop_reset() will reset the chip, thus clearing pending interrupts
257 */
258 esiop_reset(sc);
259 #ifdef DUMP_SCRIPT
260 esiop_dump_script(sc);
261 #endif
262 
263 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
264 }
265
/*
 * Reset the chip and (re)build the SCRIPTS image.  Memory layout, in
 * 32-bit words from the start of script RAM / the script DMA page:
 * the script itself, then 2 words for incoming messages, then the
 * command scheduler ring (A_ncmd_slots slots of 2 words), then the
 * per-target DSA table (one word per target).  The script is copied
 * either into chip RAM or the host-memory copy, patched with the
 * addresses computed above, and finally started at Ent_reselect.
 */
266 void
267 esiop_reset(sc)
268 struct esiop_softc *sc;
269 {
270 int i, j;
271 u_int32_t addr;
272 u_int32_t msgin_addr;
273 
274 siop_common_reset(&sc->sc_c);
275 
276 /*
277 * we copy the script at the beginning of RAM. Then there are 8 bytes
278 * for messages in.
279 */
280 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
281 msgin_addr =
282 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
283 sc->sc_free_offset += 2;
284 /* then we have the scheduler ring */
285 sc->sc_shedoffset = sc->sc_free_offset;
286 sc->sc_free_offset += A_ncmd_slots * 2;
287 /* then the targets DSA table */
288 sc->sc_target_table_offset = sc->sc_free_offset;
289 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
290 /* copy and patch the script */
291 if (sc->sc_c.features & SF_CHIP_RAM) {
292 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
293 esiop_script,
294 sizeof(esiop_script) / sizeof(esiop_script[0]));
/* Patch every use of the T/L/Q table offset with the real size. */
295 for (j = 0; j <
296 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
297 j++) {
298 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
299 E_tlq_offset_Used[j] * 4,
300 sizeof(struct siop_common_xfer));
301 }
/* Patch every use of the msg-in buffer with its bus address. */
302 for (j = 0; j <
303 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
304 j++) {
305 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
306 E_abs_msgin2_Used[j] * 4, msgin_addr);
307 }
308 
309 #ifdef SIOP_SYMLED
310 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
311 Ent_led_on1, esiop_led_on,
312 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
313 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
314 Ent_led_on2, esiop_led_on,
315 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
316 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
317 Ent_led_off, esiop_led_off,
318 sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
319 #endif
320 } else {
/* No chip RAM: same copy and patches, into the host-memory script. */
321 for (j = 0;
322 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
323 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
324 }
325 for (j = 0; j <
326 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
327 j++) {
328 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
329 htole32(sizeof(struct siop_common_xfer));
330 }
331 for (j = 0; j <
332 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
333 j++) {
334 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
335 htole32(msgin_addr);
336 }
337 
338 #ifdef SIOP_SYMLED
339 for (j = 0;
340 j < (sizeof(esiop_led_on) / sizeof(esiop_led_on[0])); j++)
341 sc->sc_c.sc_script[
342 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
343 ] = htole32(esiop_led_on[j]);
344 for (j = 0;
345 j < (sizeof(esiop_led_on) / sizeof(esiop_led_on[0])); j++)
346 sc->sc_c.sc_script[
347 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
348 ] = htole32(esiop_led_on[j]);
349 for (j = 0;
350 j < (sizeof(esiop_led_off) / sizeof(esiop_led_off[0])); j++)
351 sc->sc_c.sc_script[
352 Ent_led_off / sizeof(esiop_led_off[0]) + j
353 ] = htole32(esiop_led_off[j]);
354 #endif
355 }
356 /* get base of scheduler ring */
357 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
358 /* init scheduler */
/* Mark every scheduler slot as free. */
359 for (i = 0; i < A_ncmd_slots; i++) {
360 esiop_script_write(sc, sc->sc_shedoffset + i * 2, A_f_cmd_free);
361 esiop_script_write(sc, sc->sc_shedoffset + i * 2 + 1, 0);
362 }
363 sc->sc_currschedslot = 0;
/*
 * SCRATCHE holds the current scheduler slot index, SCRATCHD its bus
 * address; esiop_intr() reads and advances both on selection timeout.
 */
364 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
365 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
366 /*
367 * 0x78000000 is a 'move data8 to reg'. data8 is the second
368 * octet, reg offset is the third.
369 */
/* Patch the ring base address, one byte per 'move data8 to reg' insn. */
370 esiop_script_write(sc, Ent_cmdr0 / 4,
371 0x78640000 | ((addr & 0x000000ff) << 8));
372 esiop_script_write(sc, Ent_cmdr1 / 4,
373 0x78650000 | ((addr & 0x0000ff00) ));
374 esiop_script_write(sc, Ent_cmdr2 / 4,
375 0x78660000 | ((addr & 0x00ff0000) >> 8));
376 esiop_script_write(sc, Ent_cmdr3 / 4,
377 0x78670000 | ((addr & 0xff000000) >> 16));
378 /* set flags */
379 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
380 /* write pointer of base of target DSA table */
381 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
382 sc->sc_c.sc_scriptaddr;
383 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
384 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
385 ((addr & 0x000000ff) << 8));
386 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
387 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
388 ((addr & 0x0000ff00) ));
389 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
390 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
391 ((addr & 0x00ff0000) >> 8));
392 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
393 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
394 ((addr & 0xff000000) >> 16));
395 #ifdef SIOP_DEBUG
396 printf("%s: target table offset %d free offset %d\n",
397 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
398 sc->sc_free_offset);
399 #endif
400 
401 /* register existing targets */
402 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
403 if (sc->sc_c.targets[i])
404 esiop_target_register(sc, i);
405 }
406 /* start script */
407 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
408 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
409 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
410 }
411 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
412 sc->sc_c.sc_scriptaddr + Ent_reselect);
413 }
414
/*
 * Kick the SCRIPTS processor at the given script entry point by loading
 * the DSP register.  Expands using the local 'sc' variable of the caller.
 * The disabled (#if 0) variant also logs DSA/DSP for debugging and
 * additionally expects a local 'esiop_cmd' in scope.
 */
415 #if 0
416 #define CALL_SCRIPT(ent) do {\
417 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
418 esiop_cmd->cmd_c.dsa, \
419 sc->sc_c.sc_scriptaddr + ent); \
420 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
421 } while (0)
422 #else
423 #define CALL_SCRIPT(ent) do {\
424 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
425 } while (0)
426 #endif
427
428 int
429 esiop_intr(v)
430 void *v;
431 {
432 struct esiop_softc *sc = v;
433 struct esiop_target *esiop_target;
434 struct esiop_cmd *esiop_cmd;
435 struct esiop_lun *esiop_lun;
436 struct scsipi_xfer *xs;
437 int istat, sist, sstat1, dstat;
438 u_int32_t irqcode;
439 int need_reset = 0;
440 int offset, target, lun, tag;
441 u_int32_t tflags;
442 u_int32_t addr;
443 int freetarget = 0;
444 int restart = 0;
445 int slot;
446 int retval = 0;
447
448 again:
449 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
450 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
451 if (istat & ISTAT_SEM) {
452 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
453 SIOP_ISTAT, (istat & ~ISTAT_SEM));
454 esiop_checkdone(sc);
455 }
456 return retval;
457 }
458 retval = 1;
459 nintr++;
460 if (nintr > 100) {
461 panic("esiop: intr loop");
462 }
463 INCSTAT(esiop_stat_intr);
464 if (istat & ISTAT_INTF) {
465 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
466 SIOP_ISTAT, ISTAT_INTF);
467 esiop_checkdone(sc);
468 goto again;
469 }
470 /* get CMD from T/L/Q */
471 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
472 SIOP_SCRATCHC);
473 #ifdef SIOP_DEBUG_INTR
474 printf("interrupt, istat=0x%x tflags=0x%x "
475 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
476 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
477 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
478 SIOP_DSP) -
479 sc->sc_c.sc_scriptaddr));
480 #endif
481 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
482 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
483 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
484 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
485 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
486
487 if (target >= 0 && lun >= 0) {
488 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
489 if (esiop_target == NULL) {
490 printf("esiop_target (target %d) not valid\n", target);
491 goto none;
492 }
493 esiop_lun = esiop_target->esiop_lun[lun];
494 if (esiop_lun == NULL) {
495 printf("esiop_lun (target %d lun %d) not valid\n",
496 target, lun);
497 goto none;
498 }
499 esiop_cmd =
500 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
501 if (esiop_cmd == NULL) {
502 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
503 target, lun, tag);
504 goto none;
505 }
506 xs = esiop_cmd->cmd_c.xs;
507 #ifdef DIAGNOSTIC
508 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
509 printf("esiop_cmd (target %d lun %d) "
510 "not active (%d)\n", target, lun,
511 esiop_cmd->cmd_c.status);
512 goto none;
513 }
514 #endif
515 esiop_table_sync(esiop_cmd,
516 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
517 } else {
518 none:
519 xs = NULL;
520 esiop_target = NULL;
521 esiop_lun = NULL;
522 esiop_cmd = NULL;
523 }
524 if (istat & ISTAT_DIP) {
525 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
526 SIOP_DSTAT);
527 if (dstat & DSTAT_SSI) {
528 printf("single step dsp 0x%08x dsa 0x08%x\n",
529 (int)(bus_space_read_4(sc->sc_c.sc_rt,
530 sc->sc_c.sc_rh, SIOP_DSP) -
531 sc->sc_c.sc_scriptaddr),
532 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
533 SIOP_DSA));
534 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
535 (istat & ISTAT_SIP) == 0) {
536 bus_space_write_1(sc->sc_c.sc_rt,
537 sc->sc_c.sc_rh, SIOP_DCNTL,
538 bus_space_read_1(sc->sc_c.sc_rt,
539 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
540 }
541 return 1;
542 }
543 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
544 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
545 if (dstat & DSTAT_IID)
546 printf(" Illegal instruction");
547 if (dstat & DSTAT_ABRT)
548 printf(" abort");
549 if (dstat & DSTAT_BF)
550 printf(" bus fault");
551 if (dstat & DSTAT_MDPE)
552 printf(" parity");
553 if (dstat & DSTAT_DFE)
554 printf(" dma fifo empty");
555 printf(", DSP=0x%x DSA=0x%x: ",
556 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
557 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
558 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
559 if (esiop_cmd)
560 printf("last msg_in=0x%x status=0x%x\n",
561 esiop_cmd->cmd_tables->msg_in[0],
562 le32toh(esiop_cmd->cmd_tables->status));
563 else
564 printf(" current T/L/Q invalid\n");
565 need_reset = 1;
566 }
567 }
568 if (istat & ISTAT_SIP) {
569 if (istat & ISTAT_DIP)
570 delay(10);
571 /*
572 * Can't read sist0 & sist1 independently, or we have to
573 * insert delay
574 */
575 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
576 SIOP_SIST0);
577 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
578 SIOP_SSTAT1);
579 #ifdef SIOP_DEBUG_INTR
580 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
581 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
582 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
583 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
584 SIOP_DSP) -
585 sc->sc_c.sc_scriptaddr));
586 #endif
587 if (sist & SIST0_RST) {
588 esiop_handle_reset(sc);
589 /* no table to flush here */
590 return 1;
591 }
592 if (sist & SIST0_SGE) {
593 if (esiop_cmd)
594 scsipi_printaddr(xs->xs_periph);
595 else
596 printf("%s:", sc->sc_c.sc_dev.dv_xname);
597 printf("scsi gross error\n");
598 goto reset;
599 }
600 if ((sist & SIST0_MA) && need_reset == 0) {
601 if (esiop_cmd) {
602 int scratchc0;
603 dstat = bus_space_read_1(sc->sc_c.sc_rt,
604 sc->sc_c.sc_rh, SIOP_DSTAT);
605 /*
606 * first restore DSA, in case we were in a S/G
607 * operation.
608 */
609 bus_space_write_4(sc->sc_c.sc_rt,
610 sc->sc_c.sc_rh,
611 SIOP_DSA, esiop_cmd->cmd_c.dsa);
612 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
613 sc->sc_c.sc_rh, SIOP_SCRATCHC);
614 switch (sstat1 & SSTAT1_PHASE_MASK) {
615 case SSTAT1_PHASE_STATUS:
616 /*
617 * previous phase may be aborted for any reason
618 * ( for example, the target has less data to
619 * transfer than requested). Just go to status
620 * and the command should terminate.
621 */
622 INCSTAT(esiop_stat_intr_shortxfer);
623 if ((dstat & DSTAT_DFE) == 0)
624 siop_clearfifo(&sc->sc_c);
625 /* no table to flush here */
626 CALL_SCRIPT(Ent_status);
627 return 1;
628 case SSTAT1_PHASE_MSGIN:
629 /*
630 * target may be ready to disconnect
631 * Save data pointers just in case.
632 */
633 INCSTAT(esiop_stat_intr_xferdisc);
634 if (scratchc0 & A_f_c_data)
635 siop_sdp(&esiop_cmd->cmd_c);
636 else if ((dstat & DSTAT_DFE) == 0)
637 siop_clearfifo(&sc->sc_c);
638 bus_space_write_1(sc->sc_c.sc_rt,
639 sc->sc_c.sc_rh, SIOP_SCRATCHC,
640 scratchc0 & ~A_f_c_data);
641 esiop_table_sync(esiop_cmd,
642 BUS_DMASYNC_PREREAD |
643 BUS_DMASYNC_PREWRITE);
644 CALL_SCRIPT(Ent_msgin);
645 return 1;
646 }
647 printf("%s: unexpected phase mismatch %d\n",
648 sc->sc_c.sc_dev.dv_xname,
649 sstat1 & SSTAT1_PHASE_MASK);
650 } else {
651 printf("%s: phase mismatch without command\n",
652 sc->sc_c.sc_dev.dv_xname);
653 }
654 need_reset = 1;
655 }
656 if (sist & SIST0_PAR) {
657 /* parity error, reset */
658 if (esiop_cmd)
659 scsipi_printaddr(xs->xs_periph);
660 else
661 printf("%s:", sc->sc_c.sc_dev.dv_xname);
662 printf("parity error\n");
663 goto reset;
664 }
665 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
666 /* selection time out, assume there's no device here */
667 /*
668 * SCRATCHC has not been loaded yet, we have to find
669 * params by ourselves. scratchE0 should point to
670 * the slot.
671 */
672 slot = bus_space_read_1(sc->sc_c.sc_rt,
673 sc->sc_c.sc_rh, SIOP_SCRATCHE);
674 esiop_script_sync(sc,
675 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
676 target = esiop_script_read(sc,
677 sc->sc_shedoffset + slot * 2 + 1) & 0x00ff0000;
678 target = (target >> 16) & 0xff;
679 esiop_cmd = esiop_cmd_find(sc, target,
680 esiop_script_read(sc,
681 sc->sc_shedoffset + slot * 2) & ~0x3);
682 /*
683 * mark this slot as free, and advance to next slot
684 */
685 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
686 A_f_cmd_free);
687 addr = bus_space_read_4(sc->sc_c.sc_rt,
688 sc->sc_c.sc_rh, SIOP_SCRATCHD);
689 if (slot < (A_ncmd_slots - 1)) {
690 bus_space_write_1(sc->sc_c.sc_rt,
691 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
692 addr = addr + 8;
693 } else {
694 bus_space_write_1(sc->sc_c.sc_rt,
695 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
696 addr = sc->sc_c.sc_scriptaddr +
697 sc->sc_shedoffset * sizeof(u_int32_t);
698 }
699 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
700 SIOP_SCRATCHD, addr);
701 esiop_script_sync(sc,
702 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
703 if (esiop_cmd) {
704 xs = esiop_cmd->cmd_c.xs;
705 esiop_target = (struct esiop_target *)
706 esiop_cmd->cmd_c.siop_target;
707 lun = xs->xs_periph->periph_lun;
708 tag = esiop_cmd->cmd_c.tag;
709 esiop_lun = esiop_target->esiop_lun[lun];
710 esiop_cmd->cmd_c.status = CMDST_DONE;
711 xs->error = XS_SELTIMEOUT;
712 freetarget = 1;
713 goto end;
714 } else {
715 printf("%s: selection timeout without "
716 "command, target %d (sdid 0x%x), "
717 "slot %d\n",
718 sc->sc_c.sc_dev.dv_xname, target,
719 bus_space_read_1(sc->sc_c.sc_rt,
720 sc->sc_c.sc_rh, SIOP_SDID), slot);
721 need_reset = 1;
722 }
723 }
724 if (sist & SIST0_UDC) {
725 /*
726 * unexpected disconnect. Usually the target signals
727 * a fatal condition this way. Attempt to get sense.
728 */
729 if (esiop_cmd) {
730 esiop_cmd->cmd_tables->status =
731 htole32(SCSI_CHECK);
732 goto end;
733 }
734 printf("%s: unexpected disconnect without "
735 "command\n", sc->sc_c.sc_dev.dv_xname);
736 goto reset;
737 }
738 if (sist & (SIST1_SBMC << 8)) {
739 /* SCSI bus mode change */
740 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
741 goto reset;
742 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
743 /*
744 * we have a script interrupt, it will
745 * restart the script.
746 */
747 goto scintr;
748 }
749 /*
750 * else we have to restart it ourselves, at the
751 * interrupted instruction.
752 */
753 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
754 SIOP_DSP,
755 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
756 SIOP_DSP) - 8);
757 return 1;
758 }
759 /* Else it's an unhandled exception (for now). */
760 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
761 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
762 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
763 SIOP_SSTAT1),
764 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
765 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
766 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
767 if (esiop_cmd) {
768 esiop_cmd->cmd_c.status = CMDST_DONE;
769 xs->error = XS_SELTIMEOUT;
770 goto end;
771 }
772 need_reset = 1;
773 }
774 if (need_reset) {
775 reset:
776 /* fatal error, reset the bus */
777 siop_resetbus(&sc->sc_c);
778 /* no table to flush here */
779 return 1;
780 }
781
782 scintr:
783 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
784 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
785 SIOP_DSPS);
786 #ifdef SIOP_DEBUG_INTR
787 printf("script interrupt 0x%x\n", irqcode);
788 #endif
789 /*
790 * no command, or an inactive command is only valid for a
791 * reselect interrupt
792 */
793 if ((irqcode & 0x80) == 0) {
794 if (esiop_cmd == NULL) {
795 printf(
796 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
797 sc->sc_c.sc_dev.dv_xname, irqcode);
798 goto reset;
799 }
800 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
801 printf("%s: command with invalid status "
802 "(IRQ code 0x%x current status %d) !\n",
803 sc->sc_c.sc_dev.dv_xname,
804 irqcode, esiop_cmd->cmd_c.status);
805 xs = NULL;
806 }
807 }
808 switch(irqcode) {
809 case A_int_err:
810 printf("error, DSP=0x%x\n",
811 (int)(bus_space_read_4(sc->sc_c.sc_rt,
812 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
813 if (xs) {
814 xs->error = XS_SELTIMEOUT;
815 goto end;
816 } else {
817 goto reset;
818 }
819 case A_int_msgin:
820 {
821 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
822 sc->sc_c.sc_rh, SIOP_SFBR);
823 if (msgin == MSG_MESSAGE_REJECT) {
824 int msg, extmsg;
825 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
826 /*
827 * message was part of an identify +
828 * something else. Identify shouldn't
829 * have been rejected.
830 */
831 msg =
832 esiop_cmd->cmd_tables->msg_out[1];
833 extmsg =
834 esiop_cmd->cmd_tables->msg_out[3];
835 } else {
836 msg =
837 esiop_cmd->cmd_tables->msg_out[0];
838 extmsg =
839 esiop_cmd->cmd_tables->msg_out[2];
840 }
841 if (msg == MSG_MESSAGE_REJECT) {
842 /* MSG_REJECT for a MSG_REJECT !*/
843 if (xs)
844 scsipi_printaddr(xs->xs_periph);
845 else
846 printf("%s: ",
847 sc->sc_c.sc_dev.dv_xname);
848 printf("our reject message was "
849 "rejected\n");
850 goto reset;
851 }
852 if (msg == MSG_EXTENDED &&
853 extmsg == MSG_EXT_WDTR) {
854 /* WDTR rejected, initiate sync */
855 if ((esiop_target->target_c.flags &
856 TARF_SYNC) == 0) {
857 esiop_target->target_c.status =
858 TARST_OK;
859 siop_update_xfer_mode(&sc->sc_c,
860 target);
861 /* no table to flush here */
862 CALL_SCRIPT(Ent_msgin_ack);
863 return 1;
864 }
865 esiop_target->target_c.status =
866 TARST_SYNC_NEG;
867 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
868 sc->sc_c.minsync, sc->sc_c.maxoff);
869 esiop_table_sync(esiop_cmd,
870 BUS_DMASYNC_PREREAD |
871 BUS_DMASYNC_PREWRITE);
872 CALL_SCRIPT(Ent_send_msgout);
873 return 1;
874 } else if (msg == MSG_EXTENDED &&
875 extmsg == MSG_EXT_SDTR) {
876 /* sync rejected */
877 esiop_target->target_c.offset = 0;
878 esiop_target->target_c.period = 0;
879 esiop_target->target_c.status =
880 TARST_OK;
881 siop_update_xfer_mode(&sc->sc_c,
882 target);
883 /* no table to flush here */
884 CALL_SCRIPT(Ent_msgin_ack);
885 return 1;
886 } else if (msg == MSG_SIMPLE_Q_TAG ||
887 msg == MSG_HEAD_OF_Q_TAG ||
888 msg == MSG_ORDERED_Q_TAG) {
889 if (esiop_handle_qtag_reject(
890 esiop_cmd) == -1)
891 goto reset;
892 CALL_SCRIPT(Ent_msgin_ack);
893 return 1;
894 }
895 if (xs)
896 scsipi_printaddr(xs->xs_periph);
897 else
898 printf("%s: ",
899 sc->sc_c.sc_dev.dv_xname);
900 if (msg == MSG_EXTENDED) {
901 printf("scsi message reject, extended "
902 "message sent was 0x%x\n", extmsg);
903 } else {
904 printf("scsi message reject, message "
905 "sent was 0x%x\n", msg);
906 }
907 /* no table to flush here */
908 CALL_SCRIPT(Ent_msgin_ack);
909 return 1;
910 }
911 if (xs)
912 scsipi_printaddr(xs->xs_periph);
913 else
914 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
915 printf("unhandled message 0x%x\n",
916 esiop_cmd->cmd_tables->msg_in[0]);
917 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
918 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
919 esiop_table_sync(esiop_cmd,
920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
921 CALL_SCRIPT(Ent_send_msgout);
922 return 1;
923 }
924 case A_int_extmsgin:
925 #ifdef SIOP_DEBUG_INTR
926 printf("extended message: msg 0x%x len %d\n",
927 esiop_cmd->cmd_tables->msg_in[2],
928 esiop_cmd->cmd_tables->msg_in[1]);
929 #endif
930 if (esiop_cmd->cmd_tables->msg_in[1] > 6)
931 printf("%s: extended message too big (%d)\n",
932 sc->sc_c.sc_dev.dv_xname,
933 esiop_cmd->cmd_tables->msg_in[1]);
934 esiop_cmd->cmd_tables->t_extmsgdata.count =
935 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
936 esiop_table_sync(esiop_cmd,
937 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
938 CALL_SCRIPT(Ent_get_extmsgdata);
939 return 1;
940 case A_int_extmsgdata:
941 #ifdef SIOP_DEBUG_INTR
942 {
943 int i;
944 printf("extended message: 0x%x, data:",
945 esiop_cmd->cmd_tables->msg_in[2]);
946 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
947 i++)
948 printf(" 0x%x",
949 esiop_cmd->cmd_tables->msg_in[i]);
950 printf("\n");
951 }
952 #endif
953 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
954 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
955 case SIOP_NEG_MSGOUT:
956 esiop_update_scntl3(sc,
957 esiop_cmd->cmd_c.siop_target);
958 esiop_table_sync(esiop_cmd,
959 BUS_DMASYNC_PREREAD |
960 BUS_DMASYNC_PREWRITE);
961 CALL_SCRIPT(Ent_send_msgout);
962 return(1);
963 case SIOP_NEG_ACK:
964 esiop_update_scntl3(sc,
965 esiop_cmd->cmd_c.siop_target);
966 CALL_SCRIPT(Ent_msgin_ack);
967 return(1);
968 default:
969 panic("invalid retval from "
970 "siop_wdtr_neg()");
971 }
972 return(1);
973 }
974 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
975 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
976 case SIOP_NEG_MSGOUT:
977 esiop_update_scntl3(sc,
978 esiop_cmd->cmd_c.siop_target);
979 esiop_table_sync(esiop_cmd,
980 BUS_DMASYNC_PREREAD |
981 BUS_DMASYNC_PREWRITE);
982 CALL_SCRIPT(Ent_send_msgout);
983 return(1);
984 case SIOP_NEG_ACK:
985 esiop_update_scntl3(sc,
986 esiop_cmd->cmd_c.siop_target);
987 CALL_SCRIPT(Ent_msgin_ack);
988 return(1);
989 default:
990 panic("invalid retval from "
991 "siop_wdtr_neg()");
992 }
993 return(1);
994 }
995 /* send a message reject */
996 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
997 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
998 esiop_table_sync(esiop_cmd,
999 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1000 CALL_SCRIPT(Ent_send_msgout);
1001 return 1;
1002 case A_int_disc:
1003 INCSTAT(esiop_stat_intr_sdp);
1004 offset = bus_space_read_1(sc->sc_c.sc_rt,
1005 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1006 #ifdef SIOP_DEBUG_DR
1007 printf("disconnect offset %d\n", offset);
1008 #endif
1009 if (offset > SIOP_NSG) {
1010 printf("%s: bad offset for disconnect (%d)\n",
1011 sc->sc_c.sc_dev.dv_xname, offset);
1012 goto reset;
1013 }
1014 /*
1015 * offset == SIOP_NSG may be a valid condition if
1016 * we get a sdp when the xfer is done.
1017 * Don't call memmove in this case.
1018 */
1019 if (offset < SIOP_NSG) {
1020 memmove(&esiop_cmd->cmd_tables->data[0],
1021 &esiop_cmd->cmd_tables->data[offset],
1022 (SIOP_NSG - offset) * sizeof(scr_table_t));
1023 esiop_table_sync(esiop_cmd,
1024 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1025 }
1026 CALL_SCRIPT(Ent_script_sched);
1027 return 1;
1028 case A_int_resfail:
1029 printf("reselect failed\n");
1030 CALL_SCRIPT(Ent_script_sched);
1031 return 1;
1032 case A_int_done:
1033 if (xs == NULL) {
1034 printf("%s: done without command\n",
1035 sc->sc_c.sc_dev.dv_xname);
1036 CALL_SCRIPT(Ent_script_sched);
1037 return 1;
1038 }
1039 #ifdef SIOP_DEBUG_INTR
1040 printf("done, DSA=0x%lx target id 0x%x last msg "
1041 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1042 le32toh(esiop_cmd->cmd_tables->id),
1043 esiop_cmd->cmd_tables->msg_in[0],
1044 le32toh(esiop_cmd->cmd_tables->status));
1045 #endif
1046 INCSTAT(esiop_stat_intr_done);
1047 esiop_cmd->cmd_c.status = CMDST_DONE;
1048 goto end;
1049 default:
1050 printf("unknown irqcode %x\n", irqcode);
1051 if (xs) {
1052 xs->error = XS_SELTIMEOUT;
1053 goto end;
1054 }
1055 goto reset;
1056 }
1057 return 1;
1058 }
1059 /* We just should't get there */
1060 panic("siop_intr: I shouldn't be there !");
1061
1062 end:
1063 /*
1064 * restart the script now if command completed properly
1065 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
1066 * queue
1067 */
1068 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1069 #ifdef SIOP_DEBUG_INTR
1070 printf("esiop_intr end: status %d\n", xs->status);
1071 #endif
1072 if (xs->status == SCSI_OK)
1073 CALL_SCRIPT(Ent_script_sched);
1074 else
1075 restart = 1;
1076 if (tag >= 0)
1077 esiop_lun->tactive[tag] = NULL;
1078 else
1079 esiop_lun->active = NULL;
1080 esiop_scsicmd_end(esiop_cmd);
1081 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1082 esiop_del_dev(sc, target, lun);
1083 if (restart)
1084 CALL_SCRIPT(Ent_script_sched);
1085 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1086 /* a command terminated, so we have free slots now */
1087 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1088 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1089 }
1090
1091 return retval;
1092 }
1093
/*
 * Complete a command: translate the SCSI status byte into a scsipi
 * error code, tear down the command/data DMA mappings, put the command
 * descriptor back on the free list and notify the upper layer.
 */
void
esiop_scsicmd_end(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		esiop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(esiop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    sc->sc_c.sc_dev.dv_xname,
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 * (set by the caller, e.g. XS_REQUEUE or XS_RESET)
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
	}
	/* sync (direction-dependent) and unload the data buffer, if any */
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data, 0,
		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data);
	}
	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
	/* cancel the timeout and recycle the command descriptor */
	callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
	esiop_cmd->cmd_c.status = CMDST_FREE;
	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
	/*
	 * NOTE(review): resid is unconditionally zeroed here — presumably
	 * residual accounting is not supported by this driver; confirm.
	 */
	xs->resid = 0;
	scsipi_done (xs);
}
1155
1156 void
1157 esiop_checkdone(sc)
1158 struct esiop_softc *sc;
1159 {
1160 int target, lun, tag;
1161 struct esiop_target *esiop_target;
1162 struct esiop_lun *esiop_lun;
1163 struct esiop_cmd *esiop_cmd;
1164 int status;
1165
1166 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1167 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1168 if (esiop_target == NULL)
1169 continue;
1170 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1171 esiop_lun = esiop_target->esiop_lun[lun];
1172 if (esiop_lun == NULL)
1173 continue;
1174 esiop_cmd = esiop_lun->active;
1175 if (esiop_cmd) {
1176 esiop_table_sync(esiop_cmd,
1177 BUS_DMASYNC_POSTREAD |
1178 BUS_DMASYNC_POSTWRITE);
1179 status = le32toh(esiop_cmd->cmd_tables->status);
1180 if (status == SCSI_OK) {
1181 /* Ok, this command has been handled */
1182 esiop_cmd->cmd_c.xs->status = status;
1183 esiop_lun->active = NULL;
1184 esiop_scsicmd_end(esiop_cmd);
1185 }
1186 }
1187 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1188 esiop_cmd = esiop_lun->tactive[tag];
1189 if (esiop_cmd == NULL)
1190 continue;
1191 esiop_table_sync(esiop_cmd,
1192 BUS_DMASYNC_POSTREAD |
1193 BUS_DMASYNC_POSTWRITE);
1194 status = le32toh(esiop_cmd->cmd_tables->status);
1195 if (status == SCSI_OK) {
1196 /* Ok, this command has been handled */
1197 esiop_cmd->cmd_c.xs->status = status;
1198 esiop_lun->tactive[tag] = NULL;
1199 esiop_scsicmd_end(esiop_cmd);
1200 }
1201 }
1202 }
1203 }
1204 }
1205
1206 void
1207 esiop_unqueue(sc, target, lun)
1208 struct esiop_softc *sc;
1209 int target;
1210 int lun;
1211 {
1212 int slot, tag;
1213 u_int32_t slotdsa;
1214 struct esiop_cmd *esiop_cmd;
1215 struct esiop_lun *esiop_lun =
1216 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1217
1218 /* first make sure to read valid data */
1219 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1220
1221 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1222 /* look for commands in the scheduler, not yet started */
1223 if (esiop_lun->tactive[tag] == NULL)
1224 continue;
1225 esiop_cmd = esiop_lun->tactive[tag];
1226 for (slot = 0; slot < A_ncmd_slots; slot++) {
1227 slotdsa = esiop_script_read(sc,
1228 sc->sc_shedoffset + slot * 2);
1229 if (slotdsa & A_f_cmd_free)
1230 continue;
1231 if ((slotdsa & ~A_f_cmd_free) == esiop_cmd->cmd_c.dsa)
1232 break;
1233 }
1234 if (slot > ESIOP_NTAG)
1235 continue; /* didn't find it */
1236 /* Mark this slot as ignore */
1237 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1238 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1239 /* ask to requeue */
1240 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1241 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1242 esiop_lun->tactive[tag] = NULL;
1243 esiop_scsicmd_end(esiop_cmd);
1244 }
1245 }
1246
/*
 * handle a rejected queue tag message: the command will run untagged,
 * so the reselect script table has to be adjusted.
 * Returns 0 on success, -1 if an untagged command is already active
 * for this target/lun (the caller then has to fail the command).
 */


int
esiop_handle_qtag_reject(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	/* the tag we sent is the third byte of the msg_out buffer */
	int tag = esiop_cmd->cmd_tables->msg_out[2];
	struct esiop_target *esiop_target =
	    (struct esiop_target*)sc->sc_c.targets[target];
	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
	    esiop_cmd->cmd_c.status);
#endif

	/* only one untagged command can be active at a time */
	if (esiop_lun->active != NULL) {
		printf("%s: untagged command already running for target %d "
		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
		    target, lun, esiop_lun->active->cmd_c.status);
		return -1;
	}
	/* clear tag slot */
	esiop_lun->tactive[tag] = NULL;
	/* add command to non-tagged slot */
	esiop_lun->active = esiop_cmd;
	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
	esiop_cmd->cmd_c.tag = -1;
	/* update DSA table: reselect entry now points at the command DSA */
	esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
	    esiop_cmd->cmd_c.dsa);
	esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
	/* make the updated table visible to the SCRIPT processor */
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}
1290
/*
 * handle a bus reset: reset the chip, fail all commands that were in
 * progress (completed ones first, via esiop_checkdone()), reset every
 * target's transfer parameters and report the reset to the upper layer.
 */
void
esiop_handle_reset(sc)
	struct esiop_softc *sc;
{
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	int target, lun, tag;
	/*
	 * scsi bus reset. reset the chip and restart
	 * the queue. Need to clean up all active commands
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	esiop_reset(sc);
	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first commands that completed, then commands
	 * being executed
	 */
	esiop_checkdone(sc);
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		struct esiop_target *esiop_target =
		    (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		for (lun = 0; lun < 8; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			/*
			 * tag == -1 is the untagged slot; tagged slots are
			 * only scanned when the target does tagged queuing.
			 */
			for (tag = -1; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    ESIOP_NTAG : 0);
			    tag++) {
				if (tag >= 0)
					esiop_cmd = esiop_lun->tactive[tag];
				else
					esiop_cmd = esiop_lun->active;
				if (esiop_cmd == NULL)
					continue;
				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				/* a timed-out command caused this reset */
				esiop_cmd->cmd_c.xs->error =
				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				if (tag >= 0)
					esiop_lun->tactive[tag] = NULL;
				else
					esiop_lun->active = NULL;
				esiop_cmd->cmd_c.status = CMDST_DONE;
				esiop_scsicmd_end(esiop_cmd);
			}
		}
		/* transfer parameters must be renegotiated after a reset */
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}
1364
1365 void
1366 esiop_scsipi_request(chan, req, arg)
1367 struct scsipi_channel *chan;
1368 scsipi_adapter_req_t req;
1369 void *arg;
1370 {
1371 struct scsipi_xfer *xs;
1372 struct scsipi_periph *periph;
1373 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1374 struct esiop_cmd *esiop_cmd;
1375 struct esiop_target *esiop_target;
1376 int s, error, i;
1377 int target;
1378 int lun;
1379
1380 switch (req) {
1381 case ADAPTER_REQ_RUN_XFER:
1382 xs = arg;
1383 periph = xs->xs_periph;
1384 target = periph->periph_target;
1385 lun = periph->periph_lun;
1386
1387 s = splbio();
1388 #ifdef SIOP_DEBUG_SCHED
1389 printf("starting cmd for %d:%d\n", target, lun);
1390 #endif
1391 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1392 if (esiop_cmd == NULL) {
1393 xs->error = XS_RESOURCE_SHORTAGE;
1394 scsipi_done(xs);
1395 splx(s);
1396 return;
1397 }
1398 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1399 #ifdef DIAGNOSTIC
1400 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1401 panic("siop_scsicmd: new cmd not free");
1402 #endif
1403 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1404 if (esiop_target == NULL) {
1405 #ifdef SIOP_DEBUG
1406 printf("%s: alloc siop_target for target %d\n",
1407 sc->sc_c.sc_dev.dv_xname, target);
1408 #endif
1409 sc->sc_c.targets[target] =
1410 malloc(sizeof(struct esiop_target),
1411 M_DEVBUF, M_NOWAIT | M_ZERO);
1412 if (sc->sc_c.targets[target] == NULL) {
1413 printf("%s: can't malloc memory for "
1414 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1415 target);
1416 xs->error = XS_RESOURCE_SHORTAGE;
1417 scsipi_done(xs);
1418 splx(s);
1419 return;
1420 }
1421 esiop_target =
1422 (struct esiop_target*)sc->sc_c.targets[target];
1423 esiop_target->target_c.status = TARST_PROBING;
1424 esiop_target->target_c.flags = 0;
1425 esiop_target->target_c.id =
1426 sc->sc_c.clock_div << 24; /* scntl3 */
1427 esiop_target->target_c.id |= target << 16; /* id */
1428 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1429
1430 for (i=0; i < 8; i++)
1431 esiop_target->esiop_lun[i] = NULL;
1432 esiop_target_register(sc, target);
1433 }
1434 if (esiop_target->esiop_lun[lun] == NULL) {
1435 esiop_target->esiop_lun[lun] =
1436 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1437 M_NOWAIT|M_ZERO);
1438 if (esiop_target->esiop_lun[lun] == NULL) {
1439 printf("%s: can't alloc esiop_lun for "
1440 "target %d lun %d\n",
1441 sc->sc_c.sc_dev.dv_xname, target, lun);
1442 xs->error = XS_RESOURCE_SHORTAGE;
1443 scsipi_done(xs);
1444 splx(s);
1445 return;
1446 }
1447 }
1448 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1449 esiop_cmd->cmd_c.xs = xs;
1450 esiop_cmd->cmd_c.flags = 0;
1451 esiop_cmd->cmd_c.status = CMDST_READY;
1452
1453 /* load the DMA maps */
1454 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1455 esiop_cmd->cmd_c.dmamap_cmd,
1456 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1457 if (error) {
1458 printf("%s: unable to load cmd DMA map: %d\n",
1459 sc->sc_c.sc_dev.dv_xname, error);
1460 xs->error = XS_DRIVER_STUFFUP;
1461 scsipi_done(xs);
1462 splx(s);
1463 return;
1464 }
1465 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1466 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1467 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1468 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1469 ((xs->xs_control & XS_CTL_DATA_IN) ?
1470 BUS_DMA_READ : BUS_DMA_WRITE));
1471 if (error) {
1472 printf("%s: unable to load cmd DMA map: %d",
1473 sc->sc_c.sc_dev.dv_xname, error);
1474 xs->error = XS_DRIVER_STUFFUP;
1475 scsipi_done(xs);
1476 bus_dmamap_unload(sc->sc_c.sc_dmat,
1477 esiop_cmd->cmd_c.dmamap_cmd);
1478 splx(s);
1479 return;
1480 }
1481 bus_dmamap_sync(sc->sc_c.sc_dmat,
1482 esiop_cmd->cmd_c.dmamap_data, 0,
1483 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1484 (xs->xs_control & XS_CTL_DATA_IN) ?
1485 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1486 }
1487 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1488 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1489 BUS_DMASYNC_PREWRITE);
1490
1491 if (xs->xs_tag_type)
1492 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1493 else
1494 esiop_cmd->cmd_c.tag = -1;
1495 siop_setuptables(&esiop_cmd->cmd_c);
1496 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1497 htole32(A_f_c_target | A_f_c_lun);
1498 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1499 htole32((target << 8) | (lun << 16));
1500 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1501 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1502 htole32(A_f_c_tag);
1503 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1504 htole32(esiop_cmd->cmd_c.tag << 24);
1505 }
1506
1507 esiop_table_sync(esiop_cmd,
1508 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1509 esiop_start(sc, esiop_cmd);
1510 if (xs->xs_control & XS_CTL_POLL) {
1511 /* poll for command completion */
1512 while ((xs->xs_status & XS_STS_DONE) == 0) {
1513 delay(1000);
1514 esiop_intr(sc);
1515 }
1516 }
1517 splx(s);
1518 return;
1519
1520 case ADAPTER_REQ_GROW_RESOURCES:
1521 #ifdef SIOP_DEBUG
1522 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1523 sc->sc_c.sc_adapt.adapt_openings);
1524 #endif
1525 esiop_morecbd(sc);
1526 return;
1527
1528 case ADAPTER_REQ_SET_XFER_MODE:
1529 {
1530 struct scsipi_xfer_mode *xm = arg;
1531 if (sc->sc_c.targets[xm->xm_target] == NULL)
1532 return;
1533 s = splbio();
1534 if (xm->xm_mode & PERIPH_CAP_TQING)
1535 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1536 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1537 (sc->sc_c.features & SF_BUS_WIDE))
1538 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1539 if (xm->xm_mode & PERIPH_CAP_SYNC)
1540 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1541 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1542 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1543 sc->sc_c.targets[xm->xm_target]->status =
1544 TARST_ASYNC;
1545
1546 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1547 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1548 /* allocate a lun sw entry for this device */
1549 esiop_add_dev(sc, xm->xm_target, lun);
1550 }
1551
1552 splx(s);
1553 }
1554 }
1555 }
1556
/*
 * Queue a command in the chip's scheduler: write its DSA into a free
 * scheduler slot and into the target/lun (or tag) DSA tables, start
 * the timeout, then kick the SCRIPT processor via SIGP.
 */
static void
esiop_start(sc, esiop_cmd)
	struct esiop_softc *sc;
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_lun *esiop_lun;
	struct esiop_target *esiop_target;
	int timeout;
	int target, lun, slot;

	/*
	 * NOTE(review): 'nintr' is not declared in this function;
	 * presumably a file-scope debug/statistics counter — confirm.
	 */
	nintr = 0;

	/*
	 * first make sure to read valid data
	 */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * We use a circular queue here. sc->sc_currschedslot points to a
	 * free slot, unless we have filled the queue. Check this.
	 */
	slot = sc->sc_currschedslot;
	if ((esiop_script_read(sc, sc->sc_shedoffset + slot * 2) &
	    A_f_cmd_free) == 0) {
		/*
		 * no more free slot, no need to continue. freeze the queue
		 * and requeue this command.
		 */
		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
		sc->sc_flags |= SCF_CHAN_NOSLOT;
		esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
		esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
		esiop_scsicmd_end(esiop_cmd);
		return;
	}
	/* OK, we can use this slot */

	target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
	esiop_lun = esiop_target->esiop_lun[lun];
	/* if non-tagged command active, panic: this shouldn't happen */
	if (esiop_lun->active != NULL) {
		panic("esiop_start: tagged cmd while untagged running");
	}
#ifdef DIAGNOSTIC
	/* sanity check the tag if needed */
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
			panic("esiop_start: tag not free");
		if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
		    esiop_cmd->cmd_c.tag < 0) {
			scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
			printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
			panic("esiop_start: invalid tag id");
		}
	}
#endif
#ifdef SIOP_DEBUG_SCHED
	printf("using slot %d for DSA 0x%lx\n", slot,
	    (u_long)esiop_cmd->cmd_c.dsa);
#endif
	/* mark command as active */
	if (esiop_cmd->cmd_c.status == CMDST_READY)
		esiop_cmd->cmd_c.status = CMDST_ACTIVE;
	else
		panic("esiop_start: bad status");
	if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
		esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
		/* DSA table for reselect */
		if ((esiop_lun->lun_flags & LUNF_TAGTABLE) == 0) {
			/* first tagged cmd: install the tag table itself */
			esiop_script_write(sc,
			    esiop_target->lun_table_offset + lun + 2,
			    esiop_lun->lun_tagtbl->tbl_dsa);
			esiop_lun->lun_flags |= LUNF_TAGTABLE;
		}
		esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
		    htole32(esiop_cmd->cmd_c.dsa);
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_lun->lun_tagtbl->tblblk->blkmap,
		    esiop_lun->lun_tagtbl->tbl_offset,
		    sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
	} else {
		esiop_lun->active = esiop_cmd;
		/* DSA table for reselect */
		esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
		    esiop_cmd->cmd_c.dsa);
		esiop_lun->lun_flags &= ~LUNF_TAGTABLE;

	}
	/* scheduler slot: ID, then DSA */
	esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
	    sc->sc_c.targets[target]->id);
	esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
	    esiop_cmd->cmd_c.dsa);
	/* handle timeout */
	if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
		/* start expire timer */
		timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
		if (timeout == 0)
			timeout = 1;
		callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
		    timeout, esiop_timeout, esiop_cmd);
	}
	/* make sure SCRIPT processor will read valid data */
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* Signal script it has some work to do */
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_ISTAT, ISTAT_SIGP);
	/* update the current slot, and wait for IRQ */
	sc->sc_currschedslot++;
	if (sc->sc_currschedslot >= A_ncmd_slots)
		sc->sc_currschedslot = 0;
	return;
}
1672
1673 void
1674 esiop_timeout(v)
1675 void *v;
1676 {
1677 struct esiop_cmd *esiop_cmd = v;
1678 struct esiop_softc *sc =
1679 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1680 int s;
1681
1682 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1683 printf("command timeout\n");
1684
1685 s = splbio();
1686 /* reset the scsi bus */
1687 siop_resetbus(&sc->sc_c);
1688
1689 /* deactivate callout */
1690 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1691 /*
1692 * mark command has being timed out and just return;
1693 * the bus reset will generate an interrupt,
1694 * it will be handled in siop_intr()
1695 */
1696 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1697 splx(s);
1698 return;
1699
1700 }
1701
1702 void
1703 esiop_dump_script(sc)
1704 struct esiop_softc *sc;
1705 {
1706 int i;
1707 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1708 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1709 le32toh(sc->sc_c.sc_script[i]),
1710 le32toh(sc->sc_c.sc_script[i+1]));
1711 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1712 0xc0000000) {
1713 i++;
1714 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1715 }
1716 printf("\n");
1717 }
1718 }
1719
1720 void
1721 esiop_morecbd(sc)
1722 struct esiop_softc *sc;
1723 {
1724 int error, i, s;
1725 bus_dma_segment_t seg;
1726 int rseg;
1727 struct esiop_cbd *newcbd;
1728 struct esiop_xfer *xfer;
1729 bus_addr_t dsa;
1730
1731 /* allocate a new list head */
1732 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1733 if (newcbd == NULL) {
1734 printf("%s: can't allocate memory for command descriptors "
1735 "head\n", sc->sc_c.sc_dev.dv_xname);
1736 return;
1737 }
1738
1739 /* allocate cmd list */
1740 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1741 M_DEVBUF, M_NOWAIT|M_ZERO);
1742 if (newcbd->cmds == NULL) {
1743 printf("%s: can't allocate memory for command descriptors\n",
1744 sc->sc_c.sc_dev.dv_xname);
1745 goto bad3;
1746 }
1747 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1748 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1749 if (error) {
1750 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1751 sc->sc_c.sc_dev.dv_xname, error);
1752 goto bad2;
1753 }
1754 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1755 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1756 if (error) {
1757 printf("%s: unable to map cbd DMA memory, error = %d\n",
1758 sc->sc_c.sc_dev.dv_xname, error);
1759 goto bad2;
1760 }
1761 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1762 BUS_DMA_NOWAIT, &newcbd->xferdma);
1763 if (error) {
1764 printf("%s: unable to create cbd DMA map, error = %d\n",
1765 sc->sc_c.sc_dev.dv_xname, error);
1766 goto bad1;
1767 }
1768 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1769 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1770 if (error) {
1771 printf("%s: unable to load cbd DMA map, error = %d\n",
1772 sc->sc_c.sc_dev.dv_xname, error);
1773 goto bad0;
1774 }
1775 #ifdef DEBUG
1776 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1777 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1778 #endif
1779 for (i = 0; i < SIOP_NCMDPB; i++) {
1780 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1781 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1782 &newcbd->cmds[i].cmd_c.dmamap_data);
1783 if (error) {
1784 printf("%s: unable to create data DMA map for cbd: "
1785 "error %d\n",
1786 sc->sc_c.sc_dev.dv_xname, error);
1787 goto bad0;
1788 }
1789 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1790 sizeof(struct scsipi_generic), 1,
1791 sizeof(struct scsipi_generic), 0,
1792 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1793 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1794 if (error) {
1795 printf("%s: unable to create cmd DMA map for cbd %d\n",
1796 sc->sc_c.sc_dev.dv_xname, error);
1797 goto bad0;
1798 }
1799 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1800 newcbd->cmds[i].esiop_cbdp = newcbd;
1801 xfer = &newcbd->xfers[i];
1802 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1803 memset(newcbd->cmds[i].cmd_tables, 0,
1804 sizeof(struct esiop_xfer));
1805 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1806 i * sizeof(struct esiop_xfer);
1807 newcbd->cmds[i].cmd_c.dsa = dsa;
1808 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1809 xfer->siop_tables.t_msgout.count= htole32(1);
1810 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1811 xfer->siop_tables.t_msgin.count= htole32(1);
1812 xfer->siop_tables.t_msgin.addr = htole32(dsa + 8);
1813 xfer->siop_tables.t_extmsgin.count= htole32(2);
1814 xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1815 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 11);
1816 xfer->siop_tables.t_status.count= htole32(1);
1817 xfer->siop_tables.t_status.addr = htole32(dsa + 16);
1818
1819 s = splbio();
1820 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1821 splx(s);
1822 #ifdef SIOP_DEBUG
1823 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1824 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1825 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1826 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1827 #endif
1828 }
1829 s = splbio();
1830 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1831 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1832 splx(s);
1833 return;
1834 bad0:
1835 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1836 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1837 bad1:
1838 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1839 bad2:
1840 free(newcbd->cmds, M_DEVBUF);
1841 bad3:
1842 free(newcbd, M_DEVBUF);
1843 return;
1844 }
1845
1846 void
1847 esiop_moretagtbl(sc)
1848 struct esiop_softc *sc;
1849 {
1850 int error, i, j, s;
1851 bus_dma_segment_t seg;
1852 int rseg;
1853 struct esiop_dsatblblk *newtblblk;
1854 struct esiop_dsatbl *newtbls;
1855 u_int32_t *tbls;
1856
1857 /* allocate a new list head */
1858 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1859 M_DEVBUF, M_NOWAIT|M_ZERO);
1860 if (newtblblk == NULL) {
1861 printf("%s: can't allocate memory for tag DSA table block\n",
1862 sc->sc_c.sc_dev.dv_xname);
1863 return;
1864 }
1865
1866 /* allocate tbl list */
1867 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1868 M_DEVBUF, M_NOWAIT|M_ZERO);
1869 if (newtbls == NULL) {
1870 printf("%s: can't allocate memory for command descriptors\n",
1871 sc->sc_c.sc_dev.dv_xname);
1872 goto bad3;
1873 }
1874 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1875 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1876 if (error) {
1877 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1878 sc->sc_c.sc_dev.dv_xname, error);
1879 goto bad2;
1880 }
1881 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1882 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1883 if (error) {
1884 printf("%s: unable to map tbls DMA memory, error = %d\n",
1885 sc->sc_c.sc_dev.dv_xname, error);
1886 goto bad2;
1887 }
1888 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1889 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1890 if (error) {
1891 printf("%s: unable to create tbl DMA map, error = %d\n",
1892 sc->sc_c.sc_dev.dv_xname, error);
1893 goto bad1;
1894 }
1895 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1896 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1897 if (error) {
1898 printf("%s: unable to load tbl DMA map, error = %d\n",
1899 sc->sc_c.sc_dev.dv_xname, error);
1900 goto bad0;
1901 }
1902 #ifdef DEBUG
1903 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1904 sc->sc_c.sc_dev.dv_xname,
1905 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1906 #endif
1907 for (i = 0; i < ESIOP_NTPB; i++) {
1908 newtbls[i].tblblk = newtblblk;
1909 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1910 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1911 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1912 newtbls[i].tbl_offset;
1913 for (j = 0; j < ESIOP_NTAG; j++)
1914 newtbls[i].tbl[j] = j;
1915 s = splbio();
1916 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1917 splx(s);
1918 }
1919 s = splbio();
1920 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1921 splx(s);
1922 return;
1923 bad0:
1924 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1925 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1926 bad1:
1927 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1928 bad2:
1929 free(newtbls, M_DEVBUF);
1930 bad3:
1931 free(newtblblk, M_DEVBUF);
1932 return;
1933 }
1934
1935 void
1936 esiop_update_scntl3(sc, _siop_target)
1937 struct esiop_softc *sc;
1938 struct siop_common_target *_siop_target;
1939 {
1940 int slot;
1941 u_int32_t slotid, id;
1942
1943 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1944 esiop_script_write(sc, esiop_target->lun_table_offset,
1945 esiop_target->target_c.id);
1946 id = esiop_target->target_c.id & 0x00ff0000;
1947 /* There may be other commands waiting in the scheduler. handle them */
1948 for (slot = 0; slot < A_ncmd_slots; slot++) {
1949 slotid =
1950 esiop_script_read(sc, sc->sc_shedoffset + slot * 2 + 1);
1951 if ((slotid & 0x00ff0000) == id)
1952 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1953 esiop_target->target_c.id);
1954 }
1955 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1956 }
1957
1958 void
1959 esiop_add_dev(sc, target, lun)
1960 struct esiop_softc *sc;
1961 int target;
1962 int lun;
1963 {
1964 struct esiop_target *esiop_target =
1965 (struct esiop_target *)sc->sc_c.targets[target];
1966 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1967
1968 if (esiop_target->target_c.flags & TARF_TAG) {
1969 /* we need a tag DSA table */
1970 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1971 if (esiop_lun->lun_tagtbl == NULL) {
1972 esiop_moretagtbl(sc);
1973 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1974 if (esiop_lun->lun_tagtbl == NULL) {
1975 /* no resources, run untagged */
1976 esiop_target->target_c.flags &= ~TARF_TAG;
1977 return;
1978 }
1979 }
1980 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1981
1982 }
1983 }
1984
1985 void
1986 esiop_del_dev(sc, target, lun)
1987 struct esiop_softc *sc;
1988 int target;
1989 int lun;
1990 {
1991 struct esiop_target *esiop_target;
1992 #ifdef SIOP_DEBUG
1993 printf("%s:%d:%d: free lun sw entry\n",
1994 sc->sc_c.sc_dev.dv_xname, target, lun);
1995 #endif
1996 if (sc->sc_c.targets[target] == NULL)
1997 return;
1998 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1999 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2000 esiop_target->esiop_lun[lun] = NULL;
2001 }
2002
2003 struct esiop_cmd *
2004 esiop_cmd_find(sc, target, dsa)
2005 struct esiop_softc *sc;
2006 int target;
2007 u_int32_t dsa;
2008 {
2009 int lun, tag;
2010 struct esiop_cmd *cmd;
2011 struct esiop_lun *esiop_lun;
2012 struct esiop_target *esiop_target =
2013 (struct esiop_target *)sc->sc_c.targets[target];
2014
2015 if (esiop_target == NULL)
2016 return NULL;
2017
2018 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2019 esiop_lun = esiop_target->esiop_lun[lun];
2020 if (esiop_lun == NULL)
2021 continue;
2022 cmd = esiop_lun->active;
2023 if (cmd && cmd->cmd_c.dsa == dsa)
2024 return cmd;
2025 if (esiop_target->target_c.flags & TARF_TAG) {
2026 for (tag = 0; tag < ESIOP_NTAG; tag++) {
2027 cmd = esiop_lun->tactive[tag];
2028 if (cmd && cmd->cmd_c.dsa == dsa)
2029 return cmd;
2030 }
2031 }
2032 }
2033 return NULL;
2034 }
2035
/*
 * Carve a lun DSA table for this target out of script RAM and record
 * its bus address in the per-target DSA table so the script can reach
 * it.  Advances sc_free_offset past the space consumed.
 */
void
esiop_target_register(sc, target)
	struct esiop_softc *sc;
	u_int32_t target;
{
	struct esiop_target *esiop_target =
	    (struct esiop_target *)sc->sc_c.targets[target];

	/* get a DSA table for this target */
	esiop_target->lun_table_offset = sc->sc_free_offset;
	/* one word per lun plus 2 extra slots (layout defined by the script) */
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
#ifdef SIOP_DEBUG
	printf("%s: lun table for target %d offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
	    sc->sc_free_offset);
#endif
	/* first 32 bits are ID (for select) */
	esiop_script_write(sc, esiop_target->lun_table_offset,
	    esiop_target->target_c.id);
	/* Record this table's bus address in the target DSA table */
	esiop_script_write(sc,
	    sc->sc_target_table_offset + target,
	    (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr);
	esiop_script_sync(sc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2063
#ifdef SIOP_STATS
/*
 * Dump the driver's interrupt statistics counters to the console.
 * Debugging aid, compiled in only under the SIOP_STATS option; the
 * esiop_stat_* counters are maintained elsewhere in this file.
 */
void
esiop_printstats()
{
	printf("esiop_stat_intr %d\n", esiop_stat_intr);
	printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
	printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
	printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
	printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
	printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
	printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
}
#endif
2077