1 /*	$NetBSD: esiop.c,v 1.2 2002/04/22 15:53:39 bouyer Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.2 2002/04/22 15:53:39 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_update_scntl3 __P((struct esiop_softc *,
98 struct siop_common_target *));
99 struct esiop_cmd * esiop_cmd_find __P((struct esiop_softc *, int, u_int32_t));
100 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
101
102 static int nintr = 0;
103
104 #ifdef SIOP_STATS
105 static int esiop_stat_intr = 0;
106 static int esiop_stat_intr_shortxfer = 0;
107 static int esiop_stat_intr_sdp = 0;
108 static int esiop_stat_intr_done = 0;
109 static int esiop_stat_intr_xferdisc = 0;
110 static int esiop_stat_intr_lunresel = 0;
111 static int esiop_stat_intr_qfull = 0;
112 void esiop_printstats __P((void));
113 #define INCSTAT(x) x++
114 #else
115 #define INCSTAT(x)
116 #endif
117
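/*
 * The SCRIPT lives either in on-chip RAM (SF_CHIP_RAM) or in a page of
 * host memory mapped for DMA. The helpers below hide that difference:
 * bus_space accesses when the script is in on-chip RAM, little-endian
 * memory accesses (le32toh/htole32) otherwise, with esiop_script_sync()
 * providing the DMA sync needed in the latter case.
 */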
118 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
119 static __inline__ void
120 esiop_script_sync(sc, ops)
121 struct esiop_softc *sc;
122 int ops;
123 {
124 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
125 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
126 PAGE_SIZE, ops);
127 }
128
129 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
130 static __inline__ u_int32_t
131 esiop_script_read(sc, offset)
132 struct esiop_softc *sc;
133 u_int offset;
134 {
135 if (sc->sc_c.features & SF_CHIP_RAM) {
136 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
137 offset * 4);
138 } else {
139 return le32toh(sc->sc_c.sc_script[offset]);
140 }
141 }
142
143 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
144 u_int32_t));
145 static __inline__ void
146 esiop_script_write(sc, offset, val)
147 struct esiop_softc *sc;
148 u_int offset;
149 u_int32_t val;
150 {
151 if (sc->sc_c.features & SF_CHIP_RAM) {
152 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
153 offset * 4, val);
154 } else {
155 sc->sc_c.sc_script[offset] = htole32(val);
156 }
157 }
158
159 void
160 esiop_attach(sc)
161 struct esiop_softc *sc;
162 {
163 int error, i;
164 bus_dma_segment_t seg;
165 int rseg;
166
167 /*
168 * Allocate DMA-safe memory for the script and map it.
169 */
170 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
171 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE,
172 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
173 if (error) {
174 printf("%s: unable to allocate script DMA memory, "
175 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
176 return;
177 }
178 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
179 (caddr_t *)&sc->sc_c.sc_script,
180 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
181 if (error) {
182 printf("%s: unable to map script DMA memory, "
183 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
184 return;
185 }
186 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1,
187 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_c.sc_scriptdma);
188 if (error) {
189 printf("%s: unable to create script DMA map, "
190 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
191 return;
192 }
193 error = bus_dmamap_load(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma,
194 sc->sc_c.sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
195 if (error) {
196 printf("%s: unable to load script DMA map, "
197 "error = %d\n", sc->sc_c.sc_dev.dv_xname, error);
198 return;
199 }
200 sc->sc_c.sc_scriptaddr =
201 sc->sc_c.sc_scriptdma->dm_segs[0].ds_addr;
202 sc->sc_c.ram_size = PAGE_SIZE;
203 }
204 TAILQ_INIT(&sc->free_list);
205 TAILQ_INIT(&sc->cmds);
206 TAILQ_INIT(&sc->free_tagtbl);
207 TAILQ_INIT(&sc->tag_tblblk);
208 sc->sc_currschedslot = 0;
209 #ifdef SIOP_DEBUG
210 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
211 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
212 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
213 #endif
214
215 sc->sc_c.sc_adapt.adapt_dev = &sc->sc_c.sc_dev;
216 sc->sc_c.sc_adapt.adapt_nchannels = 1;
217 sc->sc_c.sc_adapt.adapt_openings = 0;
218 sc->sc_c.sc_adapt.adapt_max_periph = 1 /* XXX ESIOP_NTAG - 1 */ ;
219 sc->sc_c.sc_adapt.adapt_ioctl = siop_ioctl;
220 sc->sc_c.sc_adapt.adapt_minphys = minphys;
221 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
222
223 memset(&sc->sc_c.sc_chan, 0, sizeof(sc->sc_c.sc_chan));
224 sc->sc_c.sc_chan.chan_adapter = &sc->sc_c.sc_adapt;
225 sc->sc_c.sc_chan.chan_bustype = &scsi_bustype;
226 sc->sc_c.sc_chan.chan_channel = 0;
227 sc->sc_c.sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
228 sc->sc_c.sc_chan.chan_ntargets =
229 (sc->sc_c.features & SF_BUS_WIDE) ? 16 : 8;
230 sc->sc_c.sc_chan.chan_nluns = 8;
231 sc->sc_c.sc_chan.chan_id =
232 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCID);
233 if (sc->sc_c.sc_chan.chan_id == 0 ||
234 sc->sc_c.sc_chan.chan_id >= sc->sc_c.sc_chan.chan_ntargets)
235 sc->sc_c.sc_chan.chan_id = SIOP_DEFAULT_TARGET;
236
237 for (i = 0; i < 16; i++)
238 sc->sc_c.targets[i] = NULL;
239
240 /* find min/max sync period for this chip */
241 sc->sc_c.maxsync = 0;
242 sc->sc_c.minsync = 255;
243 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
244 if (sc->sc_c.clock_period != scf_period[i].clock)
245 continue;
246 if (sc->sc_c.maxsync < scf_period[i].period)
247 sc->sc_c.maxsync = scf_period[i].period;
248 if (sc->sc_c.minsync > scf_period[i].period)
249 sc->sc_c.minsync = scf_period[i].period;
250 }
251 if (sc->sc_c.maxsync == 255 || sc->sc_c.minsync == 0)
252 panic("siop: can't find my sync parameters\n");
253 /* Do a bus reset, so that devices fall back to narrow/async */
254 siop_resetbus(&sc->sc_c);
255 /*
256 * esiop_reset() will reset the chip, thus clearing pending interrupts
257 */
258 esiop_reset(sc);
259 #ifdef DUMP_SCRIPT
260 esiop_dump_script(sc);
261 #endif
262
263 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
264 }
265
266 void
267 esiop_reset(sc)
268 struct esiop_softc *sc;
269 {
270 int i, j;
271 u_int32_t addr;
272 u_int32_t msgin_addr;
273
274 siop_common_reset(&sc->sc_c);
275
276 /*
277 * We copy the script at the beginning of RAM. Then there are 8 bytes
278 * for message-in data.
279 */
280 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
281 msgin_addr =
282 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
283 sc->sc_free_offset += 2;
284 /* then we have the scheduler ring */
285 sc->sc_shedoffset = sc->sc_free_offset;
286 sc->sc_free_offset += A_ncmd_slots * 2;
287 /* then the targets DSA table */
288 sc->sc_target_table_offset = sc->sc_free_offset;
289 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
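/*
 * Resulting layout of the script memory (on-chip RAM or the DMA page),
 * in 32-bit words: the script itself, a 2-word message-in buffer, the
 * scheduler ring (A_ncmd_slots slots of 2 words each), and finally a
 * table of one word per target.
 */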
290 /* copy and patch the script */
291 if (sc->sc_c.features & SF_CHIP_RAM) {
292 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
293 esiop_script,
294 sizeof(esiop_script) / sizeof(esiop_script[0]));
295 for (j = 0; j <
296 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
297 j++) {
298 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
299 E_tlq_offset_Used[j] * 4,
300 sizeof(struct siop_common_xfer));
301 }
302 for (j = 0; j <
303 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
304 j++) {
305 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
306 E_abs_msgin2_Used[j] * 4, msgin_addr);
307 }
308
309 #ifdef SIOP_SYMLED
310 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
311 Ent_led_on1, siop_led_on,
312 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
313 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
314 Ent_led_on2, siop_led_on,
315 sizeof(siop_led_on) / sizeof(siop_led_on[0]));
316 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
317 Ent_led_off, siop_led_off,
318 sizeof(siop_led_off) / sizeof(siop_led_off[0]));
319 #endif
320 } else {
321 for (j = 0;
322 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
323 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
324 }
325 for (j = 0; j <
326 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
327 j++) {
328 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
329 htole32(sizeof(struct siop_common_xfer));
330 }
331 for (j = 0; j <
332 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
333 j++) {
334 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
335 htole32(msgin_addr);
336 }
337
338 #ifdef SIOP_SYMLED
339 for (j = 0;
340 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
341 sc->sc_c.sc_script[
342 Ent_led_on1 / sizeof(siop_led_on[0]) + j
343 ] = htole32(siop_led_on[j]);
344 for (j = 0;
345 j < (sizeof(siop_led_on) / sizeof(siop_led_on[0])); j++)
346 sc->sc_c.sc_script[
347 Ent_led_on2 / sizeof(siop_led_on[0]) + j
348 ] = htole32(siop_led_on[j]);
349 for (j = 0;
350 j < (sizeof(siop_led_off) / sizeof(siop_led_off[0])); j++)
351 sc->sc_c.sc_script[
352 Ent_led_off / sizeof(siop_led_off[0]) + j
353 ] = htole32(siop_led_off[j]);
354 #endif
355 }
356 /* get base of scheduler ring */
357 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
358 /* init scheduler */
359 for (i = 0; i < A_ncmd_slots; i++) {
360 esiop_script_write(sc, sc->sc_shedoffset + i * 2, A_f_cmd_free);
361 esiop_script_write(sc, sc->sc_shedoffset + i * 2 + 1, 0);
362 }
363 sc->sc_currschedslot = 0;
364 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
365 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
366 /*
367 * 0x78000000 is a 'move data8 to reg'. data8 is the second
368 * octet, reg offset is the third.
369 */
370 esiop_script_write(sc, Ent_cmdr0 / 4,
371 0x78640000 | ((addr & 0x000000ff) << 8));
372 esiop_script_write(sc, Ent_cmdr1 / 4,
373 0x78650000 | ((addr & 0x0000ff00) ));
374 esiop_script_write(sc, Ent_cmdr2 / 4,
375 0x78660000 | ((addr & 0x00ff0000) >> 8));
376 esiop_script_write(sc, Ent_cmdr3 / 4,
377 0x78670000 | ((addr & 0xff000000) >> 16));
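/*
 * The four writes above patch the scheduler ring base address, one byte
 * per 'move data8 to reg' instruction, into the script's cmdr0..cmdr3
 * templates (register offsets 0x64-0x67, apparently the four bytes of the
 * SCRATCHD register loaded with the same address just above). The same
 * byte-at-a-time patching is reused below for the target DSA table base
 * at Ent_load_targtable.
 */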
378 /* set flags */
379 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
380 /* write pointer of base of target DSA table */
381 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
382 sc->sc_c.sc_scriptaddr;
383 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
384 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
385 ((addr & 0x000000ff) << 8));
386 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
387 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
388 ((addr & 0x0000ff00) ));
389 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
390 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
391 ((addr & 0x00ff0000) >> 8));
392 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
393 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
394 ((addr & 0xff000000) >> 16));
395 #ifdef SIOP_DEBUG
396 printf("%s: target table offset %d free offset %d\n",
397 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
398 sc->sc_free_offset);
399 #endif
400
401 /* register existing targets */
402 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
403 if (sc->sc_c.targets[i])
404 esiop_target_register(sc, i);
405 }
406 /* start script */
407 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
408 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
409 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
410 }
411 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
412 sc->sc_c.sc_scriptaddr + Ent_reselect);
413 }
414
415 #if 0
416 #define CALL_SCRIPT(ent) do {\
417 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
418 esiop_cmd->cmd_c.dsa, \
419 sc->sc_c.sc_scriptaddr + ent); \
420 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
421 } while (0)
422 #else
423 #define CALL_SCRIPT(ent) do {\
424 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
425 } while (0)
426 #endif
427
428 int
429 esiop_intr(v)
430 void *v;
431 {
432 struct esiop_softc *sc = v;
433 struct esiop_target *esiop_target;
434 struct esiop_cmd *esiop_cmd;
435 struct esiop_lun *esiop_lun;
436 struct scsipi_xfer *xs;
437 int istat, sist, sstat1, dstat;
438 u_int32_t irqcode;
439 int need_reset = 0;
440 int offset, target, lun, tag;
441 u_int32_t tflags;
442 int freetarget = 0;
443 int restart = 0;
444 int slot;
445 int retval = 0;
446
447 again:
448 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
449 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
450 if (istat & ISTAT_SEM) {
451 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
452 SIOP_ISTAT, (istat & ~ISTAT_SEM));
453 esiop_checkdone(sc);
454 }
455 return retval;
456 }
457 retval = 1;
458 nintr++;
459 if (nintr > 100) {
460 panic("esiop: intr loop");
461 }
462 INCSTAT(esiop_stat_intr);
463 if (istat & ISTAT_INTF) {
464 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
465 SIOP_ISTAT, ISTAT_INTF);
466 esiop_checkdone(sc);
467 goto again;
468 }
469 /* get CMD from T/L/Q */
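/*
 * SCRATCHC holds the T/L/Q word of the current command: byte 0 carries
 * the A_f_c_* flags, byte 1 the target, byte 2 the lun and byte 3 the
 * tag, as decoded below (and as built in esiop_scsipi_request()).
 */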
470 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
471 SIOP_SCRATCHC);
472 #ifdef SIOP_DEBUG_INTR
473 printf("interrupt, istat=0x%x tflags=0x%x "
474 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
475 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
476 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
477 SIOP_DSP) -
478 sc->sc_c.sc_scriptaddr));
479 #endif
480 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
481 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
482 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
483 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
484 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
485
486 if (target >= 0 && lun >= 0) {
487 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
488 if (esiop_target == NULL) {
489 printf("esiop_target (target %d) not valid\n", target);
490 goto none;
491 }
492 esiop_lun = esiop_target->esiop_lun[lun];
493 if (esiop_lun == NULL) {
494 printf("esiop_lun (target %d lun %d) not valid\n",
495 target, lun);
496 goto none;
497 }
498 esiop_cmd =
499 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
500 if (esiop_cmd == NULL) {
501 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
502 target, lun, tag);
503 goto none;
504 }
505 xs = esiop_cmd->cmd_c.xs;
506 #ifdef DIAGNOSTIC
507 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
508 printf("esiop_cmd (target %d lun %d) "
509 "not active (%d)\n", target, lun,
510 esiop_cmd->cmd_c.status);
511 goto none;
512 }
513 #endif
514 } else {
515 none:
516 xs = NULL;
517 esiop_target = NULL;
518 esiop_lun = NULL;
519 esiop_cmd = NULL;
520 }
521 if (istat & ISTAT_DIP) {
522 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
523 SIOP_DSTAT);
524 if (dstat & DSTAT_SSI) {
525 printf("single step dsp 0x%08x dsa 0x%08x\n",
526 (int)(bus_space_read_4(sc->sc_c.sc_rt,
527 sc->sc_c.sc_rh, SIOP_DSP) -
528 sc->sc_c.sc_scriptaddr),
529 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
530 SIOP_DSA));
531 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
532 (istat & ISTAT_SIP) == 0) {
533 bus_space_write_1(sc->sc_c.sc_rt,
534 sc->sc_c.sc_rh, SIOP_DCNTL,
535 bus_space_read_1(sc->sc_c.sc_rt,
536 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
537 }
538 return 1;
539 }
540 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
541 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
542 if (dstat & DSTAT_IID)
543 printf(" Illegal instruction");
544 if (dstat & DSTAT_ABRT)
545 printf(" abort");
546 if (dstat & DSTAT_BF)
547 printf(" bus fault");
548 if (dstat & DSTAT_MDPE)
549 printf(" parity");
550 if (dstat & DSTAT_DFE)
551 printf(" dma fifo empty");
552 printf(", DSP=0x%x DSA=0x%x: ",
553 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
554 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
555 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
556 if (esiop_cmd)
557 printf("last msg_in=0x%x status=0x%x\n",
558 esiop_cmd->cmd_tables->msg_in[0],
559 le32toh(esiop_cmd->cmd_tables->status));
560 else
561 printf(" current T/L/Q invalid\n");
562 need_reset = 1;
563 }
564 }
565 if (istat & ISTAT_SIP) {
566 if (istat & ISTAT_DIP)
567 delay(10);
568 /*
569 * Can't read sist0 & sist1 independently, or we have to
570 * insert a delay
571 */
572 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
573 SIOP_SIST0);
574 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
575 SIOP_SSTAT1);
576 #ifdef SIOP_DEBUG_INTR
577 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
578 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
579 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
580 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
581 SIOP_DSP) -
582 sc->sc_c.sc_scriptaddr));
583 #endif
584 if (sist & SIST0_RST) {
585 esiop_handle_reset(sc);
586 /* no table to flush here */
587 return 1;
588 }
589 if (sist & SIST0_SGE) {
590 if (esiop_cmd)
591 scsipi_printaddr(xs->xs_periph);
592 else
593 printf("%s:", sc->sc_c.sc_dev.dv_xname);
594 printf("scsi gross error\n");
595 goto reset;
596 }
597 if ((sist & SIST0_MA) && need_reset == 0) {
598 if (esiop_cmd) {
599 int scratchc0;
600 dstat = bus_space_read_1(sc->sc_c.sc_rt,
601 sc->sc_c.sc_rh, SIOP_DSTAT);
602 /*
603 * first restore DSA, in case we were in a S/G
604 * operation.
605 */
606 bus_space_write_4(sc->sc_c.sc_rt,
607 sc->sc_c.sc_rh,
608 SIOP_DSA, esiop_cmd->cmd_c.dsa);
609 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
610 sc->sc_c.sc_rh, SIOP_SCRATCHC);
611 switch (sstat1 & SSTAT1_PHASE_MASK) {
612 case SSTAT1_PHASE_STATUS:
613 /*
614 * previous phase may be aborted for any reason
615 * (for example, the target has less data to
616 * transfer than requested). Just go to status
617 * and the command should terminate.
618 */
619 INCSTAT(esiop_stat_intr_shortxfer);
620 if ((dstat & DSTAT_DFE) == 0)
621 siop_clearfifo(&sc->sc_c);
622 /* no table to flush here */
623 CALL_SCRIPT(Ent_status);
624 return 1;
625 case SSTAT1_PHASE_MSGIN:
626 /*
627 * target may be ready to disconnect
628 * Save data pointers just in case.
629 */
630 INCSTAT(esiop_stat_intr_xferdisc);
631 if (scratchc0 & A_f_c_data)
632 siop_sdp(&esiop_cmd->cmd_c);
633 else if ((dstat & DSTAT_DFE) == 0)
634 siop_clearfifo(&sc->sc_c);
635 bus_space_write_1(sc->sc_c.sc_rt,
636 sc->sc_c.sc_rh, SIOP_SCRATCHC,
637 scratchc0 & ~A_f_c_data);
638 esiop_table_sync(esiop_cmd,
639 BUS_DMASYNC_PREREAD |
640 BUS_DMASYNC_PREWRITE);
641 CALL_SCRIPT(Ent_msgin);
642 return 1;
643 }
644 printf("%s: unexpected phase mismatch %d\n",
645 sc->sc_c.sc_dev.dv_xname,
646 sstat1 & SSTAT1_PHASE_MASK);
647 } else {
648 printf("%s: phase mismatch without command\n",
649 sc->sc_c.sc_dev.dv_xname);
650 }
651 need_reset = 1;
652 }
653 if (sist & SIST0_PAR) {
654 /* parity error, reset */
655 if (esiop_cmd)
656 scsipi_printaddr(xs->xs_periph);
657 else
658 printf("%s:", sc->sc_c.sc_dev.dv_xname);
659 printf("parity error\n");
660 goto reset;
661 }
662 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
663 /* selection time out, assume there's no device here */
664 /*
665 * SCRATCHC has not been loaded yet, we have to find
666 * params by ourselves. scratchE0 should point to
667 * the next slot.
668 */
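/*
 * Each scheduler slot is two words: word 0 holds the command DSA plus
 * flag bits, word 1 the target id (target number in bits 16-23), which
 * is what the two script reads below extract.
 */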
669 slot = bus_space_read_1(sc->sc_c.sc_rt,
670 sc->sc_c.sc_rh, SIOP_SCRATCHE);
671 slot = (slot == 0) ? A_ncmd_slots : slot - 1;
672 esiop_script_sync(sc,
673 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
674 target = esiop_script_read(sc,
675 sc->sc_shedoffset + slot * 2 + 1) & 0x00ff0000;
676 target = (target >> 16) & 0xff;
677 esiop_cmd = esiop_cmd_find(sc, target,
678 esiop_script_read(sc,
679 sc->sc_shedoffset + slot * 2) & ~0x3);
680 if (esiop_cmd) {
681 xs = esiop_cmd->cmd_c.xs;
682 esiop_target = (struct esiop_target *)
683 esiop_cmd->cmd_c.siop_target;
684 lun = xs->xs_periph->periph_lun;
685 tag = esiop_cmd->cmd_c.tag;
686 esiop_lun = esiop_target->esiop_lun[lun];
687 esiop_cmd->cmd_c.status = CMDST_DONE;
688 xs->error = XS_SELTIMEOUT;
689 freetarget = 1;
690 goto end;
691 } else {
692 printf("%s: selection timeout without "
693 "command\n", sc->sc_c.sc_dev.dv_xname);
694 need_reset = 1;
695 }
696 }
697 if (sist & SIST0_UDC) {
698 /*
699 * unexpected disconnect. Usually the target signals
700 * a fatal condition this way. Attempt to get sense.
701 */
702 if (esiop_cmd) {
703 esiop_cmd->cmd_tables->status =
704 htole32(SCSI_CHECK);
705 goto end;
706 }
707 printf("%s: unexpected disconnect without "
708 "command\n", sc->sc_c.sc_dev.dv_xname);
709 goto reset;
710 }
711 if (sist & (SIST1_SBMC << 8)) {
712 /* SCSI bus mode change */
713 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
714 goto reset;
715 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
716 /*
717 * we have a script interrupt, it will
718 * restart the script.
719 */
720 goto scintr;
721 }
722 /*
723 * else we have to restart it ourselves, at the
724 * interrupted instruction.
725 */
726 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
727 SIOP_DSP,
728 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
729 SIOP_DSP) - 8);
730 return 1;
731 }
732 /* Else it's an unhandled exception (for now). */
733 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
734 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
735 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
736 SIOP_SSTAT1),
737 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
738 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
739 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
740 if (esiop_cmd) {
741 esiop_cmd->cmd_c.status = CMDST_DONE;
742 xs->error = XS_SELTIMEOUT;
743 goto end;
744 }
745 need_reset = 1;
746 }
747 if (need_reset) {
748 reset:
749 /* fatal error, reset the bus */
750 siop_resetbus(&sc->sc_c);
751 /* no table to flush here */
752 return 1;
753 }
754
755 scintr:
756 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
757 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
758 SIOP_DSPS);
759 #ifdef SIOP_DEBUG_INTR
760 printf("script interrupt 0x%x\n", irqcode);
761 #endif
762 /*
763 * no command, or an inactive command is only valid for a
764 * reselect interrupt
765 */
766 if ((irqcode & 0x80) == 0) {
767 if (esiop_cmd == NULL) {
768 printf(
769 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
770 sc->sc_c.sc_dev.dv_xname, irqcode);
771 goto reset;
772 }
773 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
774 printf("%s: command with invalid status "
775 "(IRQ code 0x%x current status %d) !\n",
776 sc->sc_c.sc_dev.dv_xname,
777 irqcode, esiop_cmd->cmd_c.status);
778 xs = NULL;
779 }
780 }
781 switch(irqcode) {
782 case A_int_err:
783 printf("error, DSP=0x%x\n",
784 (int)(bus_space_read_4(sc->sc_c.sc_rt,
785 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
786 if (xs) {
787 xs->error = XS_SELTIMEOUT;
788 goto end;
789 } else {
790 goto reset;
791 }
792 case A_int_msgin:
793 {
794 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
795 sc->sc_c.sc_rh, SIOP_SFBR);
796 if (msgin == MSG_MESSAGE_REJECT) {
797 int msg, extmsg;
798 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
799 /*
800 * message was part of an identify +
801 * something else. Identify shouldn't
802 * have been rejected.
803 */
804 msg =
805 esiop_cmd->cmd_tables->msg_out[1];
806 extmsg =
807 esiop_cmd->cmd_tables->msg_out[3];
808 } else {
809 msg =
810 esiop_cmd->cmd_tables->msg_out[0];
811 extmsg =
812 esiop_cmd->cmd_tables->msg_out[2];
813 }
814 if (msg == MSG_MESSAGE_REJECT) {
815 /* MSG_REJECT for a MSG_REJECT !*/
816 if (xs)
817 scsipi_printaddr(xs->xs_periph);
818 else
819 printf("%s: ",
820 sc->sc_c.sc_dev.dv_xname);
821 printf("our reject message was "
822 "rejected\n");
823 goto reset;
824 }
825 if (msg == MSG_EXTENDED &&
826 extmsg == MSG_EXT_WDTR) {
827 /* WDTR rejected, initiate sync */
828 if ((esiop_target->target_c.flags &
829 TARF_SYNC) == 0) {
830 esiop_target->target_c.status =
831 TARST_OK;
832 siop_update_xfer_mode(&sc->sc_c,
833 target);
834 /* no table to flush here */
835 CALL_SCRIPT(Ent_msgin_ack);
836 return 1;
837 }
838 esiop_target->target_c.status =
839 TARST_SYNC_NEG;
840 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
841 sc->sc_c.minsync, sc->sc_c.maxoff);
842 esiop_table_sync(esiop_cmd,
843 BUS_DMASYNC_PREREAD |
844 BUS_DMASYNC_PREWRITE);
845 CALL_SCRIPT(Ent_send_msgout);
846 return 1;
847 } else if (msg == MSG_EXTENDED &&
848 extmsg == MSG_EXT_SDTR) {
849 /* sync rejected */
850 esiop_target->target_c.offset = 0;
851 esiop_target->target_c.period = 0;
852 esiop_target->target_c.status =
853 TARST_OK;
854 siop_update_xfer_mode(&sc->sc_c,
855 target);
856 /* no table to flush here */
857 CALL_SCRIPT(Ent_msgin_ack);
858 return 1;
859 } else if (msg == MSG_SIMPLE_Q_TAG ||
860 msg == MSG_HEAD_OF_Q_TAG ||
861 msg == MSG_ORDERED_Q_TAG) {
862 if (esiop_handle_qtag_reject(
863 esiop_cmd) == -1)
864 goto reset;
865 CALL_SCRIPT(Ent_msgin_ack);
866 return 1;
867 }
868 if (xs)
869 scsipi_printaddr(xs->xs_periph);
870 else
871 printf("%s: ",
872 sc->sc_c.sc_dev.dv_xname);
873 if (msg == MSG_EXTENDED) {
874 printf("scsi message reject, extended "
875 "message sent was 0x%x\n", extmsg);
876 } else {
877 printf("scsi message reject, message "
878 "sent was 0x%x\n", msg);
879 }
880 /* no table to flush here */
881 CALL_SCRIPT(Ent_msgin_ack);
882 return 1;
883 }
884 if (xs)
885 scsipi_printaddr(xs->xs_periph);
886 else
887 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
888 printf("unhandled message 0x%x\n",
889 esiop_cmd->cmd_tables->msg_in[0]);
890 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
891 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
892 esiop_table_sync(esiop_cmd,
893 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
894 CALL_SCRIPT(Ent_send_msgout);
895 return 1;
896 }
897 case A_int_extmsgin:
898 #ifdef SIOP_DEBUG_INTR
899 printf("extended message: msg 0x%x len %d\n",
900 esiop_cmd->cmd_tables->msg_in[2],
901 esiop_cmd->cmd_tables->msg_in[1]);
902 #endif
903 if (esiop_cmd->cmd_tables->msg_in[1] > 6)
904 printf("%s: extended message too big (%d)\n",
905 sc->sc_c.sc_dev.dv_xname,
906 esiop_cmd->cmd_tables->msg_in[1]);
907 esiop_cmd->cmd_tables->t_extmsgdata.count =
908 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
909 esiop_table_sync(esiop_cmd,
910 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
911 CALL_SCRIPT(Ent_get_extmsgdata);
912 return 1;
913 case A_int_extmsgdata:
914 #ifdef SIOP_DEBUG_INTR
915 {
916 int i;
917 printf("extended message: 0x%x, data:",
918 esiop_cmd->cmd_tables->msg_in[2]);
919 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
920 i++)
921 printf(" 0x%x",
922 esiop_cmd->cmd_tables->msg_in[i]);
923 printf("\n");
924 }
925 #endif
926 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
927 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
928 case SIOP_NEG_MSGOUT:
929 esiop_update_scntl3(sc,
930 esiop_cmd->cmd_c.siop_target);
931 esiop_table_sync(esiop_cmd,
932 BUS_DMASYNC_PREREAD |
933 BUS_DMASYNC_PREWRITE);
934 CALL_SCRIPT(Ent_send_msgout);
935 return(1);
936 case SIOP_NEG_ACK:
937 esiop_update_scntl3(sc,
938 esiop_cmd->cmd_c.siop_target);
939 CALL_SCRIPT(Ent_msgin_ack);
940 return(1);
941 default:
942 panic("invalid retval from "
943 "siop_wdtr_neg()");
944 }
945 return(1);
946 }
947 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
948 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
949 case SIOP_NEG_MSGOUT:
950 esiop_update_scntl3(sc,
951 esiop_cmd->cmd_c.siop_target);
952 esiop_table_sync(esiop_cmd,
953 BUS_DMASYNC_PREREAD |
954 BUS_DMASYNC_PREWRITE);
955 CALL_SCRIPT(Ent_send_msgout);
956 return(1);
957 case SIOP_NEG_ACK:
958 esiop_update_scntl3(sc,
959 esiop_cmd->cmd_c.siop_target);
960 CALL_SCRIPT(Ent_msgin_ack);
961 return(1);
962 default:
963 panic("invalid retval from "
964 "siop_sdtr_neg()");
965 }
966 return(1);
967 }
968 /* send a message reject */
969 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
970 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
971 esiop_table_sync(esiop_cmd,
972 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
973 CALL_SCRIPT(Ent_send_msgout);
974 return 1;
975 case A_int_disc:
976 INCSTAT(esiop_stat_intr_sdp);
977 offset = bus_space_read_1(sc->sc_c.sc_rt,
978 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
979 #ifdef SIOP_DEBUG_DR
980 printf("disconnect offset %d\n", offset);
981 #endif
982 if (offset > SIOP_NSG) {
983 printf("%s: bad offset for disconnect (%d)\n",
984 sc->sc_c.sc_dev.dv_xname, offset);
985 goto reset;
986 }
987 /*
988 * offset == SIOP_NSG may be a valid condition if
989 * we get a sdp when the xfer is done.
990 * Don't call memmove in this case.
991 */
992 if (offset < SIOP_NSG) {
993 memmove(&esiop_cmd->cmd_tables->data[0],
994 &esiop_cmd->cmd_tables->data[offset],
995 (SIOP_NSG - offset) * sizeof(scr_table_t));
996 esiop_table_sync(esiop_cmd,
997 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
998 }
999 CALL_SCRIPT(Ent_script_sched);
1000 return 1;
1001 case A_int_resfail:
1002 printf("reselect failed\n");
1003 CALL_SCRIPT(Ent_script_sched);
1004 return 1;
1005 case A_int_done:
1006 if (xs == NULL) {
1007 printf("%s: done without command\n",
1008 sc->sc_c.sc_dev.dv_xname);
1009 CALL_SCRIPT(Ent_script_sched);
1010 return 1;
1011 }
1012 #ifdef SIOP_DEBUG_INTR
1013 printf("done, DSA=0x%lx target id 0x%x last msg "
1014 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1015 le32toh(esiop_cmd->cmd_tables->id),
1016 esiop_cmd->cmd_tables->msg_in[0],
1017 le32toh(esiop_cmd->cmd_tables->status));
1018 #endif
1019 INCSTAT(esiop_stat_intr_done);
1020 esiop_cmd->cmd_c.status = CMDST_DONE;
1021 goto end;
1022 default:
1023 printf("unknown irqcode %x\n", irqcode);
1024 if (xs) {
1025 xs->error = XS_SELTIMEOUT;
1026 goto end;
1027 }
1028 goto reset;
1029 }
1030 return 1;
1031 }
1032 /* We just shouldn't get here */
1033 panic("esiop_intr: I shouldn't be here!");
1034
1035 end:
1036 /*
1037 * Restart the script now if the command completed properly.
1038 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up
1039 * the queue.
1040 */
1041 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1042 #ifdef SIOP_DEBUG_INTR
1043 printf("esiop_intr end: status %d\n", xs->status);
1044 #endif
1045 if (xs->status == SCSI_OK)
1046 CALL_SCRIPT(Ent_script_sched);
1047 else
1048 restart = 1;
1049 if (tag >= 0)
1050 esiop_lun->tactive[tag] = NULL;
1051 else
1052 esiop_lun->active = NULL;
1053 esiop_scsicmd_end(esiop_cmd);
1054 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1055 esiop_del_dev(sc, target, lun);
1056 if (restart)
1057 CALL_SCRIPT(Ent_script_sched);
1058 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1059 /* a command terminated, so we have free slots now */
1060 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1061 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1062 }
1063
1064 goto again;
1065 }
1066
1067 void
1068 esiop_scsicmd_end(esiop_cmd)
1069 struct esiop_cmd *esiop_cmd;
1070 {
1071 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1072 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1073
1074 switch(xs->status) {
1075 case SCSI_OK:
1076 xs->error = XS_NOERROR;
1077 break;
1078 case SCSI_BUSY:
1079 xs->error = XS_BUSY;
1080 break;
1081 case SCSI_CHECK:
1082 xs->error = XS_BUSY;
1083 /* remove commands in the queue and scheduler */
1084 esiop_unqueue(sc, xs->xs_periph->periph_target,
1085 xs->xs_periph->periph_lun);
1086 break;
1087 case SCSI_QUEUE_FULL:
1088 INCSTAT(esiop_stat_intr_qfull);
1089 #ifdef SIOP_DEBUG
1090 printf("%s:%d:%d: queue full (tag %d)\n",
1091 sc->sc_c.sc_dev.dv_xname,
1092 xs->xs_periph->periph_target,
1093 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1094 #endif
1095 xs->error = XS_BUSY;
1096 break;
1097 case SCSI_SIOP_NOCHECK:
1098 /*
1099 * don't check status, xs->error is already valid
1100 */
1101 break;
1102 case SCSI_SIOP_NOSTATUS:
1103 /*
1104 * the status byte was not updated, cmd was
1105 * aborted
1106 */
1107 xs->error = XS_SELTIMEOUT;
1108 break;
1109 default:
1110 xs->error = XS_DRIVER_STUFFUP;
1111 }
1112 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1113 bus_dmamap_sync(sc->sc_c.sc_dmat,
1114 esiop_cmd->cmd_c.dmamap_data, 0,
1115 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1116 (xs->xs_control & XS_CTL_DATA_IN) ?
1117 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1118 bus_dmamap_unload(sc->sc_c.sc_dmat,
1119 esiop_cmd->cmd_c.dmamap_data);
1120 }
1121 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1122 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1123 esiop_cmd->cmd_c.status = CMDST_FREE;
1124 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1125 xs->resid = 0;
1126 scsipi_done (xs);
1127 }
1128
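/*
 * Scan all active commands (tagged and untagged) on every target/lun and
 * complete those whose status byte in the xfer table is now SCSI_OK.
 * Called from esiop_intr() when the chip signals completions via
 * ISTAT_SEM or ISTAT_INTF.
 */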
1129 void
1130 esiop_checkdone(sc)
1131 struct esiop_softc *sc;
1132 {
1133 int target, lun, tag;
1134 struct esiop_target *esiop_target;
1135 struct esiop_lun *esiop_lun;
1136 struct esiop_cmd *esiop_cmd;
1137 int status;
1138
1139 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1140 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1141 if (esiop_target == NULL)
1142 continue;
1143 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1144 esiop_lun = esiop_target->esiop_lun[lun];
1145 if (esiop_lun == NULL)
1146 continue;
1147 esiop_cmd = esiop_lun->active;
1148 if (esiop_cmd) {
1149 status = le32toh(esiop_cmd->cmd_tables->status);
1150 if (status == SCSI_OK) {
1151 /* Ok, this command has been handled */
1152 esiop_cmd->cmd_c.xs->status = status;
1153 esiop_lun->active = NULL;
1154 esiop_scsicmd_end(esiop_cmd);
1155 }
1156 }
1157 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1158 esiop_cmd = esiop_lun->tactive[tag];
1159 if (esiop_cmd == NULL)
1160 continue;
1161 status = le32toh(esiop_cmd->cmd_tables->status);
1162 if (status == SCSI_OK) {
1163 /* Ok, this command has been handled */
1164 esiop_cmd->cmd_c.xs->status = status;
1165 esiop_lun->tactive[tag] = NULL;
1166 esiop_scsicmd_end(esiop_cmd);
1167 }
1168 }
1169 }
1170 }
1171 }
1172
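/*
 * Remove tagged commands for the given target/lun that have not started
 * yet from the scheduler ring (marking their slots with A_f_cmd_ignore)
 * and requeue them. Called from esiop_scsicmd_end() on a CHECK CONDITION
 * to flush commands still sitting in the scheduler for that lun.
 */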
1173 void
1174 esiop_unqueue(sc, target, lun)
1175 struct esiop_softc *sc;
1176 int target;
1177 int lun;
1178 {
1179 int slot, tag;
1180 u_int32_t slotdsa;
1181 struct esiop_cmd *esiop_cmd;
1182 struct esiop_lun *esiop_lun =
1183 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1184
1185 /* first make sure to read valid data */
1186 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1187
1188 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1189 /* look for commands in the scheduler, not yet started */
1190 if (esiop_lun->tactive[tag] == NULL)
1191 continue;
1192 esiop_cmd = esiop_lun->tactive[tag];
1193 for (slot = 0; slot <= A_ncmd_slots; slot++) {
1194 slotdsa = esiop_script_read(sc,
1195 sc->sc_shedoffset + slot * 2);
1196 if (slotdsa & A_f_cmd_free)
1197 continue;
1198 if ((slotdsa & ~A_f_cmd_free) == esiop_cmd->cmd_c.dsa)
1199 break;
1200 }
1201 if (slot > A_ncmd_slots)
1202 continue; /* didn't find it */
1203 /* Mark this slot as ignore */
1204 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1205 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1206 /* ask to requeue */
1207 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1208 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1209 esiop_lun->tactive[tag] = NULL;
1210 esiop_scsicmd_end(esiop_cmd);
1211 }
1212 }
1213
1214 /*
1215 * handle a rejected queue tag message: the command will run untagged,
1216 * so we have to adjust the reselect script.
1217 */
1218
1219
1220 int
1221 esiop_handle_qtag_reject(esiop_cmd)
1222 struct esiop_cmd *esiop_cmd;
1223 {
1224 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1225 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1226 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1227 int tag = esiop_cmd->cmd_tables->msg_out[2];
1228 struct esiop_target *esiop_target =
1229 (struct esiop_target*)sc->sc_c.targets[target];
1230 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1231
1232 #ifdef SIOP_DEBUG
1233 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1234 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1235 esiop_cmd->cmd_c.status);
1236 #endif
1237
1238 if (esiop_lun->active != NULL) {
1239 printf("%s: untagged command already running for target %d "
1240 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1241 target, lun, esiop_lun->active->cmd_c.status);
1242 return -1;
1243 }
1244 /* clear tag slot */
1245 esiop_lun->tactive[tag] = NULL;
1246 /* add command to non-tagged slot */
1247 esiop_lun->active = esiop_cmd;
1248 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1249 esiop_cmd->cmd_c.tag = -1;
1250 /* update DSA table */
1251 esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1252 esiop_cmd->cmd_c.dsa);
1253 esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
1254 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1255 return 0;
1256 }
1257
1258 /*
1259 * handle a bus reset: reset chip, unqueue all active commands, free all
1260 * target structs and report the lossage to the upper layer.
1261 * As the upper layer may requeue immediately, we have to first store
1262 * all active commands in a temporary queue.
1263 */
1264 void
1265 esiop_handle_reset(sc)
1266 struct esiop_softc *sc;
1267 {
1268 struct esiop_cmd *esiop_cmd;
1269 struct esiop_lun *esiop_lun;
1270 int target, lun, tag;
1271 /*
1272 * scsi bus reset. reset the chip and restart
1273 * the queue. Need to clean up all active commands
1274 */
1275 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1276 /* stop, reset and restart the chip */
1277 esiop_reset(sc);
1278 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1279 /* chip has been reset, all slots are free now */
1280 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1281 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1282 }
1283 /*
1284 * Process all commands: first the completed commands, then the
1285 * commands still being executed
1286 */
1287 esiop_checkdone(sc);
1288 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1289 target++) {
1290 struct esiop_target *esiop_target =
1291 (struct esiop_target *)sc->sc_c.targets[target];
1292 if (esiop_target == NULL)
1293 continue;
1294 for (lun = 0; lun < 8; lun++) {
1295 esiop_lun = esiop_target->esiop_lun[lun];
1296 if (esiop_lun == NULL)
1297 continue;
1298 for (tag = -1; tag <
1299 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1300 ESIOP_NTAG : 0);
1301 tag++) {
1302 if (tag >= 0)
1303 esiop_cmd = esiop_lun->tactive[tag];
1304 else
1305 esiop_cmd = esiop_lun->active;
1306 if (esiop_cmd == NULL)
1307 continue;
1308 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1309 printf("command with tag id %d reset\n", tag);
1310 esiop_cmd->cmd_c.xs->error =
1311 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1312 XS_TIMEOUT : XS_RESET;
1313 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1314 if (tag >= 0)
1315 esiop_lun->tactive[tag] = NULL;
1316 else
1317 esiop_lun->active = NULL;
1318 esiop_cmd->cmd_c.status = CMDST_DONE;
1319 esiop_scsicmd_end(esiop_cmd);
1320 }
1321 }
1322 sc->sc_c.targets[target]->status = TARST_ASYNC;
1323 sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1324 sc->sc_c.targets[target]->period =
1325 sc->sc_c.targets[target]->offset = 0;
1326 siop_update_xfer_mode(&sc->sc_c, target);
1327 }
1328
1329 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1330 }
1331
1332 void
1333 esiop_scsipi_request(chan, req, arg)
1334 struct scsipi_channel *chan;
1335 scsipi_adapter_req_t req;
1336 void *arg;
1337 {
1338 struct scsipi_xfer *xs;
1339 struct scsipi_periph *periph;
1340 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1341 struct esiop_cmd *esiop_cmd;
1342 struct esiop_target *esiop_target;
1343 int s, error, i;
1344 int target;
1345 int lun;
1346
1347 switch (req) {
1348 case ADAPTER_REQ_RUN_XFER:
1349 xs = arg;
1350 periph = xs->xs_periph;
1351 target = periph->periph_target;
1352 lun = periph->periph_lun;
1353
1354 s = splbio();
1355 #ifdef SIOP_DEBUG_SCHED
1356 printf("starting cmd for %d:%d\n", target, lun);
1357 #endif
1358 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1359 if (esiop_cmd == NULL) {
1360 xs->error = XS_RESOURCE_SHORTAGE;
1361 scsipi_done(xs);
1362 splx(s);
1363 return;
1364 }
1365 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1366 #ifdef DIAGNOSTIC
1367 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1368 panic("esiop_scsipi_request: new cmd not free");
1369 #endif
1370 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1371 if (esiop_target == NULL) {
1372 #ifdef SIOP_DEBUG
1373 printf("%s: alloc siop_target for target %d\n",
1374 sc->sc_c.sc_dev.dv_xname, target);
1375 #endif
1376 sc->sc_c.targets[target] =
1377 malloc(sizeof(struct esiop_target),
1378 M_DEVBUF, M_NOWAIT | M_ZERO);
1379 if (sc->sc_c.targets[target] == NULL) {
1380 printf("%s: can't malloc memory for "
1381 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1382 target);
1383 xs->error = XS_RESOURCE_SHORTAGE;
1384 scsipi_done(xs);
1385 splx(s);
1386 return;
1387 }
1388 esiop_target =
1389 (struct esiop_target*)sc->sc_c.targets[target];
1390 esiop_target->target_c.status = TARST_PROBING;
1391 esiop_target->target_c.flags = 0;
1392 esiop_target->target_c.id =
1393 sc->sc_c.clock_div << 24; /* scntl3 */
1394 esiop_target->target_c.id |= target << 16; /* id */
1395 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1396
1397 for (i=0; i < 8; i++)
1398 esiop_target->esiop_lun[i] = NULL;
1399 esiop_target_register(sc, target);
1400 }
1401 if (esiop_target->esiop_lun[lun] == NULL) {
1402 esiop_target->esiop_lun[lun] =
1403 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1404 M_NOWAIT|M_ZERO);
1405 if (esiop_target->esiop_lun[lun] == NULL) {
1406 printf("%s: can't alloc esiop_lun for "
1407 "target %d lun %d\n",
1408 sc->sc_c.sc_dev.dv_xname, target, lun);
1409 xs->error = XS_RESOURCE_SHORTAGE;
1410 scsipi_done(xs);
1411 splx(s);
1412 return;
1413 }
1414 }
1415 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1416 esiop_cmd->cmd_c.xs = xs;
1417 esiop_cmd->cmd_c.flags = 0;
1418 esiop_cmd->cmd_c.status = CMDST_READY;
1419
1420 /* load the DMA maps */
1421 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1422 esiop_cmd->cmd_c.dmamap_cmd,
1423 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1424 if (error) {
1425 printf("%s: unable to load cmd DMA map: %d\n",
1426 sc->sc_c.sc_dev.dv_xname, error);
1427 xs->error = XS_DRIVER_STUFFUP;
1428 scsipi_done(xs);
1429 splx(s);
1430 return;
1431 }
1432 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1433 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1434 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1435 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1436 ((xs->xs_control & XS_CTL_DATA_IN) ?
1437 BUS_DMA_READ : BUS_DMA_WRITE));
1438 if (error) {
1439 printf("%s: unable to load data DMA map: %d\n",
1440 sc->sc_c.sc_dev.dv_xname, error);
1441 xs->error = XS_DRIVER_STUFFUP;
1442 scsipi_done(xs);
1443 bus_dmamap_unload(sc->sc_c.sc_dmat,
1444 esiop_cmd->cmd_c.dmamap_cmd);
1445 splx(s);
1446 return;
1447 }
1448 bus_dmamap_sync(sc->sc_c.sc_dmat,
1449 esiop_cmd->cmd_c.dmamap_data, 0,
1450 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1451 (xs->xs_control & XS_CTL_DATA_IN) ?
1452 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1453 }
1454 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1455 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1456 BUS_DMASYNC_PREWRITE);
1457
1458 if (xs->xs_tag_type)
1459 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1460 else
1461 esiop_cmd->cmd_c.tag = -1;
1462 siop_setuptables(&esiop_cmd->cmd_c);
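/*
 * Build the per-command T/L/Q word: A_f_c_* flags in byte 0, target in
 * byte 1, lun in byte 2 and, for tagged commands, the tag in byte 3.
 * esiop_intr() reads this layout back from SCRATCHC.
 */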
1463 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1464 htole32(A_f_c_target | A_f_c_lun);
1465 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1466 htole32((target << 8) | (lun << 16));
1467 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1468 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1469 htole32(A_f_c_tag);
1470 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1471 htole32(esiop_cmd->cmd_c.tag << 24);
1472 }
1473
1474 esiop_table_sync(esiop_cmd,
1475 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1476 esiop_start(sc, esiop_cmd);
1477 if (xs->xs_control & XS_CTL_POLL) {
1478 /* poll for command completion */
1479 while ((xs->xs_status & XS_STS_DONE) == 0) {
1480 delay(1000);
1481 esiop_intr(sc);
1482 }
1483 }
1484 splx(s);
1485 return;
1486
1487 case ADAPTER_REQ_GROW_RESOURCES:
1488 #ifdef SIOP_DEBUG
1489 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1490 sc->sc_c.sc_adapt.adapt_openings);
1491 #endif
1492 esiop_morecbd(sc);
1493 return;
1494
1495 case ADAPTER_REQ_SET_XFER_MODE:
1496 {
1497 struct scsipi_xfer_mode *xm = arg;
1498 if (sc->sc_c.targets[xm->xm_target] == NULL)
1499 return;
1500 s = splbio();
1501 if (xm->xm_mode & PERIPH_CAP_TQING)
1502 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1503 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1504 (sc->sc_c.features & SF_BUS_WIDE))
1505 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1506 if (xm->xm_mode & PERIPH_CAP_SYNC)
1507 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1508 if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1509 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1510 sc->sc_c.targets[xm->xm_target]->status =
1511 TARST_ASYNC;
1512
1513 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1514 if (sc->sc_c.sc_chan.chan_periphs[xm->xm_target][lun])
1515 /* allocate a lun sw entry for this device */
1516 esiop_add_dev(sc, xm->xm_target, lun);
1517 }
1518
1519 splx(s);
1520 }
1521 }
1522 }
1523
1524 static void
1525 esiop_start(sc, esiop_cmd)
1526 struct esiop_softc *sc;
1527 struct esiop_cmd *esiop_cmd;
1528 {
1529 struct esiop_lun *esiop_lun;
1530 struct esiop_target *esiop_target;
1531 int timeout;
1532 int target, lun, slot;
1533
1534 nintr = 0;
1535
1536 /*
1537 * first make sure to read valid data
1538 */
1539 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1540
1541 /*
1542 * We use a circular queue here. sc->sc_currschedslot points to a
1543 * free slot, unless we have filled the queue. Check this.
1544 */
1545 slot = sc->sc_currschedslot;
1546 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * 2) &
1547 A_f_cmd_free) == 0) {
1548 /*
1549 * no more free slot, no need to continue. freeze the queue
1550 * and requeue this command.
1551 */
1552 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1553 sc->sc_flags |= SCF_CHAN_NOSLOT;
1554 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1555 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1556 esiop_scsicmd_end(esiop_cmd);
1557 return;
1558 }
1559 /* OK, we can use this slot */
1560
1561 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1562 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1563 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1564 esiop_lun = esiop_target->esiop_lun[lun];
1565 /* if non-tagged command active, panic: this shouldn't happen */
1566 if (esiop_lun->active != NULL) {
1567 panic("esiop_start: tagged cmd while untagged running");
1568 }
1569 #ifdef DIAGNOSTIC
1570 /* sanity check the tag if needed */
1571 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1572 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1573 panic("esiop_start: tag not free");
1574 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1575 esiop_cmd->cmd_c.tag < 0) {
1576 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1577 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1578 panic("esiop_start: invalid tag id");
1579 }
1580 }
1581 #endif
1582 #ifdef SIOP_DEBUG_SCHED
1583 printf("using slot %d for DSA 0x%lx\n", slot,
1584 (u_long)esiop_cmd->cmd_c.dsa);
1585 #endif
1586 /* mark command as active */
1587 if (esiop_cmd->cmd_c.status == CMDST_READY)
1588 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1589 else
1590 panic("esiop_start: bad status");
1591 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1592 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1593 /* DSA table for reselect */
1594 if ((esiop_lun->lun_flags & LUNF_TAGTABLE) == 0) {
1595 esiop_script_write(sc,
1596 esiop_target->lun_table_offset + lun + 2,
1597 esiop_lun->lun_tagtbl->tbl_dsa);
1598 esiop_lun->lun_flags |= LUNF_TAGTABLE;
1599 }
1600 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1601 htole32(esiop_cmd->cmd_c.dsa);
1602 bus_dmamap_sync(sc->sc_c.sc_dmat,
1603 esiop_lun->lun_tagtbl->tblblk->blkmap,
1604 esiop_lun->lun_tagtbl->tbl_offset,
1605 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1606 } else {
1607 esiop_lun->active = esiop_cmd;
1608 /* DSA table for reselect */
1609 esiop_script_write(sc, esiop_target->lun_table_offset + lun + 2,
1610 esiop_cmd->cmd_c.dsa);
1611 esiop_lun->lun_flags &= ~LUNF_TAGTABLE;
1612
1613 }
1614 /* scheduler slot: ID, then DSA */
1615 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1616 sc->sc_c.targets[target]->id);
1617 esiop_script_write(sc, sc->sc_shedoffset + slot * 2,
1618 esiop_cmd->cmd_c.dsa);
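/*
 * The id word is written first and the DSA word last: the DSA write
 * replaces A_f_cmd_free, which presumably is what makes the slot visible
 * to the SCRIPT scheduler.
 */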
1619 /* handle timeout */
1620 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1621 /* start expire timer */
1622 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1623 if (timeout == 0)
1624 timeout = 1;
1625 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1626 timeout, esiop_timeout, esiop_cmd);
1627 }
1628 /* make sure SCRIPT processor will read valid data */
1629 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1630 /* Signal script it has some work to do */
1631 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1632 SIOP_ISTAT, ISTAT_SIGP);
1633 /* update the current slot, and wait for IRQ */
1634 sc->sc_currschedslot++;
1635 if (sc->sc_currschedslot >= A_ncmd_slots)
1636 sc->sc_currschedslot = 0;
1637 return;
1638 }
1639
1640 void
1641 esiop_timeout(v)
1642 void *v;
1643 {
1644 struct esiop_cmd *esiop_cmd = v;
1645 struct esiop_softc *sc =
1646 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1647 int s;
1648
1649 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1650 printf("command timeout\n");
1651
1652 s = splbio();
1653 /* reset the scsi bus */
1654 siop_resetbus(&sc->sc_c);
1655
1656 /* deactivate callout */
1657 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1658 /*
1659 * mark the command as having timed out and just return;
1660 * the bus reset will generate an interrupt,
1661 * which will be handled in esiop_intr()
1662 */
1663 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1664 splx(s);
1665 return;
1666
1667 }
1668
1669 void
1670 esiop_dump_script(sc)
1671 struct esiop_softc *sc;
1672 {
1673 int i;
1674 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1675 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1676 le32toh(sc->sc_c.sc_script[i]),
1677 le32toh(sc->sc_c.sc_script[i+1]));
1678 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1679 0xc0000000) {
1680 i++;
1681 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1682 }
1683 printf("\n");
1684 }
1685 }
1686
1687 void
1688 esiop_morecbd(sc)
1689 struct esiop_softc *sc;
1690 {
1691 int error, i, s;
1692 bus_dma_segment_t seg;
1693 int rseg;
1694 struct esiop_cbd *newcbd;
1695 struct esiop_xfer *xfer;
1696 bus_addr_t dsa;
1697
1698 /* allocate a new list head */
1699 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1700 if (newcbd == NULL) {
1701 printf("%s: can't allocate memory for command descriptors "
1702 "head\n", sc->sc_c.sc_dev.dv_xname);
1703 return;
1704 }
1705
1706 /* allocate cmd list */
1707 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1708 M_DEVBUF, M_NOWAIT|M_ZERO);
1709 if (newcbd->cmds == NULL) {
1710 printf("%s: can't allocate memory for command descriptors\n",
1711 sc->sc_c.sc_dev.dv_xname);
1712 goto bad3;
1713 }
1714 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1715 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1716 if (error) {
1717 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1718 sc->sc_c.sc_dev.dv_xname, error);
1719 goto bad2;
1720 }
1721 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1722 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1723 if (error) {
1724 printf("%s: unable to map cbd DMA memory, error = %d\n",
1725 sc->sc_c.sc_dev.dv_xname, error);
1726 goto bad2;
1727 }
1728 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1729 BUS_DMA_NOWAIT, &newcbd->xferdma);
1730 if (error) {
1731 printf("%s: unable to create cbd DMA map, error = %d\n",
1732 sc->sc_c.sc_dev.dv_xname, error);
1733 goto bad1;
1734 }
1735 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1736 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1737 if (error) {
1738 printf("%s: unable to load cbd DMA map, error = %d\n",
1739 sc->sc_c.sc_dev.dv_xname, error);
1740 goto bad0;
1741 }
1742 #ifdef DEBUG
1743 printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1744 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1745 #endif
1746 for (i = 0; i < SIOP_NCMDPB; i++) {
1747 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1748 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1749 &newcbd->cmds[i].cmd_c.dmamap_data);
1750 if (error) {
1751 printf("%s: unable to create data DMA map for cbd: "
1752 "error %d\n",
1753 sc->sc_c.sc_dev.dv_xname, error);
1754 goto bad0;
1755 }
1756 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1757 sizeof(struct scsipi_generic), 1,
1758 sizeof(struct scsipi_generic), 0,
1759 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1760 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1761 if (error) {
1762 printf("%s: unable to create cmd DMA map for cbd: error %d\n",
1763 sc->sc_c.sc_dev.dv_xname, error);
1764 goto bad0;
1765 }
1766 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1767 newcbd->cmds[i].esiop_cbdp = newcbd;
1768 xfer = &newcbd->xfers[i];
1769 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1770 memset(newcbd->cmds[i].cmd_tables, 0,
1771 sizeof(struct esiop_xfer));
1772 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1773 i * sizeof(struct esiop_xfer);
1774 newcbd->cmds[i].cmd_c.dsa = dsa;
1775 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1776		xfer->siop_tables.t_msgout.count = htole32(1);
1777		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1778		xfer->siop_tables.t_msgin.count = htole32(1);
1779		xfer->siop_tables.t_msgin.addr = htole32(dsa + 8);
1780		xfer->siop_tables.t_extmsgin.count = htole32(2);
1781		xfer->siop_tables.t_extmsgin.addr = htole32(dsa + 9);
1782		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa + 11);
1783		xfer->siop_tables.t_status.count = htole32(1);
1784		xfer->siop_tables.t_status.addr = htole32(dsa + 16);
1785
1786 s = splbio();
1787 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1788 splx(s);
1789 #ifdef SIOP_DEBUG
1790 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1791 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1792 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1793 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1794 #endif
1795 }
1796 s = splbio();
1797 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1798 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1799 splx(s);
1800 return;
1801 bad0:
1802 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1803 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1804 bad1:
1805 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1806 bad2:
1807 free(newcbd->cmds, M_DEVBUF);
1808 bad3:
1809 free(newcbd, M_DEVBUF);
1810 return;
1811 }
1812
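/*
 * esiop_moretagtbl: allocate a new block of tag DSA tables: ESIOP_NTPB
 * tables of ESIOP_NTAG entries each, backed by one DMA-safe page, and add
 * them to the free tag table list.
 */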
1813 void
1814 esiop_moretagtbl(sc)
1815 struct esiop_softc *sc;
1816 {
1817 int error, i, j, s;
1818 bus_dma_segment_t seg;
1819 int rseg;
1820 struct esiop_dsatblblk *newtblblk;
1821 struct esiop_dsatbl *newtbls;
1822 u_int32_t *tbls;
1823
1824 /* allocate a new list head */
1825 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1826 M_DEVBUF, M_NOWAIT|M_ZERO);
1827 if (newtblblk == NULL) {
1828 printf("%s: can't allocate memory for tag DSA table block\n",
1829 sc->sc_c.sc_dev.dv_xname);
1830 return;
1831 }
1832
1833 /* allocate tbl list */
1834 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1835 M_DEVBUF, M_NOWAIT|M_ZERO);
1836 if (newtbls == NULL) {
1837		printf("%s: can't allocate memory for tag DSA tables\n",
1838 sc->sc_c.sc_dev.dv_xname);
1839 goto bad3;
1840 }
1841 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1842 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1843 if (error) {
1844 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1845 sc->sc_c.sc_dev.dv_xname, error);
1846 goto bad2;
1847 }
1848 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1849 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1850 if (error) {
1851 printf("%s: unable to map tbls DMA memory, error = %d\n",
1852 sc->sc_c.sc_dev.dv_xname, error);
1853		goto bad1;
1854 }
1855 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1856 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1857 if (error) {
1858 printf("%s: unable to create tbl DMA map, error = %d\n",
1859 sc->sc_c.sc_dev.dv_xname, error);
1860 goto bad1;
1861 }
1862 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1863 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1864 if (error) {
1865 printf("%s: unable to load tbl DMA map, error = %d\n",
1866 sc->sc_c.sc_dev.dv_xname, error);
1867 goto bad0;
1868 }
1869 #ifdef DEBUG
1870 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
1871 sc->sc_c.sc_dev.dv_xname,
1872 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
1873 #endif
1874 for (i = 0; i < ESIOP_NTPB; i++) {
1875 newtbls[i].tblblk = newtblblk;
1876 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
1877 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
1878 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
1879 newtbls[i].tbl_offset;
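		/* seed the table entries with dummy values */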
1880 for (j = 0; j < ESIOP_NTAG; j++)
1881 newtbls[i].tbl[j] = j;
1882 s = splbio();
1883 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
1884 splx(s);
1885 }
1886 s = splbio();
1887 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
1888 splx(s);
1889 return;
1890 bad0:
1891 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
1892 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
1893 bad1:
1894 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1895 bad2:
1896 free(newtbls, M_DEVBUF);
1897 bad3:
1898 free(newtblblk, M_DEVBUF);
1899 return;
1900 }
1901
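/*
 * esiop_update_scntl3: propagate new transfer parameters for a target:
 * rewrite the ID word at the head of its lun table and patch any of its
 * commands still waiting in the scheduler slots.
 */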
1902 void
1903 esiop_update_scntl3(sc, _siop_target)
1904 struct esiop_softc *sc;
1905 struct siop_common_target *_siop_target;
1906 {
1907 int slot;
1908 u_int32_t slotid, id;
1909
1910 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
1911 esiop_script_write(sc, esiop_target->lun_table_offset,
1912 esiop_target->target_c.id);
1913 id = esiop_target->target_c.id & 0x00ff0000;
1914	/* There may be other commands waiting in the scheduler. Handle them. */
1915	for (slot = 0; slot < A_ncmd_slots; slot++) {
1916 slotid =
1917 esiop_script_read(sc, sc->sc_shedoffset + slot * 2 + 1);
1918 if ((slotid & 0x00ff0000) == id)
1919 esiop_script_write(sc, sc->sc_shedoffset + slot * 2 + 1,
1920 esiop_target->target_c.id);
1921 }
1922 esiop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1923 }
1924
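/*
 * esiop_add_dev: per-lun setup when a new device is attached: if the
 * target does tagged queuing, reserve a tag DSA table for this lun,
 * allocating more tables if the free list is empty (fall back to untagged
 * if that fails).
 */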
1925 void
1926 esiop_add_dev(sc, target, lun)
1927 struct esiop_softc *sc;
1928 int target;
1929 int lun;
1930 {
1931 struct esiop_target *esiop_target =
1932 (struct esiop_target *)sc->sc_c.targets[target];
1933 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1934
1935 if (esiop_target->target_c.flags & TARF_TAG) {
1936 /* we need a tag DSA table */
1937 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1938 if (esiop_lun->lun_tagtbl == NULL) {
1939 esiop_moretagtbl(sc);
1940 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
1941 if (esiop_lun->lun_tagtbl == NULL) {
1942 /* no resources, run untagged */
1943 esiop_target->target_c.flags &= ~TARF_TAG;
1944 return;
1945 }
1946 }
1947 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
1948
1949 }
1950 }
1951
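/* esiop_del_dev: free the per-lun software state when a device goes away */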
1952 void
1953 esiop_del_dev(sc, target, lun)
1954 struct esiop_softc *sc;
1955 int target;
1956 int lun;
1957 {
1958 struct esiop_target *esiop_target;
1959 #ifdef SIOP_DEBUG
1960 printf("%s:%d:%d: free lun sw entry\n",
1961 sc->sc_c.sc_dev.dv_xname, target, lun);
1962 #endif
1963 if (sc->sc_c.targets[target] == NULL)
1964 return;
1965 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
1966 free(esiop_target->esiop_lun[lun], M_DEVBUF);
1967 esiop_target->esiop_lun[lun] = NULL;
1968 }
1969
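/*
 * esiop_cmd_find: find the command whose xfer table DSA matches the given
 * value, scanning the untagged active command and, for tagged targets, the
 * per-tag active commands of every lun on this target.
 */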
1970 struct esiop_cmd *
1971 esiop_cmd_find(sc, target, dsa)
1972 struct esiop_softc *sc;
1973 int target;
1974 u_int32_t dsa;
1975 {
1976 int lun, tag;
1977 struct esiop_cmd *cmd;
1978 struct esiop_lun *esiop_lun;
1979 struct esiop_target *esiop_target =
1980 (struct esiop_target *)sc->sc_c.targets[target];
1981
1982 if (esiop_target == NULL)
1983 return NULL;
1984
1985 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1986 esiop_lun = esiop_target->esiop_lun[lun];
1987 if (esiop_lun == NULL)
1988 continue;
1989 cmd = esiop_lun->active;
1990 if (cmd && cmd->cmd_c.dsa == dsa)
1991 return cmd;
1992 if (esiop_target->target_c.flags & TARF_TAG) {
1993 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1994 cmd = esiop_lun->tactive[tag];
1995 if (cmd && cmd->cmd_c.dsa == dsa)
1996 return cmd;
1997 }
1998 }
1999 }
2000 return NULL;
2001 }
2002
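/*
 * esiop_target_register: allocate the target's lun table in script memory,
 * store the target ID word at its head and record the table's address in
 * the global target DSA table.
 */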
2003 void
2004 esiop_target_register(sc, target)
2005 struct esiop_softc *sc;
2006 u_int32_t target;
2007 {
2008 struct esiop_target *esiop_target =
2009 (struct esiop_target *)sc->sc_c.targets[target];
2010
2011 /* get a DSA table for this target */
2012 esiop_target->lun_table_offset = sc->sc_free_offset;
2013 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns + 2;
2014 #ifdef SIOP_DEBUG
2015 printf("%s: lun table for target %d offset %d free offset %d\n",
2016 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2017 sc->sc_free_offset);
2018 #endif
2019	/* first 32 bits are ID (for select) */
2020 esiop_script_write(sc, esiop_target->lun_table_offset,
2021 esiop_target->target_c.id);
2022 /* Record this table in the target DSA table */
2023 esiop_script_write(sc,
2024 sc->sc_target_table_offset + target,
2025 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2026 sc->sc_c.sc_scriptaddr);
2027 esiop_script_sync(sc,
2028 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2029 }
2030
2031 #ifdef SIOP_STATS
2032 void
2033 esiop_printstats()
2034 {
2035 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2036 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2037 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2038 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2039 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2040 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2041 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2042 }
2043 #endif
2044