 1 /* $NetBSD: esiop.c,v 1.12 2002/04/27 18:46:49 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.12 2002/04/27 18:46:49 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
98
99 void esiop_update_scntl3 __P((struct esiop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int esiop_stat_intr = 0;
104 static int esiop_stat_intr_shortxfer = 0;
105 static int esiop_stat_intr_sdp = 0;
106 static int esiop_stat_intr_done = 0;
107 static int esiop_stat_intr_xferdisc = 0;
108 static int esiop_stat_intr_lunresel = 0;
109 static int esiop_stat_intr_qfull = 0;
110 void esiop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
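/*
 * The SCRIPTS program lives either in on-chip RAM (SF_CHIP_RAM) or in a
 * host DMA buffer.  The helpers below hide that difference: _sync flushes
 * the host copy when there is no chip RAM, while _read/_write access one
 * 32-bit script word at the given word offset, byte-swapping as needed
 * for the host-memory case.
 */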
116 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
117 static __inline__ void
118 esiop_script_sync(sc, ops)
119 struct esiop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
128 static __inline__ u_int32_t
129 esiop_script_read(sc, offset)
130 struct esiop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 esiop_script_write(sc, offset, val)
145 struct esiop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
156
157 void
158 esiop_attach(sc)
159 struct esiop_softc *sc;
160 {
161 struct esiop_dsatbl *tagtbl_donering;
162
163 if (siop_common_attach(&sc->sc_c) != 0 )
164 return;
165
166 TAILQ_INIT(&sc->free_list);
167 TAILQ_INIT(&sc->cmds);
168 TAILQ_INIT(&sc->free_tagtbl);
169 TAILQ_INIT(&sc->tag_tblblk);
170 sc->sc_currschedslot = 0;
171 #ifdef SIOP_DEBUG
172 printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
173 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
174 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
175 #endif
176
177 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
178 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
179
180 /*
181 * get space for the CMD done slot. For this we use a tag table entry.
182 * It's the same size and allows us to not waste 3/4 of a page
183 */
184 #ifdef DIAGNOSTIC
185 if (ESIOP_NTAG != A_ndone_slots) {
 186 printf("%s: size of tag DSA table different from the done"
 187 " ring\n", sc->sc_c.sc_dev.dv_xname);
188 return;
189 }
190 #endif
191 esiop_moretagtbl(sc);
192 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
193 if (tagtbl_donering == NULL) {
 194 printf("%s: no memory for command done ring\n",
 195 sc->sc_c.sc_dev.dv_xname);
196 return;
197 }
198 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
199 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
200 sc->sc_done_offset = tagtbl_donering->tbl_offset;
201 sc->sc_done_slot = &tagtbl_donering->tbl[0];
202
203 /* Do a bus reset, so that devices fall back to narrow/async */
204 siop_resetbus(&sc->sc_c);
205 /*
 206 * esiop_reset() will reset the chip, thus clearing pending interrupts
207 */
208 esiop_reset(sc);
209 #ifdef DUMP_SCRIPT
210 esiop_dump_script(sc);
211 #endif
212
213 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
214 }
215
216 void
217 esiop_reset(sc)
218 struct esiop_softc *sc;
219 {
220 int i, j;
221 u_int32_t addr;
222 u_int32_t msgin_addr, sem_addr;
223
224 siop_common_reset(&sc->sc_c);
225
226 /*
 227 * we copy the script at the beginning of RAM. Then there are 4 bytes
 228 * for the msg-in buffer, and 4 bytes for the semaphore
229 */
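	/*
	 * The scheduler ring and the per-target DSA table are carved out of
	 * the same area just below; sc_free_offset tracks the next free word
	 * (in 32-bit units) after the script itself.
	 */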
230 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
231 msgin_addr =
232 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
233 sc->sc_free_offset += 1;
234 sc->sc_semoffset = sc->sc_free_offset;
235 sem_addr =
236 sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
237 sc->sc_free_offset += 1;
238 /* then we have the scheduler ring */
239 sc->sc_shedoffset = sc->sc_free_offset;
240 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
241 /* then the targets DSA table */
242 sc->sc_target_table_offset = sc->sc_free_offset;
243 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
244 /* copy and patch the script */
245 if (sc->sc_c.features & SF_CHIP_RAM) {
246 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
247 esiop_script,
248 sizeof(esiop_script) / sizeof(esiop_script[0]));
249 for (j = 0; j <
250 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
251 j++) {
252 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
253 E_tlq_offset_Used[j] * 4,
254 sizeof(struct siop_common_xfer));
255 }
256 for (j = 0; j <
257 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
258 j++) {
259 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
260 E_abs_msgin2_Used[j] * 4, msgin_addr);
261 }
262 for (j = 0; j <
263 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
264 j++) {
265 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
266 E_abs_sem_Used[j] * 4, sem_addr);
267 }
268
269 if (sc->sc_c.features & SF_CHIP_LED0) {
270 bus_space_write_region_4(sc->sc_c.sc_ramt,
271 sc->sc_c.sc_ramh,
272 Ent_led_on1, esiop_led_on,
273 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
274 bus_space_write_region_4(sc->sc_c.sc_ramt,
275 sc->sc_c.sc_ramh,
276 Ent_led_on2, esiop_led_on,
277 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
278 bus_space_write_region_4(sc->sc_c.sc_ramt,
279 sc->sc_c.sc_ramh,
280 Ent_led_off, esiop_led_off,
281 sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
282 }
283 } else {
284 for (j = 0;
285 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
286 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
287 }
288 for (j = 0; j <
289 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
290 j++) {
291 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
292 htole32(sizeof(struct siop_common_xfer));
293 }
294 for (j = 0; j <
295 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
296 j++) {
297 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
298 htole32(msgin_addr);
299 }
300 for (j = 0; j <
301 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
302 j++) {
303 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
304 htole32(sem_addr);
305 }
306
307 if (sc->sc_c.features & SF_CHIP_LED0) {
308 for (j = 0; j < (sizeof(esiop_led_on) /
309 sizeof(esiop_led_on[0])); j++)
310 sc->sc_c.sc_script[
311 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
312 ] = htole32(esiop_led_on[j]);
313 for (j = 0; j < (sizeof(esiop_led_on) /
314 sizeof(esiop_led_on[0])); j++)
315 sc->sc_c.sc_script[
316 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
317 ] = htole32(esiop_led_on[j]);
318 for (j = 0; j < (sizeof(esiop_led_off) /
319 sizeof(esiop_led_off[0])); j++)
320 sc->sc_c.sc_script[
321 Ent_led_off / sizeof(esiop_led_off[0]) + j
322 ] = htole32(esiop_led_off[j]);
323 }
324 }
325 /* get base of scheduler ring */
326 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
327 /* init scheduler */
328 for (i = 0; i < A_ncmd_slots; i++) {
329 esiop_script_write(sc,
330 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
331 }
332 sc->sc_currschedslot = 0;
333 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
334 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
335 /*
336 * 0x78000000 is a 'move data8 to reg'. data8 is the second
337 * octet, reg offset is the third.
338 */
339 esiop_script_write(sc, Ent_cmdr0 / 4,
340 0x78640000 | ((addr & 0x000000ff) << 8));
341 esiop_script_write(sc, Ent_cmdr1 / 4,
342 0x78650000 | ((addr & 0x0000ff00) ));
343 esiop_script_write(sc, Ent_cmdr2 / 4,
344 0x78660000 | ((addr & 0x00ff0000) >> 8));
345 esiop_script_write(sc, Ent_cmdr3 / 4,
346 0x78670000 | ((addr & 0xff000000) >> 16));
347 /* done ring */
348 for (i = 0; i < A_ndone_slots; i++)
349 sc->sc_done_slot[i] = 0;
350 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
351 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
352 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
353 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
354 sc->sc_currdoneslot = 0;
355 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
356 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
357 esiop_script_write(sc, Ent_doner0 / 4,
358 0x786c0000 | ((addr & 0x000000ff) << 8));
359 esiop_script_write(sc, Ent_doner1 / 4,
360 0x786d0000 | ((addr & 0x0000ff00) ));
361 esiop_script_write(sc, Ent_doner2 / 4,
362 0x786e0000 | ((addr & 0x00ff0000) >> 8));
363 esiop_script_write(sc, Ent_doner3 / 4,
364 0x786f0000 | ((addr & 0xff000000) >> 16));
365
366 /* set flags */
367 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
368 /* write pointer of base of target DSA table */
369 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
370 sc->sc_c.sc_scriptaddr;
371 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
372 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
373 ((addr & 0x000000ff) << 8));
374 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
375 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
376 ((addr & 0x0000ff00) ));
377 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
378 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
379 ((addr & 0x00ff0000) >> 8));
380 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
381 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
382 ((addr & 0xff000000) >> 16));
383 #ifdef SIOP_DEBUG
384 printf("%s: target table offset %d free offset %d\n",
385 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
386 sc->sc_free_offset);
387 #endif
388
389 /* register existing targets */
390 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
391 if (sc->sc_c.targets[i])
392 esiop_target_register(sc, i);
393 }
394 /* start script */
395 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
396 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
397 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
398 }
399 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
400 sc->sc_c.sc_scriptaddr + Ent_reselect);
401 }
402
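/*
 * CALL_SCRIPT(ent) (re)starts the SCRIPTS processor at the given entry
 * point by writing the script base address plus the entry offset to the
 * DSP register.  The "#if 0" variant additionally prints the current
 * DSA and DSP for debugging.
 */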
403 #if 0
404 #define CALL_SCRIPT(ent) do {\
405 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
406 esiop_cmd->cmd_c.dsa, \
407 sc->sc_c.sc_scriptaddr + ent); \
408 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
409 } while (0)
410 #else
411 #define CALL_SCRIPT(ent) do {\
412 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
413 } while (0)
414 #endif
415
416 int
417 esiop_intr(v)
418 void *v;
419 {
420 struct esiop_softc *sc = v;
421 struct esiop_target *esiop_target;
422 struct esiop_cmd *esiop_cmd;
423 struct esiop_lun *esiop_lun;
424 struct scsipi_xfer *xs;
425 int istat, sist, sstat1, dstat;
426 u_int32_t irqcode;
427 int need_reset = 0;
428 int offset, target, lun, tag;
429 u_int32_t tflags;
430 u_int32_t addr;
431 int freetarget = 0;
432 int slot;
433 int retval = 0;
434
435 again:
436 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
437 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
438 return retval;
439 }
440 retval = 1;
441 INCSTAT(esiop_stat_intr);
442 esiop_checkdone(sc);
443 if (istat & ISTAT_INTF) {
444 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
445 SIOP_ISTAT, ISTAT_INTF);
446 goto again;
447 }
448
449 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
450 (ISTAT_DIP | ISTAT_ABRT)) {
451 /* clear abort */
452 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
453 SIOP_ISTAT, 0);
454 }
455
456 /* get CMD from T/L/Q */
457 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
458 SIOP_SCRATCHC);
459 #ifdef SIOP_DEBUG_INTR
460 printf("interrupt, istat=0x%x tflags=0x%x "
461 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
462 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
463 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
464 SIOP_DSP) -
465 sc->sc_c.sc_scriptaddr));
466 #endif
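	/*
	 * SCRATCHC holds the T/L/Q word maintained by the SCRIPTS:
	 * target in bits 8-15, lun in bits 16-23, tag in bits 24-31,
	 * with the A_f_c_* flags indicating which fields are valid.
	 */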
467 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
468 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
469 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
470 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
471 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
472
473 if (target >= 0 && lun >= 0) {
474 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
475 if (esiop_target == NULL) {
476 printf("esiop_target (target %d) not valid\n", target);
477 goto none;
478 }
479 esiop_lun = esiop_target->esiop_lun[lun];
480 if (esiop_lun == NULL) {
481 printf("esiop_lun (target %d lun %d) not valid\n",
482 target, lun);
483 goto none;
484 }
485 esiop_cmd =
486 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
487 if (esiop_cmd == NULL) {
488 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
489 target, lun, tag);
490 goto none;
491 }
492 xs = esiop_cmd->cmd_c.xs;
493 #ifdef DIAGNOSTIC
494 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
495 printf("esiop_cmd (target %d lun %d) "
496 "not active (%d)\n", target, lun,
497 esiop_cmd->cmd_c.status);
498 goto none;
499 }
500 #endif
501 esiop_table_sync(esiop_cmd,
502 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
503 } else {
504 none:
505 xs = NULL;
506 esiop_target = NULL;
507 esiop_lun = NULL;
508 esiop_cmd = NULL;
509 }
510 if (istat & ISTAT_DIP) {
511 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
512 SIOP_DSTAT);
513 if (dstat & DSTAT_ABRT) {
514 /* was probably generated by a bus reset IOCTL */
515 if ((dstat & DSTAT_DFE) == 0)
516 siop_clearfifo(&sc->sc_c);
517 goto reset;
518 }
519 if (dstat & DSTAT_SSI) {
 520 printf("single step dsp 0x%08x dsa 0x%08x\n",
521 (int)(bus_space_read_4(sc->sc_c.sc_rt,
522 sc->sc_c.sc_rh, SIOP_DSP) -
523 sc->sc_c.sc_scriptaddr),
524 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
525 SIOP_DSA));
526 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
527 (istat & ISTAT_SIP) == 0) {
528 bus_space_write_1(sc->sc_c.sc_rt,
529 sc->sc_c.sc_rh, SIOP_DCNTL,
530 bus_space_read_1(sc->sc_c.sc_rt,
531 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
532 }
533 return 1;
534 }
535
536 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
537 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
538 if (dstat & DSTAT_IID)
539 printf(" Illegal instruction");
540 if (dstat & DSTAT_BF)
541 printf(" bus fault");
542 if (dstat & DSTAT_MDPE)
543 printf(" parity");
544 if (dstat & DSTAT_DFE)
545 printf(" dma fifo empty");
546 else
547 siop_clearfifo(&sc->sc_c);
548 printf(", DSP=0x%x DSA=0x%x: ",
549 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
550 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
551 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
552 if (esiop_cmd)
553 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
554 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
555 le32toh(esiop_cmd->cmd_tables->status));
556 else
557 printf(" current T/L/Q invalid\n");
558 need_reset = 1;
559 }
560 }
561 if (istat & ISTAT_SIP) {
562 if (istat & ISTAT_DIP)
563 delay(10);
564 /*
 565 * Can't read sist0 & sist1 independently, or we have to
 566 * insert a delay
567 */
568 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
569 SIOP_SIST0);
570 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
571 SIOP_SSTAT1);
572 #ifdef SIOP_DEBUG_INTR
573 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
574 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
575 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
576 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
577 SIOP_DSP) -
578 sc->sc_c.sc_scriptaddr));
579 #endif
580 if (sist & SIST0_RST) {
581 esiop_handle_reset(sc);
582 /* no table to flush here */
583 return 1;
584 }
585 if (sist & SIST0_SGE) {
586 if (esiop_cmd)
587 scsipi_printaddr(xs->xs_periph);
588 else
589 printf("%s:", sc->sc_c.sc_dev.dv_xname);
590 printf("scsi gross error\n");
591 if (esiop_target)
592 esiop_target->target_c.flags &= ~TARF_DT;
593 goto reset;
594 }
595 if ((sist & SIST0_MA) && need_reset == 0) {
596 if (esiop_cmd) {
597 int scratchc0;
598 dstat = bus_space_read_1(sc->sc_c.sc_rt,
599 sc->sc_c.sc_rh, SIOP_DSTAT);
600 /*
601 * first restore DSA, in case we were in a S/G
602 * operation.
603 */
604 bus_space_write_4(sc->sc_c.sc_rt,
605 sc->sc_c.sc_rh,
606 SIOP_DSA, esiop_cmd->cmd_c.dsa);
607 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
608 sc->sc_c.sc_rh, SIOP_SCRATCHC);
609 switch (sstat1 & SSTAT1_PHASE_MASK) {
610 case SSTAT1_PHASE_STATUS:
611 /*
612 * previous phase may be aborted for any reason
 613 * (for example, the target has less data to
614 * transfer than requested). Just go to status
615 * and the command should terminate.
616 */
617 INCSTAT(esiop_stat_intr_shortxfer);
618 if ((dstat & DSTAT_DFE) == 0)
619 siop_clearfifo(&sc->sc_c);
620 /* no table to flush here */
621 CALL_SCRIPT(Ent_status);
622 return 1;
623 case SSTAT1_PHASE_MSGIN:
624 /*
625 * target may be ready to disconnect
626 * Save data pointers just in case.
627 */
628 INCSTAT(esiop_stat_intr_xferdisc);
629 if (scratchc0 & A_f_c_data)
630 siop_sdp(&esiop_cmd->cmd_c);
631 else if ((dstat & DSTAT_DFE) == 0)
632 siop_clearfifo(&sc->sc_c);
633 bus_space_write_1(sc->sc_c.sc_rt,
634 sc->sc_c.sc_rh, SIOP_SCRATCHC,
635 scratchc0 & ~A_f_c_data);
636 esiop_table_sync(esiop_cmd,
637 BUS_DMASYNC_PREREAD |
638 BUS_DMASYNC_PREWRITE);
639 CALL_SCRIPT(Ent_msgin);
640 return 1;
641 }
642 printf("%s: unexpected phase mismatch %d\n",
643 sc->sc_c.sc_dev.dv_xname,
644 sstat1 & SSTAT1_PHASE_MASK);
645 } else {
646 printf("%s: phase mismatch without command\n",
647 sc->sc_c.sc_dev.dv_xname);
648 }
649 need_reset = 1;
650 }
651 if (sist & SIST0_PAR) {
652 /* parity error, reset */
653 if (esiop_cmd)
654 scsipi_printaddr(xs->xs_periph);
655 else
656 printf("%s:", sc->sc_c.sc_dev.dv_xname);
657 printf("parity error\n");
658 if (esiop_target)
659 esiop_target->target_c.flags &= ~TARF_DT;
660 goto reset;
661 }
662 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
663 /*
 664 * selection timeout, assume there's no device here.
 665 * We also have to update the ring pointer ourselves.
666 */
667 slot = bus_space_read_1(sc->sc_c.sc_rt,
668 sc->sc_c.sc_rh, SIOP_SCRATCHE);
669 esiop_script_sync(sc,
670 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
671 #ifdef SIOP_DEBUG_SCHED
672 printf("sel timeout target %d, slot %d\n", target, slot);
673 #endif
674 /*
675 * mark this slot as free, and advance to next slot
676 */
677 esiop_script_write(sc,
678 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
679 A_f_cmd_free);
680 addr = bus_space_read_4(sc->sc_c.sc_rt,
681 sc->sc_c.sc_rh, SIOP_SCRATCHD);
682 if (slot < (A_ncmd_slots - 1)) {
683 bus_space_write_1(sc->sc_c.sc_rt,
684 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
685 addr = addr + sizeof(struct esiop_slot);
686 } else {
687 bus_space_write_1(sc->sc_c.sc_rt,
688 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
689 addr = sc->sc_c.sc_scriptaddr +
690 sc->sc_shedoffset * sizeof(u_int32_t);
691 }
692 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
693 SIOP_SCRATCHD, addr);
694 esiop_script_sync(sc,
695 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
696 if (esiop_cmd) {
697 esiop_cmd->cmd_c.status = CMDST_DONE;
698 xs->error = XS_SELTIMEOUT;
699 freetarget = 1;
700 goto end;
701 } else {
702 printf("%s: selection timeout without "
703 "command, target %d (sdid 0x%x), "
704 "slot %d\n",
705 sc->sc_c.sc_dev.dv_xname, target,
706 bus_space_read_1(sc->sc_c.sc_rt,
707 sc->sc_c.sc_rh, SIOP_SDID), slot);
708 need_reset = 1;
709 }
710 }
711 if (sist & SIST0_UDC) {
712 /*
713 * unexpected disconnect. Usually the target signals
714 * a fatal condition this way. Attempt to get sense.
715 */
716 if (esiop_cmd) {
717 esiop_cmd->cmd_tables->status =
718 htole32(SCSI_CHECK);
719 goto end;
720 }
721 printf("%s: unexpected disconnect without "
722 "command\n", sc->sc_c.sc_dev.dv_xname);
723 goto reset;
724 }
725 if (sist & (SIST1_SBMC << 8)) {
726 /* SCSI bus mode change */
727 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
728 goto reset;
729 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
730 /*
731 * we have a script interrupt, it will
732 * restart the script.
733 */
734 goto scintr;
735 }
736 /*
 737 * else we have to restart it ourselves, at the
738 * interrupted instruction.
739 */
740 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
741 SIOP_DSP,
742 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
743 SIOP_DSP) - 8);
744 return 1;
745 }
 746 /* Else it's an unhandled exception (for now). */
747 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
748 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
749 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
750 SIOP_SSTAT1),
751 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
752 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
753 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
754 if (esiop_cmd) {
755 esiop_cmd->cmd_c.status = CMDST_DONE;
756 xs->error = XS_SELTIMEOUT;
757 goto end;
758 }
759 need_reset = 1;
760 }
761 if (need_reset) {
762 reset:
763 /* fatal error, reset the bus */
764 siop_resetbus(&sc->sc_c);
765 /* no table to flush here */
766 return 1;
767 }
768
769 scintr:
770 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
771 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
772 SIOP_DSPS);
773 #ifdef SIOP_DEBUG_INTR
774 printf("script interrupt 0x%x\n", irqcode);
775 #endif
776 /*
 777 * no command, or an inactive command, is only valid for a
 778 * reselect interrupt
779 */
780 if ((irqcode & 0x80) == 0) {
781 if (esiop_cmd == NULL) {
782 printf(
783 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
784 sc->sc_c.sc_dev.dv_xname, irqcode);
785 goto reset;
786 }
787 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
788 printf("%s: command with invalid status "
789 "(IRQ code 0x%x current status %d) !\n",
790 sc->sc_c.sc_dev.dv_xname,
791 irqcode, esiop_cmd->cmd_c.status);
792 xs = NULL;
793 }
794 }
795 switch(irqcode) {
796 case A_int_err:
797 printf("error, DSP=0x%x\n",
798 (int)(bus_space_read_4(sc->sc_c.sc_rt,
799 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
800 if (xs) {
801 xs->error = XS_SELTIMEOUT;
802 goto end;
803 } else {
804 goto reset;
805 }
806 case A_int_msgin:
807 {
808 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
809 sc->sc_c.sc_rh, SIOP_SFBR);
810 if (msgin == MSG_MESSAGE_REJECT) {
811 int msg, extmsg;
812 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
813 /*
 814 * message was part of an identify +
 815 * something else. Identify shouldn't
 816 * have been rejected.
817 */
818 msg =
819 esiop_cmd->cmd_tables->msg_out[1];
820 extmsg =
821 esiop_cmd->cmd_tables->msg_out[3];
822 } else {
823 msg =
824 esiop_cmd->cmd_tables->msg_out[0];
825 extmsg =
826 esiop_cmd->cmd_tables->msg_out[2];
827 }
828 if (msg == MSG_MESSAGE_REJECT) {
829 /* MSG_REJECT for a MSG_REJECT !*/
830 if (xs)
831 scsipi_printaddr(xs->xs_periph);
832 else
833 printf("%s: ",
834 sc->sc_c.sc_dev.dv_xname);
835 printf("our reject message was "
836 "rejected\n");
837 goto reset;
838 }
839 if (msg == MSG_EXTENDED &&
840 extmsg == MSG_EXT_WDTR) {
841 /* WDTR rejected, initiate sync */
842 if ((esiop_target->target_c.flags &
843 TARF_SYNC) == 0) {
844 esiop_target->target_c.status =
845 TARST_OK;
846 siop_update_xfer_mode(&sc->sc_c,
847 target);
848 /* no table to flush here */
849 CALL_SCRIPT(Ent_msgin_ack);
850 return 1;
851 }
852 esiop_target->target_c.status =
853 TARST_SYNC_NEG;
854 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
855 sc->sc_c.st_minsync,
856 sc->sc_c.maxoff);
857 esiop_table_sync(esiop_cmd,
858 BUS_DMASYNC_PREREAD |
859 BUS_DMASYNC_PREWRITE);
860 CALL_SCRIPT(Ent_send_msgout);
861 return 1;
862 } else if (msg == MSG_EXTENDED &&
863 extmsg == MSG_EXT_SDTR) {
864 /* sync rejected */
865 esiop_target->target_c.offset = 0;
866 esiop_target->target_c.period = 0;
867 esiop_target->target_c.status =
868 TARST_OK;
869 siop_update_xfer_mode(&sc->sc_c,
870 target);
871 /* no table to flush here */
872 CALL_SCRIPT(Ent_msgin_ack);
873 return 1;
874 } else if (msg == MSG_EXTENDED &&
875 extmsg == MSG_EXT_PPR) {
876 /* PPR rejected */
877 esiop_target->target_c.offset = 0;
878 esiop_target->target_c.period = 0;
879 esiop_target->target_c.status =
880 TARST_OK;
881 siop_update_xfer_mode(&sc->sc_c,
882 target);
883 /* no table to flush here */
884 CALL_SCRIPT(Ent_msgin_ack);
885 return 1;
886 } else if (msg == MSG_SIMPLE_Q_TAG ||
887 msg == MSG_HEAD_OF_Q_TAG ||
888 msg == MSG_ORDERED_Q_TAG) {
889 if (esiop_handle_qtag_reject(
890 esiop_cmd) == -1)
891 goto reset;
892 CALL_SCRIPT(Ent_msgin_ack);
893 return 1;
894 }
895 if (xs)
896 scsipi_printaddr(xs->xs_periph);
897 else
898 printf("%s: ",
899 sc->sc_c.sc_dev.dv_xname);
900 if (msg == MSG_EXTENDED) {
901 printf("scsi message reject, extended "
902 "message sent was 0x%x\n", extmsg);
903 } else {
904 printf("scsi message reject, message "
905 "sent was 0x%x\n", msg);
906 }
907 /* no table to flush here */
908 CALL_SCRIPT(Ent_msgin_ack);
909 return 1;
910 }
911 if (xs)
912 scsipi_printaddr(xs->xs_periph);
913 else
914 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
915 printf("unhandled message 0x%x\n",
916 esiop_cmd->cmd_tables->msg_in[0]);
917 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
918 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
919 esiop_table_sync(esiop_cmd,
920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
921 CALL_SCRIPT(Ent_send_msgout);
922 return 1;
923 }
924 case A_int_extmsgin:
925 #ifdef SIOP_DEBUG_INTR
926 printf("extended message: msg 0x%x len %d\n",
927 esiop_cmd->cmd_tables->msg_in[2],
928 esiop_cmd->cmd_tables->msg_in[1]);
929 #endif
930 if (esiop_cmd->cmd_tables->msg_in[1] >
931 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
932 printf("%s: extended message too big (%d)\n",
933 sc->sc_c.sc_dev.dv_xname,
934 esiop_cmd->cmd_tables->msg_in[1]);
935 esiop_cmd->cmd_tables->t_extmsgdata.count =
936 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
937 esiop_table_sync(esiop_cmd,
938 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
939 CALL_SCRIPT(Ent_get_extmsgdata);
940 return 1;
941 case A_int_extmsgdata:
942 #ifdef SIOP_DEBUG_INTR
943 {
944 int i;
945 printf("extended message: 0x%x, data:",
946 esiop_cmd->cmd_tables->msg_in[2]);
947 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
948 i++)
949 printf(" 0x%x",
950 esiop_cmd->cmd_tables->msg_in[i]);
951 printf("\n");
952 }
953 #endif
954 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
955 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
956 case SIOP_NEG_MSGOUT:
957 esiop_update_scntl3(sc,
958 esiop_cmd->cmd_c.siop_target);
959 esiop_table_sync(esiop_cmd,
960 BUS_DMASYNC_PREREAD |
961 BUS_DMASYNC_PREWRITE);
962 CALL_SCRIPT(Ent_send_msgout);
963 return 1;
964 case SIOP_NEG_ACK:
965 esiop_update_scntl3(sc,
966 esiop_cmd->cmd_c.siop_target);
967 CALL_SCRIPT(Ent_msgin_ack);
968 return 1;
969 default:
970 panic("invalid retval from "
 971 "siop_ppr_neg()");
972 }
973 return 1;
974 }
975 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
976 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
977 case SIOP_NEG_MSGOUT:
978 esiop_update_scntl3(sc,
979 esiop_cmd->cmd_c.siop_target);
980 esiop_table_sync(esiop_cmd,
981 BUS_DMASYNC_PREREAD |
982 BUS_DMASYNC_PREWRITE);
983 CALL_SCRIPT(Ent_send_msgout);
984 return 1;
985 case SIOP_NEG_ACK:
986 esiop_update_scntl3(sc,
987 esiop_cmd->cmd_c.siop_target);
988 CALL_SCRIPT(Ent_msgin_ack);
989 return 1;
990 default:
991 panic("invalid retval from "
992 "siop_wdtr_neg()");
993 }
994 return 1;
995 }
996 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
997 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
998 case SIOP_NEG_MSGOUT:
999 esiop_update_scntl3(sc,
1000 esiop_cmd->cmd_c.siop_target);
1001 esiop_table_sync(esiop_cmd,
1002 BUS_DMASYNC_PREREAD |
1003 BUS_DMASYNC_PREWRITE);
1004 CALL_SCRIPT(Ent_send_msgout);
1005 return 1;
1006 case SIOP_NEG_ACK:
1007 esiop_update_scntl3(sc,
1008 esiop_cmd->cmd_c.siop_target);
1009 CALL_SCRIPT(Ent_msgin_ack);
1010 return 1;
1011 default:
1012 panic("invalid retval from "
 1013 "siop_sdtr_neg()");
1014 }
1015 return 1;
1016 }
1017 /* send a message reject */
1018 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1019 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1020 esiop_table_sync(esiop_cmd,
1021 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1022 CALL_SCRIPT(Ent_send_msgout);
1023 return 1;
1024 case A_int_disc:
1025 INCSTAT(esiop_stat_intr_sdp);
1026 offset = bus_space_read_1(sc->sc_c.sc_rt,
1027 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1028 #ifdef SIOP_DEBUG_DR
1029 printf("disconnect offset %d\n", offset);
1030 #endif
1031 if (offset > SIOP_NSG) {
1032 printf("%s: bad offset for disconnect (%d)\n",
1033 sc->sc_c.sc_dev.dv_xname, offset);
1034 goto reset;
1035 }
1036 /*
1037 * offset == SIOP_NSG may be a valid condition if
1038 * we get a sdp when the xfer is done.
1039 * Don't call memmove in this case.
1040 */
1041 if (offset < SIOP_NSG) {
1042 memmove(&esiop_cmd->cmd_tables->data[0],
1043 &esiop_cmd->cmd_tables->data[offset],
1044 (SIOP_NSG - offset) * sizeof(scr_table_t));
1045 esiop_table_sync(esiop_cmd,
1046 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1047 }
1048 CALL_SCRIPT(Ent_script_sched);
1049 return 1;
1050 case A_int_resfail:
1051 printf("reselect failed\n");
1052 CALL_SCRIPT(Ent_script_sched);
1053 return 1;
1054 case A_int_done:
1055 if (xs == NULL) {
1056 printf("%s: done without command\n",
1057 sc->sc_c.sc_dev.dv_xname);
1058 CALL_SCRIPT(Ent_script_sched);
1059 return 1;
1060 }
1061 #ifdef SIOP_DEBUG_INTR
1062 printf("done, DSA=0x%lx target id 0x%x last msg "
1063 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1064 le32toh(esiop_cmd->cmd_tables->id),
1065 esiop_cmd->cmd_tables->msg_in[0],
1066 le32toh(esiop_cmd->cmd_tables->status));
1067 #endif
1068 INCSTAT(esiop_stat_intr_done);
1069 esiop_cmd->cmd_c.status = CMDST_DONE;
1070 goto end;
1071 default:
1072 printf("unknown irqcode %x\n", irqcode);
1073 if (xs) {
1074 xs->error = XS_SELTIMEOUT;
1075 goto end;
1076 }
1077 goto reset;
1078 }
1079 return 1;
1080 }
 1081 /* We just shouldn't get here */
 1082 panic("esiop_intr: I shouldn't be here!");
1083
1084 end:
1085 /*
1086 * restart the script now if command completed properly
 1087 * Otherwise wait for esiop_scsicmd_end(), we may need to cleanup the
1088 * queue
1089 */
1090 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1091 #ifdef SIOP_DEBUG_INTR
1092 printf("esiop_intr end: status %d\n", xs->status);
1093 #endif
1094 if (tag >= 0)
1095 esiop_lun->tactive[tag] = NULL;
1096 else
1097 esiop_lun->active = NULL;
1098 esiop_scsicmd_end(esiop_cmd);
1099 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1100 esiop_del_dev(sc, target, lun);
1101 CALL_SCRIPT(Ent_script_sched);
1102 return 1;
1103 }
1104
1105 void
1106 esiop_scsicmd_end(esiop_cmd)
1107 struct esiop_cmd *esiop_cmd;
1108 {
1109 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1110 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1111
1112 switch(xs->status) {
1113 case SCSI_OK:
1114 xs->error = XS_NOERROR;
1115 break;
1116 case SCSI_BUSY:
1117 xs->error = XS_BUSY;
1118 break;
1119 case SCSI_CHECK:
1120 xs->error = XS_BUSY;
1121 /* remove commands in the queue and scheduler */
1122 esiop_unqueue(sc, xs->xs_periph->periph_target,
1123 xs->xs_periph->periph_lun);
1124 break;
1125 case SCSI_QUEUE_FULL:
1126 INCSTAT(esiop_stat_intr_qfull);
1127 #ifdef SIOP_DEBUG
1128 printf("%s:%d:%d: queue full (tag %d)\n",
1129 sc->sc_c.sc_dev.dv_xname,
1130 xs->xs_periph->periph_target,
1131 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1132 #endif
1133 xs->error = XS_BUSY;
1134 break;
1135 case SCSI_SIOP_NOCHECK:
1136 /*
1137 * don't check status, xs->error is already valid
1138 */
1139 break;
1140 case SCSI_SIOP_NOSTATUS:
1141 /*
1142 * the status byte was not updated, cmd was
1143 * aborted
1144 */
1145 xs->error = XS_SELTIMEOUT;
1146 break;
1147 default:
1148 xs->error = XS_DRIVER_STUFFUP;
1149 }
1150 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1151 bus_dmamap_sync(sc->sc_c.sc_dmat,
1152 esiop_cmd->cmd_c.dmamap_data, 0,
1153 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1154 (xs->xs_control & XS_CTL_DATA_IN) ?
1155 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1156 bus_dmamap_unload(sc->sc_c.sc_dmat,
1157 esiop_cmd->cmd_c.dmamap_data);
1158 }
1159 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1160 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1161 esiop_cmd->cmd_c.status = CMDST_FREE;
1162 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1163 xs->resid = 0;
1164 scsipi_done (xs);
1165 }
1166
1167 void
1168 esiop_checkdone(sc)
1169 struct esiop_softc *sc;
1170 {
1171 int target, lun, tag;
1172 struct esiop_target *esiop_target;
1173 struct esiop_lun *esiop_lun;
1174 struct esiop_cmd *esiop_cmd;
1175 u_int32_t slot;
1176 int needsync = 0;
1177 int status;
1178 u_int32_t sem;
1179
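	/*
	 * The semaphore word is shared with the SCRIPTS: A_sem_done means
	 * the done ring has new entries to process, A_sem_start means at
	 * least one queued command has been started, so scheduler slots
	 * may be free again.
	 */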
1180 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1181 sem = esiop_script_read(sc, sc->sc_semoffset);
1182 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1183 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1184 /*
 1185 * at least one command has been started,
1186 * so we should have free slots now
1187 */
1188 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1189 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1190 }
1191 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1192
1193 if ((sem & A_sem_done) == 0) {
1194 /* no pending done command */
1195 return;
1196 }
1197
1198 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1199 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
1200 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1201 next:
1202 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1203 if (needsync)
1204 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1205 sc->sc_done_offset,
1206 A_ndone_slots * sizeof(u_int32_t),
1207 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1208 return;
1209 }
1210
1211 needsync = 1;
1212
1213 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1214 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1215 sc->sc_currdoneslot += 1;
1216 if (sc->sc_currdoneslot == A_ndone_slots)
1217 sc->sc_currdoneslot = 0;
1218
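	/*
	 * Each done-ring entry uses the same T/L/Q encoding as SCRATCHC:
	 * target in bits 8-15, lun in bits 16-23, tag in bits 24-31.
	 */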
1219 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1220 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1221 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1222
1223 esiop_target = (target >= 0) ?
1224 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1225 if (esiop_target == NULL) {
1226 printf("esiop_target (target %d) not valid\n", target);
1227 goto next;
1228 }
1229 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1230 if (esiop_lun == NULL) {
1231 printf("esiop_lun (target %d lun %d) not valid\n",
1232 target, lun);
1233 goto next;
1234 }
1235 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1236 if (esiop_cmd == NULL) {
1237 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1238 target, lun, tag);
1239 goto next;
1240 }
1241
1242 esiop_table_sync(esiop_cmd,
1243 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1244 status = le32toh(esiop_cmd->cmd_tables->status);
1245 #ifdef DIAGNOSTIC
1246 if (status != SCSI_OK) {
1247 printf("command for T/L/Q %d/%d/%d status %d\n",
1248 target, lun, tag, status);
1249 goto next;
1250 }
1251
1252 #endif
1253 /* Ok, this command has been handled */
1254 esiop_cmd->cmd_c.xs->status = status;
1255 if (tag >= 0)
1256 esiop_lun->tactive[tag] = NULL;
1257 else
1258 esiop_lun->active = NULL;
1259 esiop_scsicmd_end(esiop_cmd);
1260 goto next;
1261 }
1262
1263 void
1264 esiop_unqueue(sc, target, lun)
1265 struct esiop_softc *sc;
1266 int target;
1267 int lun;
1268 {
1269 int slot, tag;
1270 u_int32_t slotdsa;
1271 struct esiop_cmd *esiop_cmd;
1272 struct esiop_lun *esiop_lun =
1273 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1274
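	/*
	 * Walk the scheduler ring looking for commands queued for this
	 * target/lun that the SCRIPTS has not started yet; mark their slots
	 * with A_f_cmd_ignore so they are skipped, and complete them with
	 * XS_REQUEUE so the upper layer requeues them.
	 */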
1275 /* first make sure to read valid data */
1276 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1277
1278 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1279 /* look for commands in the scheduler, not yet started */
1280 if (esiop_lun->tactive[tag] == NULL)
1281 continue;
1282 esiop_cmd = esiop_lun->tactive[tag];
1283 for (slot = 0; slot < A_ncmd_slots; slot++) {
1284 slotdsa = esiop_script_read(sc,
1285 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1286 /* if the slot has any flag, it won't match the DSA */
1287 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1288 /* Mark this slot as ignore */
1289 esiop_script_write(sc,
1290 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1291 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1292 /* ask to requeue */
1293 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1294 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1295 esiop_lun->tactive[tag] = NULL;
1296 esiop_scsicmd_end(esiop_cmd);
1297 break;
1298 }
1299 }
1300 }
1301 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1302 }
1303
1304 /*
1305 * handle a rejected queue tag message: the command will run untagged,
 1306 * we have to adjust the reselect script.
1307 */
1308
1309
1310 int
1311 esiop_handle_qtag_reject(esiop_cmd)
1312 struct esiop_cmd *esiop_cmd;
1313 {
1314 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1315 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1316 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1317 int tag = esiop_cmd->cmd_tables->msg_out[2];
1318 struct esiop_target *esiop_target =
1319 (struct esiop_target*)sc->sc_c.targets[target];
1320 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1321
1322 #ifdef SIOP_DEBUG
1323 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1324 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1325 esiop_cmd->cmd_c.status);
1326 #endif
1327
1328 if (esiop_lun->active != NULL) {
1329 printf("%s: untagged command already running for target %d "
1330 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1331 target, lun, esiop_lun->active->cmd_c.status);
1332 return -1;
1333 }
1334 /* clear tag slot */
1335 esiop_lun->tactive[tag] = NULL;
1336 /* add command to non-tagged slot */
1337 esiop_lun->active = esiop_cmd;
1338 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1339 esiop_cmd->cmd_c.tag = -1;
1340 /* update DSA table */
1341 esiop_script_write(sc, esiop_target->lun_table_offset +
1342 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1343 esiop_cmd->cmd_c.dsa);
1344 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1345 return 0;
1346 }
1347
1348 /*
1349 * handle a bus reset: reset chip, unqueue all active commands, free all
 1350 * target structs and report the loss to the upper layer.
 1351 * As the upper layer may requeue immediately, we have to first store
1352 * all active commands in a temporary queue.
1353 */
1354 void
1355 esiop_handle_reset(sc)
1356 struct esiop_softc *sc;
1357 {
1358 struct esiop_cmd *esiop_cmd;
1359 struct esiop_lun *esiop_lun;
1360 int target, lun, tag;
1361 /*
1362 * scsi bus reset. reset the chip and restart
1363 * the queue. Need to clean up all active commands
1364 */
1365 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1366 /* stop, reset and restart the chip */
1367 esiop_reset(sc);
1368
1369 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1370 /* chip has been reset, all slots are free now */
1371 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1372 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1373 }
1374 /*
 1375 * Process all commands: first the completed commands, then commands
1376 * being executed
1377 */
1378 esiop_checkdone(sc);
1379 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1380 target++) {
1381 struct esiop_target *esiop_target =
1382 (struct esiop_target *)sc->sc_c.targets[target];
1383 if (esiop_target == NULL)
1384 continue;
1385 for (lun = 0; lun < 8; lun++) {
1386 esiop_lun = esiop_target->esiop_lun[lun];
1387 if (esiop_lun == NULL)
1388 continue;
1389 for (tag = -1; tag <
1390 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1391 ESIOP_NTAG : 0);
1392 tag++) {
1393 if (tag >= 0)
1394 esiop_cmd = esiop_lun->tactive[tag];
1395 else
1396 esiop_cmd = esiop_lun->active;
1397 if (esiop_cmd == NULL)
1398 continue;
1399 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1400 printf("command with tag id %d reset\n", tag);
1401 esiop_cmd->cmd_c.xs->error =
1402 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1403 XS_TIMEOUT : XS_RESET;
1404 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1405 if (tag >= 0)
1406 esiop_lun->tactive[tag] = NULL;
1407 else
1408 esiop_lun->active = NULL;
1409 esiop_cmd->cmd_c.status = CMDST_DONE;
1410 esiop_scsicmd_end(esiop_cmd);
1411 }
1412 }
1413 sc->sc_c.targets[target]->status = TARST_ASYNC;
1414 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1415 sc->sc_c.targets[target]->period =
1416 sc->sc_c.targets[target]->offset = 0;
1417 siop_update_xfer_mode(&sc->sc_c, target);
1418 }
1419
1420 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1421 }
1422
1423 void
1424 esiop_scsipi_request(chan, req, arg)
1425 struct scsipi_channel *chan;
1426 scsipi_adapter_req_t req;
1427 void *arg;
1428 {
1429 struct scsipi_xfer *xs;
1430 struct scsipi_periph *periph;
1431 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1432 struct esiop_cmd *esiop_cmd;
1433 struct esiop_target *esiop_target;
1434 int s, error, i;
1435 int target;
1436 int lun;
1437
1438 switch (req) {
1439 case ADAPTER_REQ_RUN_XFER:
1440 xs = arg;
1441 periph = xs->xs_periph;
1442 target = periph->periph_target;
1443 lun = periph->periph_lun;
1444
1445 s = splbio();
1446 /*
1447 * first check if there are pending complete commands.
1448 * this can free us some resources (in the rings for example).
1449 * we have to lock it to avoid recursion.
1450 */
1451 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1452 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1453 esiop_checkdone(sc);
1454 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1455 }
1456 #ifdef SIOP_DEBUG_SCHED
1457 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1458 xs->xs_tag_type, xs->xs_tag_id);
1459 #endif
1460 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1461 if (esiop_cmd == NULL) {
1462 xs->error = XS_RESOURCE_SHORTAGE;
1463 scsipi_done(xs);
1464 splx(s);
1465 return;
1466 }
1467 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1468 #ifdef DIAGNOSTIC
1469 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1470 panic("siop_scsicmd: new cmd not free");
1471 #endif
1472 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1473 if (esiop_target == NULL) {
1474 #ifdef SIOP_DEBUG
1475 printf("%s: alloc siop_target for target %d\n",
1476 sc->sc_c.sc_dev.dv_xname, target);
1477 #endif
1478 sc->sc_c.targets[target] =
1479 malloc(sizeof(struct esiop_target),
1480 M_DEVBUF, M_NOWAIT | M_ZERO);
1481 if (sc->sc_c.targets[target] == NULL) {
1482 printf("%s: can't malloc memory for "
1483 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1484 target);
1485 xs->error = XS_RESOURCE_SHORTAGE;
1486 scsipi_done(xs);
1487 splx(s);
1488 return;
1489 }
1490 esiop_target =
1491 (struct esiop_target*)sc->sc_c.targets[target];
1492 esiop_target->target_c.status = TARST_PROBING;
1493 esiop_target->target_c.flags = 0;
1494 esiop_target->target_c.id =
1495 sc->sc_c.clock_div << 24; /* scntl3 */
1496 esiop_target->target_c.id |= target << 16; /* id */
1497 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1498
1499 for (i=0; i < 8; i++)
1500 esiop_target->esiop_lun[i] = NULL;
1501 esiop_target_register(sc, target);
1502 }
1503 if (esiop_target->esiop_lun[lun] == NULL) {
1504 esiop_target->esiop_lun[lun] =
1505 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1506 M_NOWAIT|M_ZERO);
1507 if (esiop_target->esiop_lun[lun] == NULL) {
1508 printf("%s: can't alloc esiop_lun for "
1509 "target %d lun %d\n",
1510 sc->sc_c.sc_dev.dv_xname, target, lun);
1511 xs->error = XS_RESOURCE_SHORTAGE;
1512 scsipi_done(xs);
1513 splx(s);
1514 return;
1515 }
1516 }
1517 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1518 esiop_cmd->cmd_c.xs = xs;
1519 esiop_cmd->cmd_c.flags = 0;
1520 esiop_cmd->cmd_c.status = CMDST_READY;
1521
1522 /* load the DMA maps */
1523 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1524 esiop_cmd->cmd_c.dmamap_cmd,
1525 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1526 if (error) {
1527 printf("%s: unable to load cmd DMA map: %d\n",
1528 sc->sc_c.sc_dev.dv_xname, error);
1529 xs->error = XS_DRIVER_STUFFUP;
1530 scsipi_done(xs);
1531 splx(s);
1532 return;
1533 }
1534 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1535 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1536 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1537 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1538 ((xs->xs_control & XS_CTL_DATA_IN) ?
1539 BUS_DMA_READ : BUS_DMA_WRITE));
1540 if (error) {
 1541 printf("%s: unable to load data DMA map: %d\n",
1542 sc->sc_c.sc_dev.dv_xname, error);
1543 xs->error = XS_DRIVER_STUFFUP;
1544 scsipi_done(xs);
1545 bus_dmamap_unload(sc->sc_c.sc_dmat,
1546 esiop_cmd->cmd_c.dmamap_cmd);
1547 splx(s);
1548 return;
1549 }
1550 bus_dmamap_sync(sc->sc_c.sc_dmat,
1551 esiop_cmd->cmd_c.dmamap_data, 0,
1552 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1553 (xs->xs_control & XS_CTL_DATA_IN) ?
1554 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1555 }
1556 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1557 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1558 BUS_DMASYNC_PREWRITE);
1559
1560 if (xs->xs_tag_type)
1561 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1562 else
1563 esiop_cmd->cmd_c.tag = -1;
1564 siop_setuptables(&esiop_cmd->cmd_c);
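		/*
		 * Build the tlq word the reselect code uses to identify this
		 * command: flag bits plus target (bits 8-15), lun (16-23)
		 * and, for tagged commands, the tag (24-31).
		 */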
1565 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1566 htole32(A_f_c_target | A_f_c_lun);
1567 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1568 htole32((target << 8) | (lun << 16));
1569 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1570 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1571 htole32(A_f_c_tag);
1572 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1573 htole32(esiop_cmd->cmd_c.tag << 24);
1574 }
1575
1576 esiop_table_sync(esiop_cmd,
1577 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1578 esiop_start(sc, esiop_cmd);
1579 if (xs->xs_control & XS_CTL_POLL) {
1580 /* poll for command completion */
1581 while ((xs->xs_status & XS_STS_DONE) == 0) {
1582 delay(1000);
1583 esiop_intr(sc);
1584 }
1585 }
1586 splx(s);
1587 return;
1588
1589 case ADAPTER_REQ_GROW_RESOURCES:
1590 #ifdef SIOP_DEBUG
1591 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1592 sc->sc_c.sc_adapt.adapt_openings);
1593 #endif
1594 esiop_morecbd(sc);
1595 return;
1596
1597 case ADAPTER_REQ_SET_XFER_MODE:
1598 {
1599 struct scsipi_xfer_mode *xm = arg;
1600 if (sc->sc_c.targets[xm->xm_target] == NULL)
1601 return;
1602 s = splbio();
1603 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1604 (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1605 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1606 /* allocate tag tables for this device */
1607 for (lun = 0;
1608 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1609 if (sc->sc_c.sc_chan.chan_periphs[
1610 xm->xm_target][lun])
1611 esiop_add_dev(sc, xm->xm_target, lun);
1612 }
1613 }
1614 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1615 (sc->sc_c.features & SF_BUS_WIDE))
1616 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1617 if (xm->xm_mode & PERIPH_CAP_SYNC)
1618 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1619 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1620 (sc->sc_c.features & SF_CHIP_DT))
1621 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1622 if ((xm->xm_mode &
1623 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1624 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1625 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1626
1627 splx(s);
1628 }
1629 }
1630 }
1631
1632 static void
1633 esiop_start(sc, esiop_cmd)
1634 struct esiop_softc *sc;
1635 struct esiop_cmd *esiop_cmd;
1636 {
1637 struct esiop_lun *esiop_lun;
1638 struct esiop_target *esiop_target;
1639 int timeout;
1640 int target, lun, slot;
1641
1642 /*
1643 * first make sure to read valid data
1644 */
1645 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1646
1647 /*
1648 * We use a circular queue here. sc->sc_currschedslot points to a
1649 * free slot, unless we have filled the queue. Check this.
1650 */
1651 slot = sc->sc_currschedslot;
1652 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1653 A_f_cmd_free) == 0) {
1654 /*
1655 * no more free slot, no need to continue. freeze the queue
1656 * and requeue this command.
1657 */
1658 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1659 sc->sc_flags |= SCF_CHAN_NOSLOT;
1660 esiop_script_sync(sc,
1661 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1662 esiop_script_write(sc, sc->sc_semoffset,
1663 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1664 esiop_script_sync(sc,
1665 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1666 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1667 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1668 esiop_scsicmd_end(esiop_cmd);
1669 return;
1670 }
1671 /* OK, we can use this slot */
1672
1673 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1674 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1675 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1676 esiop_lun = esiop_target->esiop_lun[lun];
1677 /* if non-tagged command active, panic: this shouldn't happen */
1678 if (esiop_lun->active != NULL) {
1679 panic("esiop_start: tagged cmd while untagged running");
1680 }
1681 #ifdef DIAGNOSTIC
1682 /* sanity check the tag if needed */
1683 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1684 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1685 panic("esiop_start: tag not free");
1686 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1687 esiop_cmd->cmd_c.tag < 0) {
1688 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1689 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1690 panic("esiop_start: invalid tag id");
1691 }
1692 }
1693 #endif
1694 #ifdef SIOP_DEBUG_SCHED
1695 printf("using slot %d for DSA 0x%lx\n", slot,
1696 (u_long)esiop_cmd->cmd_c.dsa);
1697 #endif
1698 /* mark command as active */
1699 if (esiop_cmd->cmd_c.status == CMDST_READY)
1700 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1701 else
1702 panic("esiop_start: bad status");
1703 /* DSA table for reselect */
1704 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1705 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1706 /* DSA table for reselect */
1707 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1708 htole32(esiop_cmd->cmd_c.dsa);
1709 bus_dmamap_sync(sc->sc_c.sc_dmat,
1710 esiop_lun->lun_tagtbl->tblblk->blkmap,
1711 esiop_lun->lun_tagtbl->tbl_offset,
1712 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1713 } else {
1714 esiop_lun->active = esiop_cmd;
1715 esiop_script_write(sc,
1716 esiop_target->lun_table_offset +
1717 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1718 esiop_cmd->cmd_c.dsa);
1719 }
1720 /* scheduler slot: DSA */
1721 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1722 esiop_cmd->cmd_c.dsa);
1723 /* make sure SCRIPT processor will read valid data */
1724 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1725 /* handle timeout */
1726 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
 1727 /* start expire timer */
1728 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1729 if (timeout == 0)
1730 timeout = 1;
1731 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1732 timeout, esiop_timeout, esiop_cmd);
1733 }
1734 /* Signal script it has some work to do */
1735 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1736 SIOP_ISTAT, ISTAT_SIGP);
1737 /* update the current slot, and wait for IRQ */
1738 sc->sc_currschedslot++;
1739 if (sc->sc_currschedslot >= A_ncmd_slots)
1740 sc->sc_currschedslot = 0;
1741 return;
1742 }
1743
1744 void
1745 esiop_timeout(v)
1746 void *v;
1747 {
1748 struct esiop_cmd *esiop_cmd = v;
1749 struct esiop_softc *sc =
1750 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1751 int s;
1752 #ifdef SIOP_DEBUG
1753 int slot, slotdsa;
1754 #endif
1755
1756 s = splbio();
1757 esiop_table_sync(esiop_cmd,
1758 BUS_DMASYNC_POSTREAD |
1759 BUS_DMASYNC_POSTWRITE);
1760 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1761 #ifdef SIOP_DEBUG
1762 printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1763
1764 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1765 for (slot = 0; slot < A_ncmd_slots; slot++) {
1766 slotdsa = esiop_script_read(sc,
1767 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1768 if ((slotdsa & 0x01) == 0)
1769 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1770 }
1771 printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1772 printf("DSP 0x%lx DSA 0x%x\n",
1773 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1774 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1775 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1776 printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1777 #else
1778 printf("command timeout\n");
1779 #endif
1780 /* reset the scsi bus */
1781 siop_resetbus(&sc->sc_c);
1782
1783 /* deactivate callout */
1784 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1785 /*
1786 * mark the command as timed out and just return;
1787 * the bus reset will generate an interrupt,
1788 * which will be handled in esiop_intr()
1789 */
1790 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1791 splx(s);
1792 return;
1794 }
1795
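/*
 * esiop_dump_script: debug helper printing the on-board SCRIPTS code,
 * two 32-bit words per line; instructions whose top three bits are 110
 * (presumably memory moves) carry a third word, printed on the same line.
 */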
1796 void
1797 esiop_dump_script(sc)
1798 struct esiop_softc *sc;
1799 {
1800 int i;
1801 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1802 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1803 le32toh(sc->sc_c.sc_script[i]),
1804 le32toh(sc->sc_c.sc_script[i+1]));
1805 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1806 0xc0000000) {
1807 i++;
1808 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1809 }
1810 printf("\n");
1811 }
1812 }
1813
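/*
 * esiop_morecbd: grow the command descriptor pool.  Allocate a
 * page-sized DMA-safe block of SIOP_NCMDPB esiop_xfer tables plus the
 * matching esiop_cmd array, create the per-command DMA maps, and put
 * the new commands on the free list, growing adapt_openings to match.
 */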
1814 void
1815 esiop_morecbd(sc)
1816 struct esiop_softc *sc;
1817 {
1818 int error, i, s;
1819 bus_dma_segment_t seg;
1820 int rseg;
1821 struct esiop_cbd *newcbd;
1822 struct esiop_xfer *xfer;
1823 bus_addr_t dsa;
1824
1825 /* allocate a new list head */
1826 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1827 if (newcbd == NULL) {
1828 printf("%s: can't allocate memory for command descriptors "
1829 "head\n", sc->sc_c.sc_dev.dv_xname);
1830 return;
1831 }
1832
1833 /* allocate cmd list */
1834 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1835 M_DEVBUF, M_NOWAIT|M_ZERO);
1836 if (newcbd->cmds == NULL) {
1837 printf("%s: can't allocate memory for command descriptors\n",
1838 sc->sc_c.sc_dev.dv_xname);
1839 goto bad3;
1840 }
1841 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1842 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1843 if (error) {
1844 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1845 sc->sc_c.sc_dev.dv_xname, error);
1846 goto bad2;
1847 }
1848 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1849 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1850 if (error) {
1851 printf("%s: unable to map cbd DMA memory, error = %d\n",
1852 sc->sc_c.sc_dev.dv_xname, error);
1853 goto bad1;	/* free the DMA segment allocated above */
1854 }
1855 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1856 BUS_DMA_NOWAIT, &newcbd->xferdma);
1857 if (error) {
1858 printf("%s: unable to create cbd DMA map, error = %d\n",
1859 sc->sc_c.sc_dev.dv_xname, error);
1860 goto bad1;
1861 }
1862 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1863 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1864 if (error) {
1865 printf("%s: unable to load cbd DMA map, error = %d\n",
1866 sc->sc_c.sc_dev.dv_xname, error);
1867 goto bad0;
1868 }
1869 #ifdef DEBUG
1870 printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1871 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1872 #endif
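/* initialize the DMA maps and xfer tables of each command in the new block */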
1873 for (i = 0; i < SIOP_NCMDPB; i++) {
1874 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1875 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1876 &newcbd->cmds[i].cmd_c.dmamap_data);
1877 if (error) {
1878 printf("%s: unable to create data DMA map for cbd: "
1879 "error %d\n",
1880 sc->sc_c.sc_dev.dv_xname, error);
1881 goto bad0;
1882 }
1883 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1884 sizeof(struct scsipi_generic), 1,
1885 sizeof(struct scsipi_generic), 0,
1886 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1887 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1888 if (error) {
1889 printf("%s: unable to create cmd DMA map for cbd: error %d\n",
1890 sc->sc_c.sc_dev.dv_xname, error);
1891 goto bad0;
1892 }
1893 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1894 newcbd->cmds[i].esiop_cbdp = newcbd;
1895 xfer = &newcbd->xfers[i];
1896 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1897 memset(newcbd->cmds[i].cmd_tables, 0,
1898 sizeof(struct esiop_xfer));
1899 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1900 i * sizeof(struct esiop_xfer);
1901 newcbd->cmds[i].cmd_c.dsa = dsa;
1902 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
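/*
 * pre-fill the table entries that always point at fixed offsets inside
 * this command's siop_common_xfer: message out/in, extended message
 * header and data, and the status byte.
 */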
1903 xfer->siop_tables.t_msgout.count = htole32(1);
1904 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1905 xfer->siop_tables.t_msgin.count = htole32(1);
1906 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1907 offsetof(struct siop_common_xfer, msg_in));
1908 xfer->siop_tables.t_extmsgin.count = htole32(2);
1909 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1910 offsetof(struct siop_common_xfer, msg_in) + 1);
1911 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1912 offsetof(struct siop_common_xfer, msg_in) + 3);
1913 xfer->siop_tables.t_status.count = htole32(1);
1914 xfer->siop_tables.t_status.addr = htole32(dsa +
1915 offsetof(struct siop_common_xfer, status));
1916
1917 s = splbio();
1918 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1919 splx(s);
1920 #ifdef SIOP_DEBUG
1921 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1922 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1923 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1924 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1925 #endif
1926 }
1927 s = splbio();
1928 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1929 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1930 splx(s);
1931 return;
1932 bad0:
1933 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1934 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1935 bad1:
1936 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1937 bad2:
1938 free(newcbd->cmds, M_DEVBUF);
1939 bad3:
1940 free(newcbd, M_DEVBUF);
1941 return;
1942 }
1943
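/*
 * esiop_moretagtbl: grow the pool of tag DSA tables.  Allocate a
 * page-sized DMA-safe block holding ESIOP_NTPB tables of ESIOP_NTAG
 * 32-bit DSA entries each and queue them on free_tagtbl.
 */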
1944 void
1945 esiop_moretagtbl(sc)
1946 struct esiop_softc *sc;
1947 {
1948 int error, i, j, s;
1949 bus_dma_segment_t seg;
1950 int rseg;
1951 struct esiop_dsatblblk *newtblblk;
1952 struct esiop_dsatbl *newtbls;
1953 u_int32_t *tbls;
1954
1955 /* allocate a new list head */
1956 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1957 M_DEVBUF, M_NOWAIT|M_ZERO);
1958 if (newtblblk == NULL) {
1959 printf("%s: can't allocate memory for tag DSA table block\n",
1960 sc->sc_c.sc_dev.dv_xname);
1961 return;
1962 }
1963
1964 /* allocate tbl list */
1965 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1966 M_DEVBUF, M_NOWAIT|M_ZERO);
1967 if (newtbls == NULL) {
1968 printf("%s: can't allocate memory for tag DSA tables\n",
1969 sc->sc_c.sc_dev.dv_xname);
1970 goto bad3;
1971 }
1972 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1973 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1974 if (error) {
1975 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1976 sc->sc_c.sc_dev.dv_xname, error);
1977 goto bad2;
1978 }
1979 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1980 (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1981 if (error) {
1982 printf("%s: unable to map tbls DMA memory, error = %d\n",
1983 sc->sc_c.sc_dev.dv_xname, error);
1984 goto bad1;	/* free the DMA segment allocated above */
1985 }
1986 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1987 BUS_DMA_NOWAIT, &newtblblk->blkmap);
1988 if (error) {
1989 printf("%s: unable to create tbl DMA map, error = %d\n",
1990 sc->sc_c.sc_dev.dv_xname, error);
1991 goto bad1;
1992 }
1993 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
1994 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1995 if (error) {
1996 printf("%s: unable to load tbl DMA map, error = %d\n",
1997 sc->sc_c.sc_dev.dv_xname, error);
1998 goto bad0;
1999 }
2000 #ifdef DEBUG
2001 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2002 sc->sc_c.sc_dev.dv_xname,
2003 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2004 #endif
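/* carve the page into ESIOP_NTPB tag tables and queue them on the free list */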
2005 for (i = 0; i < ESIOP_NTPB; i++) {
2006 newtbls[i].tblblk = newtblblk;
2007 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2008 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2009 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2010 newtbls[i].tbl_offset;
2011 for (j = 0; j < ESIOP_NTAG; j++)
2012 newtbls[i].tbl[j] = htole32(j); /* match byte order of real DSA entries */
2013 s = splbio();
2014 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2015 splx(s);
2016 }
2017 s = splbio();
2018 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2019 splx(s);
2020 return;
2021 bad0:
2022 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2023 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2024 bad1:
2025 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2026 bad2:
2027 free(newtbls, M_DEVBUF);
2028 bad3:
2029 free(newtblblk, M_DEVBUF);
2030 return;
2031 }
2032
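/*
 * esiop_update_scntl3: transfer parameters for this target changed;
 * rewrite the per-target ID word (assumed to carry the scntl3/ID values
 * the script uses for select and reselect) at the head of the target's
 * lun table.
 */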
2033 void
2034 esiop_update_scntl3(sc, _siop_target)
2035 struct esiop_softc *sc;
2036 struct siop_common_target *_siop_target;
2037 {
2038 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2039 esiop_script_write(sc, esiop_target->lun_table_offset,
2040 esiop_target->target_c.id);
2041 }
2042
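/*
 * esiop_add_dev: set up a newly found lun for tagged commands: take a
 * tag DSA table from the free list (allocating more if needed) and
 * record it in the lun's slot of the script-visible lun table.  If no
 * table is available, disable tagged queuing for the whole target.
 */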
2043 void
2044 esiop_add_dev(sc, target, lun)
2045 struct esiop_softc *sc;
2046 int target;
2047 int lun;
2048 {
2049 struct esiop_target *esiop_target =
2050 (struct esiop_target *)sc->sc_c.targets[target];
2051 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2052
2053 /* we need a tag DSA table */
2054 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2055 if (esiop_lun->lun_tagtbl == NULL) {
2056 esiop_moretagtbl(sc);
2057 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2058 if (esiop_lun->lun_tagtbl == NULL) {
2059 /* no resources, run untagged */
2060 esiop_target->target_c.flags &= ~TARF_TAG;
2061 return;
2062 }
2063 }
2064 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2065 /* Update LUN DSA table */
2066 esiop_script_write(sc, esiop_target->lun_table_offset +
2067 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2068 esiop_lun->lun_tagtbl->tbl_dsa);
2069 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2070 }
2071
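/*
 * esiop_del_dev: a lun went away; free its per-lun software state.
 */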
2072 void
2073 esiop_del_dev(sc, target, lun)
2074 struct esiop_softc *sc;
2075 int target;
2076 int lun;
2077 {
2078 struct esiop_target *esiop_target;
2079 #ifdef SIOP_DEBUG
2080 printf("%s:%d:%d: free lun sw entry\n",
2081 sc->sc_c.sc_dev.dv_xname, target, lun);
2082 #endif
2083 if (sc->sc_c.targets[target] == NULL)
2084 return;
2085 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2086 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2087 esiop_target->esiop_lun[lun] = NULL;
2088 }
2089
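/*
 * esiop_target_register: allocate this target's lun table in the
 * script area, store its ID word, make the table reachable from the
 * script's target DSA table, and register any tag DSA tables already
 * attached to the luns.
 */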
2090 void
2091 esiop_target_register(sc, target)
2092 struct esiop_softc *sc;
2093 u_int32_t target;
2094 {
2095 struct esiop_target *esiop_target =
2096 (struct esiop_target *)sc->sc_c.targets[target];
2097 struct esiop_lun *esiop_lun;
2098 int lun;
2099
2100 /* get a DSA table for this target */
2101 esiop_target->lun_table_offset = sc->sc_free_offset;
2102 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2103 #ifdef SIOP_DEBUG
2104 printf("%s: lun table for target %d offset %d free offset %d\n",
2105 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2106 sc->sc_free_offset);
2107 #endif
2108 /* first 32 bits are the ID (for select) */
2109 esiop_script_write(sc, esiop_target->lun_table_offset,
2110 esiop_target->target_c.id);
2111 /* Record this table in the target DSA table */
2112 esiop_script_write(sc,
2113 sc->sc_target_table_offset + target,
2114 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2115 sc->sc_c.sc_scriptaddr);
2116 /* if we have a tag table, register it */
2117 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2118 esiop_lun = esiop_target->esiop_lun[lun];
2119 if (esiop_lun == NULL)
2120 continue;
2121 if (esiop_lun->lun_tagtbl)
2122 esiop_script_write(sc, esiop_target->lun_table_offset +
2123 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2124 esiop_lun->lun_tagtbl->tbl_dsa);
2125 }
2126 esiop_script_sync(sc,
2127 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2128 }
2129
2130 #ifdef SIOP_STATS
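/*
 * esiop_printstats: dump the interrupt statistics counters gathered
 * when SIOP_STATS is defined.
 */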
2131 void
2132 esiop_printstats()
2133 {
2134 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2135 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2136 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2137 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2138 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2139 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2140 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2141 }
2142 #endif
2143