1 /* $NetBSD: esiop.c,v 1.20 2003/07/03 11:12:32 drochner Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.20 2003/07/03 11:12:32 drochner Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
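/*
 * esiop_morecbd() below allocates command descriptors one PAGE_SIZE DMA
 * page at a time, so SIOP_NCMDPB is simply how many struct esiop_xfer
 * fit in such a page.
 */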
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 int esiop_scsicmd __P((struct scsipi_xfer *));
91 void esiop_scsipi_request __P((struct scsipi_channel *,
92 scsipi_adapter_req_t, void *));
93 void esiop_dump_script __P((struct esiop_softc *));
94 void esiop_morecbd __P((struct esiop_softc *));
95 void esiop_moretagtbl __P((struct esiop_softc *));
96 void siop_add_reselsw __P((struct esiop_softc *, int));
97 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
98
99 void esiop_update_scntl3 __P((struct esiop_softc *,
100 struct siop_common_target *));
101
102 #ifdef SIOP_STATS
103 static int esiop_stat_intr = 0;
104 static int esiop_stat_intr_shortxfer = 0;
105 static int esiop_stat_intr_sdp = 0;
106 static int esiop_stat_intr_done = 0;
107 static int esiop_stat_intr_xferdisc = 0;
108 static int esiop_stat_intr_lunresel = 0;
109 static int esiop_stat_intr_qfull = 0;
110 void esiop_printstats __P((void));
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
116 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
117 static __inline__ void
118 esiop_script_sync(sc, ops)
119 struct esiop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
128 static __inline__ u_int32_t
129 esiop_script_read(sc, offset)
130 struct esiop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
142 u_int32_t));
143 static __inline__ void
144 esiop_script_write(sc, offset, val)
145 struct esiop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
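/*
 * The helpers above hide where the SCRIPT actually lives: with on-chip
 * RAM (SF_CHIP_RAM) it is accessed through bus_space and needs no DMA
 * sync, otherwise it sits in host memory shared with the SCRIPTS
 * processor and esiop_script_sync() must bracket accesses.  Offsets are
 * in 32-bit words, hence the "offset * 4" in the bus_space case.
 */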
156
157 void
158 esiop_attach(sc)
159 struct esiop_softc *sc;
160 {
161 struct esiop_dsatbl *tagtbl_donering;
162
163 if (siop_common_attach(&sc->sc_c) != 0 )
164 return;
165
166 TAILQ_INIT(&sc->free_list);
167 TAILQ_INIT(&sc->cmds);
168 TAILQ_INIT(&sc->free_tagtbl);
169 TAILQ_INIT(&sc->tag_tblblk);
170 sc->sc_currschedslot = 0;
171 #ifdef SIOP_DEBUG
172 aprint_debug("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
173 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
174 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
175 #endif
176
177 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
178 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
179
180 /*
181 * get space for the CMD done slot. For this we use a tag table entry.
182 * It's the same size and allows us to not waste 3/4 of a page
183 */
184 #ifdef DIAGNOSTIC
185 if (ESIOP_NTAG != A_ndone_slots) {
186 aprint_error("%s: size of tag DSA table different from the done"
187 " ring\n", sc->sc_c.sc_dev.dv_xname);
188 return;
189 }
190 #endif
191 esiop_moretagtbl(sc);
192 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
193 if (tagtbl_donering == NULL) {
194 aprint_error("%s: no memory for command done ring\n",
195 sc->sc_c.sc_dev.dv_xname);
196 return;
197 }
198 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
199 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
200 sc->sc_done_offset = tagtbl_donering->tbl_offset;
201 sc->sc_done_slot = &tagtbl_donering->tbl[0];
202
203 /* Do a bus reset, so that devices fall back to narrow/async */
204 siop_resetbus(&sc->sc_c);
205 /*
206 	 * esiop_reset() will reset the chip, thus clearing pending interrupts
207 */
208 esiop_reset(sc);
209 #ifdef DUMP_SCRIPT
210 esiop_dump_script(sc);
211 #endif
212
213 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
214 }
215
216 void
217 esiop_reset(sc)
218 struct esiop_softc *sc;
219 {
220 int i, j;
221 u_int32_t addr;
222 u_int32_t msgin_addr, sem_addr;
223
224 siop_common_reset(&sc->sc_c);
225
226 /*
227 	 * we copy the script at the beginning of RAM. Then there are 4 bytes
228 	 * for messages in, and 4 bytes for the semaphore
229 */
230 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
231 msgin_addr =
232 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
233 sc->sc_free_offset += 1;
234 sc->sc_semoffset = sc->sc_free_offset;
235 sem_addr =
236 sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
237 sc->sc_free_offset += 1;
238 /* then we have the scheduler ring */
239 sc->sc_shedoffset = sc->sc_free_offset;
240 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
241 /* then the targets DSA table */
242 sc->sc_target_table_offset = sc->sc_free_offset;
243 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
244 /* copy and patch the script */
245 if (sc->sc_c.features & SF_CHIP_RAM) {
246 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
247 esiop_script,
248 sizeof(esiop_script) / sizeof(esiop_script[0]));
249 for (j = 0; j <
250 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
251 j++) {
252 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
253 E_tlq_offset_Used[j] * 4,
254 sizeof(struct siop_common_xfer));
255 }
256 for (j = 0; j <
257 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
258 j++) {
259 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
260 E_abs_msgin2_Used[j] * 4, msgin_addr);
261 }
262 for (j = 0; j <
263 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
264 j++) {
265 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
266 E_abs_sem_Used[j] * 4, sem_addr);
267 }
268
269 if (sc->sc_c.features & SF_CHIP_LED0) {
270 bus_space_write_region_4(sc->sc_c.sc_ramt,
271 sc->sc_c.sc_ramh,
272 Ent_led_on1, esiop_led_on,
273 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
274 bus_space_write_region_4(sc->sc_c.sc_ramt,
275 sc->sc_c.sc_ramh,
276 Ent_led_on2, esiop_led_on,
277 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
278 bus_space_write_region_4(sc->sc_c.sc_ramt,
279 sc->sc_c.sc_ramh,
280 Ent_led_off, esiop_led_off,
281 sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
282 }
283 } else {
284 for (j = 0;
285 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
286 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
287 }
288 for (j = 0; j <
289 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
290 j++) {
291 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
292 htole32(sizeof(struct siop_common_xfer));
293 }
294 for (j = 0; j <
295 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
296 j++) {
297 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
298 htole32(msgin_addr);
299 }
300 for (j = 0; j <
301 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
302 j++) {
303 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
304 htole32(sem_addr);
305 }
306
307 if (sc->sc_c.features & SF_CHIP_LED0) {
308 for (j = 0; j < (sizeof(esiop_led_on) /
309 sizeof(esiop_led_on[0])); j++)
310 sc->sc_c.sc_script[
311 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
312 ] = htole32(esiop_led_on[j]);
313 for (j = 0; j < (sizeof(esiop_led_on) /
314 sizeof(esiop_led_on[0])); j++)
315 sc->sc_c.sc_script[
316 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
317 ] = htole32(esiop_led_on[j]);
318 for (j = 0; j < (sizeof(esiop_led_off) /
319 sizeof(esiop_led_off[0])); j++)
320 sc->sc_c.sc_script[
321 Ent_led_off / sizeof(esiop_led_off[0]) + j
322 ] = htole32(esiop_led_off[j]);
323 }
324 }
325 /* get base of scheduler ring */
326 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
327 /* init scheduler */
328 for (i = 0; i < A_ncmd_slots; i++) {
329 esiop_script_write(sc,
330 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
331 }
332 sc->sc_currschedslot = 0;
333 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
334 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
335 /*
336 * 0x78000000 is a 'move data8 to reg'. data8 is the second
337 * octet, reg offset is the third.
338 */
339 esiop_script_write(sc, Ent_cmdr0 / 4,
340 0x78640000 | ((addr & 0x000000ff) << 8));
341 esiop_script_write(sc, Ent_cmdr1 / 4,
342 0x78650000 | ((addr & 0x0000ff00) ));
343 esiop_script_write(sc, Ent_cmdr2 / 4,
344 0x78660000 | ((addr & 0x00ff0000) >> 8));
345 esiop_script_write(sc, Ent_cmdr3 / 4,
346 0x78670000 | ((addr & 0xff000000) >> 16));
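	/*
	 * The four writes above patch the data8 operand of the 'move data8
	 * to reg' instructions at Ent_cmdr0..3 with one byte of the ring
	 * base address each; given the 0x64..0x67 register offsets they
	 * presumably let the SCRIPT rebuild SCRATCHD (the current slot
	 * pointer initialised just above) when it wraps around the ring.
	 */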
347 /* done ring */
348 for (i = 0; i < A_ndone_slots; i++)
349 sc->sc_done_slot[i] = 0;
350 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
351 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
352 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
353 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
354 sc->sc_currdoneslot = 0;
355 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
356 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
357 esiop_script_write(sc, Ent_doner0 / 4,
358 0x786c0000 | ((addr & 0x000000ff) << 8));
359 esiop_script_write(sc, Ent_doner1 / 4,
360 0x786d0000 | ((addr & 0x0000ff00) ));
361 esiop_script_write(sc, Ent_doner2 / 4,
362 0x786e0000 | ((addr & 0x00ff0000) >> 8));
363 esiop_script_write(sc, Ent_doner3 / 4,
364 0x786f0000 | ((addr & 0xff000000) >> 16));
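	/*
	 * Same scheme for the done ring: sc_done_slot is a host-memory ring
	 * of A_ndone_slots 32-bit entries in which the SCRIPT posts the
	 * T/L/Q word of completed commands (see esiop_checkdone()), with
	 * SCRATCHE+2 used as slot index and Ent_doner0..3 patched with the
	 * ring base, presumably for SCRATCHF (offsets 0x6c..0x6f).
	 */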
365
366 /* set flags */
367 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
368 /* write pointer of base of target DSA table */
369 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
370 sc->sc_c.sc_scriptaddr;
371 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
372 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
373 ((addr & 0x000000ff) << 8));
374 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
375 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
376 ((addr & 0x0000ff00) ));
377 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
378 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
379 ((addr & 0x00ff0000) >> 8));
380 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
381 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
382 ((addr & 0xff000000) >> 16));
383 #ifdef SIOP_DEBUG
384 printf("%s: target table offset %d free offset %d\n",
385 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
386 sc->sc_free_offset);
387 #endif
388
389 /* register existing targets */
390 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
391 if (sc->sc_c.targets[i])
392 esiop_target_register(sc, i);
393 }
394 /* start script */
395 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
396 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
397 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
398 }
399 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
400 sc->sc_c.sc_scriptaddr + Ent_reselect);
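	/*
	 * Writing DSP starts the SCRIPTS processor at Ent_reselect, i.e.
	 * waiting for a (re)selection or for an ISTAT_SIGP kick from
	 * esiop_start().
	 */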
401 }
402
403 #if 0
404 #define CALL_SCRIPT(ent) do {\
405 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
406 esiop_cmd->cmd_c.dsa, \
407 sc->sc_c.sc_scriptaddr + ent); \
408 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
409 } while (0)
410 #else
411 #define CALL_SCRIPT(ent) do {\
412 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
413 } while (0)
414 #endif
415
416 int
417 esiop_intr(v)
418 void *v;
419 {
420 struct esiop_softc *sc = v;
421 struct esiop_target *esiop_target;
422 struct esiop_cmd *esiop_cmd;
423 struct esiop_lun *esiop_lun;
424 struct scsipi_xfer *xs;
425 int istat, sist, sstat1, dstat;
426 u_int32_t irqcode;
427 int need_reset = 0;
428 int offset, target, lun, tag;
429 u_int32_t tflags;
430 u_int32_t addr;
431 int freetarget = 0;
432 int slot;
433 int retval = 0;
434
435 again:
436 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
437 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
438 return retval;
439 }
440 retval = 1;
441 INCSTAT(esiop_stat_intr);
442 esiop_checkdone(sc);
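	/*
	 * ISTAT_INTF ("interrupt on the fly") is presumably how the SCRIPT
	 * signals completions posted to the done ring without stopping;
	 * esiop_checkdone() above has already collected them, so just
	 * acknowledge the bit and re-read ISTAT.
	 */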
443 if (istat & ISTAT_INTF) {
444 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
445 SIOP_ISTAT, ISTAT_INTF);
446 goto again;
447 }
448
449 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
450 (ISTAT_DIP | ISTAT_ABRT)) {
451 /* clear abort */
452 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
453 SIOP_ISTAT, 0);
454 }
455
456 /* get CMD from T/L/Q */
457 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
458 SIOP_SCRATCHC);
459 #ifdef SIOP_DEBUG_INTR
460 printf("interrupt, istat=0x%x tflags=0x%x "
461 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
462 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
463 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
464 SIOP_DSP) -
465 sc->sc_c.sc_scriptaddr));
466 #endif
467 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
468 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
469 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
470 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
471 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
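	/*
	 * SCRATCHC is shared with the SCRIPT: byte 0 holds the A_f_c_*
	 * validity flags, bytes 1..3 the current target, lun and tag.  This
	 * is the same T/L/Q layout used for the tlq word built in
	 * esiop_scsipi_request() and for the done-ring entries.
	 */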
472
473 if (target >= 0 && lun >= 0) {
474 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
475 if (esiop_target == NULL) {
476 printf("esiop_target (target %d) not valid\n", target);
477 goto none;
478 }
479 esiop_lun = esiop_target->esiop_lun[lun];
480 if (esiop_lun == NULL) {
481 printf("esiop_lun (target %d lun %d) not valid\n",
482 target, lun);
483 goto none;
484 }
485 esiop_cmd =
486 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
487 if (esiop_cmd == NULL) {
488 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
489 target, lun, tag);
490 goto none;
491 }
492 xs = esiop_cmd->cmd_c.xs;
493 #ifdef DIAGNOSTIC
494 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
495 printf("esiop_cmd (target %d lun %d) "
496 "not active (%d)\n", target, lun,
497 esiop_cmd->cmd_c.status);
498 goto none;
499 }
500 #endif
501 esiop_table_sync(esiop_cmd,
502 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
503 } else {
504 none:
505 xs = NULL;
506 esiop_target = NULL;
507 esiop_lun = NULL;
508 esiop_cmd = NULL;
509 }
510 if (istat & ISTAT_DIP) {
511 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
512 SIOP_DSTAT);
513 if (dstat & DSTAT_ABRT) {
514 /* was probably generated by a bus reset IOCTL */
515 if ((dstat & DSTAT_DFE) == 0)
516 siop_clearfifo(&sc->sc_c);
517 goto reset;
518 }
519 if (dstat & DSTAT_SSI) {
520 			printf("single step dsp 0x%08x dsa 0x%08x\n",
521 (int)(bus_space_read_4(sc->sc_c.sc_rt,
522 sc->sc_c.sc_rh, SIOP_DSP) -
523 sc->sc_c.sc_scriptaddr),
524 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
525 SIOP_DSA));
526 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
527 (istat & ISTAT_SIP) == 0) {
528 bus_space_write_1(sc->sc_c.sc_rt,
529 sc->sc_c.sc_rh, SIOP_DCNTL,
530 bus_space_read_1(sc->sc_c.sc_rt,
531 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
532 }
533 return 1;
534 }
535
536 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
537 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
538 if (dstat & DSTAT_IID)
539 printf(" Illegal instruction");
540 if (dstat & DSTAT_BF)
541 printf(" bus fault");
542 if (dstat & DSTAT_MDPE)
543 printf(" parity");
544 if (dstat & DSTAT_DFE)
545 printf(" DMA fifo empty");
546 else
547 siop_clearfifo(&sc->sc_c);
548 printf(", DSP=0x%x DSA=0x%x: ",
549 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
550 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
551 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
552 if (esiop_cmd)
553 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
554 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
555 le32toh(esiop_cmd->cmd_tables->status));
556 else
557 printf(" current T/L/Q invalid\n");
558 need_reset = 1;
559 }
560 }
561 if (istat & ISTAT_SIP) {
562 if (istat & ISTAT_DIP)
563 delay(10);
564 /*
565 		 * Can't read sist0 & sist1 independently, or we have to
566 		 * insert a delay
567 */
568 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
569 SIOP_SIST0);
570 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
571 SIOP_SSTAT1);
572 #ifdef SIOP_DEBUG_INTR
573 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
574 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
575 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
576 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
577 SIOP_DSP) -
578 sc->sc_c.sc_scriptaddr));
579 #endif
580 if (sist & SIST0_RST) {
581 esiop_handle_reset(sc);
582 /* no table to flush here */
583 return 1;
584 }
585 if (sist & SIST0_SGE) {
586 if (esiop_cmd)
587 scsipi_printaddr(xs->xs_periph);
588 else
589 printf("%s:", sc->sc_c.sc_dev.dv_xname);
590 printf("scsi gross error\n");
591 if (esiop_target)
592 esiop_target->target_c.flags &= ~TARF_DT;
593 #ifdef DEBUG
594 printf("DSA=0x%x DSP=0x%lx\n",
595 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
596 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
597 SIOP_DSP) -
598 sc->sc_c.sc_scriptaddr));
599 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
600 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SDID),
601 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL3),
602 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SXFER),
603 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL4));
604
605 #endif
606 goto reset;
607 }
608 if ((sist & SIST0_MA) && need_reset == 0) {
609 if (esiop_cmd) {
610 int scratchc0;
611 dstat = bus_space_read_1(sc->sc_c.sc_rt,
612 sc->sc_c.sc_rh, SIOP_DSTAT);
613 /*
614 				 * first restore DSA, in case we were in an S/G
615 * operation.
616 */
617 bus_space_write_4(sc->sc_c.sc_rt,
618 sc->sc_c.sc_rh,
619 SIOP_DSA, esiop_cmd->cmd_c.dsa);
620 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
621 sc->sc_c.sc_rh, SIOP_SCRATCHC);
622 switch (sstat1 & SSTAT1_PHASE_MASK) {
623 case SSTAT1_PHASE_STATUS:
624 /*
625 * previous phase may be aborted for any reason
626 				 * (for example, the target has less data to
627 * transfer than requested). Just go to status
628 * and the command should terminate.
629 */
630 INCSTAT(esiop_stat_intr_shortxfer);
631 if ((dstat & DSTAT_DFE) == 0)
632 siop_clearfifo(&sc->sc_c);
633 /* no table to flush here */
634 CALL_SCRIPT(Ent_status);
635 return 1;
636 case SSTAT1_PHASE_MSGIN:
637 /*
638 * target may be ready to disconnect
639 * Save data pointers just in case.
640 */
641 INCSTAT(esiop_stat_intr_xferdisc);
642 if (scratchc0 & A_f_c_data)
643 siop_sdp(&esiop_cmd->cmd_c);
644 else if ((dstat & DSTAT_DFE) == 0)
645 siop_clearfifo(&sc->sc_c);
646 bus_space_write_1(sc->sc_c.sc_rt,
647 sc->sc_c.sc_rh, SIOP_SCRATCHC,
648 scratchc0 & ~A_f_c_data);
649 esiop_table_sync(esiop_cmd,
650 BUS_DMASYNC_PREREAD |
651 BUS_DMASYNC_PREWRITE);
652 CALL_SCRIPT(Ent_msgin);
653 return 1;
654 }
655 printf("%s: unexpected phase mismatch %d\n",
656 sc->sc_c.sc_dev.dv_xname,
657 sstat1 & SSTAT1_PHASE_MASK);
658 } else {
659 printf("%s: phase mismatch without command\n",
660 sc->sc_c.sc_dev.dv_xname);
661 }
662 need_reset = 1;
663 }
664 if (sist & SIST0_PAR) {
665 /* parity error, reset */
666 if (esiop_cmd)
667 scsipi_printaddr(xs->xs_periph);
668 else
669 printf("%s:", sc->sc_c.sc_dev.dv_xname);
670 printf("parity error\n");
671 if (esiop_target)
672 esiop_target->target_c.flags &= ~TARF_DT;
673 goto reset;
674 }
675 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
676 /*
677 			 * selection timeout, assume there's no device here.
678 			 * We also have to update the ring pointer ourselves.
679 */
680 slot = bus_space_read_1(sc->sc_c.sc_rt,
681 sc->sc_c.sc_rh, SIOP_SCRATCHE);
682 esiop_script_sync(sc,
683 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
684 #ifdef SIOP_DEBUG_SCHED
685 printf("sel timeout target %d, slot %d\n", target, slot);
686 #endif
687 /*
688 * mark this slot as free, and advance to next slot
689 */
690 esiop_script_write(sc,
691 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
692 A_f_cmd_free);
693 addr = bus_space_read_4(sc->sc_c.sc_rt,
694 sc->sc_c.sc_rh, SIOP_SCRATCHD);
695 if (slot < (A_ncmd_slots - 1)) {
696 bus_space_write_1(sc->sc_c.sc_rt,
697 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
698 addr = addr + sizeof(struct esiop_slot);
699 } else {
700 bus_space_write_1(sc->sc_c.sc_rt,
701 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
702 addr = sc->sc_c.sc_scriptaddr +
703 sc->sc_shedoffset * sizeof(u_int32_t);
704 }
705 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
706 SIOP_SCRATCHD, addr);
707 esiop_script_sync(sc,
708 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
709 if (esiop_cmd) {
710 esiop_cmd->cmd_c.status = CMDST_DONE;
711 xs->error = XS_SELTIMEOUT;
712 freetarget = 1;
713 goto end;
714 } else {
715 printf("%s: selection timeout without "
716 "command, target %d (sdid 0x%x), "
717 "slot %d\n",
718 sc->sc_c.sc_dev.dv_xname, target,
719 bus_space_read_1(sc->sc_c.sc_rt,
720 sc->sc_c.sc_rh, SIOP_SDID), slot);
721 need_reset = 1;
722 }
723 }
724 if (sist & SIST0_UDC) {
725 /*
726 * unexpected disconnect. Usually the target signals
727 * a fatal condition this way. Attempt to get sense.
728 */
729 if (esiop_cmd) {
730 esiop_cmd->cmd_tables->status =
731 htole32(SCSI_CHECK);
732 goto end;
733 }
734 printf("%s: unexpected disconnect without "
735 "command\n", sc->sc_c.sc_dev.dv_xname);
736 goto reset;
737 }
738 if (sist & (SIST1_SBMC << 8)) {
739 /* SCSI bus mode change */
740 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
741 goto reset;
742 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
743 /*
744 * we have a script interrupt, it will
745 * restart the script.
746 */
747 goto scintr;
748 }
749 /*
750 			 * else we have to restart it ourselves, at the
751 * interrupted instruction.
752 */
753 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
754 SIOP_DSP,
755 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
756 SIOP_DSP) - 8);
757 return 1;
758 }
759 		/* Else it's an unhandled exception (for now). */
760 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
761 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
762 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
763 SIOP_SSTAT1),
764 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
765 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
766 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
767 if (esiop_cmd) {
768 esiop_cmd->cmd_c.status = CMDST_DONE;
769 xs->error = XS_SELTIMEOUT;
770 goto end;
771 }
772 need_reset = 1;
773 }
774 if (need_reset) {
775 reset:
776 /* fatal error, reset the bus */
777 siop_resetbus(&sc->sc_c);
778 /* no table to flush here */
779 return 1;
780 }
781
782 scintr:
783 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
784 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
785 SIOP_DSPS);
786 #ifdef SIOP_DEBUG_INTR
787 printf("script interrupt 0x%x\n", irqcode);
788 #endif
789 /*
790 		 * a missing or inactive command is only valid for a
791 		 * reselect interrupt
792 */
793 if ((irqcode & 0x80) == 0) {
794 if (esiop_cmd == NULL) {
795 printf(
796 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
797 sc->sc_c.sc_dev.dv_xname, irqcode);
798 goto reset;
799 }
800 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
801 printf("%s: command with invalid status "
802 "(IRQ code 0x%x current status %d) !\n",
803 sc->sc_c.sc_dev.dv_xname,
804 irqcode, esiop_cmd->cmd_c.status);
805 xs = NULL;
806 }
807 }
808 switch(irqcode) {
809 case A_int_err:
810 printf("error, DSP=0x%x\n",
811 (int)(bus_space_read_4(sc->sc_c.sc_rt,
812 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
813 if (xs) {
814 xs->error = XS_SELTIMEOUT;
815 goto end;
816 } else {
817 goto reset;
818 }
819 case A_int_msgin:
820 {
821 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
822 sc->sc_c.sc_rh, SIOP_SFBR);
823 if (msgin == MSG_MESSAGE_REJECT) {
824 int msg, extmsg;
825 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
826 /*
827 					 * message was part of an identify +
828 * something else. Identify shouldn't
829 * have been rejected.
830 */
831 msg =
832 esiop_cmd->cmd_tables->msg_out[1];
833 extmsg =
834 esiop_cmd->cmd_tables->msg_out[3];
835 } else {
836 msg =
837 esiop_cmd->cmd_tables->msg_out[0];
838 extmsg =
839 esiop_cmd->cmd_tables->msg_out[2];
840 }
841 if (msg == MSG_MESSAGE_REJECT) {
842 /* MSG_REJECT for a MSG_REJECT !*/
843 if (xs)
844 scsipi_printaddr(xs->xs_periph);
845 else
846 printf("%s: ",
847 sc->sc_c.sc_dev.dv_xname);
848 printf("our reject message was "
849 "rejected\n");
850 goto reset;
851 }
852 if (msg == MSG_EXTENDED &&
853 extmsg == MSG_EXT_WDTR) {
854 /* WDTR rejected, initiate sync */
855 if ((esiop_target->target_c.flags &
856 TARF_SYNC) == 0) {
857 esiop_target->target_c.status =
858 TARST_OK;
859 siop_update_xfer_mode(&sc->sc_c,
860 target);
861 /* no table to flush here */
862 CALL_SCRIPT(Ent_msgin_ack);
863 return 1;
864 }
865 esiop_target->target_c.status =
866 TARST_SYNC_NEG;
867 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
868 sc->sc_c.st_minsync,
869 sc->sc_c.maxoff);
870 esiop_table_sync(esiop_cmd,
871 BUS_DMASYNC_PREREAD |
872 BUS_DMASYNC_PREWRITE);
873 CALL_SCRIPT(Ent_send_msgout);
874 return 1;
875 } else if (msg == MSG_EXTENDED &&
876 extmsg == MSG_EXT_SDTR) {
877 /* sync rejected */
878 esiop_target->target_c.offset = 0;
879 esiop_target->target_c.period = 0;
880 esiop_target->target_c.status =
881 TARST_OK;
882 siop_update_xfer_mode(&sc->sc_c,
883 target);
884 /* no table to flush here */
885 CALL_SCRIPT(Ent_msgin_ack);
886 return 1;
887 } else if (msg == MSG_EXTENDED &&
888 extmsg == MSG_EXT_PPR) {
889 /* PPR rejected */
890 esiop_target->target_c.offset = 0;
891 esiop_target->target_c.period = 0;
892 esiop_target->target_c.status =
893 TARST_OK;
894 siop_update_xfer_mode(&sc->sc_c,
895 target);
896 /* no table to flush here */
897 CALL_SCRIPT(Ent_msgin_ack);
898 return 1;
899 } else if (msg == MSG_SIMPLE_Q_TAG ||
900 msg == MSG_HEAD_OF_Q_TAG ||
901 msg == MSG_ORDERED_Q_TAG) {
902 if (esiop_handle_qtag_reject(
903 esiop_cmd) == -1)
904 goto reset;
905 CALL_SCRIPT(Ent_msgin_ack);
906 return 1;
907 }
908 if (xs)
909 scsipi_printaddr(xs->xs_periph);
910 else
911 printf("%s: ",
912 sc->sc_c.sc_dev.dv_xname);
913 if (msg == MSG_EXTENDED) {
914 printf("scsi message reject, extended "
915 "message sent was 0x%x\n", extmsg);
916 } else {
917 printf("scsi message reject, message "
918 "sent was 0x%x\n", msg);
919 }
920 /* no table to flush here */
921 CALL_SCRIPT(Ent_msgin_ack);
922 return 1;
923 }
924 if (xs)
925 scsipi_printaddr(xs->xs_periph);
926 else
927 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
928 printf("unhandled message 0x%x\n",
929 esiop_cmd->cmd_tables->msg_in[0]);
930 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
931 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
932 esiop_table_sync(esiop_cmd,
933 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
934 CALL_SCRIPT(Ent_send_msgout);
935 return 1;
936 }
937 case A_int_extmsgin:
938 #ifdef SIOP_DEBUG_INTR
939 printf("extended message: msg 0x%x len %d\n",
940 esiop_cmd->cmd_tables->msg_in[2],
941 esiop_cmd->cmd_tables->msg_in[1]);
942 #endif
943 if (esiop_cmd->cmd_tables->msg_in[1] >
944 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
945 printf("%s: extended message too big (%d)\n",
946 sc->sc_c.sc_dev.dv_xname,
947 esiop_cmd->cmd_tables->msg_in[1]);
948 esiop_cmd->cmd_tables->t_extmsgdata.count =
949 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
950 esiop_table_sync(esiop_cmd,
951 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
952 CALL_SCRIPT(Ent_get_extmsgdata);
953 return 1;
954 case A_int_extmsgdata:
955 #ifdef SIOP_DEBUG_INTR
956 {
957 int i;
958 printf("extended message: 0x%x, data:",
959 esiop_cmd->cmd_tables->msg_in[2]);
960 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
961 i++)
962 printf(" 0x%x",
963 esiop_cmd->cmd_tables->msg_in[i]);
964 printf("\n");
965 }
966 #endif
967 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
968 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
969 case SIOP_NEG_MSGOUT:
970 esiop_update_scntl3(sc,
971 esiop_cmd->cmd_c.siop_target);
972 esiop_table_sync(esiop_cmd,
973 BUS_DMASYNC_PREREAD |
974 BUS_DMASYNC_PREWRITE);
975 CALL_SCRIPT(Ent_send_msgout);
976 return 1;
977 case SIOP_NEG_ACK:
978 esiop_update_scntl3(sc,
979 esiop_cmd->cmd_c.siop_target);
980 CALL_SCRIPT(Ent_msgin_ack);
981 return 1;
982 default:
983 panic("invalid retval from "
984 					    "siop_ppr_neg()");
985 }
986 return 1;
987 }
988 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
989 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
990 case SIOP_NEG_MSGOUT:
991 esiop_update_scntl3(sc,
992 esiop_cmd->cmd_c.siop_target);
993 esiop_table_sync(esiop_cmd,
994 BUS_DMASYNC_PREREAD |
995 BUS_DMASYNC_PREWRITE);
996 CALL_SCRIPT(Ent_send_msgout);
997 return 1;
998 case SIOP_NEG_ACK:
999 esiop_update_scntl3(sc,
1000 esiop_cmd->cmd_c.siop_target);
1001 CALL_SCRIPT(Ent_msgin_ack);
1002 return 1;
1003 default:
1004 panic("invalid retval from "
1005 "siop_wdtr_neg()");
1006 }
1007 return 1;
1008 }
1009 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1010 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1011 case SIOP_NEG_MSGOUT:
1012 esiop_update_scntl3(sc,
1013 esiop_cmd->cmd_c.siop_target);
1014 esiop_table_sync(esiop_cmd,
1015 BUS_DMASYNC_PREREAD |
1016 BUS_DMASYNC_PREWRITE);
1017 CALL_SCRIPT(Ent_send_msgout);
1018 return 1;
1019 case SIOP_NEG_ACK:
1020 esiop_update_scntl3(sc,
1021 esiop_cmd->cmd_c.siop_target);
1022 CALL_SCRIPT(Ent_msgin_ack);
1023 return 1;
1024 default:
1025 panic("invalid retval from "
1026 					    "siop_sdtr_neg()");
1027 }
1028 return 1;
1029 }
1030 /* send a message reject */
1031 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1032 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1033 esiop_table_sync(esiop_cmd,
1034 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1035 CALL_SCRIPT(Ent_send_msgout);
1036 return 1;
1037 case A_int_disc:
1038 INCSTAT(esiop_stat_intr_sdp);
1039 offset = bus_space_read_1(sc->sc_c.sc_rt,
1040 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1041 #ifdef SIOP_DEBUG_DR
1042 printf("disconnect offset %d\n", offset);
1043 #endif
1044 if (offset > SIOP_NSG) {
1045 printf("%s: bad offset for disconnect (%d)\n",
1046 sc->sc_c.sc_dev.dv_xname, offset);
1047 goto reset;
1048 }
1049 /*
1050 * offset == SIOP_NSG may be a valid condition if
1051 			 * we get an sdp when the xfer is done.
1052 * Don't call memmove in this case.
1053 */
1054 if (offset < SIOP_NSG) {
1055 memmove(&esiop_cmd->cmd_tables->data[0],
1056 &esiop_cmd->cmd_tables->data[offset],
1057 (SIOP_NSG - offset) * sizeof(scr_table_t));
1058 esiop_table_sync(esiop_cmd,
1059 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1060 }
1061 CALL_SCRIPT(Ent_script_sched);
1062 return 1;
1063 case A_int_resfail:
1064 printf("reselect failed\n");
1065 CALL_SCRIPT(Ent_script_sched);
1066 return 1;
1067 case A_int_done:
1068 if (xs == NULL) {
1069 printf("%s: done without command\n",
1070 sc->sc_c.sc_dev.dv_xname);
1071 CALL_SCRIPT(Ent_script_sched);
1072 return 1;
1073 }
1074 #ifdef SIOP_DEBUG_INTR
1075 printf("done, DSA=0x%lx target id 0x%x last msg "
1076 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1077 le32toh(esiop_cmd->cmd_tables->id),
1078 esiop_cmd->cmd_tables->msg_in[0],
1079 le32toh(esiop_cmd->cmd_tables->status));
1080 #endif
1081 INCSTAT(esiop_stat_intr_done);
1082 esiop_cmd->cmd_c.status = CMDST_DONE;
1083 goto end;
1084 default:
1085 printf("unknown irqcode %x\n", irqcode);
1086 if (xs) {
1087 xs->error = XS_SELTIMEOUT;
1088 goto end;
1089 }
1090 goto reset;
1091 }
1092 return 1;
1093 }
1094 	/* We just shouldn't get there */
1095 panic("siop_intr: I shouldn't be there !");
1096
1097 end:
1098 /*
1099 	 * restart the script now if the command completed properly.
1100 	 * Otherwise wait for esiop_scsicmd_end(), we may need to clean up the
1101 * queue
1102 */
1103 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1104 #ifdef SIOP_DEBUG_INTR
1105 printf("esiop_intr end: status %d\n", xs->status);
1106 #endif
1107 if (tag >= 0)
1108 esiop_lun->tactive[tag] = NULL;
1109 else
1110 esiop_lun->active = NULL;
1111 esiop_scsicmd_end(esiop_cmd);
1112 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1113 esiop_del_dev(sc, target, lun);
1114 CALL_SCRIPT(Ent_script_sched);
1115 return 1;
1116 }
1117
1118 void
1119 esiop_scsicmd_end(esiop_cmd)
1120 struct esiop_cmd *esiop_cmd;
1121 {
1122 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1123 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1124
1125 switch(xs->status) {
1126 case SCSI_OK:
1127 xs->error = XS_NOERROR;
1128 break;
1129 case SCSI_BUSY:
1130 xs->error = XS_BUSY;
1131 break;
1132 case SCSI_CHECK:
1133 xs->error = XS_BUSY;
1134 /* remove commands in the queue and scheduler */
1135 esiop_unqueue(sc, xs->xs_periph->periph_target,
1136 xs->xs_periph->periph_lun);
1137 break;
1138 case SCSI_QUEUE_FULL:
1139 INCSTAT(esiop_stat_intr_qfull);
1140 #ifdef SIOP_DEBUG
1141 printf("%s:%d:%d: queue full (tag %d)\n",
1142 sc->sc_c.sc_dev.dv_xname,
1143 xs->xs_periph->periph_target,
1144 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1145 #endif
1146 xs->error = XS_BUSY;
1147 break;
1148 case SCSI_SIOP_NOCHECK:
1149 /*
1150 * don't check status, xs->error is already valid
1151 */
1152 break;
1153 case SCSI_SIOP_NOSTATUS:
1154 /*
1155 * the status byte was not updated, cmd was
1156 * aborted
1157 */
1158 xs->error = XS_SELTIMEOUT;
1159 break;
1160 default:
1161 scsipi_printaddr(xs->xs_periph);
1162 printf("invalid status code %d\n", xs->status);
1163 xs->error = XS_DRIVER_STUFFUP;
1164 }
1165 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1166 bus_dmamap_sync(sc->sc_c.sc_dmat,
1167 esiop_cmd->cmd_c.dmamap_data, 0,
1168 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1169 (xs->xs_control & XS_CTL_DATA_IN) ?
1170 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1171 bus_dmamap_unload(sc->sc_c.sc_dmat,
1172 esiop_cmd->cmd_c.dmamap_data);
1173 }
1174 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1175 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1176 esiop_cmd->cmd_c.status = CMDST_FREE;
1177 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1178 xs->resid = 0;
1179 scsipi_done (xs);
1180 }
1181
1182 void
1183 esiop_checkdone(sc)
1184 struct esiop_softc *sc;
1185 {
1186 int target, lun, tag;
1187 struct esiop_target *esiop_target;
1188 struct esiop_lun *esiop_lun;
1189 struct esiop_cmd *esiop_cmd;
1190 u_int32_t slot;
1191 int needsync = 0;
1192 int status;
1193 u_int32_t sem;
1194
1195 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1196 sem = esiop_script_read(sc, sc->sc_semoffset);
1197 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1198 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1199 /*
1200 		 * at least one command has been started,
1201 * so we should have free slots now
1202 */
1203 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1204 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1205 }
1206 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
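	/*
	 * The semaphore word lives in SCRIPT memory next to the msgin byte
	 * (see esiop_reset()); the SCRIPT presumably sets A_sem_done when it
	 * posts to the done ring and A_sem_start when it picks up a new
	 * scheduler slot, which is what the two tests here and above rely on.
	 */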
1207
1208 if ((sem & A_sem_done) == 0) {
1209 /* no pending done command */
1210 return;
1211 }
1212
1213 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1214 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
1215 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1216 next:
1217 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1218 if (needsync)
1219 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1220 sc->sc_done_offset,
1221 A_ndone_slots * sizeof(u_int32_t),
1222 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1223 return;
1224 }
1225
1226 needsync = 1;
1227
1228 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1229 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1230 sc->sc_currdoneslot += 1;
1231 if (sc->sc_currdoneslot == A_ndone_slots)
1232 sc->sc_currdoneslot = 0;
1233
1234 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1235 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1236 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1237
1238 esiop_target = (target >= 0) ?
1239 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1240 if (esiop_target == NULL) {
1241 printf("esiop_target (target %d) not valid\n", target);
1242 goto next;
1243 }
1244 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1245 if (esiop_lun == NULL) {
1246 printf("esiop_lun (target %d lun %d) not valid\n",
1247 target, lun);
1248 goto next;
1249 }
1250 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1251 if (esiop_cmd == NULL) {
1252 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1253 target, lun, tag);
1254 goto next;
1255 }
1256
1257 esiop_table_sync(esiop_cmd,
1258 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1259 status = le32toh(esiop_cmd->cmd_tables->status);
1260 #ifdef DIAGNOSTIC
1261 if (status != SCSI_OK) {
1262 printf("command for T/L/Q %d/%d/%d status %d\n",
1263 target, lun, tag, status);
1264 goto next;
1265 }
1266
1267 #endif
1268 /* Ok, this command has been handled */
1269 esiop_cmd->cmd_c.xs->status = status;
1270 if (tag >= 0)
1271 esiop_lun->tactive[tag] = NULL;
1272 else
1273 esiop_lun->active = NULL;
1274 esiop_scsicmd_end(esiop_cmd);
1275 goto next;
1276 }
1277
1278 void
1279 esiop_unqueue(sc, target, lun)
1280 struct esiop_softc *sc;
1281 int target;
1282 int lun;
1283 {
1284 int slot, tag;
1285 u_int32_t slotdsa;
1286 struct esiop_cmd *esiop_cmd;
1287 struct esiop_lun *esiop_lun =
1288 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1289
1290 /* first make sure to read valid data */
1291 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1292
1293 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1294 /* look for commands in the scheduler, not yet started */
1295 if (esiop_lun->tactive[tag] == NULL)
1296 continue;
1297 esiop_cmd = esiop_lun->tactive[tag];
1298 for (slot = 0; slot < A_ncmd_slots; slot++) {
1299 slotdsa = esiop_script_read(sc,
1300 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1301 /* if the slot has any flag, it won't match the DSA */
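			/*
			 * The DSAs are word-aligned, so the scheduler can
			 * keep its A_f_cmd_* flags in the low bits; see the
			 * "slotdsa & 0x01" check in esiop_timeout().
			 */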
1302 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1303 /* Mark this slot as ignore */
1304 esiop_script_write(sc,
1305 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1306 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1307 /* ask to requeue */
1308 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1309 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1310 esiop_lun->tactive[tag] = NULL;
1311 esiop_scsicmd_end(esiop_cmd);
1312 break;
1313 }
1314 }
1315 }
1316 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1317 }
1318
1319 /*
1320 * handle a rejected queue tag message: the command will run untagged,
1321  * so we have to adjust the reselect script.
1322 */
1323
1324
1325 int
1326 esiop_handle_qtag_reject(esiop_cmd)
1327 struct esiop_cmd *esiop_cmd;
1328 {
1329 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1330 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1331 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1332 int tag = esiop_cmd->cmd_tables->msg_out[2];
1333 struct esiop_target *esiop_target =
1334 (struct esiop_target*)sc->sc_c.targets[target];
1335 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1336
1337 #ifdef SIOP_DEBUG
1338 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1339 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1340 esiop_cmd->cmd_c.status);
1341 #endif
1342
1343 if (esiop_lun->active != NULL) {
1344 printf("%s: untagged command already running for target %d "
1345 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1346 target, lun, esiop_lun->active->cmd_c.status);
1347 return -1;
1348 }
1349 /* clear tag slot */
1350 esiop_lun->tactive[tag] = NULL;
1351 /* add command to non-tagged slot */
1352 esiop_lun->active = esiop_cmd;
1353 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1354 esiop_cmd->cmd_c.tag = -1;
1355 /* update DSA table */
1356 esiop_script_write(sc, esiop_target->lun_table_offset +
1357 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1358 esiop_cmd->cmd_c.dsa);
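	/*
	 * Each target has a small per-lun table in SCRIPT memory
	 * (lun_table_offset, two 32-bit words per lun); storing the DSA at
	 * the A_target_luntbl slot is what lets the reselect code find this
	 * now-untagged command, just as esiop_start() does for ordinary
	 * untagged commands.
	 */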
1359 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1360 return 0;
1361 }
1362
1363 /*
1364 * handle a bus reset: reset chip, unqueue all active commands, free all
1365  * target structs and report the loss to the upper layer.
1366  * As the upper layer may requeue immediately we have to first store
1367 * all active commands in a temporary queue.
1368 */
1369 void
1370 esiop_handle_reset(sc)
1371 struct esiop_softc *sc;
1372 {
1373 struct esiop_cmd *esiop_cmd;
1374 struct esiop_lun *esiop_lun;
1375 int target, lun, tag;
1376 /*
1377 * scsi bus reset. reset the chip and restart
1378 * the queue. Need to clean up all active commands
1379 */
1380 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1381 /* stop, reset and restart the chip */
1382 esiop_reset(sc);
1383
1384 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1385 /* chip has been reset, all slots are free now */
1386 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1387 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1388 }
1389 /*
1390 	 * Process all commands: first completed commands, then commands
1391 * being executed
1392 */
1393 esiop_checkdone(sc);
1394 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1395 target++) {
1396 struct esiop_target *esiop_target =
1397 (struct esiop_target *)sc->sc_c.targets[target];
1398 if (esiop_target == NULL)
1399 continue;
1400 for (lun = 0; lun < 8; lun++) {
1401 esiop_lun = esiop_target->esiop_lun[lun];
1402 if (esiop_lun == NULL)
1403 continue;
1404 for (tag = -1; tag <
1405 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1406 ESIOP_NTAG : 0);
1407 tag++) {
1408 if (tag >= 0)
1409 esiop_cmd = esiop_lun->tactive[tag];
1410 else
1411 esiop_cmd = esiop_lun->active;
1412 if (esiop_cmd == NULL)
1413 continue;
1414 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1415 printf("command with tag id %d reset\n", tag);
1416 esiop_cmd->cmd_c.xs->error =
1417 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1418 XS_TIMEOUT : XS_RESET;
1419 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1420 if (tag >= 0)
1421 esiop_lun->tactive[tag] = NULL;
1422 else
1423 esiop_lun->active = NULL;
1424 esiop_cmd->cmd_c.status = CMDST_DONE;
1425 esiop_scsicmd_end(esiop_cmd);
1426 }
1427 }
1428 sc->sc_c.targets[target]->status = TARST_ASYNC;
1429 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1430 sc->sc_c.targets[target]->period =
1431 sc->sc_c.targets[target]->offset = 0;
1432 siop_update_xfer_mode(&sc->sc_c, target);
1433 }
1434
1435 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1436 }
1437
1438 void
1439 esiop_scsipi_request(chan, req, arg)
1440 struct scsipi_channel *chan;
1441 scsipi_adapter_req_t req;
1442 void *arg;
1443 {
1444 struct scsipi_xfer *xs;
1445 struct scsipi_periph *periph;
1446 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1447 struct esiop_cmd *esiop_cmd;
1448 struct esiop_target *esiop_target;
1449 int s, error, i;
1450 int target;
1451 int lun;
1452
1453 switch (req) {
1454 case ADAPTER_REQ_RUN_XFER:
1455 xs = arg;
1456 periph = xs->xs_periph;
1457 target = periph->periph_target;
1458 lun = periph->periph_lun;
1459
1460 s = splbio();
1461 /*
1462 		 * first check if there are pending completed commands.
1463 * this can free us some resources (in the rings for example).
1464 * we have to lock it to avoid recursion.
1465 */
1466 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1467 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1468 esiop_checkdone(sc);
1469 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1470 }
1471 #ifdef SIOP_DEBUG_SCHED
1472 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1473 xs->xs_tag_type, xs->xs_tag_id);
1474 #endif
1475 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1476 if (esiop_cmd == NULL) {
1477 xs->error = XS_RESOURCE_SHORTAGE;
1478 scsipi_done(xs);
1479 splx(s);
1480 return;
1481 }
1482 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1483 #ifdef DIAGNOSTIC
1484 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1485 panic("siop_scsicmd: new cmd not free");
1486 #endif
1487 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1488 if (esiop_target == NULL) {
1489 #ifdef SIOP_DEBUG
1490 printf("%s: alloc siop_target for target %d\n",
1491 sc->sc_c.sc_dev.dv_xname, target);
1492 #endif
1493 sc->sc_c.targets[target] =
1494 malloc(sizeof(struct esiop_target),
1495 M_DEVBUF, M_NOWAIT | M_ZERO);
1496 if (sc->sc_c.targets[target] == NULL) {
1497 printf("%s: can't malloc memory for "
1498 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1499 target);
1500 xs->error = XS_RESOURCE_SHORTAGE;
1501 scsipi_done(xs);
1502 splx(s);
1503 return;
1504 }
1505 esiop_target =
1506 (struct esiop_target*)sc->sc_c.targets[target];
1507 esiop_target->target_c.status = TARST_PROBING;
1508 esiop_target->target_c.flags = 0;
1509 esiop_target->target_c.id =
1510 sc->sc_c.clock_div << 24; /* scntl3 */
1511 esiop_target->target_c.id |= target << 16; /* id */
1512 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1513
1514 for (i=0; i < 8; i++)
1515 esiop_target->esiop_lun[i] = NULL;
1516 esiop_target_register(sc, target);
1517 }
1518 if (esiop_target->esiop_lun[lun] == NULL) {
1519 esiop_target->esiop_lun[lun] =
1520 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1521 M_NOWAIT|M_ZERO);
1522 if (esiop_target->esiop_lun[lun] == NULL) {
1523 printf("%s: can't alloc esiop_lun for "
1524 "target %d lun %d\n",
1525 sc->sc_c.sc_dev.dv_xname, target, lun);
1526 xs->error = XS_RESOURCE_SHORTAGE;
1527 scsipi_done(xs);
1528 splx(s);
1529 return;
1530 }
1531 }
1532 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1533 esiop_cmd->cmd_c.xs = xs;
1534 esiop_cmd->cmd_c.flags = 0;
1535 esiop_cmd->cmd_c.status = CMDST_READY;
1536
1537 /* load the DMA maps */
1538 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1539 esiop_cmd->cmd_c.dmamap_cmd,
1540 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1541 if (error) {
1542 printf("%s: unable to load cmd DMA map: %d\n",
1543 sc->sc_c.sc_dev.dv_xname, error);
1544 xs->error = XS_DRIVER_STUFFUP;
1545 scsipi_done(xs);
1546 splx(s);
1547 return;
1548 }
1549 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1550 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1551 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1552 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1553 ((xs->xs_control & XS_CTL_DATA_IN) ?
1554 BUS_DMA_READ : BUS_DMA_WRITE));
1555 if (error) {
1556 				printf("%s: unable to load data DMA map: %d\n",
1557 sc->sc_c.sc_dev.dv_xname, error);
1558 xs->error = XS_DRIVER_STUFFUP;
1559 scsipi_done(xs);
1560 bus_dmamap_unload(sc->sc_c.sc_dmat,
1561 esiop_cmd->cmd_c.dmamap_cmd);
1562 splx(s);
1563 return;
1564 }
1565 bus_dmamap_sync(sc->sc_c.sc_dmat,
1566 esiop_cmd->cmd_c.dmamap_data, 0,
1567 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1568 (xs->xs_control & XS_CTL_DATA_IN) ?
1569 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1570 }
1571 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1572 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1573 BUS_DMASYNC_PREWRITE);
1574
1575 if (xs->xs_tag_type)
1576 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1577 else
1578 esiop_cmd->cmd_c.tag = -1;
1579 siop_setuptables(&esiop_cmd->cmd_c);
1580 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1581 htole32(A_f_c_target | A_f_c_lun);
1582 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1583 htole32((target << 8) | (lun << 16));
1584 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1585 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1586 htole32(A_f_c_tag);
1587 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1588 htole32(esiop_cmd->cmd_c.tag << 24);
1589 }
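		/*
		 * tlq mirrors the SCRATCHC layout decoded in esiop_intr():
		 * A_f_c_* flags in byte 0, then target, lun and (if tagged)
		 * the tag in bytes 1..3.
		 */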
1590
1591 esiop_table_sync(esiop_cmd,
1592 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1593 esiop_start(sc, esiop_cmd);
1594 if (xs->xs_control & XS_CTL_POLL) {
1595 /* poll for command completion */
1596 while ((xs->xs_status & XS_STS_DONE) == 0) {
1597 delay(1000);
1598 esiop_intr(sc);
1599 }
1600 }
1601 splx(s);
1602 return;
1603
1604 case ADAPTER_REQ_GROW_RESOURCES:
1605 #ifdef SIOP_DEBUG
1606 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1607 sc->sc_c.sc_adapt.adapt_openings);
1608 #endif
1609 esiop_morecbd(sc);
1610 return;
1611
1612 case ADAPTER_REQ_SET_XFER_MODE:
1613 {
1614 struct scsipi_xfer_mode *xm = arg;
1615 if (sc->sc_c.targets[xm->xm_target] == NULL)
1616 return;
1617 s = splbio();
1618 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1619 (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1620 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1621 /* allocate tag tables for this device */
1622 for (lun = 0;
1623 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1624 if (scsipi_lookup_periph(chan,
1625 xm->xm_target, lun) != NULL)
1626 esiop_add_dev(sc, xm->xm_target, lun);
1627 }
1628 }
1629 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1630 (sc->sc_c.features & SF_BUS_WIDE))
1631 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1632 if (xm->xm_mode & PERIPH_CAP_SYNC)
1633 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1634 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1635 (sc->sc_c.features & SF_CHIP_DT))
1636 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1637 if ((xm->xm_mode &
1638 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1639 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1640 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1641
1642 splx(s);
1643 }
1644 }
1645 }
1646
1647 static void
1648 esiop_start(sc, esiop_cmd)
1649 struct esiop_softc *sc;
1650 struct esiop_cmd *esiop_cmd;
1651 {
1652 struct esiop_lun *esiop_lun;
1653 struct esiop_target *esiop_target;
1654 int timeout;
1655 int target, lun, slot;
1656
1657 /*
1658 * first make sure to read valid data
1659 */
1660 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1661
1662 /*
1663 * We use a circular queue here. sc->sc_currschedslot points to a
1664 * free slot, unless we have filled the queue. Check this.
1665 */
1666 slot = sc->sc_currschedslot;
1667 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1668 A_f_cmd_free) == 0) {
1669 /*
1670 		 * no more free slots, no need to continue. Freeze the queue
1671 * and requeue this command.
1672 */
1673 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1674 sc->sc_flags |= SCF_CHAN_NOSLOT;
1675 esiop_script_sync(sc,
1676 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1677 esiop_script_write(sc, sc->sc_semoffset,
1678 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1679 esiop_script_sync(sc,
1680 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
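		/*
		 * With A_sem_start cleared, esiop_checkdone() will only thaw
		 * the channel once the SCRIPT has started another command
		 * (presumably setting the bit again), i.e. once a scheduler
		 * slot is known to be free again.
		 */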
1681 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1682 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1683 esiop_scsicmd_end(esiop_cmd);
1684 return;
1685 }
1686 /* OK, we can use this slot */
1687
1688 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1689 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1690 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1691 esiop_lun = esiop_target->esiop_lun[lun];
1692 /* if non-tagged command active, panic: this shouldn't happen */
1693 if (esiop_lun->active != NULL) {
1694 panic("esiop_start: tagged cmd while untagged running");
1695 }
1696 #ifdef DIAGNOSTIC
1697 /* sanity check the tag if needed */
1698 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1699 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1700 panic("esiop_start: tag not free");
1701 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1702 esiop_cmd->cmd_c.tag < 0) {
1703 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1704 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1705 panic("esiop_start: invalid tag id");
1706 }
1707 }
1708 #endif
1709 #ifdef SIOP_DEBUG_SCHED
1710 printf("using slot %d for DSA 0x%lx\n", slot,
1711 (u_long)esiop_cmd->cmd_c.dsa);
1712 #endif
1713 /* mark command as active */
1714 if (esiop_cmd->cmd_c.status == CMDST_READY)
1715 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1716 else
1717 panic("esiop_start: bad status");
1718 /* DSA table for reselect */
1719 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1720 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1721 /* DSA table for reselect */
1722 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1723 htole32(esiop_cmd->cmd_c.dsa);
1724 bus_dmamap_sync(sc->sc_c.sc_dmat,
1725 esiop_lun->lun_tagtbl->tblblk->blkmap,
1726 esiop_lun->lun_tagtbl->tbl_offset,
1727 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1728 } else {
1729 esiop_lun->active = esiop_cmd;
1730 esiop_script_write(sc,
1731 esiop_target->lun_table_offset +
1732 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1733 esiop_cmd->cmd_c.dsa);
1734 }
1735 /* scheduler slot: DSA */
1736 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1737 esiop_cmd->cmd_c.dsa);
1738 /* make sure SCRIPT processor will read valid data */
1739 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1740 /* handle timeout */
1741 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1742 		/* start expire timer */
1743 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1744 if (timeout == 0)
1745 timeout = 1;
1746 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1747 timeout, esiop_timeout, esiop_cmd);
1748 }
1749 /* Signal script it has some work to do */
1750 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1751 SIOP_ISTAT, ISTAT_SIGP);
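	/*
	 * Setting ISTAT_SIGP presumably kicks the SCRIPTS processor out of
	 * its wait loop so it rescans the scheduler ring and finds the DSA
	 * just stored in the current slot.
	 */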
1752 /* update the current slot, and wait for IRQ */
1753 sc->sc_currschedslot++;
1754 if (sc->sc_currschedslot >= A_ncmd_slots)
1755 sc->sc_currschedslot = 0;
1756 return;
1757 }
1758
1759 void
1760 esiop_timeout(v)
1761 void *v;
1762 {
1763 struct esiop_cmd *esiop_cmd = v;
1764 struct esiop_softc *sc =
1765 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1766 int s;
1767 #ifdef SIOP_DEBUG
1768 int slot, slotdsa;
1769 #endif
1770
1771 s = splbio();
1772 esiop_table_sync(esiop_cmd,
1773 BUS_DMASYNC_POSTREAD |
1774 BUS_DMASYNC_POSTWRITE);
1775 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1776 #ifdef SIOP_DEBUG
1777 printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1778
1779 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1780 for (slot = 0; slot < A_ncmd_slots; slot++) {
1781 slotdsa = esiop_script_read(sc,
1782 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1783 if ((slotdsa & 0x01) == 0)
1784 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1785 }
1786 printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1787 printf("DSP 0x%lx DSA 0x%x\n",
1788 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1789 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
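	/* reading CTEST2 should clear SIGP in ISTAT; re-read ISTAT to check */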
1790 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1791 printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1792 #else
1793 printf("command timeout\n");
1794 #endif
1795 /* reset the scsi bus */
1796 siop_resetbus(&sc->sc_c);
1797
1798 /* deactivate callout */
1799 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1800 	/*
1801 	 * mark the command as having timed out and just return;
1802 	 * the bus reset will generate an interrupt,
1803 	 * which will be handled in esiop_intr()
1804 	 */
1805 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1806 splx(s);
1807 return;
1809 }
1810
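/*
 * Debug helper: dump the script as seen by the chip, two words per
 * instruction, with a third word printed for the (presumably
 * memory-to-memory move) opcodes whose top three bits are 110.
 */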
1811 void
1812 esiop_dump_script(sc)
1813 struct esiop_softc *sc;
1814 {
1815 int i;
1816 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1817 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1818 le32toh(sc->sc_c.sc_script[i]),
1819 le32toh(sc->sc_c.sc_script[i+1]));
1820 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1821 0xc0000000) {
1822 i++;
1823 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1824 }
1825 printf("\n");
1826 }
1827 }
1828
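/*
 * Grow the command descriptor pool: allocate one page of DMA-safe
 * esiop_xfer tables plus the matching esiop_cmd software state, and put
 * the new commands on the free list.
 */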
1829 void
1830 esiop_morecbd(sc)
1831 struct esiop_softc *sc;
1832 {
1833 int error, i, s;
1834 bus_dma_segment_t seg;
1835 int rseg;
1836 struct esiop_cbd *newcbd;
1837 struct esiop_xfer *xfer;
1838 bus_addr_t dsa;
1839
1840 /* allocate a new list head */
1841 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1842 if (newcbd == NULL) {
1843 		printf("%s: can't allocate memory for command descriptor "
1844 		    "block head\n", sc->sc_c.sc_dev.dv_xname);
1845 return;
1846 }
1847
1848 /* allocate cmd list */
1849 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1850 M_DEVBUF, M_NOWAIT|M_ZERO);
1851 if (newcbd->cmds == NULL) {
1852 printf("%s: can't allocate memory for command descriptors\n",
1853 sc->sc_c.sc_dev.dv_xname);
1854 goto bad3;
1855 }
1856 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1857 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1858 if (error) {
1859 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1860 sc->sc_c.sc_dev.dv_xname, error);
1861 goto bad2;
1862 }
1863 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1864 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1865 if (error) {
1866 printf("%s: unable to map cbd DMA memory, error = %d\n",
1867 sc->sc_c.sc_dev.dv_xname, error);
1868 goto bad2;
1869 }
1870 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1871 BUS_DMA_NOWAIT, &newcbd->xferdma);
1872 if (error) {
1873 printf("%s: unable to create cbd DMA map, error = %d\n",
1874 sc->sc_c.sc_dev.dv_xname, error);
1875 goto bad1;
1876 }
1877 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1878 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1879 if (error) {
1880 printf("%s: unable to load cbd DMA map, error = %d\n",
1881 sc->sc_c.sc_dev.dv_xname, error);
1882 goto bad0;
1883 }
1884 #ifdef DEBUG
1885 	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1886 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1887 #endif
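	/*
	 * Carve the page into SIOP_NCMDPB commands; each command's DSA is
	 * the bus address of its esiop_xfer, so the SCRIPTS processor can
	 * reach the message, status and data tables directly.
	 */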
1888 for (i = 0; i < SIOP_NCMDPB; i++) {
1889 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1890 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1891 &newcbd->cmds[i].cmd_c.dmamap_data);
1892 if (error) {
1893 printf("%s: unable to create data DMA map for cbd: "
1894 "error %d\n",
1895 sc->sc_c.sc_dev.dv_xname, error);
1896 goto bad0;
1897 }
1898 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1899 sizeof(struct scsipi_generic), 1,
1900 sizeof(struct scsipi_generic), 0,
1901 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1902 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1903 if (error) {
1904 			printf("%s: unable to create cmd DMA map for cbd: "
1905 			    "error %d\n",
1905 sc->sc_c.sc_dev.dv_xname, error);
1906 goto bad0;
1907 }
1908 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1909 newcbd->cmds[i].esiop_cbdp = newcbd;
1910 xfer = &newcbd->xfers[i];
1911 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1912 memset(newcbd->cmds[i].cmd_tables, 0,
1913 sizeof(struct esiop_xfer));
1914 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1915 i * sizeof(struct esiop_xfer);
1916 newcbd->cmds[i].cmd_c.dsa = dsa;
1917 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1918 		xfer->siop_tables.t_msgout.count = htole32(1);
1919 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1920 		xfer->siop_tables.t_msgin.count = htole32(1);
1921 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1922 offsetof(struct siop_common_xfer, msg_in));
1923 		xfer->siop_tables.t_extmsgin.count = htole32(2);
1924 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1925 offsetof(struct siop_common_xfer, msg_in) + 1);
1926 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1927 offsetof(struct siop_common_xfer, msg_in) + 3);
1928 		xfer->siop_tables.t_status.count = htole32(1);
1929 xfer->siop_tables.t_status.addr = htole32(dsa +
1930 offsetof(struct siop_common_xfer, status));
1931
1932 s = splbio();
1933 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1934 splx(s);
1935 #ifdef SIOP_DEBUG
1936 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1937 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1938 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1939 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1940 #endif
1941 }
1942 s = splbio();
1943 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1944 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1945 splx(s);
1946 return;
1947 bad0:
1948 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1949 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1950 bad1:
1951 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1952 bad2:
1953 free(newcbd->cmds, M_DEVBUF);
1954 bad3:
1955 free(newcbd, M_DEVBUF);
1956 return;
1957 }
1958
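/*
 * Grow the pool of tag DSA tables: one page gives ESIOP_NTPB tables of
 * ESIOP_NTAG entries each. A lun doing tagged queuing gets one such
 * table, which the script is expected to index by tag number on
 * reselection to recover the command's DSA.
 */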
1959 void
1960 esiop_moretagtbl(sc)
1961 struct esiop_softc *sc;
1962 {
1963 int error, i, j, s;
1964 bus_dma_segment_t seg;
1965 int rseg;
1966 struct esiop_dsatblblk *newtblblk;
1967 struct esiop_dsatbl *newtbls;
1968 u_int32_t *tbls;
1969
1970 /* allocate a new list head */
1971 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1972 M_DEVBUF, M_NOWAIT|M_ZERO);
1973 if (newtblblk == NULL) {
1974 printf("%s: can't allocate memory for tag DSA table block\n",
1975 sc->sc_c.sc_dev.dv_xname);
1976 return;
1977 }
1978
1979 /* allocate tbl list */
1980 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
1981 M_DEVBUF, M_NOWAIT|M_ZERO);
1982 if (newtbls == NULL) {
1983 		printf("%s: can't allocate memory for tag DSA tables\n",
1984 sc->sc_c.sc_dev.dv_xname);
1985 goto bad3;
1986 }
1987 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1988 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1989 if (error) {
1990 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
1991 sc->sc_c.sc_dev.dv_xname, error);
1992 goto bad2;
1993 }
1994 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1995 	    (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1996 if (error) {
1997 printf("%s: unable to map tbls DMA memory, error = %d\n",
1998 sc->sc_c.sc_dev.dv_xname, error);
1999 goto bad2;
2000 }
2001 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2002 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2003 if (error) {
2004 printf("%s: unable to create tbl DMA map, error = %d\n",
2005 sc->sc_c.sc_dev.dv_xname, error);
2006 goto bad1;
2007 }
2008 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2009 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2010 if (error) {
2011 printf("%s: unable to load tbl DMA map, error = %d\n",
2012 sc->sc_c.sc_dev.dv_xname, error);
2013 goto bad0;
2014 }
2015 #ifdef DEBUG
2016 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2017 sc->sc_c.sc_dev.dv_xname,
2018 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2019 #endif
2020 for (i = 0; i < ESIOP_NTPB; i++) {
2021 newtbls[i].tblblk = newtblblk;
2022 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2023 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2024 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2025 newtbls[i].tbl_offset;
2026 for (j = 0; j < ESIOP_NTAG; j++)
2027 newtbls[i].tbl[j] = j;
2028 s = splbio();
2029 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2030 splx(s);
2031 }
2032 s = splbio();
2033 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2034 splx(s);
2035 return;
2036 bad0:
2037 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2038 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2039 bad1:
2040 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2041 bad2:
2042 free(newtbls, M_DEVBUF);
2043 bad3:
2044 free(newtblblk, M_DEVBUF);
2045 return;
2046 }
2047
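/*
 * The first word of a target's lun table is the ID word used for select,
 * which (as set up by the common siop code) also carries the negotiated
 * scntl3/sxfer values; rewrite it in script memory when they change.
 */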
2048 void
2049 esiop_update_scntl3(sc, _siop_target)
2050 struct esiop_softc *sc;
2051 struct siop_common_target *_siop_target;
2052 {
2053 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2054 esiop_script_write(sc, esiop_target->lun_table_offset,
2055 esiop_target->target_c.id);
2056 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2057 }
2058
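/*
 * A new lun is attaching: give it a tag DSA table so tagged commands can
 * be reselected, or fall back to untagged operation if no table is
 * available.
 */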
2059 void
2060 esiop_add_dev(sc, target, lun)
2061 struct esiop_softc *sc;
2062 int target;
2063 int lun;
2064 {
2065 struct esiop_target *esiop_target =
2066 (struct esiop_target *)sc->sc_c.targets[target];
2067 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2068
2069 /* we need a tag DSA table */
2070 	esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2071 if (esiop_lun->lun_tagtbl == NULL) {
2072 esiop_moretagtbl(sc);
2073 		esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2074 if (esiop_lun->lun_tagtbl == NULL) {
2075 /* no resources, run untagged */
2076 esiop_target->target_c.flags &= ~TARF_TAG;
2077 return;
2078 }
2079 }
2080 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2081 /* Update LUN DSA table */
2082 esiop_script_write(sc, esiop_target->lun_table_offset +
2083 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2084 esiop_lun->lun_tagtbl->tbl_dsa);
2085 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2086 }
2087
2088 void
2089 esiop_del_dev(sc, target, lun)
2090 struct esiop_softc *sc;
2091 int target;
2092 int lun;
2093 {
2094 struct esiop_target *esiop_target;
2095 #ifdef SIOP_DEBUG
2096 printf("%s:%d:%d: free lun sw entry\n",
2097 sc->sc_c.sc_dev.dv_xname, target, lun);
2098 #endif
2099 if (sc->sc_c.targets[target] == NULL)
2100 return;
2101 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2102 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2103 esiop_target->esiop_lun[lun] = NULL;
2104 }
2105
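/*
 * Carve this target's lun table out of the free script RAM area, hook it
 * into the global target table, and (re)register any tag DSA tables its
 * luns already own.
 */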
2106 void
2107 esiop_target_register(sc, target)
2108 struct esiop_softc *sc;
2109 u_int32_t target;
2110 {
2111 struct esiop_target *esiop_target =
2112 (struct esiop_target *)sc->sc_c.targets[target];
2113 struct esiop_lun *esiop_lun;
2114 int lun;
2115
2116 /* get a DSA table for this target */
2117 esiop_target->lun_table_offset = sc->sc_free_offset;
2118 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2119 #ifdef SIOP_DEBUG
2120 printf("%s: lun table for target %d offset %d free offset %d\n",
2121 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2122 sc->sc_free_offset);
2123 #endif
2124 	/* first 32-bit word is the ID (for select) */
2125 esiop_script_write(sc, esiop_target->lun_table_offset,
2126 esiop_target->target_c.id);
2127 /* Record this table in the target DSA table */
2128 esiop_script_write(sc,
2129 sc->sc_target_table_offset + target,
2130 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2131 sc->sc_c.sc_scriptaddr);
2132 /* if we have a tag table, register it */
2133 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2134 esiop_lun = esiop_target->esiop_lun[lun];
2135 if (esiop_lun == NULL)
2136 continue;
2137 if (esiop_lun->lun_tagtbl)
2138 esiop_script_write(sc, esiop_target->lun_table_offset +
2139 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2140 esiop_lun->lun_tagtbl->tbl_dsa);
2141 }
2142 esiop_script_sync(sc,
2143 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2144 }
2145
2146 #ifdef SIOP_STATS
2147 void
2148 esiop_printstats()
2149 {
2150 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2151 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2152 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2153 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2154 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2155 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2156 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2157 }
2158 #endif
2159