1 /* $NetBSD: esiop.c,v 1.28 2004/05/17 11:10:24 bouyer Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.28 2004/05/17 11:10:24 bouyer Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
81
82 void esiop_reset __P((struct esiop_softc *));
83 void esiop_checkdone __P((struct esiop_softc *));
84 void esiop_handle_reset __P((struct esiop_softc *));
85 void esiop_scsicmd_end __P((struct esiop_cmd *));
86 void esiop_unqueue __P((struct esiop_softc *, int, int));
87 int esiop_handle_qtag_reject __P((struct esiop_cmd *));
88 static void esiop_start __P((struct esiop_softc *, struct esiop_cmd *));
89 void esiop_timeout __P((void *));
90 void esiop_scsipi_request __P((struct scsipi_channel *,
91 scsipi_adapter_req_t, void *));
92 void esiop_dump_script __P((struct esiop_softc *));
93 void esiop_morecbd __P((struct esiop_softc *));
94 void esiop_moretagtbl __P((struct esiop_softc *));
95 void siop_add_reselsw __P((struct esiop_softc *, int));
96 void esiop_target_register __P((struct esiop_softc *, u_int32_t));
97
98 void esiop_update_scntl3 __P((struct esiop_softc *,
99 struct siop_common_target *));
100
101 #ifdef SIOP_STATS
102 static int esiop_stat_intr = 0;
103 static int esiop_stat_intr_shortxfer = 0;
104 static int esiop_stat_intr_sdp = 0;
105 static int esiop_stat_intr_done = 0;
106 static int esiop_stat_intr_xferdisc = 0;
107 static int esiop_stat_intr_lunresel = 0;
108 static int esiop_stat_intr_qfull = 0;
109 void esiop_printstats __P((void));
110 #define INCSTAT(x) x++
111 #else
112 #define INCSTAT(x)
113 #endif
114
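/*
 * The SCRIPTS program lives either in on-chip RAM (SF_CHIP_RAM) or in a
 * host buffer mapped for DMA. The helpers below hide the difference:
 * on-chip RAM is accessed through bus_space, the host copy through
 * le32toh()/htole32() plus explicit bus_dmamap_sync() calls.
 */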
115 static __inline__ void esiop_script_sync __P((struct esiop_softc *, int));
116 static __inline__ void
117 esiop_script_sync(sc, ops)
118 struct esiop_softc *sc;
119 int ops;
120 {
121 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
122 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
123 PAGE_SIZE, ops);
124 }
125
126 static __inline__ u_int32_t esiop_script_read __P((struct esiop_softc *, u_int));
127 static __inline__ u_int32_t
128 esiop_script_read(sc, offset)
129 struct esiop_softc *sc;
130 u_int offset;
131 {
132 if (sc->sc_c.features & SF_CHIP_RAM) {
133 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
134 offset * 4);
135 } else {
136 return le32toh(sc->sc_c.sc_script[offset]);
137 }
138 }
139
140 static __inline__ void esiop_script_write __P((struct esiop_softc *, u_int,
141 u_int32_t));
142 static __inline__ void
143 esiop_script_write(sc, offset, val)
144 struct esiop_softc *sc;
145 u_int offset;
146 u_int32_t val;
147 {
148 if (sc->sc_c.features & SF_CHIP_RAM) {
149 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
150 offset * 4, val);
151 } else {
152 sc->sc_c.sc_script[offset] = htole32(val);
153 }
154 }
155
156 void
157 esiop_attach(sc)
158 struct esiop_softc *sc;
159 {
160 struct esiop_dsatbl *tagtbl_donering;
161
162 if (siop_common_attach(&sc->sc_c) != 0 )
163 return;
164
165 TAILQ_INIT(&sc->free_list);
166 TAILQ_INIT(&sc->cmds);
167 TAILQ_INIT(&sc->free_tagtbl);
168 TAILQ_INIT(&sc->tag_tblblk);
169 sc->sc_currschedslot = 0;
170 #ifdef SIOP_DEBUG
171 aprint_debug("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
172 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
173 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
174 #endif
175
176 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
177 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
178
179 /*
180 * get space for the command done ring. For this we use a tag table entry:
181 * it's the same size and avoids wasting 3/4 of a page
182 */
183 #ifdef DIAGNOSTIC
184 if (ESIOP_NTAG != A_ndone_slots) {
185 aprint_error("%s: size of tag DSA table different from the done"
186 " ring\n", sc->sc_c.sc_dev.dv_xname);
187 return;
188 }
189 #endif
190 esiop_moretagtbl(sc);
191 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
192 if (tagtbl_donering == NULL) {
193 aprint_error("%s: no memory for command done ring\n",
194 sc->sc_c.sc_dev.dv_xname);
195 return;
196 }
197 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
198 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
199 sc->sc_done_offset = tagtbl_donering->tbl_offset;
200 sc->sc_done_slot = &tagtbl_donering->tbl[0];
201
202 /* Do a bus reset, so that devices fall back to narrow/async */
203 siop_resetbus(&sc->sc_c);
204 /*
205 * esiop_reset() will reset the chip, thus clearing pending interrupts
206 */
207 esiop_reset(sc);
208 #ifdef DUMP_SCRIPT
209 esiop_dump_script(sc);
210 #endif
211
212 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
213 }
214
215 void
216 esiop_reset(sc)
217 struct esiop_softc *sc;
218 {
219 int i, j;
220 u_int32_t addr;
221 u_int32_t msgin_addr, sem_addr;
222
223 siop_common_reset(&sc->sc_c);
224
225 /*
226 * we copy the script at the beginning of RAM. It is followed by 4 bytes
227 * for the msg-in buffer, and 4 bytes for the semaphore
228 */
229 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
230 msgin_addr =
231 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
232 sc->sc_free_offset += 1;
233 sc->sc_semoffset = sc->sc_free_offset;
234 sem_addr =
235 sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
236 sc->sc_free_offset += 1;
237 /* then we have the scheduler ring */
238 sc->sc_shedoffset = sc->sc_free_offset;
239 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
240 /* then the per-target DSA table */
241 sc->sc_target_table_offset = sc->sc_free_offset;
242 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
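/*
 * Resulting layout, in 32-bit words from sc_scriptaddr:
 * script | msg-in buffer (1) | semaphore (1) |
 * scheduler ring (A_ncmd_slots * CMD_SLOTSIZE) | per-target DSA table
 */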
243 /* copy and patch the script */
244 if (sc->sc_c.features & SF_CHIP_RAM) {
245 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
246 esiop_script,
247 sizeof(esiop_script) / sizeof(esiop_script[0]));
248 for (j = 0; j <
249 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
250 j++) {
251 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
252 E_tlq_offset_Used[j] * 4,
253 sizeof(struct siop_common_xfer));
254 }
255 for (j = 0; j <
256 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
257 j++) {
258 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
259 E_abs_msgin2_Used[j] * 4, msgin_addr);
260 }
261 for (j = 0; j <
262 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
263 j++) {
264 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
265 E_abs_sem_Used[j] * 4, sem_addr);
266 }
267
268 if (sc->sc_c.features & SF_CHIP_LED0) {
269 bus_space_write_region_4(sc->sc_c.sc_ramt,
270 sc->sc_c.sc_ramh,
271 Ent_led_on1, esiop_led_on,
272 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
273 bus_space_write_region_4(sc->sc_c.sc_ramt,
274 sc->sc_c.sc_ramh,
275 Ent_led_on2, esiop_led_on,
276 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
277 bus_space_write_region_4(sc->sc_c.sc_ramt,
278 sc->sc_c.sc_ramh,
279 Ent_led_off, esiop_led_off,
280 sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
281 }
282 } else {
283 for (j = 0;
284 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
285 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
286 }
287 for (j = 0; j <
288 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
289 j++) {
290 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
291 htole32(sizeof(struct siop_common_xfer));
292 }
293 for (j = 0; j <
294 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
295 j++) {
296 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
297 htole32(msgin_addr);
298 }
299 for (j = 0; j <
300 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
301 j++) {
302 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
303 htole32(sem_addr);
304 }
305
306 if (sc->sc_c.features & SF_CHIP_LED0) {
307 for (j = 0; j < (sizeof(esiop_led_on) /
308 sizeof(esiop_led_on[0])); j++)
309 sc->sc_c.sc_script[
310 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
311 ] = htole32(esiop_led_on[j]);
312 for (j = 0; j < (sizeof(esiop_led_on) /
313 sizeof(esiop_led_on[0])); j++)
314 sc->sc_c.sc_script[
315 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
316 ] = htole32(esiop_led_on[j]);
317 for (j = 0; j < (sizeof(esiop_led_off) /
318 sizeof(esiop_led_off[0])); j++)
319 sc->sc_c.sc_script[
320 Ent_led_off / sizeof(esiop_led_off[0]) + j
321 ] = htole32(esiop_led_off[j]);
322 }
323 }
324 /* get base of scheduler ring */
325 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
326 /* init scheduler */
327 for (i = 0; i < A_ncmd_slots; i++) {
328 esiop_script_write(sc,
329 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
330 }
331 sc->sc_currschedslot = 0;
332 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
333 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
334 /*
335 * 0x78000000 is a 'move data8 to reg'. data8 is the second
336 * octet, reg offset is the third.
337 */
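/*
 * For instance 0x78641200 moves the immediate byte 0x12 into the register
 * at offset 0x64; the four writes below thus patch the scheduler ring base
 * address into the script, one byte per instruction.
 */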
338 esiop_script_write(sc, Ent_cmdr0 / 4,
339 0x78640000 | ((addr & 0x000000ff) << 8));
340 esiop_script_write(sc, Ent_cmdr1 / 4,
341 0x78650000 | ((addr & 0x0000ff00) ));
342 esiop_script_write(sc, Ent_cmdr2 / 4,
343 0x78660000 | ((addr & 0x00ff0000) >> 8));
344 esiop_script_write(sc, Ent_cmdr3 / 4,
345 0x78670000 | ((addr & 0xff000000) >> 16));
346 /* done ring */
347 for (i = 0; i < A_ndone_slots; i++)
348 sc->sc_done_slot[i] = 0;
349 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
350 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
351 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
352 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
353 sc->sc_currdoneslot = 0;
354 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
355 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
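/* the done ring base address is patched into the script the same way */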
356 esiop_script_write(sc, Ent_doner0 / 4,
357 0x786c0000 | ((addr & 0x000000ff) << 8));
358 esiop_script_write(sc, Ent_doner1 / 4,
359 0x786d0000 | ((addr & 0x0000ff00) ));
360 esiop_script_write(sc, Ent_doner2 / 4,
361 0x786e0000 | ((addr & 0x00ff0000) >> 8));
362 esiop_script_write(sc, Ent_doner3 / 4,
363 0x786f0000 | ((addr & 0xff000000) >> 16));
364
365 /* set flags */
366 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
367 /* patch the base address of the target DSA table into the script */
368 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
369 sc->sc_c.sc_scriptaddr;
370 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
371 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
372 ((addr & 0x000000ff) << 8));
373 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
374 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
375 ((addr & 0x0000ff00) ));
376 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
377 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
378 ((addr & 0x00ff0000) >> 8));
379 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
380 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
381 ((addr & 0xff000000) >> 16));
382 #ifdef SIOP_DEBUG
383 printf("%s: target table offset %d free offset %d\n",
384 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
385 sc->sc_free_offset);
386 #endif
387
388 /* register existing targets */
389 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
390 if (sc->sc_c.targets[i])
391 esiop_target_register(sc, i);
392 }
393 /* start script */
394 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
395 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
396 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
397 }
398 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
399 sc->sc_c.sc_scriptaddr + Ent_reselect);
400 }
401
402 #if 0
403 #define CALL_SCRIPT(ent) do {\
404 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
405 esiop_cmd->cmd_c.dsa, \
406 sc->sc_c.sc_scriptaddr + ent); \
407 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
408 } while (0)
409 #else
410 #define CALL_SCRIPT(ent) do {\
411 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
412 } while (0)
413 #endif
414
415 int
416 esiop_intr(v)
417 void *v;
418 {
419 struct esiop_softc *sc = v;
420 struct esiop_target *esiop_target;
421 struct esiop_cmd *esiop_cmd;
422 struct esiop_lun *esiop_lun;
423 struct scsipi_xfer *xs;
424 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
425 u_int32_t irqcode;
426 int need_reset = 0;
427 int offset, target, lun, tag;
428 u_int32_t tflags;
429 u_int32_t addr;
430 int freetarget = 0;
431 int slot;
432 int retval = 0;
433
434 again:
435 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
436 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
437 return retval;
438 }
439 retval = 1;
440 INCSTAT(esiop_stat_intr);
441 esiop_checkdone(sc);
442 if (istat & ISTAT_INTF) {
443 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 SIOP_ISTAT, ISTAT_INTF);
445 goto again;
446 }
447
448 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
449 (ISTAT_DIP | ISTAT_ABRT)) {
450 /* clear abort */
451 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
452 SIOP_ISTAT, 0);
453 }
454
455 /* get CMD from T/L/Q */
456 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
457 SIOP_SCRATCHC);
458 #ifdef SIOP_DEBUG_INTR
459 printf("interrupt, istat=0x%x tflags=0x%x "
460 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
461 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
462 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
463 SIOP_DSP) -
464 sc->sc_c.sc_scriptaddr));
465 #endif
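/*
 * SCRATCHC holds the current T/L/Q word: flag bits in byte 0 and, when the
 * corresponding A_f_c_* flag is set, the target id in byte 1, the lun in
 * byte 2 and the tag in byte 3.
 */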
466 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
467 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
468 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
469 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
470 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
471
472 if (target >= 0 && lun >= 0) {
473 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
474 if (esiop_target == NULL) {
475 printf("esiop_target (target %d) not valid\n", target);
476 goto none;
477 }
478 esiop_lun = esiop_target->esiop_lun[lun];
479 if (esiop_lun == NULL) {
480 printf("esiop_lun (target %d lun %d) not valid\n",
481 target, lun);
482 goto none;
483 }
484 esiop_cmd =
485 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
486 if (esiop_cmd == NULL) {
487 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
488 target, lun, tag);
489 goto none;
490 }
491 xs = esiop_cmd->cmd_c.xs;
492 #ifdef DIAGNOSTIC
493 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
494 printf("esiop_cmd (target %d lun %d) "
495 "not active (%d)\n", target, lun,
496 esiop_cmd->cmd_c.status);
497 goto none;
498 }
499 #endif
500 esiop_table_sync(esiop_cmd,
501 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
502 } else {
503 none:
504 xs = NULL;
505 esiop_target = NULL;
506 esiop_lun = NULL;
507 esiop_cmd = NULL;
508 }
509 if (istat & ISTAT_DIP) {
510 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
511 SIOP_DSTAT);
512 if (dstat & DSTAT_ABRT) {
513 /* was probably generated by a bus reset IOCTL */
514 if ((dstat & DSTAT_DFE) == 0)
515 siop_clearfifo(&sc->sc_c);
516 goto reset;
517 }
518 if (dstat & DSTAT_SSI) {
519 printf("single step dsp 0x%08x dsa 0x08%x\n",
520 (int)(bus_space_read_4(sc->sc_c.sc_rt,
521 sc->sc_c.sc_rh, SIOP_DSP) -
522 sc->sc_c.sc_scriptaddr),
523 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
524 SIOP_DSA));
525 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
526 (istat & ISTAT_SIP) == 0) {
527 bus_space_write_1(sc->sc_c.sc_rt,
528 sc->sc_c.sc_rh, SIOP_DCNTL,
529 bus_space_read_1(sc->sc_c.sc_rt,
530 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
531 }
532 return 1;
533 }
534
535 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
536 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
537 if (dstat & DSTAT_IID)
538 printf(" Illegal instruction");
539 if (dstat & DSTAT_BF)
540 printf(" bus fault");
541 if (dstat & DSTAT_MDPE)
542 printf(" parity");
543 if (dstat & DSTAT_DFE)
544 printf(" DMA fifo empty");
545 else
546 siop_clearfifo(&sc->sc_c);
547 printf(", DSP=0x%x DSA=0x%x: ",
548 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
549 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
550 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
551 if (esiop_cmd)
552 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
553 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
554 le32toh(esiop_cmd->cmd_tables->status));
555 else
556 printf(" current T/L/Q invalid\n");
557 need_reset = 1;
558 }
559 }
560 if (istat & ISTAT_SIP) {
561 if (istat & ISTAT_DIP)
562 delay(10);
563 /*
564 * Can't read sist0 & sist1 independently, or we would have to
565 * insert a delay between the two reads
566 */
567 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
568 SIOP_SIST0);
569 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
570 SIOP_SSTAT1);
571 #ifdef SIOP_DEBUG_INTR
572 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
573 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
574 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
575 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
576 SIOP_DSP) -
577 sc->sc_c.sc_scriptaddr));
578 #endif
579 if (sist & SIST0_RST) {
580 esiop_handle_reset(sc);
581 /* no table to flush here */
582 return 1;
583 }
584 if (sist & SIST0_SGE) {
585 if (esiop_cmd)
586 scsipi_printaddr(xs->xs_periph);
587 else
588 printf("%s:", sc->sc_c.sc_dev.dv_xname);
589 printf("scsi gross error\n");
590 if (esiop_target)
591 esiop_target->target_c.flags &= ~TARF_DT;
592 #ifdef DEBUG
593 printf("DSA=0x%x DSP=0x%lx\n",
594 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
595 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
596 SIOP_DSP) -
597 sc->sc_c.sc_scriptaddr));
598 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
599 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SDID),
600 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL3),
601 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SXFER),
602 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL4));
603
604 #endif
605 goto reset;
606 }
607 if ((sist & SIST0_MA) && need_reset == 0) {
608 if (esiop_cmd) {
609 int scratchc0;
610 dstat = bus_space_read_1(sc->sc_c.sc_rt,
611 sc->sc_c.sc_rh, SIOP_DSTAT);
612 /*
613 * first restore DSA, in case we were in a S/G
614 * operation.
615 */
616 bus_space_write_4(sc->sc_c.sc_rt,
617 sc->sc_c.sc_rh,
618 SIOP_DSA, esiop_cmd->cmd_c.dsa);
619 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
620 sc->sc_c.sc_rh, SIOP_SCRATCHC);
621 switch (sstat1 & SSTAT1_PHASE_MASK) {
622 case SSTAT1_PHASE_STATUS:
623 /*
624 * previous phase may be aborted for any reason
625 * (for example, the target has less data to
626 * transfer than requested). Just go to status
627 * and the command should terminate.
628 */
629 INCSTAT(esiop_stat_intr_shortxfer);
630 /*
631 * sdp not needed here, but this
632 * will cause xs->resid to be adjusted
633 */
634 if (scratchc0 & A_f_c_data)
635 siop_sdp(&esiop_cmd->cmd_c);
636 else if ((dstat & DSTAT_DFE) == 0)
637 siop_clearfifo(&sc->sc_c);
638 /* no table to flush here */
639 CALL_SCRIPT(Ent_status);
640 return 1;
641 case SSTAT1_PHASE_MSGIN:
642 /*
643 * the target may be ready to disconnect.
644 * Save data pointers just in case.
645 */
646 INCSTAT(esiop_stat_intr_xferdisc);
647 if (scratchc0 & A_f_c_data)
648 siop_sdp(&esiop_cmd->cmd_c);
649 else if ((dstat & DSTAT_DFE) == 0)
650 siop_clearfifo(&sc->sc_c);
651 bus_space_write_1(sc->sc_c.sc_rt,
652 sc->sc_c.sc_rh, SIOP_SCRATCHC,
653 scratchc0 & ~A_f_c_data);
654 esiop_table_sync(esiop_cmd,
655 BUS_DMASYNC_PREREAD |
656 BUS_DMASYNC_PREWRITE);
657 CALL_SCRIPT(Ent_msgin);
658 return 1;
659 }
660 printf("%s: unexpected phase mismatch %d\n",
661 sc->sc_c.sc_dev.dv_xname,
662 sstat1 & SSTAT1_PHASE_MASK);
663 } else {
664 printf("%s: phase mismatch without command\n",
665 sc->sc_c.sc_dev.dv_xname);
666 }
667 need_reset = 1;
668 }
669 if (sist & SIST0_PAR) {
670 /* parity error, reset */
671 if (esiop_cmd)
672 scsipi_printaddr(xs->xs_periph);
673 else
674 printf("%s:", sc->sc_c.sc_dev.dv_xname);
675 printf("parity error\n");
676 if (esiop_target)
677 esiop_target->target_c.flags &= ~TARF_DT;
678 goto reset;
679 }
680 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
681 /*
682 * selection timeout, assume there's no device here.
683 * We also have to update the ring pointer ourselves
684 */
685 slot = bus_space_read_1(sc->sc_c.sc_rt,
686 sc->sc_c.sc_rh, SIOP_SCRATCHE);
687 esiop_script_sync(sc,
688 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
689 #ifdef SIOP_DEBUG_SCHED
690 printf("sel timeout target %d, slot %d\n", target, slot);
691 #endif
692 /*
693 * mark this slot as free, and advance to next slot
694 */
695 esiop_script_write(sc,
696 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
697 A_f_cmd_free);
698 addr = bus_space_read_4(sc->sc_c.sc_rt,
699 sc->sc_c.sc_rh, SIOP_SCRATCHD);
700 if (slot < (A_ncmd_slots - 1)) {
701 bus_space_write_1(sc->sc_c.sc_rt,
702 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
703 addr = addr + sizeof(struct esiop_slot);
704 } else {
705 bus_space_write_1(sc->sc_c.sc_rt,
706 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
707 addr = sc->sc_c.sc_scriptaddr +
708 sc->sc_shedoffset * sizeof(u_int32_t);
709 }
710 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
711 SIOP_SCRATCHD, addr);
712 esiop_script_sync(sc,
713 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
714 if (esiop_cmd) {
715 esiop_cmd->cmd_c.status = CMDST_DONE;
716 xs->error = XS_SELTIMEOUT;
717 freetarget = 1;
718 goto end_nodata;
719 } else {
720 printf("%s: selection timeout without "
721 "command, target %d (sdid 0x%x), "
722 "slot %d\n",
723 sc->sc_c.sc_dev.dv_xname, target,
724 bus_space_read_1(sc->sc_c.sc_rt,
725 sc->sc_c.sc_rh, SIOP_SDID), slot);
726 need_reset = 1;
727 }
728 }
729 if (sist & SIST0_UDC) {
730 /*
731 * unexpected disconnect. Usually the target signals
732 * a fatal condition this way. Attempt to get sense.
733 */
734 if (esiop_cmd) {
735 esiop_cmd->cmd_tables->status =
736 htole32(SCSI_CHECK);
737 goto end_nodata;
738 }
739 printf("%s: unexpected disconnect without "
740 "command\n", sc->sc_c.sc_dev.dv_xname);
741 goto reset;
742 }
743 if (sist & (SIST1_SBMC << 8)) {
744 /* SCSI bus mode change */
745 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
746 goto reset;
747 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
748 /*
749 * we have a script interrupt, it will
750 * restart the script.
751 */
752 goto scintr;
753 }
754 /*
755 * else we have to restart it ourselves, at the
756 * interrupted instruction.
757 */
758 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
759 SIOP_DSP,
760 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
761 SIOP_DSP) - 8);
762 return 1;
763 }
764 /* Else it's an unhandled exception (for now). */
765 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
766 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
767 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
768 SIOP_SSTAT1),
769 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
770 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
771 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
772 if (esiop_cmd) {
773 esiop_cmd->cmd_c.status = CMDST_DONE;
774 xs->error = XS_SELTIMEOUT;
775 goto end_nodata;
776 }
777 need_reset = 1;
778 }
779 if (need_reset) {
780 reset:
781 /* fatal error, reset the bus */
782 siop_resetbus(&sc->sc_c);
783 /* no table to flush here */
784 return 1;
785 }
786
787 scintr:
788 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
789 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
790 SIOP_DSPS);
791 #ifdef SIOP_DEBUG_INTR
792 printf("script interrupt 0x%x\n", irqcode);
793 #endif
794 /*
795 * a missing or inactive command is only valid for a
796 * reselect interrupt
797 */
798 if ((irqcode & 0x80) == 0) {
799 if (esiop_cmd == NULL) {
800 printf(
801 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
802 sc->sc_c.sc_dev.dv_xname, irqcode);
803 goto reset;
804 }
805 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
806 printf("%s: command with invalid status "
807 "(IRQ code 0x%x current status %d) !\n",
808 sc->sc_c.sc_dev.dv_xname,
809 irqcode, esiop_cmd->cmd_c.status);
810 xs = NULL;
811 }
812 }
813 switch(irqcode) {
814 case A_int_err:
815 printf("error, DSP=0x%x\n",
816 (int)(bus_space_read_4(sc->sc_c.sc_rt,
817 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
818 if (xs) {
819 xs->error = XS_SELTIMEOUT;
820 goto end_nodata;
821 } else {
822 goto reset;
823 }
824 case A_int_msgin:
825 {
826 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
827 sc->sc_c.sc_rh, SIOP_SFBR);
828 if (msgin == MSG_MESSAGE_REJECT) {
829 int msg, extmsg;
830 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
831 /*
832 * message was part of an identify +
833 * something else. Identify shouldn't
834 * have been rejected.
835 */
836 msg =
837 esiop_cmd->cmd_tables->msg_out[1];
838 extmsg =
839 esiop_cmd->cmd_tables->msg_out[3];
840 } else {
841 msg =
842 esiop_cmd->cmd_tables->msg_out[0];
843 extmsg =
844 esiop_cmd->cmd_tables->msg_out[2];
845 }
846 if (msg == MSG_MESSAGE_REJECT) {
847 /* MSG_REJECT for a MSG_REJECT !*/
848 if (xs)
849 scsipi_printaddr(xs->xs_periph);
850 else
851 printf("%s: ",
852 sc->sc_c.sc_dev.dv_xname);
853 printf("our reject message was "
854 "rejected\n");
855 goto reset;
856 }
857 if (msg == MSG_EXTENDED &&
858 extmsg == MSG_EXT_WDTR) {
859 /* WDTR rejected, initiate sync */
860 if ((esiop_target->target_c.flags &
861 TARF_SYNC) == 0) {
862 esiop_target->target_c.status =
863 TARST_OK;
864 siop_update_xfer_mode(&sc->sc_c,
865 target);
866 /* no table to flush here */
867 CALL_SCRIPT(Ent_msgin_ack);
868 return 1;
869 }
870 esiop_target->target_c.status =
871 TARST_SYNC_NEG;
872 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
873 sc->sc_c.st_minsync,
874 sc->sc_c.maxoff);
875 esiop_table_sync(esiop_cmd,
876 BUS_DMASYNC_PREREAD |
877 BUS_DMASYNC_PREWRITE);
878 CALL_SCRIPT(Ent_send_msgout);
879 return 1;
880 } else if (msg == MSG_EXTENDED &&
881 extmsg == MSG_EXT_SDTR) {
882 /* sync rejected */
883 esiop_target->target_c.offset = 0;
884 esiop_target->target_c.period = 0;
885 esiop_target->target_c.status =
886 TARST_OK;
887 siop_update_xfer_mode(&sc->sc_c,
888 target);
889 /* no table to flush here */
890 CALL_SCRIPT(Ent_msgin_ack);
891 return 1;
892 } else if (msg == MSG_EXTENDED &&
893 extmsg == MSG_EXT_PPR) {
894 /* PPR rejected */
895 esiop_target->target_c.offset = 0;
896 esiop_target->target_c.period = 0;
897 esiop_target->target_c.status =
898 TARST_OK;
899 siop_update_xfer_mode(&sc->sc_c,
900 target);
901 /* no table to flush here */
902 CALL_SCRIPT(Ent_msgin_ack);
903 return 1;
904 } else if (msg == MSG_SIMPLE_Q_TAG ||
905 msg == MSG_HEAD_OF_Q_TAG ||
906 msg == MSG_ORDERED_Q_TAG) {
907 if (esiop_handle_qtag_reject(
908 esiop_cmd) == -1)
909 goto reset;
910 CALL_SCRIPT(Ent_msgin_ack);
911 return 1;
912 }
913 if (xs)
914 scsipi_printaddr(xs->xs_periph);
915 else
916 printf("%s: ",
917 sc->sc_c.sc_dev.dv_xname);
918 if (msg == MSG_EXTENDED) {
919 printf("scsi message reject, extended "
920 "message sent was 0x%x\n", extmsg);
921 } else {
922 printf("scsi message reject, message "
923 "sent was 0x%x\n", msg);
924 }
925 /* no table to flush here */
926 CALL_SCRIPT(Ent_msgin_ack);
927 return 1;
928 }
929 if (xs)
930 scsipi_printaddr(xs->xs_periph);
931 else
932 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
933 printf("unhandled message 0x%x\n", msgin);
934 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
935 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
936 esiop_table_sync(esiop_cmd,
937 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
938 CALL_SCRIPT(Ent_send_msgout);
939 return 1;
940 }
941 case A_int_extmsgin:
942 #ifdef SIOP_DEBUG_INTR
943 printf("extended message: msg 0x%x len %d\n",
944 esiop_cmd->cmd_tables->msg_in[2],
945 esiop_cmd->cmd_tables->msg_in[1]);
946 #endif
947 if (esiop_cmd->cmd_tables->msg_in[1] >
948 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
949 printf("%s: extended message too big (%d)\n",
950 sc->sc_c.sc_dev.dv_xname,
951 esiop_cmd->cmd_tables->msg_in[1]);
952 esiop_cmd->cmd_tables->t_extmsgdata.count =
953 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
954 esiop_table_sync(esiop_cmd,
955 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
956 CALL_SCRIPT(Ent_get_extmsgdata);
957 return 1;
958 case A_int_extmsgdata:
959 #ifdef SIOP_DEBUG_INTR
960 {
961 int i;
962 printf("extended message: 0x%x, data:",
963 esiop_cmd->cmd_tables->msg_in[2]);
964 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
965 i++)
966 printf(" 0x%x",
967 esiop_cmd->cmd_tables->msg_in[i]);
968 printf("\n");
969 }
970 #endif
971 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
972 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
973 case SIOP_NEG_MSGOUT:
974 esiop_update_scntl3(sc,
975 esiop_cmd->cmd_c.siop_target);
976 esiop_table_sync(esiop_cmd,
977 BUS_DMASYNC_PREREAD |
978 BUS_DMASYNC_PREWRITE);
979 CALL_SCRIPT(Ent_send_msgout);
980 return 1;
981 case SIOP_NEG_ACK:
982 esiop_update_scntl3(sc,
983 esiop_cmd->cmd_c.siop_target);
984 CALL_SCRIPT(Ent_msgin_ack);
985 return 1;
986 default:
987 panic("invalid retval from "
988 "siop_wdtr_neg()");
989 }
990 return 1;
991 }
992 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
993 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
994 case SIOP_NEG_MSGOUT:
995 esiop_update_scntl3(sc,
996 esiop_cmd->cmd_c.siop_target);
997 esiop_table_sync(esiop_cmd,
998 BUS_DMASYNC_PREREAD |
999 BUS_DMASYNC_PREWRITE);
1000 CALL_SCRIPT(Ent_send_msgout);
1001 return 1;
1002 case SIOP_NEG_ACK:
1003 esiop_update_scntl3(sc,
1004 esiop_cmd->cmd_c.siop_target);
1005 CALL_SCRIPT(Ent_msgin_ack);
1006 return 1;
1007 default:
1008 panic("invalid retval from "
1009 "siop_wdtr_neg()");
1010 }
1011 return 1;
1012 }
1013 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1014 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1015 case SIOP_NEG_MSGOUT:
1016 esiop_update_scntl3(sc,
1017 esiop_cmd->cmd_c.siop_target);
1018 esiop_table_sync(esiop_cmd,
1019 BUS_DMASYNC_PREREAD |
1020 BUS_DMASYNC_PREWRITE);
1021 CALL_SCRIPT(Ent_send_msgout);
1022 return 1;
1023 case SIOP_NEG_ACK:
1024 esiop_update_scntl3(sc,
1025 esiop_cmd->cmd_c.siop_target);
1026 CALL_SCRIPT(Ent_msgin_ack);
1027 return 1;
1028 default:
1029 panic("invalid retval from "
1030 "siop_wdtr_neg()");
1031 }
1032 return 1;
1033 }
1034 /* send a message reject */
1035 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1036 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1037 esiop_table_sync(esiop_cmd,
1038 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1039 CALL_SCRIPT(Ent_send_msgout);
1040 return 1;
1041 case A_int_disc:
1042 INCSTAT(esiop_stat_intr_sdp);
1043 offset = bus_space_read_1(sc->sc_c.sc_rt,
1044 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1045 #ifdef SIOP_DEBUG_DR
1046 printf("disconnect offset %d\n", offset);
1047 #endif
1048 if (offset > SIOP_NSG) {
1049 printf("%s: bad offset for disconnect (%d)\n",
1050 sc->sc_c.sc_dev.dv_xname, offset);
1051 goto reset;
1052 }
1053 /*
1054 * offset == SIOP_NSG may be a valid condition if
1055 * we get a save data pointer when the xfer is done.
1056 * Don't call memmove() in this case.
1057 */
1058 if (offset < SIOP_NSG) {
1059 int i;
1060 /*
1061 * adjust xs->resid for already-transferred
1062 * data
1063 */
1064 for (i = 0; i < offset; i++)
1065 xs->resid -= le32toh(
1066 esiop_cmd->cmd_tables->data[i].count
1067 );
1068 memmove(&esiop_cmd->cmd_tables->data[0],
1069 &esiop_cmd->cmd_tables->data[offset],
1070 (SIOP_NSG - offset) * sizeof(scr_table_t));
1071 esiop_table_sync(esiop_cmd,
1072 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1073 }
1074 CALL_SCRIPT(Ent_script_sched);
1075 return 1;
1076 case A_int_resfail:
1077 printf("reselect failed\n");
1078 CALL_SCRIPT(Ent_script_sched);
1079 return 1;
1080 case A_int_done:
1081 if (xs == NULL) {
1082 printf("%s: done without command\n",
1083 sc->sc_c.sc_dev.dv_xname);
1084 CALL_SCRIPT(Ent_script_sched);
1085 return 1;
1086 }
1087 #ifdef SIOP_DEBUG_INTR
1088 printf("done, DSA=0x%lx target id 0x%x last msg "
1089 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1090 le32toh(esiop_cmd->cmd_tables->id),
1091 esiop_cmd->cmd_tables->msg_in[0],
1092 le32toh(esiop_cmd->cmd_tables->status));
1093 #endif
1094 INCSTAT(esiop_stat_intr_done);
1095 esiop_cmd->cmd_c.status = CMDST_DONE;
1096 goto end;
1097 default:
1098 printf("unknown irqcode %x\n", irqcode);
1099 if (xs) {
1100 xs->error = XS_SELTIMEOUT;
1101 goto end_nodata;
1102 }
1103 goto reset;
1104 }
1105 return 1;
1106 }
1107 /* We just shouldn't get here */
1108 panic("esiop_intr: I shouldn't be here !");
1109
1110 end_nodata:
1111 /*
1112 * no data was transferred, and the script didn't update tlq with the
1113 * current offset (which is still 0)
1114 */
1115 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq = 0;
1116 end:
1117 /*
1118 * restart the script now if the command completed properly.
1119 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up the
1120 * queue
1121 */
1122 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1123 #ifdef SIOP_DEBUG_INTR
1124 printf("esiop_intr end: status %d\n", xs->status);
1125 #endif
1126 if (tag >= 0)
1127 esiop_lun->tactive[tag] = NULL;
1128 else
1129 esiop_lun->active = NULL;
1130 esiop_scsicmd_end(esiop_cmd);
1131 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1132 esiop_del_dev(sc, target, lun);
1133 CALL_SCRIPT(Ent_script_sched);
1134 return 1;
1135 }
1136
1137 void
1138 esiop_scsicmd_end(esiop_cmd)
1139 struct esiop_cmd *esiop_cmd;
1140 {
1141 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1142 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1143 int offset, i;
1144
1145 /* scratcha was saved in tlq by script. fetch offset from it */
1146 offset =
1147 (le32toh(((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq) >> 8)
1148 & 0xff;
1149 /*
1150 * update resid. If we completed an xfer with
1151 * some data transfers, offset will be at least 1.
1152 * If it's 0 then either no data was transferred at
1153 * all, or resid was already adjusted by a save
1154 * data pointer or a phase mismatch.
1155 */
1156 for (i = 0; i < offset; i++)
1157 xs->resid -= le32toh(esiop_cmd->cmd_tables->data[i].count);
1158
1159 switch(xs->status) {
1160 case SCSI_OK:
1161 xs->error = XS_NOERROR;
1162 break;
1163 case SCSI_BUSY:
1164 xs->error = XS_BUSY;
1165 break;
1166 case SCSI_CHECK:
1167 xs->error = XS_BUSY;
1168 /* remove commands in the queue and scheduler */
1169 esiop_unqueue(sc, xs->xs_periph->periph_target,
1170 xs->xs_periph->periph_lun);
1171 break;
1172 case SCSI_QUEUE_FULL:
1173 INCSTAT(esiop_stat_intr_qfull);
1174 #ifdef SIOP_DEBUG
1175 printf("%s:%d:%d: queue full (tag %d)\n",
1176 sc->sc_c.sc_dev.dv_xname,
1177 xs->xs_periph->periph_target,
1178 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1179 #endif
1180 xs->error = XS_BUSY;
1181 break;
1182 case SCSI_SIOP_NOCHECK:
1183 /*
1184 * don't check status, xs->error is already valid
1185 */
1186 break;
1187 case SCSI_SIOP_NOSTATUS:
1188 /*
1189 * the status byte was not updated, cmd was
1190 * aborted
1191 */
1192 xs->error = XS_SELTIMEOUT;
1193 break;
1194 default:
1195 scsipi_printaddr(xs->xs_periph);
1196 printf("invalid status code %d\n", xs->status);
1197 xs->error = XS_DRIVER_STUFFUP;
1198 }
1199 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1200 bus_dmamap_sync(sc->sc_c.sc_dmat,
1201 esiop_cmd->cmd_c.dmamap_data, 0,
1202 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1203 (xs->xs_control & XS_CTL_DATA_IN) ?
1204 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1205 bus_dmamap_unload(sc->sc_c.sc_dmat,
1206 esiop_cmd->cmd_c.dmamap_data);
1207 }
1208 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1209 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1210 esiop_cmd->cmd_c.status = CMDST_FREE;
1211 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1212 scsipi_done (xs);
1213 }
1214
1215 void
1216 esiop_checkdone(sc)
1217 struct esiop_softc *sc;
1218 {
1219 int target, lun, tag;
1220 struct esiop_target *esiop_target;
1221 struct esiop_lun *esiop_lun;
1222 struct esiop_cmd *esiop_cmd;
1223 u_int32_t slot;
1224 int needsync = 0;
1225 int status;
1226 u_int32_t sem;
1227
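/*
 * The semaphore word is shared with the SCRIPT: A_sem_done signals pending
 * entries in the done ring, A_sem_start signals that the script has started
 * at least one command since the host last cleared the bit.
 */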
1228 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1229 sem = esiop_script_read(sc, sc->sc_semoffset);
1230 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1231 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1232 /*
1233 * at least one command has been started,
1234 * so we should have free slots now
1235 */
1236 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1237 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1238 }
1239 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1240
1241 if ((sem & A_sem_done) == 0) {
1242 /* no pending done command */
1243 return;
1244 }
1245
1246 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1247 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
1248 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
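/*
 * Walk the done ring: each non-zero slot holds the T/L/Q word of a
 * completed command, written by the SCRIPT. Clear the slot and finish
 * the corresponding esiop_cmd.
 */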
1249 next:
1250 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1251 if (needsync)
1252 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1253 sc->sc_done_offset,
1254 A_ndone_slots * sizeof(u_int32_t),
1255 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1256 return;
1257 }
1258
1259 needsync = 1;
1260
1261 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1262 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1263 sc->sc_currdoneslot += 1;
1264 if (sc->sc_currdoneslot == A_ndone_slots)
1265 sc->sc_currdoneslot = 0;
1266
1267 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1268 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1269 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1270
1271 esiop_target = (target >= 0) ?
1272 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1273 if (esiop_target == NULL) {
1274 printf("esiop_target (target %d) not valid\n", target);
1275 goto next;
1276 }
1277 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1278 if (esiop_lun == NULL) {
1279 printf("esiop_lun (target %d lun %d) not valid\n",
1280 target, lun);
1281 goto next;
1282 }
1283 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1284 if (esiop_cmd == NULL) {
1285 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1286 target, lun, tag);
1287 goto next;
1288 }
1289
1290 esiop_table_sync(esiop_cmd,
1291 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1292 status = le32toh(esiop_cmd->cmd_tables->status);
1293 #ifdef DIAGNOSTIC
1294 if (status != SCSI_OK) {
1295 printf("command for T/L/Q %d/%d/%d status %d\n",
1296 target, lun, tag, status);
1297 goto next;
1298 }
1299
1300 #endif
1301 /* Ok, this command has been handled */
1302 esiop_cmd->cmd_c.xs->status = status;
1303 if (tag >= 0)
1304 esiop_lun->tactive[tag] = NULL;
1305 else
1306 esiop_lun->active = NULL;
1307 esiop_scsicmd_end(esiop_cmd);
1308 goto next;
1309 }
1310
1311 void
1312 esiop_unqueue(sc, target, lun)
1313 struct esiop_softc *sc;
1314 int target;
1315 int lun;
1316 {
1317 int slot, tag;
1318 u_int32_t slotdsa;
1319 struct esiop_cmd *esiop_cmd;
1320 struct esiop_lun *esiop_lun =
1321 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1322
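/*
 * Cancel commands for this target/lun that are still sitting in the
 * scheduler ring: mark their slot with A_f_cmd_ignore so the SCRIPT skips
 * them, and hand them back to scsipi with XS_REQUEUE.
 */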
1323 /* first make sure to read valid data */
1324 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1325
1326 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1327 /* look for commands in the scheduler, not yet started */
1328 if (esiop_lun->tactive[tag] == NULL)
1329 continue;
1330 esiop_cmd = esiop_lun->tactive[tag];
1331 for (slot = 0; slot < A_ncmd_slots; slot++) {
1332 slotdsa = esiop_script_read(sc,
1333 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1334 /* if the slot has any flag, it won't match the DSA */
1335 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1336 /* Mark this slot as ignore */
1337 esiop_script_write(sc,
1338 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1339 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1340 /* ask to requeue */
1341 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1342 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1343 esiop_lun->tactive[tag] = NULL;
1344 esiop_scsicmd_end(esiop_cmd);
1345 break;
1346 }
1347 }
1348 }
1349 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1350 }
1351
1352 /*
1353 * handle a rejected queue tag message: the command will run untagged,
1354 * so we have to adjust the reselect DSA table.
1355 */
1356
1357
1358 int
1359 esiop_handle_qtag_reject(esiop_cmd)
1360 struct esiop_cmd *esiop_cmd;
1361 {
1362 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1363 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1364 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1365 int tag = esiop_cmd->cmd_tables->msg_out[2];
1366 struct esiop_target *esiop_target =
1367 (struct esiop_target*)sc->sc_c.targets[target];
1368 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1369
1370 #ifdef SIOP_DEBUG
1371 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1372 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1373 esiop_cmd->cmd_c.status);
1374 #endif
1375
1376 if (esiop_lun->active != NULL) {
1377 printf("%s: untagged command already running for target %d "
1378 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1379 target, lun, esiop_lun->active->cmd_c.status);
1380 return -1;
1381 }
1382 /* clear tag slot */
1383 esiop_lun->tactive[tag] = NULL;
1384 /* add command to non-tagged slot */
1385 esiop_lun->active = esiop_cmd;
1386 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1387 esiop_cmd->cmd_c.tag = -1;
1388 /* update DSA table */
1389 esiop_script_write(sc, esiop_target->lun_table_offset +
1390 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1391 esiop_cmd->cmd_c.dsa);
1392 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1393 return 0;
1394 }
1395
1396 /*
1397 * handle a bus reset: reset the chip, unqueue all active commands, free all
1398 * target structs and report lossage to the upper layer.
1399 * As the upper layer may requeue immediately, we have to first store
1400 * all active commands in a temporary queue.
1401 */
1402 void
1403 esiop_handle_reset(sc)
1404 struct esiop_softc *sc;
1405 {
1406 struct esiop_cmd *esiop_cmd;
1407 struct esiop_lun *esiop_lun;
1408 int target, lun, tag;
1409 /*
1410 * scsi bus reset. reset the chip and restart
1411 * the queue. Need to clean up all active commands
1412 */
1413 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1414 /* stop, reset and restart the chip */
1415 esiop_reset(sc);
1416
1417 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1418 /* chip has been reset, all slots are free now */
1419 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1420 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1421 }
1422 /*
1423 * Process all commands: first the completed commands, then the commands
1424 * still being executed
1425 */
1426 esiop_checkdone(sc);
1427 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1428 target++) {
1429 struct esiop_target *esiop_target =
1430 (struct esiop_target *)sc->sc_c.targets[target];
1431 if (esiop_target == NULL)
1432 continue;
1433 for (lun = 0; lun < 8; lun++) {
1434 esiop_lun = esiop_target->esiop_lun[lun];
1435 if (esiop_lun == NULL)
1436 continue;
1437 for (tag = -1; tag <
1438 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1439 ESIOP_NTAG : 0);
1440 tag++) {
1441 if (tag >= 0)
1442 esiop_cmd = esiop_lun->tactive[tag];
1443 else
1444 esiop_cmd = esiop_lun->active;
1445 if (esiop_cmd == NULL)
1446 continue;
1447 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1448 printf("command with tag id %d reset\n", tag);
1449 esiop_cmd->cmd_c.xs->error =
1450 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1451 XS_TIMEOUT : XS_RESET;
1452 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1453 if (tag >= 0)
1454 esiop_lun->tactive[tag] = NULL;
1455 else
1456 esiop_lun->active = NULL;
1457 esiop_cmd->cmd_c.status = CMDST_DONE;
1458 esiop_scsicmd_end(esiop_cmd);
1459 }
1460 }
1461 sc->sc_c.targets[target]->status = TARST_ASYNC;
1462 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1463 sc->sc_c.targets[target]->period =
1464 sc->sc_c.targets[target]->offset = 0;
1465 siop_update_xfer_mode(&sc->sc_c, target);
1466 }
1467
1468 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1469 }
1470
1471 void
1472 esiop_scsipi_request(chan, req, arg)
1473 struct scsipi_channel *chan;
1474 scsipi_adapter_req_t req;
1475 void *arg;
1476 {
1477 struct scsipi_xfer *xs;
1478 struct scsipi_periph *periph;
1479 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1480 struct esiop_cmd *esiop_cmd;
1481 struct esiop_target *esiop_target;
1482 int s, error, i;
1483 int target;
1484 int lun;
1485
1486 switch (req) {
1487 case ADAPTER_REQ_RUN_XFER:
1488 xs = arg;
1489 periph = xs->xs_periph;
1490 target = periph->periph_target;
1491 lun = periph->periph_lun;
1492
1493 s = splbio();
1494 /*
1495 * first check if there are pending completed commands.
1496 * this can free up some resources (in the rings, for example).
1497 * we have to guard against recursion here.
1498 */
1499 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1500 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1501 esiop_checkdone(sc);
1502 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1503 }
1504 #ifdef SIOP_DEBUG_SCHED
1505 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1506 xs->xs_tag_type, xs->xs_tag_id);
1507 #endif
1508 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1509 if (esiop_cmd == NULL) {
1510 xs->error = XS_RESOURCE_SHORTAGE;
1511 scsipi_done(xs);
1512 splx(s);
1513 return;
1514 }
1515 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1516 #ifdef DIAGNOSTIC
1517 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1518 panic("siop_scsicmd: new cmd not free");
1519 #endif
1520 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1521 if (esiop_target == NULL) {
1522 #ifdef SIOP_DEBUG
1523 printf("%s: alloc siop_target for target %d\n",
1524 sc->sc_c.sc_dev.dv_xname, target);
1525 #endif
1526 sc->sc_c.targets[target] =
1527 malloc(sizeof(struct esiop_target),
1528 M_DEVBUF, M_NOWAIT | M_ZERO);
1529 if (sc->sc_c.targets[target] == NULL) {
1530 printf("%s: can't malloc memory for "
1531 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1532 target);
1533 xs->error = XS_RESOURCE_SHORTAGE;
1534 scsipi_done(xs);
1535 splx(s);
1536 return;
1537 }
1538 esiop_target =
1539 (struct esiop_target*)sc->sc_c.targets[target];
1540 esiop_target->target_c.status = TARST_PROBING;
1541 esiop_target->target_c.flags = 0;
1542 esiop_target->target_c.id =
1543 sc->sc_c.clock_div << 24; /* scntl3 */
1544 esiop_target->target_c.id |= target << 16; /* id */
1545 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1546
1547 for (i=0; i < 8; i++)
1548 esiop_target->esiop_lun[i] = NULL;
1549 esiop_target_register(sc, target);
1550 }
1551 if (esiop_target->esiop_lun[lun] == NULL) {
1552 esiop_target->esiop_lun[lun] =
1553 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1554 M_NOWAIT|M_ZERO);
1555 if (esiop_target->esiop_lun[lun] == NULL) {
1556 printf("%s: can't alloc esiop_lun for "
1557 "target %d lun %d\n",
1558 sc->sc_c.sc_dev.dv_xname, target, lun);
1559 xs->error = XS_RESOURCE_SHORTAGE;
1560 scsipi_done(xs);
1561 splx(s);
1562 return;
1563 }
1564 }
1565 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1566 esiop_cmd->cmd_c.xs = xs;
1567 esiop_cmd->cmd_c.flags = 0;
1568 esiop_cmd->cmd_c.status = CMDST_READY;
1569
1570 /* load the DMA maps */
1571 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1572 esiop_cmd->cmd_c.dmamap_cmd,
1573 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1574 if (error) {
1575 printf("%s: unable to load cmd DMA map: %d\n",
1576 sc->sc_c.sc_dev.dv_xname, error);
1577 xs->error = XS_DRIVER_STUFFUP;
1578 scsipi_done(xs);
1579 splx(s);
1580 return;
1581 }
1582 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1583 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1584 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1585 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1586 ((xs->xs_control & XS_CTL_DATA_IN) ?
1587 BUS_DMA_READ : BUS_DMA_WRITE));
1588 if (error) {
1589 printf("%s: unable to load cmd DMA map: %d",
1590 sc->sc_c.sc_dev.dv_xname, error);
1591 xs->error = XS_DRIVER_STUFFUP;
1592 scsipi_done(xs);
1593 bus_dmamap_unload(sc->sc_c.sc_dmat,
1594 esiop_cmd->cmd_c.dmamap_cmd);
1595 splx(s);
1596 return;
1597 }
1598 bus_dmamap_sync(sc->sc_c.sc_dmat,
1599 esiop_cmd->cmd_c.dmamap_data, 0,
1600 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1601 (xs->xs_control & XS_CTL_DATA_IN) ?
1602 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1603 }
1604 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1605 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1606 BUS_DMASYNC_PREWRITE);
1607
1608 if (xs->xs_tag_type)
1609 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1610 else
1611 esiop_cmd->cmd_c.tag = -1;
1612 siop_setuptables(&esiop_cmd->cmd_c);
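/*
 * tlq mirrors the SCRATCHC T/L/Q word decoded in esiop_intr(): flags in
 * byte 0, target in byte 1, lun in byte 2 and, for tagged commands, the
 * tag in byte 3.
 */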
1613 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1614 htole32(A_f_c_target | A_f_c_lun);
1615 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1616 htole32((target << 8) | (lun << 16));
1617 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1618 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1619 htole32(A_f_c_tag);
1620 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1621 htole32(esiop_cmd->cmd_c.tag << 24);
1622 }
1623
1624 esiop_table_sync(esiop_cmd,
1625 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1626 esiop_start(sc, esiop_cmd);
1627 if (xs->xs_control & XS_CTL_POLL) {
1628 /* poll for command completion */
1629 while ((xs->xs_status & XS_STS_DONE) == 0) {
1630 delay(1000);
1631 esiop_intr(sc);
1632 }
1633 }
1634 splx(s);
1635 return;
1636
1637 case ADAPTER_REQ_GROW_RESOURCES:
1638 #ifdef SIOP_DEBUG
1639 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1640 sc->sc_c.sc_adapt.adapt_openings);
1641 #endif
1642 esiop_morecbd(sc);
1643 return;
1644
1645 case ADAPTER_REQ_SET_XFER_MODE:
1646 {
1647 struct scsipi_xfer_mode *xm = arg;
1648 if (sc->sc_c.targets[xm->xm_target] == NULL)
1649 return;
1650 s = splbio();
1651 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1652 (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1653 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1654 /* allocate tag tables for this device */
1655 for (lun = 0;
1656 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1657 if (scsipi_lookup_periph(chan,
1658 xm->xm_target, lun) != NULL)
1659 esiop_add_dev(sc, xm->xm_target, lun);
1660 }
1661 }
1662 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1663 (sc->sc_c.features & SF_BUS_WIDE))
1664 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1665 if (xm->xm_mode & PERIPH_CAP_SYNC)
1666 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1667 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1668 (sc->sc_c.features & SF_CHIP_DT))
1669 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1670 if ((xm->xm_mode &
1671 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1672 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1673 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1674
1675 splx(s);
1676 }
1677 }
1678 }
1679
1680 static void
1681 esiop_start(sc, esiop_cmd)
1682 struct esiop_softc *sc;
1683 struct esiop_cmd *esiop_cmd;
1684 {
1685 struct esiop_lun *esiop_lun;
1686 struct esiop_target *esiop_target;
1687 int timeout;
1688 int target, lun, slot;
1689
1690 /*
1691 * first make sure to read valid data
1692 */
1693 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1694
1695 /*
1696 * We use a circular queue here. sc->sc_currschedslot points to a
1697 * free slot, unless we have filled the queue. Check this.
1698 */
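/*
 * A slot's first word is either the DSA of a queued command or a flag
 * word; A_f_cmd_free marks the slot as available.
 */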
1699 slot = sc->sc_currschedslot;
1700 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1701 A_f_cmd_free) == 0) {
1702 /*
1703 * no free slot left, no need to continue: freeze the queue
1704 * and requeue this command.
1705 */
1706 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1707 sc->sc_flags |= SCF_CHAN_NOSLOT;
1708 esiop_script_sync(sc,
1709 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1710 esiop_script_write(sc, sc->sc_semoffset,
1711 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1712 esiop_script_sync(sc,
1713 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1714 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1715 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1716 esiop_scsicmd_end(esiop_cmd);
1717 return;
1718 }
1719 /* OK, we can use this slot */
1720
1721 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1722 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1723 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1724 esiop_lun = esiop_target->esiop_lun[lun];
1725 /* if non-tagged command active, panic: this shouldn't happen */
1726 if (esiop_lun->active != NULL) {
1727 panic("esiop_start: tagged cmd while untagged running");
1728 }
1729 #ifdef DIAGNOSTIC
1730 /* sanity check the tag if needed */
1731 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1732 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1733 panic("esiop_start: tag not free");
1734 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1735 esiop_cmd->cmd_c.tag < 0) {
1736 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1737 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1738 panic("esiop_start: invalid tag id");
1739 }
1740 }
1741 #endif
1742 #ifdef SIOP_DEBUG_SCHED
1743 printf("using slot %d for DSA 0x%lx\n", slot,
1744 (u_long)esiop_cmd->cmd_c.dsa);
1745 #endif
1746 /* mark command as active */
1747 if (esiop_cmd->cmd_c.status == CMDST_READY)
1748 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1749 else
1750 panic("esiop_start: bad status");
1751 /* DSA table for reselect */
1752 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1753 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1754 /* DSA table for reselect */
1755 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1756 htole32(esiop_cmd->cmd_c.dsa);
1757 bus_dmamap_sync(sc->sc_c.sc_dmat,
1758 esiop_lun->lun_tagtbl->tblblk->blkmap,
1759 esiop_lun->lun_tagtbl->tbl_offset,
1760 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1761 } else {
1762 esiop_lun->active = esiop_cmd;
1763 esiop_script_write(sc,
1764 esiop_target->lun_table_offset +
1765 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1766 esiop_cmd->cmd_c.dsa);
1767 }
1768 /* scheduler slot: DSA */
1769 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1770 esiop_cmd->cmd_c.dsa);
1771 /* make sure SCRIPT processor will read valid data */
1772 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1773 /* handle timeout */
1774 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1775 		/* start expire timer */
1776 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1777 if (timeout == 0)
1778 timeout = 1;
1779 		callout_reset(&esiop_cmd->cmd_c.xs->xs_callout,
1780 timeout, esiop_timeout, esiop_cmd);
1781 }
1782 /* Signal script it has some work to do */
1783 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1784 SIOP_ISTAT, ISTAT_SIGP);
1785 /* update the current slot, and wait for IRQ */
1786 sc->sc_currschedslot++;
1787 if (sc->sc_currschedslot >= A_ncmd_slots)
1788 sc->sc_currschedslot = 0;
1789 return;
1790 }
1791
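     /*
      * Command timeout handler: report the timed-out command, reset the
      * SCSI bus and flag the command with CMDFL_TIMEOUT so the reset
      * interrupt completes it.
      */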
1792 void
1793 esiop_timeout(v)
1794 void *v;
1795 {
1796 struct esiop_cmd *esiop_cmd = v;
1797 struct esiop_softc *sc =
1798 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1799 int s;
1800 #ifdef SIOP_DEBUG
1801 int slot, slotdsa;
1802 #endif
1803
1804 s = splbio();
1805 esiop_table_sync(esiop_cmd,
1806 BUS_DMASYNC_POSTREAD |
1807 BUS_DMASYNC_POSTWRITE);
1808 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1809 #ifdef SIOP_DEBUG
1810 printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1811
1812 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1813 for (slot = 0; slot < A_ncmd_slots; slot++) {
1814 slotdsa = esiop_script_read(sc,
1815 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1816 if ((slotdsa & 0x01) == 0)
1817 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1818 }
1819 printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1820 printf("DSP 0x%lx DSA 0x%x\n",
1821 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1822 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1823 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1824 printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1825 #else
1826 printf("command timeout, CDB: ");
1827 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1828 printf("\n");
1829 #endif
1830 /* reset the scsi bus */
1831 siop_resetbus(&sc->sc_c);
1832
1833 /* deactivate callout */
1834 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1835 /*
1836 	 * mark the command as timed out and just return;
1837 	 * the bus reset will generate an interrupt,
1838 	 * which will be handled in esiop_intr()
1839 */
1840 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1841 splx(s);
1842 return;
1843
1844 }
1845
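     /*
      * Dump the on-board SCRIPT for debugging: two words per instruction,
      * plus a third word for memory-to-memory move opcodes.
      */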
1846 void
1847 esiop_dump_script(sc)
1848 struct esiop_softc *sc;
1849 {
1850 int i;
1851 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1852 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1853 le32toh(sc->sc_c.sc_script[i]),
1854 le32toh(sc->sc_c.sc_script[i+1]));
1855 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1856 0xc0000000) {
1857 i++;
1858 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1859 }
1860 printf("\n");
1861 }
1862 }
1863
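     /*
      * Allocate a new page of command descriptors (struct esiop_xfer) and
      * their DMA maps, link them on the free list and grow the adapter's
      * openings accordingly.
      */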
1864 void
1865 esiop_morecbd(sc)
1866 struct esiop_softc *sc;
1867 {
1868 int error, i, s;
1869 bus_dma_segment_t seg;
1870 int rseg;
1871 struct esiop_cbd *newcbd;
1872 struct esiop_xfer *xfer;
1873 bus_addr_t dsa;
1874
1875 /* allocate a new list head */
1876 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1877 if (newcbd == NULL) {
1878 printf("%s: can't allocate memory for command descriptors "
1879 "head\n", sc->sc_c.sc_dev.dv_xname);
1880 return;
1881 }
1882
1883 /* allocate cmd list */
1884 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1885 M_DEVBUF, M_NOWAIT|M_ZERO);
1886 if (newcbd->cmds == NULL) {
1887 printf("%s: can't allocate memory for command descriptors\n",
1888 sc->sc_c.sc_dev.dv_xname);
1889 goto bad3;
1890 }
1891 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1892 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1893 if (error) {
1894 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1895 sc->sc_c.sc_dev.dv_xname, error);
1896 goto bad2;
1897 }
1898 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1899 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1900 if (error) {
1901 printf("%s: unable to map cbd DMA memory, error = %d\n",
1902 sc->sc_c.sc_dev.dv_xname, error);
1903 		goto bad1;
1904 }
1905 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1906 BUS_DMA_NOWAIT, &newcbd->xferdma);
1907 if (error) {
1908 printf("%s: unable to create cbd DMA map, error = %d\n",
1909 sc->sc_c.sc_dev.dv_xname, error);
1910 goto bad1;
1911 }
1912 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1913 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1914 if (error) {
1915 printf("%s: unable to load cbd DMA map, error = %d\n",
1916 sc->sc_c.sc_dev.dv_xname, error);
1917 goto bad0;
1918 }
1919 #ifdef DEBUG
1920 	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1921 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1922 #endif
1923 for (i = 0; i < SIOP_NCMDPB; i++) {
1924 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1925 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1926 &newcbd->cmds[i].cmd_c.dmamap_data);
1927 if (error) {
1928 printf("%s: unable to create data DMA map for cbd: "
1929 "error %d\n",
1930 sc->sc_c.sc_dev.dv_xname, error);
1931 goto bad0;
1932 }
1933 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1934 sizeof(struct scsipi_generic), 1,
1935 sizeof(struct scsipi_generic), 0,
1936 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1937 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1938 if (error) {
1939 			printf("%s: unable to create cmd DMA map for cbd: "
1940 			    "error %d\n", sc->sc_c.sc_dev.dv_xname, error);
1941 goto bad0;
1942 }
1943 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1944 newcbd->cmds[i].esiop_cbdp = newcbd;
1945 xfer = &newcbd->xfers[i];
1946 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1947 memset(newcbd->cmds[i].cmd_tables, 0,
1948 sizeof(struct esiop_xfer));
1949 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1950 i * sizeof(struct esiop_xfer);
1951 newcbd->cmds[i].cmd_c.dsa = dsa;
1952 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
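     		/*
     		 * set up the fixed pointers into this descriptor: msg_out
     		 * sits at the start (so its address is the DSA itself),
     		 * msg_in, the extended message header/data areas and the
     		 * status byte follow inside the siop_common_xfer.
     		 */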
1953 		xfer->siop_tables.t_msgout.count = htole32(1);
1954 		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1955 		xfer->siop_tables.t_msgin.count = htole32(1);
1956 		xfer->siop_tables.t_msgin.addr = htole32(dsa +
1957 		    offsetof(struct siop_common_xfer, msg_in));
1958 		xfer->siop_tables.t_extmsgin.count = htole32(2);
1959 		xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1960 		    offsetof(struct siop_common_xfer, msg_in) + 1);
1961 		xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1962 		    offsetof(struct siop_common_xfer, msg_in) + 3);
1963 		xfer->siop_tables.t_status.count = htole32(1);
1964 xfer->siop_tables.t_status.addr = htole32(dsa +
1965 offsetof(struct siop_common_xfer, status));
1966
1967 s = splbio();
1968 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1969 splx(s);
1970 #ifdef SIOP_DEBUG
1971 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1972 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1973 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1974 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1975 #endif
1976 }
1977 s = splbio();
1978 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1979 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1980 splx(s);
1981 return;
1982 bad0:
1983 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1984 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1985 bad1:
1986 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1987 bad2:
1988 free(newcbd->cmds, M_DEVBUF);
1989 bad3:
1990 free(newcbd, M_DEVBUF);
1991 return;
1992 }
1993
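     /*
      * Allocate a new page of tag DSA tables (ESIOP_NTPB tables of
      * ESIOP_NTAG entries each) and link them on the free_tagtbl list
      * for esiop_add_dev() to hand out.
      */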
1994 void
1995 esiop_moretagtbl(sc)
1996 struct esiop_softc *sc;
1997 {
1998 int error, i, j, s;
1999 bus_dma_segment_t seg;
2000 int rseg;
2001 struct esiop_dsatblblk *newtblblk;
2002 struct esiop_dsatbl *newtbls;
2003 u_int32_t *tbls;
2004
2005 /* allocate a new list head */
2006 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2007 M_DEVBUF, M_NOWAIT|M_ZERO);
2008 if (newtblblk == NULL) {
2009 printf("%s: can't allocate memory for tag DSA table block\n",
2010 sc->sc_c.sc_dev.dv_xname);
2011 return;
2012 }
2013
2014 /* allocate tbl list */
2015 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2016 M_DEVBUF, M_NOWAIT|M_ZERO);
2017 if (newtbls == NULL) {
2018 		printf("%s: can't allocate memory for tag DSA tables\n",
2019 sc->sc_c.sc_dev.dv_xname);
2020 goto bad3;
2021 }
2022 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2023 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2024 if (error) {
2025 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
2026 sc->sc_c.sc_dev.dv_xname, error);
2027 goto bad2;
2028 }
2029 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2030 	    (caddr_t *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2031 if (error) {
2032 printf("%s: unable to map tbls DMA memory, error = %d\n",
2033 sc->sc_c.sc_dev.dv_xname, error);
2034 		goto bad1;
2035 }
2036 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2037 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2038 if (error) {
2039 printf("%s: unable to create tbl DMA map, error = %d\n",
2040 sc->sc_c.sc_dev.dv_xname, error);
2041 goto bad1;
2042 }
2043 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2044 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2045 if (error) {
2046 printf("%s: unable to load tbl DMA map, error = %d\n",
2047 sc->sc_c.sc_dev.dv_xname, error);
2048 goto bad0;
2049 }
2050 #ifdef DEBUG
2051 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2052 sc->sc_c.sc_dev.dv_xname,
2053 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2054 #endif
2055 for (i = 0; i < ESIOP_NTPB; i++) {
2056 newtbls[i].tblblk = newtblblk;
2057 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2058 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2059 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2060 newtbls[i].tbl_offset;
2061 for (j = 0; j < ESIOP_NTAG; j++)
2062 newtbls[i].tbl[j] = j;
2063 s = splbio();
2064 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2065 splx(s);
2066 }
2067 s = splbio();
2068 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2069 splx(s);
2070 return;
2071 bad0:
2072 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2073 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2074 bad1:
2075 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2076 bad2:
2077 free(newtbls, M_DEVBUF);
2078 bad3:
2079 free(newtblblk, M_DEVBUF);
2080 return;
2081 }
2082
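     /*
      * Transfer parameters changed: push the updated ID word (which encodes
      * scntl3/sxfer) back into the script-resident lun table so the SCRIPT
      * selects this target with the new values.
      */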
2083 void
2084 esiop_update_scntl3(sc, _siop_target)
2085 struct esiop_softc *sc;
2086 struct siop_common_target *_siop_target;
2087 {
2088 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2089 esiop_script_write(sc, esiop_target->lun_table_offset,
2090 esiop_target->target_c.id);
2091 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2092 }
2093
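     /*
      * A new lun is being attached: give it a tag DSA table so tagged
      * commands can be reselected, or fall back to untagged operation if
      * no table can be allocated.
      */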
2094 void
2095 esiop_add_dev(sc, target, lun)
2096 struct esiop_softc *sc;
2097 int target;
2098 int lun;
2099 {
2100 struct esiop_target *esiop_target =
2101 (struct esiop_target *)sc->sc_c.targets[target];
2102 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2103
2104 /* we need a tag DSA table */
2105 	esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2106 	if (esiop_lun->lun_tagtbl == NULL) {
2107 		esiop_moretagtbl(sc);
2108 		esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2109 if (esiop_lun->lun_tagtbl == NULL) {
2110 /* no resources, run untagged */
2111 esiop_target->target_c.flags &= ~TARF_TAG;
2112 return;
2113 }
2114 }
2115 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2116 /* Update LUN DSA table */
2117 esiop_script_write(sc, esiop_target->lun_table_offset +
2118 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2119 esiop_lun->lun_tagtbl->tbl_dsa);
2120 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2121 }
2122
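     /*
      * A lun went away: free its per-lun software state.
      */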
2123 void
2124 esiop_del_dev(sc, target, lun)
2125 struct esiop_softc *sc;
2126 int target;
2127 int lun;
2128 {
2129 struct esiop_target *esiop_target;
2130 #ifdef SIOP_DEBUG
2131 printf("%s:%d:%d: free lun sw entry\n",
2132 sc->sc_c.sc_dev.dv_xname, target, lun);
2133 #endif
2134 if (sc->sc_c.targets[target] == NULL)
2135 return;
2136 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2137 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2138 esiop_target->esiop_lun[lun] = NULL;
2139 }
2140
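     /*
      * Carve a lun DSA table for this target out of the script's free
      * space, record it in the global target table, and register any
      * per-lun tag DSA tables already allocated.
      */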
2141 void
2142 esiop_target_register(sc, target)
2143 struct esiop_softc *sc;
2144 u_int32_t target;
2145 {
2146 struct esiop_target *esiop_target =
2147 (struct esiop_target *)sc->sc_c.targets[target];
2148 struct esiop_lun *esiop_lun;
2149 int lun;
2150
2151 /* get a DSA table for this target */
2152 esiop_target->lun_table_offset = sc->sc_free_offset;
2153 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2154 #ifdef SIOP_DEBUG
2155 printf("%s: lun table for target %d offset %d free offset %d\n",
2156 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2157 sc->sc_free_offset);
2158 #endif
2159 	/* the first entry (32 bits) is the target ID (for select) */
2160 esiop_script_write(sc, esiop_target->lun_table_offset,
2161 esiop_target->target_c.id);
2162 /* Record this table in the target DSA table */
2163 esiop_script_write(sc,
2164 sc->sc_target_table_offset + target,
2165 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2166 sc->sc_c.sc_scriptaddr);
2167 /* if we have a tag table, register it */
2168 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2169 esiop_lun = esiop_target->esiop_lun[lun];
2170 if (esiop_lun == NULL)
2171 continue;
2172 if (esiop_lun->lun_tagtbl)
2173 esiop_script_write(sc, esiop_target->lun_table_offset +
2174 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2175 esiop_lun->lun_tagtbl->tbl_dsa);
2176 }
2177 esiop_script_sync(sc,
2178 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2179 }
2180
2181 #ifdef SIOP_STATS
2182 void
2183 esiop_printstats()
2184 {
2185 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2186 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2187 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2188 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2189 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2190 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2191 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2192 }
2193 #endif
2194