1 /* $NetBSD: esiop.c,v 1.32 2005/02/04 02:10:36 perry Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.32 2005/02/04 02:10:36 perry Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <machine/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 #undef SIOP_DEBUG
68 #undef SIOP_DEBUG_DR
69 #undef SIOP_DEBUG_INTR
70 #undef SIOP_DEBUG_SCHED
71 #undef DUMP_SCRIPT
72
73 #define SIOP_STATS
74
75 #ifndef SIOP_DEFAULT_TARGET
76 #define SIOP_DEFAULT_TARGET 7
77 #endif
78
79 /* number of cmd descriptors per block */
80 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
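/*
 * i.e. how many struct esiop_xfer fit in one page: esiop_morecbd()
 * allocates command descriptors one PAGE_SIZE DMA block at a time.
 */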
81
82 void esiop_reset(struct esiop_softc *);
83 void esiop_checkdone(struct esiop_softc *);
84 void esiop_handle_reset(struct esiop_softc *);
85 void esiop_scsicmd_end(struct esiop_cmd *, int);
86 void esiop_unqueue(struct esiop_softc *, int, int);
87 int esiop_handle_qtag_reject(struct esiop_cmd *);
88 static void esiop_start(struct esiop_softc *, struct esiop_cmd *);
89 void esiop_timeout(void *);
90 void esiop_scsipi_request(struct scsipi_channel *,
91 scsipi_adapter_req_t, void *);
92 void esiop_dump_script(struct esiop_softc *);
93 void esiop_morecbd(struct esiop_softc *);
94 void esiop_moretagtbl(struct esiop_softc *);
95 void siop_add_reselsw(struct esiop_softc *, int);
96 void esiop_target_register(struct esiop_softc *, u_int32_t);
97
98 void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
99
100 #ifdef SIOP_STATS
101 static int esiop_stat_intr = 0;
102 static int esiop_stat_intr_shortxfer = 0;
103 static int esiop_stat_intr_sdp = 0;
104 static int esiop_stat_intr_done = 0;
105 static int esiop_stat_intr_xferdisc = 0;
106 static int esiop_stat_intr_lunresel = 0;
107 static int esiop_stat_intr_qfull = 0;
108 void esiop_printstats(void);
109 #define INCSTAT(x) x++
110 #else
111 #define INCSTAT(x)
112 #endif
113
114 static __inline__ void esiop_script_sync(struct esiop_softc *, int);
115 static __inline__ void
116 esiop_script_sync(sc, ops)
117 struct esiop_softc *sc;
118 int ops;
119 {
120 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
121 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
122 PAGE_SIZE, ops);
123 }
124
125 static __inline__ u_int32_t esiop_script_read(struct esiop_softc *, u_int);
126 static __inline__ u_int32_t
127 esiop_script_read(sc, offset)
128 struct esiop_softc *sc;
129 u_int offset;
130 {
131 if (sc->sc_c.features & SF_CHIP_RAM) {
132 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
133 offset * 4);
134 } else {
135 return le32toh(sc->sc_c.sc_script[offset]);
136 }
137 }
138
139 static __inline__ void esiop_script_write(struct esiop_softc *, u_int,
140 u_int32_t);
141 static __inline__ void
142 esiop_script_write(sc, offset, val)
143 struct esiop_softc *sc;
144 u_int offset;
145 u_int32_t val;
146 {
147 if (sc->sc_c.features & SF_CHIP_RAM) {
148 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
149 offset * 4, val);
150 } else {
151 sc->sc_c.sc_script[offset] = htole32(val);
152 }
153 }
154
155 void
156 esiop_attach(sc)
157 struct esiop_softc *sc;
158 {
159 struct esiop_dsatbl *tagtbl_donering;
160
161 if (siop_common_attach(&sc->sc_c) != 0 )
162 return;
163
164 TAILQ_INIT(&sc->free_list);
165 TAILQ_INIT(&sc->cmds);
166 TAILQ_INIT(&sc->free_tagtbl);
167 TAILQ_INIT(&sc->tag_tblblk);
168 sc->sc_currschedslot = 0;
169 #ifdef SIOP_DEBUG
170 aprint_debug("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
171 sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
172 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
173 #endif
174
175 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
176 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
177
178 /*
179 * get space for the CMD done slot. For this we use a tag table entry.
180 * It's the same size and allows us to not waste 3/4 of a page
181 */
182 #ifdef DIAGNOSTIC
183 if (ESIOP_NTAG != A_ndone_slots) {
184 aprint_error("%s: size of tag DSA table different from the done"
185 " ring\n", sc->sc_c.sc_dev.dv_xname);
186 return;
187 }
188 #endif
189 esiop_moretagtbl(sc);
190 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
191 if (tagtbl_donering == NULL) {
192 aprint_error("%s: no memory for command done ring\n",
193 sc->sc_c.sc_dev.dv_xname);
194 return;
195 }
196 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
197 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
198 sc->sc_done_offset = tagtbl_donering->tbl_offset;
199 sc->sc_done_slot = &tagtbl_donering->tbl[0];
200
201 /* Do a bus reset, so that devices fall back to narrow/async */
202 siop_resetbus(&sc->sc_c);
203 /*
204 * esiop_reset() will reset the chip, thus clearing pending interrupts
205 */
206 esiop_reset(sc);
207 #ifdef DUMP_SCRIPT
208 esiop_dump_script(sc);
209 #endif
210
211 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
212 }
213
214 void
215 esiop_reset(sc)
216 struct esiop_softc *sc;
217 {
218 int i, j;
219 u_int32_t addr;
220 u_int32_t msgin_addr, sem_addr;
221
222 siop_common_reset(&sc->sc_c);
223
224 /*
225 * we copy the script at the beginning of RAM. Then there are 4 bytes
226 * for the message-in buffer, and 4 bytes for the semaphore
227 */
228 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
229 msgin_addr =
230 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
231 sc->sc_free_offset += 1;
232 sc->sc_semoffset = sc->sc_free_offset;
233 sem_addr =
234 sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
235 sc->sc_free_offset += 1;
236 /* then we have the scheduler ring */
237 sc->sc_shedoffset = sc->sc_free_offset;
238 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
239 /* then the targets DSA table */
240 sc->sc_target_table_offset = sc->sc_free_offset;
241 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
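/*
 * Resulting layout of the script memory (on-chip RAM, or the DMA-able
 * script page): the script itself, one u_int32_t for message in, one
 * u_int32_t for the semaphore, the scheduler ring (A_ncmd_slots slots
 * of CMD_SLOTSIZE words each), and finally the per-target DSA table
 * (one word per target).
 */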
242 /* copy and patch the script */
243 if (sc->sc_c.features & SF_CHIP_RAM) {
244 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
245 esiop_script,
246 sizeof(esiop_script) / sizeof(esiop_script[0]));
247 for (j = 0; j <
248 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
249 j++) {
250 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
251 E_tlq_offset_Used[j] * 4,
252 sizeof(struct siop_common_xfer));
253 }
254 for (j = 0; j <
255 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
256 j++) {
257 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
258 E_abs_msgin2_Used[j] * 4, msgin_addr);
259 }
260 for (j = 0; j <
261 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
262 j++) {
263 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
264 E_abs_sem_Used[j] * 4, sem_addr);
265 }
266
267 if (sc->sc_c.features & SF_CHIP_LED0) {
268 bus_space_write_region_4(sc->sc_c.sc_ramt,
269 sc->sc_c.sc_ramh,
270 Ent_led_on1, esiop_led_on,
271 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
272 bus_space_write_region_4(sc->sc_c.sc_ramt,
273 sc->sc_c.sc_ramh,
274 Ent_led_on2, esiop_led_on,
275 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
276 bus_space_write_region_4(sc->sc_c.sc_ramt,
277 sc->sc_c.sc_ramh,
278 Ent_led_off, esiop_led_off,
279 sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
280 }
281 } else {
282 for (j = 0;
283 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
284 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
285 }
286 for (j = 0; j <
287 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
288 j++) {
289 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
290 htole32(sizeof(struct siop_common_xfer));
291 }
292 for (j = 0; j <
293 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
294 j++) {
295 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
296 htole32(msgin_addr);
297 }
298 for (j = 0; j <
299 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
300 j++) {
301 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
302 htole32(sem_addr);
303 }
304
305 if (sc->sc_c.features & SF_CHIP_LED0) {
306 for (j = 0; j < (sizeof(esiop_led_on) /
307 sizeof(esiop_led_on[0])); j++)
308 sc->sc_c.sc_script[
309 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
310 ] = htole32(esiop_led_on[j]);
311 for (j = 0; j < (sizeof(esiop_led_on) /
312 sizeof(esiop_led_on[0])); j++)
313 sc->sc_c.sc_script[
314 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
315 ] = htole32(esiop_led_on[j]);
316 for (j = 0; j < (sizeof(esiop_led_off) /
317 sizeof(esiop_led_off[0])); j++)
318 sc->sc_c.sc_script[
319 Ent_led_off / sizeof(esiop_led_off[0]) + j
320 ] = htole32(esiop_led_off[j]);
321 }
322 }
323 /* get base of scheduler ring */
324 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
325 /* init scheduler */
326 for (i = 0; i < A_ncmd_slots; i++) {
327 esiop_script_write(sc,
328 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
329 }
330 sc->sc_currschedslot = 0;
331 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
332 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
333 /*
334 * 0x78000000 is a 'move data8 to reg'. data8 is the second
335 * octet, reg offset is the third.
336 */
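/*
 * The cmdr0-3 entries below are patched into such instructions so that
 * they rebuild the scheduler ring base address, byte by byte, in
 * registers 0x64-0x67 (i.e. the scratch register initialized via
 * SIOP_SCRATCHD above); doner0-3 further down do the same for the done
 * ring base (SIOP_SCRATCHF). The script presumably uses these to
 * rewind its ring pointers when they wrap.
 */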
337 esiop_script_write(sc, Ent_cmdr0 / 4,
338 0x78640000 | ((addr & 0x000000ff) << 8));
339 esiop_script_write(sc, Ent_cmdr1 / 4,
340 0x78650000 | ((addr & 0x0000ff00) ));
341 esiop_script_write(sc, Ent_cmdr2 / 4,
342 0x78660000 | ((addr & 0x00ff0000) >> 8));
343 esiop_script_write(sc, Ent_cmdr3 / 4,
344 0x78670000 | ((addr & 0xff000000) >> 16));
345 /* done ring */
346 for (i = 0; i < A_ndone_slots; i++)
347 sc->sc_done_slot[i] = 0;
348 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
349 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
350 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
351 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
352 sc->sc_currdoneslot = 0;
353 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
354 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
355 esiop_script_write(sc, Ent_doner0 / 4,
356 0x786c0000 | ((addr & 0x000000ff) << 8));
357 esiop_script_write(sc, Ent_doner1 / 4,
358 0x786d0000 | ((addr & 0x0000ff00) ));
359 esiop_script_write(sc, Ent_doner2 / 4,
360 0x786e0000 | ((addr & 0x00ff0000) >> 8));
361 esiop_script_write(sc, Ent_doner3 / 4,
362 0x786f0000 | ((addr & 0xff000000) >> 16));
363
364 /* set flags */
365 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
366 /* write pointer of base of target DSA table */
367 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
368 sc->sc_c.sc_scriptaddr;
369 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
370 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
371 ((addr & 0x000000ff) << 8));
372 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
373 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
374 ((addr & 0x0000ff00) ));
375 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
376 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
377 ((addr & 0x00ff0000) >> 8));
378 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
379 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
380 ((addr & 0xff000000) >> 16));
381 #ifdef SIOP_DEBUG
382 printf("%s: target table offset %d free offset %d\n",
383 sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
384 sc->sc_free_offset);
385 #endif
386
387 /* register existing targets */
388 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
389 if (sc->sc_c.targets[i])
390 esiop_target_register(sc, i);
391 }
392 /* start script */
393 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
394 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
395 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
396 }
397 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
398 sc->sc_c.sc_scriptaddr + Ent_reselect);
399 }
400
401 #if 0
402 #define CALL_SCRIPT(ent) do {\
403 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
404 esiop_cmd->cmd_c.dsa, \
405 sc->sc_c.sc_scriptaddr + ent); \
406 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
407 } while (0)
408 #else
409 #define CALL_SCRIPT(ent) do {\
410 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
411 } while (0)
412 #endif
413
414 int
415 esiop_intr(v)
416 void *v;
417 {
418 struct esiop_softc *sc = v;
419 struct esiop_target *esiop_target;
420 struct esiop_cmd *esiop_cmd;
421 struct esiop_lun *esiop_lun;
422 struct scsipi_xfer *xs;
423 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
424 u_int32_t irqcode;
425 int need_reset = 0;
426 int offset, target, lun, tag;
427 u_int32_t tflags;
428 u_int32_t addr;
429 int freetarget = 0;
430 int slot;
431 int retval = 0;
432
433 again:
434 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
435 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
436 return retval;
437 }
438 retval = 1;
439 INCSTAT(esiop_stat_intr);
440 esiop_checkdone(sc);
441 if (istat & ISTAT_INTF) {
442 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
443 SIOP_ISTAT, ISTAT_INTF);
444 goto again;
445 }
446
447 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
448 (ISTAT_DIP | ISTAT_ABRT)) {
449 /* clear abort */
450 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
451 SIOP_ISTAT, 0);
452 }
453
454 /* get CMD from T/L/Q */
455 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
456 SIOP_SCRATCHC);
457 #ifdef SIOP_DEBUG_INTR
458 printf("interrupt, istat=0x%x tflags=0x%x "
459 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
460 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
461 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
462 SIOP_DSP) -
463 sc->sc_c.sc_scriptaddr));
464 #endif
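/*
 * The script returns the current command's T/L/Q in SCRATCHC:
 * A_f_c_* validity flags in the low byte, target id in bits 8-15,
 * lun in bits 16-23 and tag in bits 24-31.
 */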
465 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
466 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
467 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
468 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
469 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
470
471 if (target >= 0 && lun >= 0) {
472 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
473 if (esiop_target == NULL) {
474 printf("esiop_target (target %d) not valid\n", target);
475 goto none;
476 }
477 esiop_lun = esiop_target->esiop_lun[lun];
478 if (esiop_lun == NULL) {
479 printf("esiop_lun (target %d lun %d) not valid\n",
480 target, lun);
481 goto none;
482 }
483 esiop_cmd =
484 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
485 if (esiop_cmd == NULL) {
486 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
487 target, lun, tag);
488 goto none;
489 }
490 xs = esiop_cmd->cmd_c.xs;
491 #ifdef DIAGNOSTIC
492 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
493 printf("esiop_cmd (target %d lun %d) "
494 "not active (%d)\n", target, lun,
495 esiop_cmd->cmd_c.status);
496 goto none;
497 }
498 #endif
499 esiop_table_sync(esiop_cmd,
500 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
501 } else {
502 none:
503 xs = NULL;
504 esiop_target = NULL;
505 esiop_lun = NULL;
506 esiop_cmd = NULL;
507 }
508 if (istat & ISTAT_DIP) {
509 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
510 SIOP_DSTAT);
511 if (dstat & DSTAT_ABRT) {
512 /* was probably generated by a bus reset IOCTL */
513 if ((dstat & DSTAT_DFE) == 0)
514 siop_clearfifo(&sc->sc_c);
515 goto reset;
516 }
517 if (dstat & DSTAT_SSI) {
518 printf("single step dsp 0x%08x dsa 0x08%x\n",
519 (int)(bus_space_read_4(sc->sc_c.sc_rt,
520 sc->sc_c.sc_rh, SIOP_DSP) -
521 sc->sc_c.sc_scriptaddr),
522 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
523 SIOP_DSA));
524 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
525 (istat & ISTAT_SIP) == 0) {
526 bus_space_write_1(sc->sc_c.sc_rt,
527 sc->sc_c.sc_rh, SIOP_DCNTL,
528 bus_space_read_1(sc->sc_c.sc_rt,
529 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
530 }
531 return 1;
532 }
533
534 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
535 printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
536 if (dstat & DSTAT_IID)
537 printf(" Illegal instruction");
538 if (dstat & DSTAT_BF)
539 printf(" bus fault");
540 if (dstat & DSTAT_MDPE)
541 printf(" parity");
542 if (dstat & DSTAT_DFE)
543 printf(" DMA fifo empty");
544 else
545 siop_clearfifo(&sc->sc_c);
546 printf(", DSP=0x%x DSA=0x%x: ",
547 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
548 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
549 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
550 if (esiop_cmd)
551 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
552 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
553 le32toh(esiop_cmd->cmd_tables->status));
554 else
555 printf(" current T/L/Q invalid\n");
556 need_reset = 1;
557 }
558 }
559 if (istat & ISTAT_SIP) {
560 if (istat & ISTAT_DIP)
561 delay(10);
562 /*
563 * Can't read sist0 & sist1 independently, or we would have to
564 * insert a delay between the two reads
565 */
566 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
567 SIOP_SIST0);
568 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
569 SIOP_SSTAT1);
570 #ifdef SIOP_DEBUG_INTR
571 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
572 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
573 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
574 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
575 SIOP_DSP) -
576 sc->sc_c.sc_scriptaddr));
577 #endif
578 if (sist & SIST0_RST) {
579 esiop_handle_reset(sc);
580 /* no table to flush here */
581 return 1;
582 }
583 if (sist & SIST0_SGE) {
584 if (esiop_cmd)
585 scsipi_printaddr(xs->xs_periph);
586 else
587 printf("%s:", sc->sc_c.sc_dev.dv_xname);
588 printf("scsi gross error\n");
589 if (esiop_target)
590 esiop_target->target_c.flags &= ~TARF_DT;
591 #ifdef DEBUG
592 printf("DSA=0x%x DSP=0x%lx\n",
593 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
594 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
595 SIOP_DSP) -
596 sc->sc_c.sc_scriptaddr));
597 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
598 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SDID),
599 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL3),
600 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SXFER),
601 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL4));
602
603 #endif
604 goto reset;
605 }
606 if ((sist & SIST0_MA) && need_reset == 0) {
607 if (esiop_cmd) {
608 int scratchc0;
609 dstat = bus_space_read_1(sc->sc_c.sc_rt,
610 sc->sc_c.sc_rh, SIOP_DSTAT);
611 /*
612 * first restore DSA, in case we were in a S/G
613 * operation.
614 */
615 bus_space_write_4(sc->sc_c.sc_rt,
616 sc->sc_c.sc_rh,
617 SIOP_DSA, esiop_cmd->cmd_c.dsa);
618 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
619 sc->sc_c.sc_rh, SIOP_SCRATCHC);
620 switch (sstat1 & SSTAT1_PHASE_MASK) {
621 case SSTAT1_PHASE_STATUS:
622 /*
623 * previous phase may be aborted for any reason
624 * (for example, the target has less data to
625 * transfer than requested). Compute resid and
626 * just go to status, the command should
627 * terminate.
628 */
629 INCSTAT(esiop_stat_intr_shortxfer);
630 if (scratchc0 & A_f_c_data)
631 siop_ma(&esiop_cmd->cmd_c);
632 else if ((dstat & DSTAT_DFE) == 0)
633 siop_clearfifo(&sc->sc_c);
634 CALL_SCRIPT(Ent_status);
635 return 1;
636 case SSTAT1_PHASE_MSGIN:
637 /*
638 * the target may be ready to disconnect.
639 * Compute the resid, which will be used later
640 * if a save data pointer is needed.
641 */
642 INCSTAT(esiop_stat_intr_xferdisc);
643 if (scratchc0 & A_f_c_data)
644 siop_ma(&esiop_cmd->cmd_c);
645 else if ((dstat & DSTAT_DFE) == 0)
646 siop_clearfifo(&sc->sc_c);
647 bus_space_write_1(sc->sc_c.sc_rt,
648 sc->sc_c.sc_rh, SIOP_SCRATCHC,
649 scratchc0 & ~A_f_c_data);
650 CALL_SCRIPT(Ent_msgin);
651 return 1;
652 }
653 printf("%s: unexpected phase mismatch %d\n",
654 sc->sc_c.sc_dev.dv_xname,
655 sstat1 & SSTAT1_PHASE_MASK);
656 } else {
657 printf("%s: phase mismatch without command\n",
658 sc->sc_c.sc_dev.dv_xname);
659 }
660 need_reset = 1;
661 }
662 if (sist & SIST0_PAR) {
663 /* parity error, reset */
664 if (esiop_cmd)
665 scsipi_printaddr(xs->xs_periph);
666 else
667 printf("%s:", sc->sc_c.sc_dev.dv_xname);
668 printf("parity error\n");
669 if (esiop_target)
670 esiop_target->target_c.flags &= ~TARF_DT;
671 goto reset;
672 }
673 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
674 /*
675 * selection timeout, assume there's no device here.
676 * We also have to update the ring pointer ourselves.
677 */
678 slot = bus_space_read_1(sc->sc_c.sc_rt,
679 sc->sc_c.sc_rh, SIOP_SCRATCHE);
680 esiop_script_sync(sc,
681 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
682 #ifdef SIOP_DEBUG_SCHED
683 printf("sel timeout target %d, slot %d\n", target, slot);
684 #endif
685 /*
686 * mark this slot as free, and advance to next slot
687 */
688 esiop_script_write(sc,
689 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
690 A_f_cmd_free);
691 addr = bus_space_read_4(sc->sc_c.sc_rt,
692 sc->sc_c.sc_rh, SIOP_SCRATCHD);
693 if (slot < (A_ncmd_slots - 1)) {
694 bus_space_write_1(sc->sc_c.sc_rt,
695 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
696 addr = addr + sizeof(struct esiop_slot);
697 } else {
698 bus_space_write_1(sc->sc_c.sc_rt,
699 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
700 addr = sc->sc_c.sc_scriptaddr +
701 sc->sc_shedoffset * sizeof(u_int32_t);
702 }
703 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
704 SIOP_SCRATCHD, addr);
705 esiop_script_sync(sc,
706 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
707 if (esiop_cmd) {
708 esiop_cmd->cmd_c.status = CMDST_DONE;
709 xs->error = XS_SELTIMEOUT;
710 freetarget = 1;
711 goto end;
712 } else {
713 printf("%s: selection timeout without "
714 "command, target %d (sdid 0x%x), "
715 "slot %d\n",
716 sc->sc_c.sc_dev.dv_xname, target,
717 bus_space_read_1(sc->sc_c.sc_rt,
718 sc->sc_c.sc_rh, SIOP_SDID), slot);
719 need_reset = 1;
720 }
721 }
722 if (sist & SIST0_UDC) {
723 /*
724 * unexpected disconnect. Usually the target signals
725 * a fatal condition this way. Attempt to get sense.
726 */
727 if (esiop_cmd) {
728 esiop_cmd->cmd_tables->status =
729 htole32(SCSI_CHECK);
730 goto end;
731 }
732 printf("%s: unexpected disconnect without "
733 "command\n", sc->sc_c.sc_dev.dv_xname);
734 goto reset;
735 }
736 if (sist & (SIST1_SBMC << 8)) {
737 /* SCSI bus mode change */
738 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
739 goto reset;
740 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
741 /*
742 * we have a script interrupt, it will
743 * restart the script.
744 */
745 goto scintr;
746 }
747 /*
748 * else we have to restart it ourselves, at the
749 * interrupted instruction.
750 */
751 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
752 SIOP_DSP,
753 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
754 SIOP_DSP) - 8);
755 return 1;
756 }
757 /* Else it's an unhandled exception (for now). */
758 printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
759 "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
760 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
761 SIOP_SSTAT1),
762 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
763 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
764 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
765 if (esiop_cmd) {
766 esiop_cmd->cmd_c.status = CMDST_DONE;
767 xs->error = XS_SELTIMEOUT;
768 goto end;
769 }
770 need_reset = 1;
771 }
772 if (need_reset) {
773 reset:
774 /* fatal error, reset the bus */
775 siop_resetbus(&sc->sc_c);
776 /* no table to flush here */
777 return 1;
778 }
779
780 scintr:
781 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
782 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
783 SIOP_DSPS);
784 #ifdef SIOP_DEBUG_INTR
785 printf("script interrupt 0x%x\n", irqcode);
786 #endif
787 /*
788 * having no command, or an inactive command, is only valid for a
789 * reselect interrupt
790 */
791 if ((irqcode & 0x80) == 0) {
792 if (esiop_cmd == NULL) {
793 printf(
794 "%s: script interrupt (0x%x) with invalid DSA !!!\n",
795 sc->sc_c.sc_dev.dv_xname, irqcode);
796 goto reset;
797 }
798 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
799 printf("%s: command with invalid status "
800 "(IRQ code 0x%x current status %d) !\n",
801 sc->sc_c.sc_dev.dv_xname,
802 irqcode, esiop_cmd->cmd_c.status);
803 xs = NULL;
804 }
805 }
806 switch(irqcode) {
807 case A_int_err:
808 printf("error, DSP=0x%x\n",
809 (int)(bus_space_read_4(sc->sc_c.sc_rt,
810 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
811 if (xs) {
812 xs->error = XS_SELTIMEOUT;
813 goto end;
814 } else {
815 goto reset;
816 }
817 case A_int_msgin:
818 {
819 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
820 sc->sc_c.sc_rh, SIOP_SFBR);
821 if (msgin == MSG_MESSAGE_REJECT) {
822 int msg, extmsg;
823 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
824 /*
825 * message was part of an identify +
826 * something else. Identify shouldn't
827 * have been rejected.
828 */
829 msg =
830 esiop_cmd->cmd_tables->msg_out[1];
831 extmsg =
832 esiop_cmd->cmd_tables->msg_out[3];
833 } else {
834 msg =
835 esiop_cmd->cmd_tables->msg_out[0];
836 extmsg =
837 esiop_cmd->cmd_tables->msg_out[2];
838 }
839 if (msg == MSG_MESSAGE_REJECT) {
840 /* MSG_REJECT for a MSG_REJECT !*/
841 if (xs)
842 scsipi_printaddr(xs->xs_periph);
843 else
844 printf("%s: ",
845 sc->sc_c.sc_dev.dv_xname);
846 printf("our reject message was "
847 "rejected\n");
848 goto reset;
849 }
850 if (msg == MSG_EXTENDED &&
851 extmsg == MSG_EXT_WDTR) {
852 /* WDTR rejected, initiate sync */
853 if ((esiop_target->target_c.flags &
854 TARF_SYNC) == 0) {
855 esiop_target->target_c.status =
856 TARST_OK;
857 siop_update_xfer_mode(&sc->sc_c,
858 target);
859 /* no table to flush here */
860 CALL_SCRIPT(Ent_msgin_ack);
861 return 1;
862 }
863 esiop_target->target_c.status =
864 TARST_SYNC_NEG;
865 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
866 sc->sc_c.st_minsync,
867 sc->sc_c.maxoff);
868 esiop_table_sync(esiop_cmd,
869 BUS_DMASYNC_PREREAD |
870 BUS_DMASYNC_PREWRITE);
871 CALL_SCRIPT(Ent_send_msgout);
872 return 1;
873 } else if (msg == MSG_EXTENDED &&
874 extmsg == MSG_EXT_SDTR) {
875 /* sync rejected */
876 esiop_target->target_c.offset = 0;
877 esiop_target->target_c.period = 0;
878 esiop_target->target_c.status =
879 TARST_OK;
880 siop_update_xfer_mode(&sc->sc_c,
881 target);
882 /* no table to flush here */
883 CALL_SCRIPT(Ent_msgin_ack);
884 return 1;
885 } else if (msg == MSG_EXTENDED &&
886 extmsg == MSG_EXT_PPR) {
887 /* PPR rejected */
888 esiop_target->target_c.offset = 0;
889 esiop_target->target_c.period = 0;
890 esiop_target->target_c.status =
891 TARST_OK;
892 siop_update_xfer_mode(&sc->sc_c,
893 target);
894 /* no table to flush here */
895 CALL_SCRIPT(Ent_msgin_ack);
896 return 1;
897 } else if (msg == MSG_SIMPLE_Q_TAG ||
898 msg == MSG_HEAD_OF_Q_TAG ||
899 msg == MSG_ORDERED_Q_TAG) {
900 if (esiop_handle_qtag_reject(
901 esiop_cmd) == -1)
902 goto reset;
903 CALL_SCRIPT(Ent_msgin_ack);
904 return 1;
905 }
906 if (xs)
907 scsipi_printaddr(xs->xs_periph);
908 else
909 printf("%s: ",
910 sc->sc_c.sc_dev.dv_xname);
911 if (msg == MSG_EXTENDED) {
912 printf("scsi message reject, extended "
913 "message sent was 0x%x\n", extmsg);
914 } else {
915 printf("scsi message reject, message "
916 "sent was 0x%x\n", msg);
917 }
918 /* no table to flush here */
919 CALL_SCRIPT(Ent_msgin_ack);
920 return 1;
921 }
922 if (msgin == MSG_IGN_WIDE_RESIDUE) {
923 /* use the extmsgdata table to get the second byte */
924 esiop_cmd->cmd_tables->t_extmsgdata.count =
925 htole32(1);
926 esiop_table_sync(esiop_cmd,
927 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
928 CALL_SCRIPT(Ent_get_extmsgdata);
929 return 1;
930 }
931 if (xs)
932 scsipi_printaddr(xs->xs_periph);
933 else
934 printf("%s: ", sc->sc_c.sc_dev.dv_xname);
935 printf("unhandled message 0x%x\n", msgin);
936 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
937 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
938 esiop_table_sync(esiop_cmd,
939 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
940 CALL_SCRIPT(Ent_send_msgout);
941 return 1;
942 }
943 case A_int_extmsgin:
944 #ifdef SIOP_DEBUG_INTR
945 printf("extended message: msg 0x%x len %d\n",
946 esiop_cmd->cmd_tables->msg_in[2],
947 esiop_cmd->cmd_tables->msg_in[1]);
948 #endif
949 if (esiop_cmd->cmd_tables->msg_in[1] >
950 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
951 printf("%s: extended message too big (%d)\n",
952 sc->sc_c.sc_dev.dv_xname,
953 esiop_cmd->cmd_tables->msg_in[1]);
954 esiop_cmd->cmd_tables->t_extmsgdata.count =
955 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
956 esiop_table_sync(esiop_cmd,
957 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
958 CALL_SCRIPT(Ent_get_extmsgdata);
959 return 1;
960 case A_int_extmsgdata:
961 #ifdef SIOP_DEBUG_INTR
962 {
963 int i;
964 printf("extended message: 0x%x, data:",
965 esiop_cmd->cmd_tables->msg_in[2]);
966 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
967 i++)
968 printf(" 0x%x",
969 esiop_cmd->cmd_tables->msg_in[i]);
970 printf("\n");
971 }
972 #endif
973 if (esiop_cmd->cmd_tables->msg_in[0] ==
974 MSG_IGN_WIDE_RESIDUE) {
975 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
976 if (esiop_cmd->cmd_tables->msg_in[3] != 1)
977 printf("MSG_IGN_WIDE_RESIDUE: "
978 "bad len %d\n",
979 esiop_cmd->cmd_tables->msg_in[3]);
980 switch (siop_iwr(&esiop_cmd->cmd_c)) {
981 case SIOP_NEG_MSGOUT:
982 esiop_table_sync(esiop_cmd,
983 BUS_DMASYNC_PREREAD |
984 BUS_DMASYNC_PREWRITE);
985 CALL_SCRIPT(Ent_send_msgout);
986 return 1;
987 case SIOP_NEG_ACK:
988 CALL_SCRIPT(Ent_msgin_ack);
989 return 1;
990 default:
991 panic("invalid retval from "
992 "siop_iwr()");
993 }
994 return 1;
995 }
996 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
997 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
998 case SIOP_NEG_MSGOUT:
999 esiop_update_scntl3(sc,
1000 esiop_cmd->cmd_c.siop_target);
1001 esiop_table_sync(esiop_cmd,
1002 BUS_DMASYNC_PREREAD |
1003 BUS_DMASYNC_PREWRITE);
1004 CALL_SCRIPT(Ent_send_msgout);
1005 return 1;
1006 case SIOP_NEG_ACK:
1007 esiop_update_scntl3(sc,
1008 esiop_cmd->cmd_c.siop_target);
1009 CALL_SCRIPT(Ent_msgin_ack);
1010 return 1;
1011 default:
1012 panic("invalid retval from "
1013 "siop_ppr_neg()");
1014 }
1015 return 1;
1016 }
1017 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1018 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1019 case SIOP_NEG_MSGOUT:
1020 esiop_update_scntl3(sc,
1021 esiop_cmd->cmd_c.siop_target);
1022 esiop_table_sync(esiop_cmd,
1023 BUS_DMASYNC_PREREAD |
1024 BUS_DMASYNC_PREWRITE);
1025 CALL_SCRIPT(Ent_send_msgout);
1026 return 1;
1027 case SIOP_NEG_ACK:
1028 esiop_update_scntl3(sc,
1029 esiop_cmd->cmd_c.siop_target);
1030 CALL_SCRIPT(Ent_msgin_ack);
1031 return 1;
1032 default:
1033 panic("invalid retval from "
1034 "siop_wdtr_neg()");
1035 }
1036 return 1;
1037 }
1038 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1039 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1040 case SIOP_NEG_MSGOUT:
1041 esiop_update_scntl3(sc,
1042 esiop_cmd->cmd_c.siop_target);
1043 esiop_table_sync(esiop_cmd,
1044 BUS_DMASYNC_PREREAD |
1045 BUS_DMASYNC_PREWRITE);
1046 CALL_SCRIPT(Ent_send_msgout);
1047 return 1;
1048 case SIOP_NEG_ACK:
1049 esiop_update_scntl3(sc,
1050 esiop_cmd->cmd_c.siop_target);
1051 CALL_SCRIPT(Ent_msgin_ack);
1052 return 1;
1053 default:
1054 panic("invalid retval from "
1055 "siop_sdtr_neg()");
1056 }
1057 return 1;
1058 }
1059 /* send a message reject */
1060 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1061 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1062 esiop_table_sync(esiop_cmd,
1063 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1064 CALL_SCRIPT(Ent_send_msgout);
1065 return 1;
1066 case A_int_disc:
1067 INCSTAT(esiop_stat_intr_sdp);
1068 offset = bus_space_read_1(sc->sc_c.sc_rt,
1069 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1070 #ifdef SIOP_DEBUG_DR
1071 printf("disconnect offset %d\n", offset);
1072 #endif
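/*
 * SCRATCHA byte 1 apparently holds the index of the data table entry
 * the script was processing; siop_sdp() uses it to save the data
 * pointer for the later reselect, and the same value is read again at
 * command completion to compute the residual.
 */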
1073 siop_sdp(&esiop_cmd->cmd_c, offset);
1074 esiop_table_sync(esiop_cmd,
1075 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1076 CALL_SCRIPT(Ent_script_sched);
1077 return 1;
1078 case A_int_resfail:
1079 printf("reselect failed\n");
1080 CALL_SCRIPT(Ent_script_sched);
1081 return 1;
1082 case A_int_done:
1083 if (xs == NULL) {
1084 printf("%s: done without command\n",
1085 sc->sc_c.sc_dev.dv_xname);
1086 CALL_SCRIPT(Ent_script_sched);
1087 return 1;
1088 }
1089 #ifdef SIOP_DEBUG_INTR
1090 printf("done, DSA=0x%lx target id 0x%x last msg "
1091 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1092 le32toh(esiop_cmd->cmd_tables->id),
1093 esiop_cmd->cmd_tables->msg_in[0],
1094 le32toh(esiop_cmd->cmd_tables->status));
1095 #endif
1096 INCSTAT(esiop_stat_intr_done);
1097 esiop_cmd->cmd_c.status = CMDST_DONE;
1098 goto end;
1099 default:
1100 printf("unknown irqcode %x\n", irqcode);
1101 if (xs) {
1102 xs->error = XS_SELTIMEOUT;
1103 goto end;
1104 }
1105 goto reset;
1106 }
1107 return 1;
1108 }
1109 /* We just shouldn't get here */
1110 panic("esiop_intr: we shouldn't get here!");
1111
1112 end:
1113 /*
1114 * restart the script now if the command completed properly.
1115 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up the
1116 * queue
1117 */
1118 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1119 #ifdef SIOP_DEBUG_INTR
1120 printf("esiop_intr end: status %d\n", xs->status);
1121 #endif
1122 if (tag >= 0)
1123 esiop_lun->tactive[tag] = NULL;
1124 else
1125 esiop_lun->active = NULL;
1126 offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1127 SIOP_SCRATCHA + 1);
1128 esiop_scsicmd_end(esiop_cmd, offset);
1129 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1130 esiop_del_dev(sc, target, lun);
1131 CALL_SCRIPT(Ent_script_sched);
1132 return 1;
1133 }
1134
1135 void
1136 esiop_scsicmd_end(esiop_cmd, offset)
1137 struct esiop_cmd *esiop_cmd;
1138 int offset;
1139 {
1140 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1141 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1142
1143 siop_update_resid(&esiop_cmd->cmd_c, offset);
1144
1145 switch(xs->status) {
1146 case SCSI_OK:
1147 xs->error = XS_NOERROR;
1148 break;
1149 case SCSI_BUSY:
1150 xs->error = XS_BUSY;
1151 break;
1152 case SCSI_CHECK:
1153 xs->error = XS_BUSY;
1154 /* remove commands in the queue and scheduler */
1155 esiop_unqueue(sc, xs->xs_periph->periph_target,
1156 xs->xs_periph->periph_lun);
1157 break;
1158 case SCSI_QUEUE_FULL:
1159 INCSTAT(esiop_stat_intr_qfull);
1160 #ifdef SIOP_DEBUG
1161 printf("%s:%d:%d: queue full (tag %d)\n",
1162 sc->sc_c.sc_dev.dv_xname,
1163 xs->xs_periph->periph_target,
1164 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1165 #endif
1166 xs->error = XS_BUSY;
1167 break;
1168 case SCSI_SIOP_NOCHECK:
1169 /*
1170 * don't check status, xs->error is already valid
1171 */
1172 break;
1173 case SCSI_SIOP_NOSTATUS:
1174 /*
1175 * the status byte was not updated, cmd was
1176 * aborted
1177 */
1178 xs->error = XS_SELTIMEOUT;
1179 break;
1180 default:
1181 scsipi_printaddr(xs->xs_periph);
1182 printf("invalid status code %d\n", xs->status);
1183 xs->error = XS_DRIVER_STUFFUP;
1184 }
1185 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1186 bus_dmamap_sync(sc->sc_c.sc_dmat,
1187 esiop_cmd->cmd_c.dmamap_data, 0,
1188 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1189 (xs->xs_control & XS_CTL_DATA_IN) ?
1190 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1191 bus_dmamap_unload(sc->sc_c.sc_dmat,
1192 esiop_cmd->cmd_c.dmamap_data);
1193 }
1194 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1195 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1196 esiop_cmd->cmd_c.status = CMDST_FREE;
1197 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1198 #if 0
1199 if (xs->resid != 0)
1200 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1201 #endif
1202 scsipi_done (xs);
1203 }
1204
1205 void
1206 esiop_checkdone(sc)
1207 struct esiop_softc *sc;
1208 {
1209 int target, lun, tag;
1210 struct esiop_target *esiop_target;
1211 struct esiop_lun *esiop_lun;
1212 struct esiop_cmd *esiop_cmd;
1213 u_int32_t slot;
1214 int needsync = 0;
1215 int status;
1216 u_int32_t sem;
1217
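/*
 * The semaphore word is shared with the script: A_sem_done is set by
 * the script when it appends an entry to the done ring, A_sem_start
 * when it picks up a command from the scheduler. Clear A_sem_done
 * before scanning the ring, and use A_sem_start to detect that a
 * scheduler slot has been freed.
 */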
1218 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1219 sem = esiop_script_read(sc, sc->sc_semoffset);
1220 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1221 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1222 /*
1223 * at least one command has been started,
1224 * so we should have free slots now
1225 */
1226 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1227 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1228 }
1229 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1230
1231 if ((sem & A_sem_done) == 0) {
1232 /* no pending done command */
1233 return;
1234 }
1235
1236 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1237 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
1238 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1239 next:
1240 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1241 if (needsync)
1242 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1243 sc->sc_done_offset,
1244 A_ndone_slots * sizeof(u_int32_t),
1245 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1246 return;
1247 }
1248
1249 needsync = 1;
1250
1251 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1252 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1253 sc->sc_currdoneslot += 1;
1254 if (sc->sc_currdoneslot == A_ndone_slots)
1255 sc->sc_currdoneslot = 0;
1256
1257 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1258 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1259 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1260
1261 esiop_target = (target >= 0) ?
1262 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1263 if (esiop_target == NULL) {
1264 printf("esiop_target (target %d) not valid\n", target);
1265 goto next;
1266 }
1267 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1268 if (esiop_lun == NULL) {
1269 printf("esiop_lun (target %d lun %d) not valid\n",
1270 target, lun);
1271 goto next;
1272 }
1273 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1274 if (esiop_cmd == NULL) {
1275 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1276 target, lun, tag);
1277 goto next;
1278 }
1279
1280 esiop_table_sync(esiop_cmd,
1281 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1282 status = le32toh(esiop_cmd->cmd_tables->status);
1283 #ifdef DIAGNOSTIC
1284 if (status != SCSI_OK) {
1285 printf("command for T/L/Q %d/%d/%d status %d\n",
1286 target, lun, tag, status);
1287 goto next;
1288 }
1289
1290 #endif
1291 /* Ok, this command has been handled */
1292 esiop_cmd->cmd_c.xs->status = status;
1293 if (tag >= 0)
1294 esiop_lun->tactive[tag] = NULL;
1295 else
1296 esiop_lun->active = NULL;
1297 /* scratcha was saved in tlq by script. fetch offset from it */
1298 esiop_scsicmd_end(esiop_cmd,
1299 (le32toh(((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq) >> 8)
1300 & 0xff);
1301 goto next;
1302 }
1303
1304 void
1305 esiop_unqueue(sc, target, lun)
1306 struct esiop_softc *sc;
1307 int target;
1308 int lun;
1309 {
1310 int slot, tag;
1311 u_int32_t slotdsa;
1312 struct esiop_cmd *esiop_cmd;
1313 struct esiop_lun *esiop_lun =
1314 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1315
1316 /* first make sure to read valid data */
1317 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1318
1319 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1320 /* look for commands in the scheduler, not yet started */
1321 if (esiop_lun->tactive[tag] == NULL)
1322 continue;
1323 esiop_cmd = esiop_lun->tactive[tag];
1324 for (slot = 0; slot < A_ncmd_slots; slot++) {
1325 slotdsa = esiop_script_read(sc,
1326 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1327 /* if the slot has any flag, it won't match the DSA */
1328 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1329 /* Mark this slot as ignore */
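/*
 * Rewriting the slot with the ignore flag set (the DSA itself is kept)
 * should make the script skip this slot, so the command can be
 * completed and requeued from here without racing the scheduler.
 */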
1330 esiop_script_write(sc,
1331 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1332 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1333 /* ask to requeue */
1334 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1335 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1336 esiop_lun->tactive[tag] = NULL;
1337 esiop_scsicmd_end(esiop_cmd, 0);
1338 break;
1339 }
1340 }
1341 }
1342 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1343 }
1344
1345 /*
1346 * handle a rejected queue tag message: the command will run untagged,
1347 * so we have to adjust the reselect script.
1348 */
1349
1350
1351 int
1352 esiop_handle_qtag_reject(esiop_cmd)
1353 struct esiop_cmd *esiop_cmd;
1354 {
1355 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1356 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1357 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1358 int tag = esiop_cmd->cmd_tables->msg_out[2];
1359 struct esiop_target *esiop_target =
1360 (struct esiop_target*)sc->sc_c.targets[target];
1361 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1362
1363 #ifdef SIOP_DEBUG
1364 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1365 sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
1366 esiop_cmd->cmd_c.status);
1367 #endif
1368
1369 if (esiop_lun->active != NULL) {
1370 printf("%s: untagged command already running for target %d "
1371 "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
1372 target, lun, esiop_lun->active->cmd_c.status);
1373 return -1;
1374 }
1375 /* clear tag slot */
1376 esiop_lun->tactive[tag] = NULL;
1377 /* add command to non-tagged slot */
1378 esiop_lun->active = esiop_cmd;
1379 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1380 esiop_cmd->cmd_c.tag = -1;
1381 /* update DSA table */
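/*
 * Each lun appears to use two words in the target's lun table: the
 * DSA of the untagged command and a pointer to the per-lun tag DSA
 * table. Point the untagged entry at this command so the reselect
 * code finds it without a tag.
 */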
1382 esiop_script_write(sc, esiop_target->lun_table_offset +
1383 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1384 esiop_cmd->cmd_c.dsa);
1385 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1386 return 0;
1387 }
1388
1389 /*
1390 * handle a bus reset: reset chip, unqueue all active commands, free all
1391 * target structs and report lossage to the upper layer.
1392 * As the upper layer may requeue immediately we have to first store
1393 * all active commands in a temporary queue.
1394 */
1395 void
1396 esiop_handle_reset(sc)
1397 struct esiop_softc *sc;
1398 {
1399 struct esiop_cmd *esiop_cmd;
1400 struct esiop_lun *esiop_lun;
1401 int target, lun, tag;
1402 /*
1403 * scsi bus reset. reset the chip and restart
1404 * the queue. Need to clean up all active commands
1405 */
1406 printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
1407 /* stop, reset and restart the chip */
1408 esiop_reset(sc);
1409
1410 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1411 /* chip has been reset, all slots are free now */
1412 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1413 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1414 }
1415 /*
1416 * Process all commands: first the commands that have completed, then
1417 * the commands still being executed
1418 */
1419 esiop_checkdone(sc);
1420 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1421 target++) {
1422 struct esiop_target *esiop_target =
1423 (struct esiop_target *)sc->sc_c.targets[target];
1424 if (esiop_target == NULL)
1425 continue;
1426 for (lun = 0; lun < 8; lun++) {
1427 esiop_lun = esiop_target->esiop_lun[lun];
1428 if (esiop_lun == NULL)
1429 continue;
1430 for (tag = -1; tag <
1431 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1432 ESIOP_NTAG : 0);
1433 tag++) {
1434 if (tag >= 0)
1435 esiop_cmd = esiop_lun->tactive[tag];
1436 else
1437 esiop_cmd = esiop_lun->active;
1438 if (esiop_cmd == NULL)
1439 continue;
1440 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1441 printf("command with tag id %d reset\n", tag);
1442 esiop_cmd->cmd_c.xs->error =
1443 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1444 XS_TIMEOUT : XS_RESET;
1445 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1446 if (tag >= 0)
1447 esiop_lun->tactive[tag] = NULL;
1448 else
1449 esiop_lun->active = NULL;
1450 esiop_cmd->cmd_c.status = CMDST_DONE;
1451 esiop_scsicmd_end(esiop_cmd, 0);
1452 }
1453 }
1454 sc->sc_c.targets[target]->status = TARST_ASYNC;
1455 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1456 sc->sc_c.targets[target]->period =
1457 sc->sc_c.targets[target]->offset = 0;
1458 siop_update_xfer_mode(&sc->sc_c, target);
1459 }
1460
1461 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1462 }
1463
1464 void
1465 esiop_scsipi_request(chan, req, arg)
1466 struct scsipi_channel *chan;
1467 scsipi_adapter_req_t req;
1468 void *arg;
1469 {
1470 struct scsipi_xfer *xs;
1471 struct scsipi_periph *periph;
1472 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1473 struct esiop_cmd *esiop_cmd;
1474 struct esiop_target *esiop_target;
1475 int s, error, i;
1476 int target;
1477 int lun;
1478
1479 switch (req) {
1480 case ADAPTER_REQ_RUN_XFER:
1481 xs = arg;
1482 periph = xs->xs_periph;
1483 target = periph->periph_target;
1484 lun = periph->periph_lun;
1485
1486 s = splbio();
1487 /*
1488 * first check if there are pending completed commands.
1489 * this can free us some resources (in the rings for example).
1490 * we use a flag to guard against recursion.
1491 */
1492 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1493 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1494 esiop_checkdone(sc);
1495 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1496 }
1497 #ifdef SIOP_DEBUG_SCHED
1498 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1499 xs->xs_tag_type, xs->xs_tag_id);
1500 #endif
1501 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1502 if (esiop_cmd == NULL) {
1503 xs->error = XS_RESOURCE_SHORTAGE;
1504 scsipi_done(xs);
1505 splx(s);
1506 return;
1507 }
1508 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1509 #ifdef DIAGNOSTIC
1510 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1511 panic("siop_scsicmd: new cmd not free");
1512 #endif
1513 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1514 if (esiop_target == NULL) {
1515 #ifdef SIOP_DEBUG
1516 printf("%s: alloc siop_target for target %d\n",
1517 sc->sc_c.sc_dev.dv_xname, target);
1518 #endif
1519 sc->sc_c.targets[target] =
1520 malloc(sizeof(struct esiop_target),
1521 M_DEVBUF, M_NOWAIT | M_ZERO);
1522 if (sc->sc_c.targets[target] == NULL) {
1523 printf("%s: can't malloc memory for "
1524 "target %d\n", sc->sc_c.sc_dev.dv_xname,
1525 target);
1526 xs->error = XS_RESOURCE_SHORTAGE;
1527 scsipi_done(xs);
1528 splx(s);
1529 return;
1530 }
1531 esiop_target =
1532 (struct esiop_target*)sc->sc_c.targets[target];
1533 esiop_target->target_c.status = TARST_PROBING;
1534 esiop_target->target_c.flags = 0;
1535 esiop_target->target_c.id =
1536 sc->sc_c.clock_div << 24; /* scntl3 */
1537 esiop_target->target_c.id |= target << 16; /* id */
1538 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1539
1540 for (i=0; i < 8; i++)
1541 esiop_target->esiop_lun[i] = NULL;
1542 esiop_target_register(sc, target);
1543 }
1544 if (esiop_target->esiop_lun[lun] == NULL) {
1545 esiop_target->esiop_lun[lun] =
1546 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1547 M_NOWAIT|M_ZERO);
1548 if (esiop_target->esiop_lun[lun] == NULL) {
1549 printf("%s: can't alloc esiop_lun for "
1550 "target %d lun %d\n",
1551 sc->sc_c.sc_dev.dv_xname, target, lun);
1552 xs->error = XS_RESOURCE_SHORTAGE;
1553 scsipi_done(xs);
1554 splx(s);
1555 return;
1556 }
1557 }
1558 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1559 esiop_cmd->cmd_c.xs = xs;
1560 esiop_cmd->cmd_c.flags = 0;
1561 esiop_cmd->cmd_c.status = CMDST_READY;
1562
1563 /* load the DMA maps */
1564 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1565 esiop_cmd->cmd_c.dmamap_cmd,
1566 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1567 if (error) {
1568 printf("%s: unable to load cmd DMA map: %d\n",
1569 sc->sc_c.sc_dev.dv_xname, error);
1570 xs->error = XS_DRIVER_STUFFUP;
1571 scsipi_done(xs);
1572 splx(s);
1573 return;
1574 }
1575 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1576 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1577 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1578 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1579 ((xs->xs_control & XS_CTL_DATA_IN) ?
1580 BUS_DMA_READ : BUS_DMA_WRITE));
1581 if (error) {
1582 printf("%s: unable to load data DMA map: %d\n",
1583 sc->sc_c.sc_dev.dv_xname, error);
1584 xs->error = XS_DRIVER_STUFFUP;
1585 scsipi_done(xs);
1586 bus_dmamap_unload(sc->sc_c.sc_dmat,
1587 esiop_cmd->cmd_c.dmamap_cmd);
1588 splx(s);
1589 return;
1590 }
1591 bus_dmamap_sync(sc->sc_c.sc_dmat,
1592 esiop_cmd->cmd_c.dmamap_data, 0,
1593 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1594 (xs->xs_control & XS_CTL_DATA_IN) ?
1595 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1596 }
1597 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1598 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1599 BUS_DMASYNC_PREWRITE);
1600
1601 if (xs->xs_tag_type)
1602 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1603 else
1604 esiop_cmd->cmd_c.tag = -1;
1605 siop_setuptables(&esiop_cmd->cmd_c);
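/*
 * Build the tlq word with the same layout the script hands back in
 * SCRATCHC: validity flags in the low byte, target in bits 8-15,
 * lun in bits 16-23 and, for tagged commands, the tag in bits 24-31.
 */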
1606 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq =
1607 htole32(A_f_c_target | A_f_c_lun);
1608 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1609 htole32((target << 8) | (lun << 16));
1610 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1611 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1612 htole32(A_f_c_tag);
1613 ((struct esiop_xfer *)esiop_cmd->cmd_tables)->tlq |=
1614 htole32(esiop_cmd->cmd_c.tag << 24);
1615 }
1616
1617 esiop_table_sync(esiop_cmd,
1618 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1619 esiop_start(sc, esiop_cmd);
1620 if (xs->xs_control & XS_CTL_POLL) {
1621 /* poll for command completion */
1622 while ((xs->xs_status & XS_STS_DONE) == 0) {
1623 delay(1000);
1624 esiop_intr(sc);
1625 }
1626 }
1627 splx(s);
1628 return;
1629
1630 case ADAPTER_REQ_GROW_RESOURCES:
1631 #ifdef SIOP_DEBUG
1632 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1633 sc->sc_c.sc_adapt.adapt_openings);
1634 #endif
1635 esiop_morecbd(sc);
1636 return;
1637
1638 case ADAPTER_REQ_SET_XFER_MODE:
1639 {
1640 struct scsipi_xfer_mode *xm = arg;
1641 if (sc->sc_c.targets[xm->xm_target] == NULL)
1642 return;
1643 s = splbio();
1644 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
1645 (sc->sc_c.targets[xm->xm_target]->flags & TARF_TAG) == 0) {
1646 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1647 /* allocate tag tables for this device */
1648 for (lun = 0;
1649 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1650 if (scsipi_lookup_periph(chan,
1651 xm->xm_target, lun) != NULL)
1652 esiop_add_dev(sc, xm->xm_target, lun);
1653 }
1654 }
1655 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1656 (sc->sc_c.features & SF_BUS_WIDE))
1657 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1658 if (xm->xm_mode & PERIPH_CAP_SYNC)
1659 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1660 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1661 (sc->sc_c.features & SF_CHIP_DT))
1662 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1663 if ((xm->xm_mode &
1664 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1665 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1666 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1667
1668 splx(s);
1669 }
1670 }
1671 }
1672
1673 static void
1674 esiop_start(sc, esiop_cmd)
1675 struct esiop_softc *sc;
1676 struct esiop_cmd *esiop_cmd;
1677 {
1678 struct esiop_lun *esiop_lun;
1679 struct esiop_target *esiop_target;
1680 int timeout;
1681 int target, lun, slot;
1682
1683 /*
1684 * first make sure to read valid data
1685 */
1686 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1687
1688 /*
1689 * We use a circular queue here. sc->sc_currschedslot points to a
1690 * free slot, unless we have filled the queue. Check this.
1691 */
1692 slot = sc->sc_currschedslot;
1693 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1694 A_f_cmd_free) == 0) {
1695 /*
1696 * no more free slots, no need to continue. freeze the queue
1697 * and requeue this command.
1698 */
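/*
 * Also clear A_sem_start below: the script sets it again when it
 * starts a command (freeing its slot), which is how esiop_checkdone()
 * knows it can thaw the channel.
 */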
1699 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1700 sc->sc_flags |= SCF_CHAN_NOSLOT;
1701 esiop_script_write(sc, sc->sc_semoffset,
1702 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1703 esiop_script_sync(sc,
1704 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1705 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1706 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1707 esiop_scsicmd_end(esiop_cmd, 0);
1708 return;
1709 }
1710 /* OK, we can use this slot */
1711
1712 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1713 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1714 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1715 esiop_lun = esiop_target->esiop_lun[lun];
1716 /* if non-tagged command active, panic: this shouldn't happen */
1717 if (esiop_lun->active != NULL) {
1718 panic("esiop_start: tagged cmd while untagged running");
1719 }
1720 #ifdef DIAGNOSTIC
1721 /* sanity check the tag if needed */
1722 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1723 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1724 panic("esiop_start: tag not free");
1725 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1726 esiop_cmd->cmd_c.tag < 0) {
1727 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1728 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1729 panic("esiop_start: invalid tag id");
1730 }
1731 }
1732 #endif
1733 #ifdef SIOP_DEBUG_SCHED
1734 printf("using slot %d for DSA 0x%lx\n", slot,
1735 (u_long)esiop_cmd->cmd_c.dsa);
1736 #endif
1737 /* mark command as active */
1738 if (esiop_cmd->cmd_c.status == CMDST_READY)
1739 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1740 else
1741 panic("esiop_start: bad status");
1742 /* DSA table for reselect */
1743 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1744 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1745 /* DSA table for reselect */
1746 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1747 htole32(esiop_cmd->cmd_c.dsa);
1748 bus_dmamap_sync(sc->sc_c.sc_dmat,
1749 esiop_lun->lun_tagtbl->tblblk->blkmap,
1750 esiop_lun->lun_tagtbl->tbl_offset,
1751 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1752 } else {
1753 esiop_lun->active = esiop_cmd;
1754 esiop_script_write(sc,
1755 esiop_target->lun_table_offset +
1756 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1757 esiop_cmd->cmd_c.dsa);
1758 }
1759 /* scheduler slot: DSA */
1760 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1761 esiop_cmd->cmd_c.dsa);
1762 /* make sure SCRIPT processor will read valid data */
1763 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1764 /* handle timeout */
1765 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1766 /* start expire timer */
1767 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1768 if (timeout == 0)
1769 timeout = 1;
1770 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1771 timeout, esiop_timeout, esiop_cmd);
1772 }
1773 /* Signal script it has some work to do */
1774 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1775 SIOP_ISTAT, ISTAT_SIGP);
1776 /* update the current slot, and wait for IRQ */
1777 sc->sc_currschedslot++;
1778 if (sc->sc_currschedslot >= A_ncmd_slots)
1779 sc->sc_currschedslot = 0;
1780 return;
1781 }
1782
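/*
 * esiop_timeout: per-command timeout handler, armed from esiop_start() via
 * callout_reset().  Runs at splbio(): print some diagnostics about the
 * timed-out command, reset the SCSI bus and flag the command with
 * CMDFL_TIMEOUT; the command itself is completed later, when the bus-reset
 * interrupt is serviced.
 */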
1783 void
1784 esiop_timeout(v)
1785 void *v;
1786 {
1787 struct esiop_cmd *esiop_cmd = v;
1788 struct esiop_softc *sc =
1789 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1790 int s;
1791 #ifdef SIOP_DEBUG
1792 int slot, slotdsa;
1793 #endif
1794
1795 s = splbio();
1796 esiop_table_sync(esiop_cmd,
1797 BUS_DMASYNC_POSTREAD |
1798 BUS_DMASYNC_POSTWRITE);
1799 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1800 #ifdef SIOP_DEBUG
1801 printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1802
1803 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1804 for (slot = 0; slot < A_ncmd_slots; slot++) {
1805 slotdsa = esiop_script_read(sc,
1806 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1807 if ((slotdsa & 0x01) == 0)
1808 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1809 }
1810 printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1811 printf("DSP 0x%lx DSA 0x%x\n",
1812 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1813 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1814 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1815 printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1816 #else
1817 printf("command timeout, CDB: ");
1818 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1819 printf("\n");
1820 #endif
1821 /* reset the scsi bus */
1822 siop_resetbus(&sc->sc_c);
1823
1824 /* deactivate callout */
1825 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1826 /*
1827 	 * mark command as being timed out and just return;
1828 * the bus reset will generate an interrupt,
1829 	 * which will be handled in esiop_intr()
1830 */
1831 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1832 splx(s);
1833 return;
1834
1835 }
1836
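/*
 * esiop_dump_script: debug helper; dump the host copy of the SCRIPTS
 * program, two 32-bit words per line.  Memory-move instructions (high
 * opcode bits 0xc0000000) carry a third word, which is printed on the
 * same line before moving on to the next instruction.
 */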
1837 void
1838 esiop_dump_script(sc)
1839 struct esiop_softc *sc;
1840 {
1841 int i;
1842 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1843 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1844 le32toh(sc->sc_c.sc_script[i]),
1845 le32toh(sc->sc_c.sc_script[i+1]));
1846 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1847 0xc0000000) {
1848 i++;
1849 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1850 }
1851 printf("\n");
1852 }
1853 }
1854
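/*
 * esiop_morecbd: grow the pool of command descriptors.  Allocate one
 * DMA-safe page of esiop_xfer tables and a matching array of esiop_cmd
 * structures, create the per-command data and cmd DMA maps, initialize the
 * fixed message/status table entries relative to each command's DSA, and
 * put the new commands on the free list.  On success the adapter's
 * openings are increased by SIOP_NCMDPB.
 */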
1855 void
1856 esiop_morecbd(sc)
1857 struct esiop_softc *sc;
1858 {
1859 int error, i, s;
1860 bus_dma_segment_t seg;
1861 int rseg;
1862 struct esiop_cbd *newcbd;
1863 struct esiop_xfer *xfer;
1864 bus_addr_t dsa;
1865
1866 /* allocate a new list head */
1867 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1868 if (newcbd == NULL) {
1869 printf("%s: can't allocate memory for command descriptors "
1870 "head\n", sc->sc_c.sc_dev.dv_xname);
1871 return;
1872 }
1873
1874 /* allocate cmd list */
1875 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1876 M_DEVBUF, M_NOWAIT|M_ZERO);
1877 if (newcbd->cmds == NULL) {
1878 printf("%s: can't allocate memory for command descriptors\n",
1879 sc->sc_c.sc_dev.dv_xname);
1880 goto bad3;
1881 }
1882 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1883 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1884 if (error) {
1885 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1886 sc->sc_c.sc_dev.dv_xname, error);
1887 goto bad2;
1888 }
1889 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1890 (caddr_t *)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1891 if (error) {
1892 printf("%s: unable to map cbd DMA memory, error = %d\n",
1893 sc->sc_c.sc_dev.dv_xname, error);
1894 goto bad2;
1895 }
1896 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1897 BUS_DMA_NOWAIT, &newcbd->xferdma);
1898 if (error) {
1899 printf("%s: unable to create cbd DMA map, error = %d\n",
1900 sc->sc_c.sc_dev.dv_xname, error);
1901 goto bad1;
1902 }
1903 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1904 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1905 if (error) {
1906 printf("%s: unable to load cbd DMA map, error = %d\n",
1907 sc->sc_c.sc_dev.dv_xname, error);
1908 goto bad0;
1909 }
1910 #ifdef DEBUG
1911 	printf("%s: alloc newcbd at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1912 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1913 #endif
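	/*
	 * Carve the page into SIOP_NCMDPB esiop_xfer tables.  Each command's
	 * DSA is the bus address of its own table, and the msg_in/msg_out/
	 * status entries point at the small buffers embedded in that same
	 * table, hence the "dsa + offsetof(...)" arithmetic below.
	 */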
1914 for (i = 0; i < SIOP_NCMDPB; i++) {
1915 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1916 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1917 &newcbd->cmds[i].cmd_c.dmamap_data);
1918 if (error) {
1919 printf("%s: unable to create data DMA map for cbd: "
1920 "error %d\n",
1921 sc->sc_c.sc_dev.dv_xname, error);
1922 goto bad0;
1923 }
1924 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1925 sizeof(struct scsipi_generic), 1,
1926 sizeof(struct scsipi_generic), 0,
1927 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1928 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1929 if (error) {
1930 			printf("%s: unable to create cmd DMA map for cbd: error %d\n",
1931 sc->sc_c.sc_dev.dv_xname, error);
1932 goto bad0;
1933 }
1934 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1935 newcbd->cmds[i].esiop_cbdp = newcbd;
1936 xfer = &newcbd->xfers[i];
1937 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1938 memset(newcbd->cmds[i].cmd_tables, 0,
1939 sizeof(struct esiop_xfer));
1940 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1941 i * sizeof(struct esiop_xfer);
1942 newcbd->cmds[i].cmd_c.dsa = dsa;
1943 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1944 xfer->siop_tables.t_msgout.count= htole32(1);
1945 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1946 xfer->siop_tables.t_msgin.count= htole32(1);
1947 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1948 offsetof(struct siop_common_xfer, msg_in));
1949 xfer->siop_tables.t_extmsgin.count= htole32(2);
1950 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1951 offsetof(struct siop_common_xfer, msg_in) + 1);
1952 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1953 offsetof(struct siop_common_xfer, msg_in) + 3);
1954 xfer->siop_tables.t_status.count= htole32(1);
1955 xfer->siop_tables.t_status.addr = htole32(dsa +
1956 offsetof(struct siop_common_xfer, status));
1957
1958 s = splbio();
1959 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1960 splx(s);
1961 #ifdef SIOP_DEBUG
1962 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1963 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1964 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1965 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1966 #endif
1967 }
1968 s = splbio();
1969 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1970 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1971 splx(s);
1972 return;
1973 bad0:
1974 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1975 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1976 bad1:
1977 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1978 bad2:
1979 free(newcbd->cmds, M_DEVBUF);
1980 bad3:
1981 free(newcbd, M_DEVBUF);
1982 return;
1983 }
1984
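/*
 * esiop_moretagtbl: grow the pool of per-LUN tag DSA tables.  Allocate one
 * DMA-safe page and split it into ESIOP_NTPB tables of ESIOP_NTAG 32-bit
 * entries; the SCRIPTS code indexes such a table with the reselection tag
 * to find the DSA of the command being resumed.  New tables are put on the
 * free_tagtbl list for esiop_add_dev() to hand out.
 */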
1985 void
1986 esiop_moretagtbl(sc)
1987 struct esiop_softc *sc;
1988 {
1989 int error, i, j, s;
1990 bus_dma_segment_t seg;
1991 int rseg;
1992 struct esiop_dsatblblk *newtblblk;
1993 struct esiop_dsatbl *newtbls;
1994 u_int32_t *tbls;
1995
1996 /* allocate a new list head */
1997 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
1998 M_DEVBUF, M_NOWAIT|M_ZERO);
1999 if (newtblblk == NULL) {
2000 printf("%s: can't allocate memory for tag DSA table block\n",
2001 sc->sc_c.sc_dev.dv_xname);
2002 return;
2003 }
2004
2005 /* allocate tbl list */
2006 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2007 M_DEVBUF, M_NOWAIT|M_ZERO);
2008 if (newtbls == NULL) {
2009 		printf("%s: can't allocate memory for tag DSA tables\n",
2010 sc->sc_c.sc_dev.dv_xname);
2011 goto bad3;
2012 }
2013 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2014 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2015 if (error) {
2016 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
2017 sc->sc_c.sc_dev.dv_xname, error);
2018 goto bad2;
2019 }
2020 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2021 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2022 if (error) {
2023 printf("%s: unable to map tbls DMA memory, error = %d\n",
2024 sc->sc_c.sc_dev.dv_xname, error);
2025 goto bad2;
2026 }
2027 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2028 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2029 if (error) {
2030 printf("%s: unable to create tbl DMA map, error = %d\n",
2031 sc->sc_c.sc_dev.dv_xname, error);
2032 goto bad1;
2033 }
2034 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2035 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2036 if (error) {
2037 printf("%s: unable to load tbl DMA map, error = %d\n",
2038 sc->sc_c.sc_dev.dv_xname, error);
2039 goto bad0;
2040 }
2041 #ifdef DEBUG
2042 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2043 sc->sc_c.sc_dev.dv_xname,
2044 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2045 #endif
2046 for (i = 0; i < ESIOP_NTPB; i++) {
2047 newtbls[i].tblblk = newtblblk;
2048 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2049 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2050 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2051 newtbls[i].tbl_offset;
2052 for (j = 0; j < ESIOP_NTAG; j++)
2053 newtbls[i].tbl[j] = j;
2054 s = splbio();
2055 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2056 splx(s);
2057 }
2058 s = splbio();
2059 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2060 splx(s);
2061 return;
2062 bad0:
2063 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2064 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2065 bad1:
2066 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2067 bad2:
2068 free(newtbls, M_DEVBUF);
2069 bad3:
2070 free(newtblblk, M_DEVBUF);
2071 return;
2072 }
2073
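/*
 * esiop_update_scntl3: called when a target's transfer parameters change.
 * The id word (which also encodes the scntl3/sxfer values used by the
 * table-indirect select) is rewritten at the head of the target's LUN
 * table, so the SCRIPTS processor picks up the new timing on the next
 * selection.
 */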
2074 void
2075 esiop_update_scntl3(sc, _siop_target)
2076 struct esiop_softc *sc;
2077 struct siop_common_target *_siop_target;
2078 {
2079 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2080 esiop_script_write(sc, esiop_target->lun_table_offset,
2081 esiop_target->target_c.id);
2082 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2083 }
2084
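/*
 * esiop_add_dev: per-LUN setup for tagged queuing.  Reserve a tag DSA
 * table for this LUN (growing the pool with esiop_moretagtbl() if needed)
 * and record its bus address in the target's LUN table so the SCRIPTS
 * code can resolve tagged reselections.  If no table is available the
 * target falls back to untagged operation (TARF_TAG is cleared).
 */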
2085 void
2086 esiop_add_dev(sc, target, lun)
2087 struct esiop_softc *sc;
2088 int target;
2089 int lun;
2090 {
2091 struct esiop_target *esiop_target =
2092 (struct esiop_target *)sc->sc_c.targets[target];
2093 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2094
2095 /* we need a tag DSA table */
2096 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2097 if (esiop_lun->lun_tagtbl == NULL) {
2098 esiop_moretagtbl(sc);
2099 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2100 if (esiop_lun->lun_tagtbl == NULL) {
2101 /* no resources, run untagged */
2102 esiop_target->target_c.flags &= ~TARF_TAG;
2103 return;
2104 }
2105 }
2106 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2107 /* Update LUN DSA table */
2108 esiop_script_write(sc, esiop_target->lun_table_offset +
2109 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2110 esiop_lun->lun_tagtbl->tbl_dsa);
2111 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2112 }
2113
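/*
 * esiop_del_dev: release the per-LUN software state when a device is
 * detached.
 */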
2114 void
2115 esiop_del_dev(sc, target, lun)
2116 struct esiop_softc *sc;
2117 int target;
2118 int lun;
2119 {
2120 struct esiop_target *esiop_target;
2121 #ifdef SIOP_DEBUG
2122 printf("%s:%d:%d: free lun sw entry\n",
2123 sc->sc_c.sc_dev.dv_xname, target, lun);
2124 #endif
2125 if (sc->sc_c.targets[target] == NULL)
2126 return;
2127 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2128 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2129 esiop_target->esiop_lun[lun] = NULL;
2130 }
2131
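/*
 * esiop_target_register: reserve space in the script for the target's LUN
 * table (two words per LUN plus two leading words), store the ID word used
 * for selection in the first word, record the table's bus address in the
 * global target table, and re-register any tag DSA tables the LUNs already
 * own.
 */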
2132 void
2133 esiop_target_register(sc, target)
2134 struct esiop_softc *sc;
2135 u_int32_t target;
2136 {
2137 struct esiop_target *esiop_target =
2138 (struct esiop_target *)sc->sc_c.targets[target];
2139 struct esiop_lun *esiop_lun;
2140 int lun;
2141
2142 /* get a DSA table for this target */
2143 esiop_target->lun_table_offset = sc->sc_free_offset;
2144 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2145 #ifdef SIOP_DEBUG
2146 printf("%s: lun table for target %d offset %d free offset %d\n",
2147 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2148 sc->sc_free_offset);
2149 #endif
2150 	/* first word (32 bits) is the ID (for select) */
2151 esiop_script_write(sc, esiop_target->lun_table_offset,
2152 esiop_target->target_c.id);
2153 /* Record this table in the target DSA table */
2154 esiop_script_write(sc,
2155 sc->sc_target_table_offset + target,
2156 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2157 sc->sc_c.sc_scriptaddr);
2158 /* if we have a tag table, register it */
2159 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2160 esiop_lun = esiop_target->esiop_lun[lun];
2161 if (esiop_lun == NULL)
2162 continue;
2163 if (esiop_lun->lun_tagtbl)
2164 esiop_script_write(sc, esiop_target->lun_table_offset +
2165 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2166 esiop_lun->lun_tagtbl->tbl_dsa);
2167 }
2168 esiop_script_sync(sc,
2169 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2170 }
2171
2172 #ifdef SIOP_STATS
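/*
 * esiop_printstats: dump the interrupt statistics counters collected when
 * the driver is built with SIOP_STATS.
 */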
2173 void
2174 esiop_printstats()
2175 {
2176 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2177 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2178 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2179 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2180 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2181 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2182 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2183 }
2184 #endif
2185