1 /*	$NetBSD: esiop.c,v 1.50 2010/02/12 06:40:52 dholland Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.50 2010/02/12 06:40:52 dholland Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/kernel.h>
39
40 #include <uvm/uvm_extern.h>
41
42 #include <machine/endian.h>
43 #include <sys/bus.h>
44
45 #include <dev/microcode/siop/esiop.out>
46
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsi_message.h>
49 #include <dev/scsipi/scsipi_all.h>
50
51 #include <dev/scsipi/scsiconf.h>
52
53 #include <dev/ic/siopreg.h>
54 #include <dev/ic/siopvar_common.h>
55 #include <dev/ic/esiopvar.h>
56
57 #include "opt_siop.h"
58
59 #ifndef DEBUG
60 #undef DEBUG
61 #endif
62 /*
63 #define SIOP_DEBUG
64 #define SIOP_DEBUG_DR
65 #define SIOP_DEBUG_INTR
66 #define SIOP_DEBUG_SCHED
67 #define DUMP_SCRIPT
68 */
69
70 #define SIOP_STATS
71
72 #ifndef SIOP_DEFAULT_TARGET
73 #define SIOP_DEFAULT_TARGET 7
74 #endif
75
76 /* number of cmd descriptors per block */
77 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
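/* a "block" here is one PAGE_SIZE DMA allocation made by esiop_morecbd() */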
78
79 void esiop_reset(struct esiop_softc *);
80 void esiop_checkdone(struct esiop_softc *);
81 void esiop_handle_reset(struct esiop_softc *);
82 void esiop_scsicmd_end(struct esiop_cmd *, int);
83 void esiop_unqueue(struct esiop_softc *, int, int);
84 int esiop_handle_qtag_reject(struct esiop_cmd *);
85 static void esiop_start(struct esiop_softc *, struct esiop_cmd *);
86 void esiop_timeout(void *);
87 void esiop_scsipi_request(struct scsipi_channel *,
88 scsipi_adapter_req_t, void *);
89 void esiop_dump_script(struct esiop_softc *);
90 void esiop_morecbd(struct esiop_softc *);
91 void esiop_moretagtbl(struct esiop_softc *);
92 void siop_add_reselsw(struct esiop_softc *, int);
93 void esiop_target_register(struct esiop_softc *, uint32_t);
94
95 void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
96
97 #ifdef SIOP_STATS
98 static int esiop_stat_intr = 0;
99 static int esiop_stat_intr_shortxfer = 0;
100 static int esiop_stat_intr_sdp = 0;
101 static int esiop_stat_intr_done = 0;
102 static int esiop_stat_intr_xferdisc = 0;
103 static int esiop_stat_intr_lunresel = 0;
104 static int esiop_stat_intr_qfull = 0;
105 void esiop_printstats(void);
106 #define INCSTAT(x) x++
107 #else
108 #define INCSTAT(x)
109 #endif
110
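/*
 * Script access helpers: with on-chip RAM (SF_CHIP_RAM) the SCRIPT is
 * executed from bus space, so it is read/written through bus_space and
 * needs no DMA sync; otherwise it lives in host memory as little-endian
 * words and must be synced around CPU accesses.
 */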
111 static inline void esiop_script_sync(struct esiop_softc *, int);
112 static inline void
113 esiop_script_sync(struct esiop_softc *sc, int ops)
114 {
115
116 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
117 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
118 PAGE_SIZE, ops);
119 }
120
121 static inline uint32_t esiop_script_read(struct esiop_softc *, u_int);
122 static inline uint32_t
123 esiop_script_read(struct esiop_softc *sc, u_int offset)
124 {
125
126 if (sc->sc_c.features & SF_CHIP_RAM) {
127 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
128 offset * 4);
129 } else {
130 return le32toh(sc->sc_c.sc_script[offset]);
131 }
132 }
133
134 static inline void esiop_script_write(struct esiop_softc *, u_int,
135 uint32_t);
136 static inline void
137 esiop_script_write(struct esiop_softc *sc, u_int offset, uint32_t val)
138 {
139
140 if (sc->sc_c.features & SF_CHIP_RAM) {
141 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
142 offset * 4, val);
143 } else {
144 sc->sc_c.sc_script[offset] = htole32(val);
145 }
146 }
147
148 void
149 esiop_attach(struct esiop_softc *sc)
150 {
151 struct esiop_dsatbl *tagtbl_donering;
152
153 if (siop_common_attach(&sc->sc_c) != 0 )
154 return;
155
156 TAILQ_INIT(&sc->free_list);
157 TAILQ_INIT(&sc->cmds);
158 TAILQ_INIT(&sc->free_tagtbl);
159 TAILQ_INIT(&sc->tag_tblblk);
160 sc->sc_currschedslot = 0;
161 #ifdef SIOP_DEBUG
162 aprint_debug_dev(sc->sc_c.sc_dev,
163 "script size = %d, PHY addr=0x%x, VIRT=%p\n",
164 (int)sizeof(esiop_script),
165 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
166 #endif
167
168 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
169 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
170
171 /*
172 * get space for the CMD done slot. For this we use a tag table entry.
173 * It's the same size and allows us to not waste 3/4 of a page
174 */
175 #ifdef DIAGNOSTIC
176 if (ESIOP_NTAG != A_ndone_slots) {
177 aprint_error_dev(sc->sc_c.sc_dev,
178 "size of tag DSA table different from the done ring\n");
179 return;
180 }
181 #endif
182 esiop_moretagtbl(sc);
183 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
184 if (tagtbl_donering == NULL) {
185 aprint_error_dev(sc->sc_c.sc_dev,
186 "no memory for command done ring\n");
187 return;
188 }
189 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
190 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
191 sc->sc_done_offset = tagtbl_donering->tbl_offset;
192 sc->sc_done_slot = &tagtbl_donering->tbl[0];
193
194 /* Do a bus reset, so that devices fall back to narrow/async */
195 siop_resetbus(&sc->sc_c);
196 /*
197 	 * esiop_reset() will reset the chip, thus clearing pending interrupts
198 */
199 esiop_reset(sc);
200 #ifdef DUMP_SCRIPT
201 esiop_dump_script(sc);
202 #endif
203
204 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
205 }
206
207 void
208 esiop_reset(struct esiop_softc *sc)
209 {
210 int i, j;
211 uint32_t addr;
212 uint32_t msgin_addr, sem_addr;
213
214 siop_common_reset(&sc->sc_c);
215
216 /*
217 	 * we copy the script at the beginning of RAM. Then there are 4 bytes
218 	 * for messages in, and 4 bytes for the semaphore
219 */
220 sc->sc_free_offset = __arraycount(esiop_script);
221 msgin_addr =
222 sc->sc_free_offset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
223 sc->sc_free_offset += 1;
224 sc->sc_semoffset = sc->sc_free_offset;
225 sem_addr =
226 sc->sc_semoffset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
227 sc->sc_free_offset += 1;
228 /* then we have the scheduler ring */
229 sc->sc_shedoffset = sc->sc_free_offset;
230 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
231 /* then the targets DSA table */
232 sc->sc_target_table_offset = sc->sc_free_offset;
233 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
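	/*
	 * Layout built above, in 32-bit words from the start of the script:
	 * the script itself, one word for msg_in, one word for the
	 * semaphore, the scheduler ring (A_ncmd_slots slots of CMD_SLOTSIZE
	 * words) and finally one DSA table entry per target.
	 */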
234 /* copy and patch the script */
235 if (sc->sc_c.features & SF_CHIP_RAM) {
236 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
237 esiop_script,
238 __arraycount(esiop_script));
239 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
240 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
241 E_tlq_offset_Used[j] * 4,
242 sizeof(struct siop_common_xfer));
243 }
244 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
245 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
246 E_saved_offset_offset_Used[j] * 4,
247 sizeof(struct siop_common_xfer) + 4);
248 }
249 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
250 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
251 E_abs_msgin2_Used[j] * 4, msgin_addr);
252 }
253 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
254 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
255 E_abs_sem_Used[j] * 4, sem_addr);
256 }
257
258 if (sc->sc_c.features & SF_CHIP_LED0) {
259 bus_space_write_region_4(sc->sc_c.sc_ramt,
260 sc->sc_c.sc_ramh,
261 Ent_led_on1, esiop_led_on,
262 __arraycount(esiop_led_on));
263 bus_space_write_region_4(sc->sc_c.sc_ramt,
264 sc->sc_c.sc_ramh,
265 Ent_led_on2, esiop_led_on,
266 __arraycount(esiop_led_on));
267 bus_space_write_region_4(sc->sc_c.sc_ramt,
268 sc->sc_c.sc_ramh,
269 Ent_led_off, esiop_led_off,
270 __arraycount(esiop_led_off));
271 }
272 } else {
273 for (j = 0; j < __arraycount(esiop_script); j++) {
274 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
275 }
276 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
277 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
278 htole32(sizeof(struct siop_common_xfer));
279 }
280 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
281 sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
282 htole32(sizeof(struct siop_common_xfer) + 4);
283 }
284 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
285 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
286 htole32(msgin_addr);
287 }
288 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
289 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
290 htole32(sem_addr);
291 }
292
293 if (sc->sc_c.features & SF_CHIP_LED0) {
294 for (j = 0; j < __arraycount(esiop_led_on); j++)
295 sc->sc_c.sc_script[
296 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
297 ] = htole32(esiop_led_on[j]);
298 for (j = 0; j < __arraycount(esiop_led_on); j++)
299 sc->sc_c.sc_script[
300 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
301 ] = htole32(esiop_led_on[j]);
302 for (j = 0; j < __arraycount(esiop_led_off); j++)
303 sc->sc_c.sc_script[
304 Ent_led_off / sizeof(esiop_led_off[0]) + j
305 ] = htole32(esiop_led_off[j]);
306 }
307 }
308 /* get base of scheduler ring */
309 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(uint32_t);
310 /* init scheduler */
311 for (i = 0; i < A_ncmd_slots; i++) {
312 esiop_script_write(sc,
313 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
314 }
315 sc->sc_currschedslot = 0;
316 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
317 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
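	/* SCRATCHE byte 0 = current scheduler slot index, SCRATCHD = its address */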
318 /*
319 * 0x78000000 is a 'move data8 to reg'. data8 is the second
320 * octet, reg offset is the third.
321 */
322 esiop_script_write(sc, Ent_cmdr0 / 4,
323 0x78640000 | ((addr & 0x000000ff) << 8));
324 esiop_script_write(sc, Ent_cmdr1 / 4,
325 0x78650000 | ((addr & 0x0000ff00) ));
326 esiop_script_write(sc, Ent_cmdr2 / 4,
327 0x78660000 | ((addr & 0x00ff0000) >> 8));
328 esiop_script_write(sc, Ent_cmdr3 / 4,
329 0x78670000 | ((addr & 0xff000000) >> 16));
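	/*
	 * Ent_cmdr0..3 are patched with the scheduler ring base, one byte
	 * per 'move data8 to reg' into what appear to be the four bytes of
	 * SCRATCHD (0x64-0x67); the script presumably uses these to reset
	 * its ring pointer when it wraps. The done ring below gets the same
	 * treatment with Ent_doner0..3 and SCRATCHF.
	 */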
330 /* done ring */
331 for (i = 0; i < A_ndone_slots; i++)
332 sc->sc_done_slot[i] = 0;
333 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
334 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
335 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
336 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
337 sc->sc_currdoneslot = 0;
338 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
339 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
340 esiop_script_write(sc, Ent_doner0 / 4,
341 0x786c0000 | ((addr & 0x000000ff) << 8));
342 esiop_script_write(sc, Ent_doner1 / 4,
343 0x786d0000 | ((addr & 0x0000ff00) ));
344 esiop_script_write(sc, Ent_doner2 / 4,
345 0x786e0000 | ((addr & 0x00ff0000) >> 8));
346 esiop_script_write(sc, Ent_doner3 / 4,
347 0x786f0000 | ((addr & 0xff000000) >> 16));
348
349 /* set flags */
350 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
351 /* write pointer of base of target DSA table */
352 addr = (sc->sc_target_table_offset * sizeof(uint32_t)) +
353 sc->sc_c.sc_scriptaddr;
354 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
355 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
356 ((addr & 0x000000ff) << 8));
357 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
358 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
359 ((addr & 0x0000ff00) ));
360 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
361 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
362 ((addr & 0x00ff0000) >> 8));
363 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
364 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
365 ((addr & 0xff000000) >> 16));
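	/*
	 * Likewise the DSA table base is ORed byte by byte into the data8
	 * field of the four instructions at Ent_load_targtable, so the
	 * script can find the per-target DSA table.
	 */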
366 #ifdef SIOP_DEBUG
367 printf("%s: target table offset %d free offset %d\n",
368 device_xname(sc->sc_c.sc_dev), sc->sc_target_table_offset,
369 sc->sc_free_offset);
370 #endif
371
372 /* register existing targets */
373 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
374 if (sc->sc_c.targets[i])
375 esiop_target_register(sc, i);
376 }
377 /* start script */
378 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
379 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
380 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
381 }
382 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
383 sc->sc_c.sc_scriptaddr + Ent_reselect);
384 }
385
386 #if 0
387 #define CALL_SCRIPT(ent) do { \
388 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
389 esiop_cmd->cmd_c.dsa, \
390 sc->sc_c.sc_scriptaddr + ent); \
391 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
392 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
393 } while (/* CONSTCOND */0)
394 #else
395 #define CALL_SCRIPT(ent) do { \
396 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
397 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
398 } while (/* CONSTCOND */0)
399 #endif
400
401 int
402 esiop_intr(void *v)
403 {
404 struct esiop_softc *sc = v;
405 struct esiop_target *esiop_target;
406 struct esiop_cmd *esiop_cmd;
407 struct esiop_lun *esiop_lun;
408 struct scsipi_xfer *xs;
409 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
410 uint32_t irqcode;
411 int need_reset = 0;
412 int offset, target, lun, tag;
413 uint32_t tflags;
414 uint32_t addr;
415 int freetarget = 0;
416 int slot;
417 int retval = 0;
418
419 again:
420 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
421 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
422 return retval;
423 }
424 retval = 1;
425 INCSTAT(esiop_stat_intr);
426 esiop_checkdone(sc);
427 if (istat & ISTAT_INTF) {
428 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
429 SIOP_ISTAT, ISTAT_INTF);
430 goto again;
431 }
432
433 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
434 (ISTAT_DIP | ISTAT_ABRT)) {
435 /* clear abort */
436 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
437 SIOP_ISTAT, 0);
438 }
439
440 /* get CMD from T/L/Q */
441 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
442 SIOP_SCRATCHC);
443 #ifdef SIOP_DEBUG_INTR
444 printf("interrupt, istat=0x%x tflags=0x%x "
445 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
446 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
447 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
448 SIOP_DSP) -
449 sc->sc_c.sc_scriptaddr));
450 #endif
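	/*
	 * SCRATCHC holds the T/L/Q of the current command: byte 0 is the
	 * A_f_c_* flags, byte 1 the target, byte 2 the lun, byte 3 the tag;
	 * each field is only valid if the corresponding flag bit is set.
	 */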
451 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
452 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
453 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
454 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
455 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
456
457 if (target >= 0 && lun >= 0) {
458 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
459 if (esiop_target == NULL) {
460 printf("esiop_target (target %d) not valid\n", target);
461 goto none;
462 }
463 esiop_lun = esiop_target->esiop_lun[lun];
464 if (esiop_lun == NULL) {
465 printf("esiop_lun (target %d lun %d) not valid\n",
466 target, lun);
467 goto none;
468 }
469 esiop_cmd =
470 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
471 if (esiop_cmd == NULL) {
472 printf("esiop_cmd (target %d lun %d tag %d)"
473 " not valid\n",
474 target, lun, tag);
475 goto none;
476 }
477 xs = esiop_cmd->cmd_c.xs;
478 #ifdef DIAGNOSTIC
479 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
480 printf("esiop_cmd (target %d lun %d) "
481 "not active (%d)\n", target, lun,
482 esiop_cmd->cmd_c.status);
483 goto none;
484 }
485 #endif
486 esiop_table_sync(esiop_cmd,
487 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
488 } else {
489 none:
490 xs = NULL;
491 esiop_target = NULL;
492 esiop_lun = NULL;
493 esiop_cmd = NULL;
494 }
495 if (istat & ISTAT_DIP) {
496 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
497 SIOP_DSTAT);
498 if (dstat & DSTAT_ABRT) {
499 /* was probably generated by a bus reset IOCTL */
500 if ((dstat & DSTAT_DFE) == 0)
501 siop_clearfifo(&sc->sc_c);
502 goto reset;
503 }
504 if (dstat & DSTAT_SSI) {
505 printf("single step dsp 0x%08x dsa 0x08%x\n",
506 (int)(bus_space_read_4(sc->sc_c.sc_rt,
507 sc->sc_c.sc_rh, SIOP_DSP) -
508 sc->sc_c.sc_scriptaddr),
509 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
510 SIOP_DSA));
511 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
512 (istat & ISTAT_SIP) == 0) {
513 bus_space_write_1(sc->sc_c.sc_rt,
514 sc->sc_c.sc_rh, SIOP_DCNTL,
515 bus_space_read_1(sc->sc_c.sc_rt,
516 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
517 }
518 return 1;
519 }
520
521 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
522 printf("%s: DMA IRQ:", device_xname(sc->sc_c.sc_dev));
523 if (dstat & DSTAT_IID)
524 printf(" Illegal instruction");
525 if (dstat & DSTAT_BF)
526 printf(" bus fault");
527 if (dstat & DSTAT_MDPE)
528 printf(" parity");
529 if (dstat & DSTAT_DFE)
530 printf(" DMA fifo empty");
531 else
532 siop_clearfifo(&sc->sc_c);
533 printf(", DSP=0x%x DSA=0x%x: ",
534 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
535 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
536 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
537 if (esiop_cmd)
538 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
539 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
540 le32toh(esiop_cmd->cmd_tables->status));
541 else
542 printf(" current T/L/Q invalid\n");
543 need_reset = 1;
544 }
545 }
546 if (istat & ISTAT_SIP) {
547 if (istat & ISTAT_DIP)
548 delay(10);
549 /*
550 		 * Can't read sist0 & sist1 independently, or we would have to
551 		 * insert a delay between the two reads
552 */
553 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
554 SIOP_SIST0);
555 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
556 SIOP_SSTAT1);
557 #ifdef SIOP_DEBUG_INTR
558 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
559 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
560 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
561 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
562 SIOP_DSP) -
563 sc->sc_c.sc_scriptaddr));
564 #endif
565 if (sist & SIST0_RST) {
566 esiop_handle_reset(sc);
567 /* no table to flush here */
568 return 1;
569 }
570 if (sist & SIST0_SGE) {
571 if (esiop_cmd)
572 scsipi_printaddr(xs->xs_periph);
573 else
574 printf("%s:", device_xname(sc->sc_c.sc_dev));
575 printf("scsi gross error\n");
576 if (esiop_target)
577 esiop_target->target_c.flags &= ~TARF_DT;
578 #ifdef DEBUG
579 printf("DSA=0x%x DSP=0x%lx\n",
580 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
581 SIOP_DSA),
582 (u_long)(bus_space_read_4(sc->sc_c.sc_rt,
583 sc->sc_c.sc_rh, SIOP_DSP) -
584 sc->sc_c.sc_scriptaddr));
585 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
586 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
587 SIOP_SDID),
588 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
589 SIOP_SCNTL3),
590 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
591 SIOP_SXFER),
592 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
593 SIOP_SCNTL4));
594
595 #endif
596 goto reset;
597 }
598 if ((sist & SIST0_MA) && need_reset == 0) {
599 if (esiop_cmd) {
600 int scratchc0;
601 dstat = bus_space_read_1(sc->sc_c.sc_rt,
602 sc->sc_c.sc_rh, SIOP_DSTAT);
603 /*
604 * first restore DSA, in case we were in a S/G
605 * operation.
606 */
607 bus_space_write_4(sc->sc_c.sc_rt,
608 sc->sc_c.sc_rh,
609 SIOP_DSA, esiop_cmd->cmd_c.dsa);
610 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
611 sc->sc_c.sc_rh, SIOP_SCRATCHC);
612 switch (sstat1 & SSTAT1_PHASE_MASK) {
613 case SSTAT1_PHASE_STATUS:
614 /*
615 * previous phase may be aborted for any reason
616 				 * (for example, the target has less data to
617 * transfer than requested). Compute resid and
618 * just go to status, the command should
619 * terminate.
620 */
621 INCSTAT(esiop_stat_intr_shortxfer);
622 if (scratchc0 & A_f_c_data)
623 siop_ma(&esiop_cmd->cmd_c);
624 else if ((dstat & DSTAT_DFE) == 0)
625 siop_clearfifo(&sc->sc_c);
626 CALL_SCRIPT(Ent_status);
627 return 1;
628 case SSTAT1_PHASE_MSGIN:
629 /*
630 * target may be ready to disconnect
631 * Compute resid which would be used later
632 * if a save data pointer is needed.
633 */
634 INCSTAT(esiop_stat_intr_xferdisc);
635 if (scratchc0 & A_f_c_data)
636 siop_ma(&esiop_cmd->cmd_c);
637 else if ((dstat & DSTAT_DFE) == 0)
638 siop_clearfifo(&sc->sc_c);
639 bus_space_write_1(sc->sc_c.sc_rt,
640 sc->sc_c.sc_rh, SIOP_SCRATCHC,
641 scratchc0 & ~A_f_c_data);
642 CALL_SCRIPT(Ent_msgin);
643 return 1;
644 }
645 aprint_error_dev(sc->sc_c.sc_dev,
646 "unexpected phase mismatch %d\n",
647 sstat1 & SSTAT1_PHASE_MASK);
648 } else {
649 aprint_error_dev(sc->sc_c.sc_dev,
650 "phase mismatch without command\n");
651 }
652 need_reset = 1;
653 }
654 if (sist & SIST0_PAR) {
655 /* parity error, reset */
656 if (esiop_cmd)
657 scsipi_printaddr(xs->xs_periph);
658 else
659 printf("%s:", device_xname(sc->sc_c.sc_dev));
660 printf("parity error\n");
661 if (esiop_target)
662 esiop_target->target_c.flags &= ~TARF_DT;
663 goto reset;
664 }
665 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
666 /*
667 			 * selection timeout, assume there's no device here.
668 			 * We also have to update the ring pointer ourselves
669 */
670 slot = bus_space_read_1(sc->sc_c.sc_rt,
671 sc->sc_c.sc_rh, SIOP_SCRATCHE);
672 esiop_script_sync(sc,
673 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
674 #ifdef SIOP_DEBUG_SCHED
675 printf("sel timeout target %d, slot %d\n",
676 target, slot);
677 #endif
678 /*
679 * mark this slot as free, and advance to next slot
680 */
681 esiop_script_write(sc,
682 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
683 A_f_cmd_free);
684 addr = bus_space_read_4(sc->sc_c.sc_rt,
685 sc->sc_c.sc_rh, SIOP_SCRATCHD);
686 if (slot < (A_ncmd_slots - 1)) {
687 bus_space_write_1(sc->sc_c.sc_rt,
688 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
689 addr = addr + sizeof(struct esiop_slot);
690 } else {
691 bus_space_write_1(sc->sc_c.sc_rt,
692 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
693 addr = sc->sc_c.sc_scriptaddr +
694 sc->sc_shedoffset * sizeof(uint32_t);
695 }
696 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
697 SIOP_SCRATCHD, addr);
698 esiop_script_sync(sc,
699 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
700 if (esiop_cmd) {
701 esiop_cmd->cmd_c.status = CMDST_DONE;
702 xs->error = XS_SELTIMEOUT;
703 freetarget = 1;
704 goto end;
705 } else {
706 printf("%s: selection timeout without "
707 "command, target %d (sdid 0x%x), "
708 "slot %d\n",
709 device_xname(sc->sc_c.sc_dev), target,
710 bus_space_read_1(sc->sc_c.sc_rt,
711 sc->sc_c.sc_rh, SIOP_SDID), slot);
712 need_reset = 1;
713 }
714 }
715 if (sist & SIST0_UDC) {
716 /*
717 * unexpected disconnect. Usually the target signals
718 * a fatal condition this way. Attempt to get sense.
719 */
720 if (esiop_cmd) {
721 esiop_cmd->cmd_tables->status =
722 htole32(SCSI_CHECK);
723 goto end;
724 }
725 aprint_error_dev(sc->sc_c.sc_dev,
726 "unexpected disconnect without command\n");
727 goto reset;
728 }
729 if (sist & (SIST1_SBMC << 8)) {
730 /* SCSI bus mode change */
731 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
732 goto reset;
733 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
734 /*
735 * we have a script interrupt, it will
736 * restart the script.
737 */
738 goto scintr;
739 }
740 /*
741 			 * else we have to restart it ourselves, at the
742 * interrupted instruction.
743 */
744 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
745 SIOP_DSP,
746 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
747 SIOP_DSP) - 8);
748 return 1;
749 }
750 /* Else it's an unhandled exception (for now). */
751 aprint_error_dev(sc->sc_c.sc_dev,
752 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
753 "DSA=0x%x DSP=0x%x\n", sist,
754 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
755 SIOP_SSTAT1),
756 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
757 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
758 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
759 if (esiop_cmd) {
760 esiop_cmd->cmd_c.status = CMDST_DONE;
761 xs->error = XS_SELTIMEOUT;
762 goto end;
763 }
764 need_reset = 1;
765 }
766 if (need_reset) {
767 reset:
768 /* fatal error, reset the bus */
769 siop_resetbus(&sc->sc_c);
770 /* no table to flush here */
771 return 1;
772 }
773
774 scintr:
775 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
776 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
777 SIOP_DSPS);
778 #ifdef SIOP_DEBUG_INTR
779 printf("script interrupt 0x%x\n", irqcode);
780 #endif
781 /*
782 		 * no command, or an inactive command, is only valid for a
783 * reselect interrupt
784 */
785 if ((irqcode & 0x80) == 0) {
786 if (esiop_cmd == NULL) {
787 aprint_error_dev(sc->sc_c.sc_dev,
788 "script interrupt (0x%x) with invalid DSA !!!\n",
789 irqcode);
790 goto reset;
791 }
792 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
793 aprint_error_dev(sc->sc_c.sc_dev,
794 "command with invalid status "
795 "(IRQ code 0x%x current status %d) !\n",
796 irqcode, esiop_cmd->cmd_c.status);
797 xs = NULL;
798 }
799 }
800 switch(irqcode) {
801 case A_int_err:
802 printf("error, DSP=0x%x\n",
803 (int)(bus_space_read_4(sc->sc_c.sc_rt,
804 sc->sc_c.sc_rh, SIOP_DSP) -
805 sc->sc_c.sc_scriptaddr));
806 if (xs) {
807 xs->error = XS_SELTIMEOUT;
808 goto end;
809 } else {
810 goto reset;
811 }
812 case A_int_msgin:
813 {
814 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
815 sc->sc_c.sc_rh, SIOP_SFBR);
816 if (msgin == MSG_MESSAGE_REJECT) {
817 int msg, extmsg;
818 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
819 /*
820 				 * message was part of an identify +
821 * something else. Identify shouldn't
822 * have been rejected.
823 */
824 msg =
825 esiop_cmd->cmd_tables->msg_out[1];
826 extmsg =
827 esiop_cmd->cmd_tables->msg_out[3];
828 } else {
829 msg =
830 esiop_cmd->cmd_tables->msg_out[0];
831 extmsg =
832 esiop_cmd->cmd_tables->msg_out[2];
833 }
834 if (msg == MSG_MESSAGE_REJECT) {
835 				/* MSG_REJECT for a MSG_REJECT! */
836 if (xs)
837 scsipi_printaddr(xs->xs_periph);
838 else
839 printf("%s: ", device_xname(
840 sc->sc_c.sc_dev));
841 printf("our reject message was "
842 "rejected\n");
843 goto reset;
844 }
845 if (msg == MSG_EXTENDED &&
846 extmsg == MSG_EXT_WDTR) {
847 /* WDTR rejected, initiate sync */
848 if ((esiop_target->target_c.flags &
849 TARF_SYNC) == 0) {
850 esiop_target->target_c.status =
851 TARST_OK;
852 siop_update_xfer_mode(&sc->sc_c,
853 target);
854 /* no table to flush here */
855 CALL_SCRIPT(Ent_msgin_ack);
856 return 1;
857 }
858 esiop_target->target_c.status =
859 TARST_SYNC_NEG;
860 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
861 sc->sc_c.st_minsync,
862 sc->sc_c.maxoff);
863 esiop_table_sync(esiop_cmd,
864 BUS_DMASYNC_PREREAD |
865 BUS_DMASYNC_PREWRITE);
866 CALL_SCRIPT(Ent_send_msgout);
867 return 1;
868 } else if (msg == MSG_EXTENDED &&
869 extmsg == MSG_EXT_SDTR) {
870 /* sync rejected */
871 esiop_target->target_c.offset = 0;
872 esiop_target->target_c.period = 0;
873 esiop_target->target_c.status =
874 TARST_OK;
875 siop_update_xfer_mode(&sc->sc_c,
876 target);
877 /* no table to flush here */
878 CALL_SCRIPT(Ent_msgin_ack);
879 return 1;
880 } else if (msg == MSG_EXTENDED &&
881 extmsg == MSG_EXT_PPR) {
882 /* PPR rejected */
883 esiop_target->target_c.offset = 0;
884 esiop_target->target_c.period = 0;
885 esiop_target->target_c.status =
886 TARST_OK;
887 siop_update_xfer_mode(&sc->sc_c,
888 target);
889 /* no table to flush here */
890 CALL_SCRIPT(Ent_msgin_ack);
891 return 1;
892 } else if (msg == MSG_SIMPLE_Q_TAG ||
893 msg == MSG_HEAD_OF_Q_TAG ||
894 msg == MSG_ORDERED_Q_TAG) {
895 if (esiop_handle_qtag_reject(
896 esiop_cmd) == -1)
897 goto reset;
898 CALL_SCRIPT(Ent_msgin_ack);
899 return 1;
900 }
901 if (xs)
902 scsipi_printaddr(xs->xs_periph);
903 else
904 printf("%s: ",
905 device_xname(sc->sc_c.sc_dev));
906 if (msg == MSG_EXTENDED) {
907 printf("scsi message reject, extended "
908 "message sent was 0x%x\n", extmsg);
909 } else {
910 printf("scsi message reject, message "
911 "sent was 0x%x\n", msg);
912 }
913 /* no table to flush here */
914 CALL_SCRIPT(Ent_msgin_ack);
915 return 1;
916 }
917 if (msgin == MSG_IGN_WIDE_RESIDUE) {
918 /* use the extmsgdata table to get the second byte */
919 esiop_cmd->cmd_tables->t_extmsgdata.count =
920 htole32(1);
921 esiop_table_sync(esiop_cmd,
922 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
923 CALL_SCRIPT(Ent_get_extmsgdata);
924 return 1;
925 }
926 if (xs)
927 scsipi_printaddr(xs->xs_periph);
928 else
929 printf("%s: ", device_xname(sc->sc_c.sc_dev));
930 printf("unhandled message 0x%x\n", msgin);
931 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
932 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
933 esiop_table_sync(esiop_cmd,
934 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
935 CALL_SCRIPT(Ent_send_msgout);
936 return 1;
937 }
938 case A_int_extmsgin:
939 #ifdef SIOP_DEBUG_INTR
940 printf("extended message: msg 0x%x len %d\n",
941 esiop_cmd->cmd_tables->msg_in[2],
942 esiop_cmd->cmd_tables->msg_in[1]);
943 #endif
944 if (esiop_cmd->cmd_tables->msg_in[1] >
945 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
946 aprint_error_dev(sc->sc_c.sc_dev,
947 "extended message too big (%d)\n",
948 esiop_cmd->cmd_tables->msg_in[1]);
949 esiop_cmd->cmd_tables->t_extmsgdata.count =
950 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
951 esiop_table_sync(esiop_cmd,
952 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
953 CALL_SCRIPT(Ent_get_extmsgdata);
954 return 1;
955 case A_int_extmsgdata:
956 #ifdef SIOP_DEBUG_INTR
957 {
958 int i;
959 printf("extended message: 0x%x, data:",
960 esiop_cmd->cmd_tables->msg_in[2]);
961 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
962 i++)
963 printf(" 0x%x",
964 esiop_cmd->cmd_tables->msg_in[i]);
965 printf("\n");
966 }
967 #endif
968 if (esiop_cmd->cmd_tables->msg_in[0] ==
969 MSG_IGN_WIDE_RESIDUE) {
970 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
971 if (esiop_cmd->cmd_tables->msg_in[3] != 1)
972 printf("MSG_IGN_WIDE_RESIDUE: "
973 "bad len %d\n",
974 esiop_cmd->cmd_tables->msg_in[3]);
975 switch (siop_iwr(&esiop_cmd->cmd_c)) {
976 case SIOP_NEG_MSGOUT:
977 esiop_table_sync(esiop_cmd,
978 BUS_DMASYNC_PREREAD |
979 BUS_DMASYNC_PREWRITE);
980 CALL_SCRIPT(Ent_send_msgout);
981 return 1;
982 case SIOP_NEG_ACK:
983 CALL_SCRIPT(Ent_msgin_ack);
984 return 1;
985 default:
986 panic("invalid retval from "
987 "siop_iwr()");
988 }
989 return 1;
990 }
991 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
992 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
993 case SIOP_NEG_MSGOUT:
994 esiop_update_scntl3(sc,
995 esiop_cmd->cmd_c.siop_target);
996 esiop_table_sync(esiop_cmd,
997 BUS_DMASYNC_PREREAD |
998 BUS_DMASYNC_PREWRITE);
999 CALL_SCRIPT(Ent_send_msgout);
1000 return 1;
1001 case SIOP_NEG_ACK:
1002 esiop_update_scntl3(sc,
1003 esiop_cmd->cmd_c.siop_target);
1004 CALL_SCRIPT(Ent_msgin_ack);
1005 return 1;
1006 default:
1007 panic("invalid retval from "
1008 "siop_wdtr_neg()");
1009 }
1010 return 1;
1011 }
1012 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1013 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1014 case SIOP_NEG_MSGOUT:
1015 esiop_update_scntl3(sc,
1016 esiop_cmd->cmd_c.siop_target);
1017 esiop_table_sync(esiop_cmd,
1018 BUS_DMASYNC_PREREAD |
1019 BUS_DMASYNC_PREWRITE);
1020 CALL_SCRIPT(Ent_send_msgout);
1021 return 1;
1022 case SIOP_NEG_ACK:
1023 esiop_update_scntl3(sc,
1024 esiop_cmd->cmd_c.siop_target);
1025 CALL_SCRIPT(Ent_msgin_ack);
1026 return 1;
1027 default:
1028 panic("invalid retval from "
1029 "siop_wdtr_neg()");
1030 }
1031 return 1;
1032 }
1033 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1034 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1035 case SIOP_NEG_MSGOUT:
1036 esiop_update_scntl3(sc,
1037 esiop_cmd->cmd_c.siop_target);
1038 esiop_table_sync(esiop_cmd,
1039 BUS_DMASYNC_PREREAD |
1040 BUS_DMASYNC_PREWRITE);
1041 CALL_SCRIPT(Ent_send_msgout);
1042 return 1;
1043 case SIOP_NEG_ACK:
1044 esiop_update_scntl3(sc,
1045 esiop_cmd->cmd_c.siop_target);
1046 CALL_SCRIPT(Ent_msgin_ack);
1047 return 1;
1048 default:
1049 panic("invalid retval from "
1050 "siop_wdtr_neg()");
1051 }
1052 return 1;
1053 }
1054 /* send a message reject */
1055 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1056 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1057 esiop_table_sync(esiop_cmd,
1058 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1059 CALL_SCRIPT(Ent_send_msgout);
1060 return 1;
1061 case A_int_disc:
1062 INCSTAT(esiop_stat_intr_sdp);
1063 offset = bus_space_read_1(sc->sc_c.sc_rt,
1064 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1065 #ifdef SIOP_DEBUG_DR
1066 printf("disconnect offset %d\n", offset);
1067 #endif
1068 siop_sdp(&esiop_cmd->cmd_c, offset);
1069 /* we start again with no offset */
1070 ESIOP_XFER(esiop_cmd, saved_offset) =
1071 htole32(SIOP_NOOFFSET);
1072 esiop_table_sync(esiop_cmd,
1073 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1074 CALL_SCRIPT(Ent_script_sched);
1075 return 1;
1076 case A_int_resfail:
1077 printf("reselect failed\n");
1078 CALL_SCRIPT(Ent_script_sched);
1079 return 1;
1080 case A_int_done:
1081 if (xs == NULL) {
1082 printf("%s: done without command\n",
1083 device_xname(sc->sc_c.sc_dev));
1084 CALL_SCRIPT(Ent_script_sched);
1085 return 1;
1086 }
1087 #ifdef SIOP_DEBUG_INTR
1088 printf("done, DSA=0x%lx target id 0x%x last msg "
1089 "in=0x%x status=0x%x\n",
1090 (u_long)esiop_cmd->cmd_c.dsa,
1091 le32toh(esiop_cmd->cmd_tables->id),
1092 esiop_cmd->cmd_tables->msg_in[0],
1093 le32toh(esiop_cmd->cmd_tables->status));
1094 #endif
1095 INCSTAT(esiop_stat_intr_done);
1096 esiop_cmd->cmd_c.status = CMDST_DONE;
1097 goto end;
1098 default:
1099 printf("unknown irqcode %x\n", irqcode);
1100 if (xs) {
1101 xs->error = XS_SELTIMEOUT;
1102 goto end;
1103 }
1104 goto reset;
1105 }
1106 return 1;
1107 }
1108 	/* We just shouldn't get there */
1109 	panic("esiop_intr: I shouldn't be there !");
1110
1111 end:
1112 /*
1113 	 * restart the script now if the command completed properly.
1114 	 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up
1115 	 * the queue
1116 */
1117 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1118 #ifdef SIOP_DEBUG_INTR
1119 printf("esiop_intr end: status %d\n", xs->status);
1120 #endif
1121 if (tag >= 0)
1122 esiop_lun->tactive[tag] = NULL;
1123 else
1124 esiop_lun->active = NULL;
1125 offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1126 SIOP_SCRATCHA + 1);
1127 /*
1128 * if we got a disconnect between the last data phase
1129 * and the status phase, offset will be 0. In this
1130 * case, cmd_tables->saved_offset will have the proper value
1131 * if it got updated by the controller
1132 */
1133 if (offset == 0 &&
1134 ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1135 offset =
1136 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1137
1138 esiop_scsicmd_end(esiop_cmd, offset);
1139 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1140 esiop_del_dev(sc, target, lun);
1141 CALL_SCRIPT(Ent_script_sched);
1142 return 1;
1143 }
1144
1145 void
1146 esiop_scsicmd_end(struct esiop_cmd *esiop_cmd, int offset)
1147 {
1148 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1149 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1150
1151 siop_update_resid(&esiop_cmd->cmd_c, offset);
1152
1153 switch(xs->status) {
1154 case SCSI_OK:
1155 xs->error = XS_NOERROR;
1156 break;
1157 case SCSI_BUSY:
1158 xs->error = XS_BUSY;
1159 break;
1160 case SCSI_CHECK:
1161 xs->error = XS_BUSY;
1162 /* remove commands in the queue and scheduler */
1163 esiop_unqueue(sc, xs->xs_periph->periph_target,
1164 xs->xs_periph->periph_lun);
1165 break;
1166 case SCSI_QUEUE_FULL:
1167 INCSTAT(esiop_stat_intr_qfull);
1168 #ifdef SIOP_DEBUG
1169 printf("%s:%d:%d: queue full (tag %d)\n",
1170 device_xname(sc->sc_c.sc_dev),
1171 xs->xs_periph->periph_target,
1172 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1173 #endif
1174 xs->error = XS_BUSY;
1175 break;
1176 case SCSI_SIOP_NOCHECK:
1177 /*
1178 * don't check status, xs->error is already valid
1179 */
1180 break;
1181 case SCSI_SIOP_NOSTATUS:
1182 /*
1183 * the status byte was not updated, cmd was
1184 * aborted
1185 */
1186 xs->error = XS_SELTIMEOUT;
1187 break;
1188 default:
1189 scsipi_printaddr(xs->xs_periph);
1190 printf("invalid status code %d\n", xs->status);
1191 xs->error = XS_DRIVER_STUFFUP;
1192 }
1193 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1194 bus_dmamap_sync(sc->sc_c.sc_dmat,
1195 esiop_cmd->cmd_c.dmamap_data, 0,
1196 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1197 (xs->xs_control & XS_CTL_DATA_IN) ?
1198 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1199 bus_dmamap_unload(sc->sc_c.sc_dmat,
1200 esiop_cmd->cmd_c.dmamap_data);
1201 }
1202 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1203 if ((xs->xs_control & XS_CTL_POLL) == 0)
1204 callout_stop(&xs->xs_callout);
1205 esiop_cmd->cmd_c.status = CMDST_FREE;
1206 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1207 #if 0
1208 if (xs->resid != 0)
1209 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1210 #endif
1211 scsipi_done (xs);
1212 }
1213
1214 void
1215 esiop_checkdone(struct esiop_softc *sc)
1216 {
1217 int target, lun, tag;
1218 struct esiop_target *esiop_target;
1219 struct esiop_lun *esiop_lun;
1220 struct esiop_cmd *esiop_cmd;
1221 uint32_t slot;
1222 int needsync = 0;
1223 int status;
1224 uint32_t sem, offset;
1225
1226 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1227 sem = esiop_script_read(sc, sc->sc_semoffset);
1228 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1229 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1230 /*
1231 		 * at least one command has been started,
1232 		 * so we should have free slots now
1233 */
1234 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1235 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1236 }
1237 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1238
1239 if ((sem & A_sem_done) == 0) {
1240 /* no pending done command */
1241 return;
1242 }
1243
1244 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1245 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
1246 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
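	/*
	 * Consume the done ring: each non-zero slot holds the T/L/Q word
	 * (same encoding as SCRATCHC) of a completed command; stop at the
	 * first empty slot.
	 */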
1247 next:
1248 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1249 if (needsync)
1250 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1251 sc->sc_done_offset,
1252 A_ndone_slots * sizeof(uint32_t),
1253 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1254 return;
1255 }
1256
1257 needsync = 1;
1258
1259 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1260 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1261 sc->sc_currdoneslot += 1;
1262 if (sc->sc_currdoneslot == A_ndone_slots)
1263 sc->sc_currdoneslot = 0;
1264
1265 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1266 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1267 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1268
1269 esiop_target = (target >= 0) ?
1270 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1271 if (esiop_target == NULL) {
1272 printf("esiop_target (target %d) not valid\n", target);
1273 goto next;
1274 }
1275 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1276 if (esiop_lun == NULL) {
1277 printf("esiop_lun (target %d lun %d) not valid\n",
1278 target, lun);
1279 goto next;
1280 }
1281 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1282 if (esiop_cmd == NULL) {
1283 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1284 target, lun, tag);
1285 goto next;
1286 }
1287
1288 esiop_table_sync(esiop_cmd,
1289 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1290 status = le32toh(esiop_cmd->cmd_tables->status);
1291 #ifdef DIAGNOSTIC
1292 if (status != SCSI_OK) {
1293 printf("command for T/L/Q %d/%d/%d status %d\n",
1294 target, lun, tag, status);
1295 goto next;
1296 }
1297
1298 #endif
1299 /* Ok, this command has been handled */
1300 esiop_cmd->cmd_c.xs->status = status;
1301 if (tag >= 0)
1302 esiop_lun->tactive[tag] = NULL;
1303 else
1304 esiop_lun->active = NULL;
1305 /*
1306 	 * scratcha may have been saved in saved_offset by the script;
1307 	 * fetch the offset from it
1308 */
1309 offset = 0;
1310 if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1311 offset =
1312 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1313 esiop_scsicmd_end(esiop_cmd, offset);
1314 goto next;
1315 }
1316
1317 void
1318 esiop_unqueue(struct esiop_softc *sc, int target, int lun)
1319 {
1320 int slot, tag;
1321 uint32_t slotdsa;
1322 struct esiop_cmd *esiop_cmd;
1323 struct esiop_lun *esiop_lun =
1324 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1325
1326 /* first make sure to read valid data */
1327 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1328
1329 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1330 /* look for commands in the scheduler, not yet started */
1331 if (esiop_lun->tactive[tag] == NULL)
1332 continue;
1333 esiop_cmd = esiop_lun->tactive[tag];
1334 for (slot = 0; slot < A_ncmd_slots; slot++) {
1335 slotdsa = esiop_script_read(sc,
1336 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1337 /* if the slot has any flag, it won't match the DSA */
1338 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1339 /* Mark this slot as ignore */
1340 esiop_script_write(sc,
1341 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1342 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1343 /* ask to requeue */
1344 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1345 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1346 esiop_lun->tactive[tag] = NULL;
1347 esiop_scsicmd_end(esiop_cmd, 0);
1348 break;
1349 }
1350 }
1351 }
1352 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1353 }
1354
1355 /*
1356 * handle a rejected queue tag message: the command will run untagged,
1357  * we have to adjust the reselect script.
1358 */
1359
1360
1361 int
1362 esiop_handle_qtag_reject(struct esiop_cmd *esiop_cmd)
1363 {
1364 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1365 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1366 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1367 int tag = esiop_cmd->cmd_tables->msg_out[2];
1368 struct esiop_target *esiop_target =
1369 (struct esiop_target*)sc->sc_c.targets[target];
1370 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1371
1372 #ifdef SIOP_DEBUG
1373 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1374 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1375 esiop_cmd->cmd_c.tag, esiop_cmd->cmd_c.status);
1376 #endif
1377
1378 if (esiop_lun->active != NULL) {
1379 aprint_error_dev(sc->sc_c.sc_dev,
1380 "untagged command already running for target %d "
1381 "lun %d (status %d)\n",
1382 target, lun, esiop_lun->active->cmd_c.status);
1383 return -1;
1384 }
1385 /* clear tag slot */
1386 esiop_lun->tactive[tag] = NULL;
1387 /* add command to non-tagged slot */
1388 esiop_lun->active = esiop_cmd;
1389 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1390 esiop_cmd->cmd_c.tag = -1;
1391 /* update DSA table */
1392 esiop_script_write(sc, esiop_target->lun_table_offset +
1393 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1394 esiop_cmd->cmd_c.dsa);
1395 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1396 return 0;
1397 }
1398
1399 /*
1400 * handle a bus reset: reset chip, unqueue all active commands, free all
1401  * target structs and report lossage to the upper layer.
1402  * As the upper layer may requeue immediately we have to first store
1403 * all active commands in a temporary queue.
1404 */
1405 void
1406 esiop_handle_reset(struct esiop_softc *sc)
1407 {
1408 struct esiop_cmd *esiop_cmd;
1409 struct esiop_lun *esiop_lun;
1410 int target, lun, tag;
1411 /*
1412 * scsi bus reset. reset the chip and restart
1413 * the queue. Need to clean up all active commands
1414 */
1415 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1416 /* stop, reset and restart the chip */
1417 esiop_reset(sc);
1418
1419 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1420 /* chip has been reset, all slots are free now */
1421 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1422 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1423 }
1424 /*
1425 	 * Process all commands: first completed commands, then commands
1426 	 * still being executed
1427 */
1428 esiop_checkdone(sc);
1429 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1430 struct esiop_target *esiop_target =
1431 (struct esiop_target *)sc->sc_c.targets[target];
1432 if (esiop_target == NULL)
1433 continue;
1434 for (lun = 0; lun < 8; lun++) {
1435 esiop_lun = esiop_target->esiop_lun[lun];
1436 if (esiop_lun == NULL)
1437 continue;
1438 for (tag = -1; tag <
1439 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1440 ESIOP_NTAG : 0);
1441 tag++) {
1442 if (tag >= 0)
1443 esiop_cmd = esiop_lun->tactive[tag];
1444 else
1445 esiop_cmd = esiop_lun->active;
1446 if (esiop_cmd == NULL)
1447 continue;
1448 scsipi_printaddr(
1449 esiop_cmd->cmd_c.xs->xs_periph);
1450 printf("command with tag id %d reset\n", tag);
1451 esiop_cmd->cmd_c.xs->error =
1452 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1453 XS_TIMEOUT : XS_RESET;
1454 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1455 if (tag >= 0)
1456 esiop_lun->tactive[tag] = NULL;
1457 else
1458 esiop_lun->active = NULL;
1459 esiop_cmd->cmd_c.status = CMDST_DONE;
1460 esiop_scsicmd_end(esiop_cmd, 0);
1461 }
1462 }
1463 sc->sc_c.targets[target]->status = TARST_ASYNC;
1464 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1465 sc->sc_c.targets[target]->period =
1466 sc->sc_c.targets[target]->offset = 0;
1467 siop_update_xfer_mode(&sc->sc_c, target);
1468 }
1469
1470 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1471 }
1472
1473 void
1474 esiop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1475 void *arg)
1476 {
1477 struct scsipi_xfer *xs;
1478 struct scsipi_periph *periph;
1479 struct esiop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1480 struct esiop_cmd *esiop_cmd;
1481 struct esiop_target *esiop_target;
1482 int s, error, i;
1483 int target;
1484 int lun;
1485
1486 switch (req) {
1487 case ADAPTER_REQ_RUN_XFER:
1488 xs = arg;
1489 periph = xs->xs_periph;
1490 target = periph->periph_target;
1491 lun = periph->periph_lun;
1492
1493 s = splbio();
1494 /*
1495 		 * first check if there are pending completed commands.
1496 		 * this can free up some resources (in the rings for example).
1497 * we have to lock it to avoid recursion.
1498 */
1499 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1500 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1501 esiop_checkdone(sc);
1502 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1503 }
1504 #ifdef SIOP_DEBUG_SCHED
1505 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1506 xs->xs_tag_type, xs->xs_tag_id);
1507 #endif
1508 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1509 if (esiop_cmd == NULL) {
1510 xs->error = XS_RESOURCE_SHORTAGE;
1511 scsipi_done(xs);
1512 splx(s);
1513 return;
1514 }
1515 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1516 #ifdef DIAGNOSTIC
1517 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1518 panic("siop_scsicmd: new cmd not free");
1519 #endif
1520 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1521 if (esiop_target == NULL) {
1522 #ifdef SIOP_DEBUG
1523 printf("%s: alloc siop_target for target %d\n",
1524 device_xname(sc->sc_c.sc_dev), target);
1525 #endif
1526 sc->sc_c.targets[target] =
1527 malloc(sizeof(struct esiop_target),
1528 M_DEVBUF, M_NOWAIT | M_ZERO);
1529 if (sc->sc_c.targets[target] == NULL) {
1530 aprint_error_dev(sc->sc_c.sc_dev,
1531 "can't malloc memory for "
1532 "target %d\n",
1533 target);
1534 xs->error = XS_RESOURCE_SHORTAGE;
1535 scsipi_done(xs);
1536 splx(s);
1537 return;
1538 }
1539 esiop_target =
1540 (struct esiop_target*)sc->sc_c.targets[target];
1541 esiop_target->target_c.status = TARST_PROBING;
1542 esiop_target->target_c.flags = 0;
1543 esiop_target->target_c.id =
1544 sc->sc_c.clock_div << 24; /* scntl3 */
1545 esiop_target->target_c.id |= target << 16; /* id */
1546 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1547
1548 for (i=0; i < 8; i++)
1549 esiop_target->esiop_lun[i] = NULL;
1550 esiop_target_register(sc, target);
1551 }
1552 if (esiop_target->esiop_lun[lun] == NULL) {
1553 esiop_target->esiop_lun[lun] =
1554 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1555 M_NOWAIT|M_ZERO);
1556 if (esiop_target->esiop_lun[lun] == NULL) {
1557 aprint_error_dev(sc->sc_c.sc_dev,
1558 "can't alloc esiop_lun for "
1559 "target %d lun %d\n",
1560 target, lun);
1561 xs->error = XS_RESOURCE_SHORTAGE;
1562 scsipi_done(xs);
1563 splx(s);
1564 return;
1565 }
1566 }
1567 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1568 esiop_cmd->cmd_c.xs = xs;
1569 esiop_cmd->cmd_c.flags = 0;
1570 esiop_cmd->cmd_c.status = CMDST_READY;
1571
1572 /* load the DMA maps */
1573 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1574 esiop_cmd->cmd_c.dmamap_cmd,
1575 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1576 if (error) {
1577 aprint_error_dev(sc->sc_c.sc_dev,
1578 "unable to load cmd DMA map: %d\n",
1579 error);
1580 xs->error = XS_DRIVER_STUFFUP;
1581 scsipi_done(xs);
1582 splx(s);
1583 return;
1584 }
1585 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1586 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1587 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1588 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1589 ((xs->xs_control & XS_CTL_DATA_IN) ?
1590 BUS_DMA_READ : BUS_DMA_WRITE));
1591 if (error) {
1592 aprint_error_dev(sc->sc_c.sc_dev,
1593 "unable to load cmd DMA map: %d",
1594 error);
1595 xs->error = XS_DRIVER_STUFFUP;
1596 scsipi_done(xs);
1597 bus_dmamap_unload(sc->sc_c.sc_dmat,
1598 esiop_cmd->cmd_c.dmamap_cmd);
1599 splx(s);
1600 return;
1601 }
1602 bus_dmamap_sync(sc->sc_c.sc_dmat,
1603 esiop_cmd->cmd_c.dmamap_data, 0,
1604 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1605 (xs->xs_control & XS_CTL_DATA_IN) ?
1606 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1607 }
1608 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1609 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1610 BUS_DMASYNC_PREWRITE);
1611
1612 if (xs->xs_tag_type)
1613 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1614 else
1615 esiop_cmd->cmd_c.tag = -1;
1616 siop_setuptables(&esiop_cmd->cmd_c);
1617 ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
1618 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1619 ESIOP_XFER(esiop_cmd, tlq) |=
1620 htole32((target << 8) | (lun << 16));
1621 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1622 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1623 ESIOP_XFER(esiop_cmd, tlq) |=
1624 htole32(esiop_cmd->cmd_c.tag << 24);
1625 }
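		/*
		 * tlq mirrors the SCRATCHC encoding used at reselect time:
		 * flags in byte 0, target in byte 1, lun in byte 2 and, for
		 * tagged commands, the tag in byte 3.
		 */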
1626
1627 esiop_table_sync(esiop_cmd,
1628 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1629 esiop_start(sc, esiop_cmd);
1630 if (xs->xs_control & XS_CTL_POLL) {
1631 /* poll for command completion */
1632 while ((xs->xs_status & XS_STS_DONE) == 0) {
1633 delay(1000);
1634 esiop_intr(sc);
1635 }
1636 }
1637 splx(s);
1638 return;
1639
1640 case ADAPTER_REQ_GROW_RESOURCES:
1641 #ifdef SIOP_DEBUG
1642 printf("%s grow resources (%d)\n",
1643 device_xname(sc->sc_c.sc_dev),
1644 sc->sc_c.sc_adapt.adapt_openings);
1645 #endif
1646 esiop_morecbd(sc);
1647 return;
1648
1649 case ADAPTER_REQ_SET_XFER_MODE:
1650 {
1651 struct scsipi_xfer_mode *xm = arg;
1652 if (sc->sc_c.targets[xm->xm_target] == NULL)
1653 return;
1654 s = splbio();
1655 if (xm->xm_mode & PERIPH_CAP_TQING) {
1656 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1657 /* allocate tag tables for this device */
1658 for (lun = 0;
1659 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1660 if (scsipi_lookup_periph(chan,
1661 xm->xm_target, lun) != NULL)
1662 esiop_add_dev(sc, xm->xm_target, lun);
1663 }
1664 }
1665 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1666 (sc->sc_c.features & SF_BUS_WIDE))
1667 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1668 if (xm->xm_mode & PERIPH_CAP_SYNC)
1669 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1670 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1671 (sc->sc_c.features & SF_CHIP_DT))
1672 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1673 if ((xm->xm_mode &
1674 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1675 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1676 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1677
1678 splx(s);
1679 }
1680 }
1681 }
1682
1683 static void
1684 esiop_start(struct esiop_softc *sc, struct esiop_cmd *esiop_cmd)
1685 {
1686 struct esiop_lun *esiop_lun;
1687 struct esiop_target *esiop_target;
1688 int timeout;
1689 int target, lun, slot;
1690
1691 /*
1692 * first make sure to read valid data
1693 */
1694 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1695
1696 /*
1697 * We use a circular queue here. sc->sc_currschedslot points to a
1698 * free slot, unless we have filled the queue. Check this.
1699 */
1700 slot = sc->sc_currschedslot;
1701 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1702 A_f_cmd_free) == 0) {
1703 /*
1704 		 * no more free slots, no need to continue. freeze the queue
1705 * and requeue this command.
1706 */
1707 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1708 sc->sc_flags |= SCF_CHAN_NOSLOT;
1709 esiop_script_write(sc, sc->sc_semoffset,
1710 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1711 esiop_script_sync(sc,
1712 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1713 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1714 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1715 esiop_scsicmd_end(esiop_cmd, 0);
1716 return;
1717 }
1718 /* OK, we can use this slot */
1719
1720 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1721 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1722 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1723 esiop_lun = esiop_target->esiop_lun[lun];
1724 /* if non-tagged command active, panic: this shouldn't happen */
1725 if (esiop_lun->active != NULL) {
1726 panic("esiop_start: tagged cmd while untagged running");
1727 }
1728 #ifdef DIAGNOSTIC
1729 /* sanity check the tag if needed */
1730 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1731 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1732 esiop_cmd->cmd_c.tag < 0) {
1733 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1734 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1735 panic("esiop_start: invalid tag id");
1736 }
1737 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1738 panic("esiop_start: tag not free");
1739 }
1740 #endif
1741 #ifdef SIOP_DEBUG_SCHED
1742 printf("using slot %d for DSA 0x%lx\n", slot,
1743 (u_long)esiop_cmd->cmd_c.dsa);
1744 #endif
1745 /* mark command as active */
1746 if (esiop_cmd->cmd_c.status == CMDST_READY)
1747 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1748 else
1749 panic("esiop_start: bad status");
1750 /* DSA table for reselect */
1751 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1752 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1753 /* DSA table for reselect */
1754 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1755 htole32(esiop_cmd->cmd_c.dsa);
1756 bus_dmamap_sync(sc->sc_c.sc_dmat,
1757 esiop_lun->lun_tagtbl->tblblk->blkmap,
1758 esiop_lun->lun_tagtbl->tbl_offset,
1759 sizeof(uint32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1760 } else {
1761 esiop_lun->active = esiop_cmd;
1762 esiop_script_write(sc,
1763 esiop_target->lun_table_offset +
1764 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1765 esiop_cmd->cmd_c.dsa);
1766 }
1767 /* scheduler slot: DSA */
1768 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1769 esiop_cmd->cmd_c.dsa);
1770 /* make sure SCRIPT processor will read valid data */
1771 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1772 /* handle timeout */
1773 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1774 		/* start expire timer */
1775 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1776 if (timeout == 0)
1777 timeout = 1;
1778 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1779 timeout, esiop_timeout, esiop_cmd);
1780 }
1781 /* Signal script it has some work to do */
1782 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1783 SIOP_ISTAT, ISTAT_SIGP);
1784 /* update the current slot, and wait for IRQ */
1785 sc->sc_currschedslot++;
1786 if (sc->sc_currschedslot >= A_ncmd_slots)
1787 sc->sc_currschedslot = 0;
1788 }
1789
1790 void
1791 esiop_timeout(void *v)
1792 {
1793 struct esiop_cmd *esiop_cmd = v;
1794 struct esiop_softc *sc =
1795 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1796 int s;
1797 #ifdef SIOP_DEBUG
1798 int slot, slotdsa;
1799 #endif
1800
1801 s = splbio();
1802 esiop_table_sync(esiop_cmd,
1803 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1804 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1805 #ifdef SIOP_DEBUG
1806 printf("command timeout (status %d)\n",
1807 le32toh(esiop_cmd->cmd_tables->status));
1808
1809 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1810 for (slot = 0; slot < A_ncmd_slots; slot++) {
1811 slotdsa = esiop_script_read(sc,
1812 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1813 if ((slotdsa & 0x01) == 0)
1814 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1815 }
1816 printf("istat 0x%x ",
1817 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1818 printf("DSP 0x%lx DSA 0x%x\n",
1819 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP)
1820 - sc->sc_c.sc_scriptaddr),
1821 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1822 (void)bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1823 printf("istat 0x%x\n",
1824 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1825 #else
1826 printf("command timeout, CDB: ");
1827 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1828 printf("\n");
1829 #endif
1830 /* reset the scsi bus */
1831 siop_resetbus(&sc->sc_c);
1832
1833 /* deactivate callout */
1834 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1835 /*
1836 * mark command as having timed out and just return;
1837 * the bus reset will generate an interrupt,
1838 * which will be handled in esiop_intr()
1839 */
1840 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1841 splx(s);
1842 }
1843
1844 void
1845 esiop_dump_script(struct esiop_softc *sc)
1846 {
1847 int i;
1848
1849 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1850 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1851 le32toh(sc->sc_c.sc_script[i]),
1852 le32toh(sc->sc_c.sc_script[i + 1]));
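/*
 * Memory-to-memory move instructions (top three bits of the DCMD
 * word are 110) are three words long, so dump the extra word too.
 */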
1853 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1854 0xc0000000) {
1855 i++;
1856 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i + 1]));
1857 }
1858 printf("\n");
1859 }
1860 }
1861
1862 void
1863 esiop_morecbd(struct esiop_softc *sc)
1864 {
1865 int error, i, s;
1866 bus_dma_segment_t seg;
1867 int rseg;
1868 struct esiop_cbd *newcbd;
1869 struct esiop_xfer *xfer;
1870 bus_addr_t dsa;
1871
1872 /* allocate a new list head */
1873 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1874 if (newcbd == NULL) {
1875 aprint_error_dev(sc->sc_c.sc_dev,
1876 "can't allocate memory for command descriptors "
1877 "head\n");
1878 return;
1879 }
1880
1881 /* allocate cmd list */
1882 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1883 M_DEVBUF, M_NOWAIT|M_ZERO);
1884 if (newcbd->cmds == NULL) {
1885 aprint_error_dev(sc->sc_c.sc_dev,
1886 "can't allocate memory for command descriptors\n");
1887 goto bad3;
1888 }
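/* one page of DMA-safe memory holds the SIOP_NCMDPB xfer descriptors */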
1889 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1890 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1891 if (error) {
1892 aprint_error_dev(sc->sc_c.sc_dev,
1893 "unable to allocate cbd DMA memory, error = %d\n",
1894 error);
1895 goto bad2;
1896 }
1897 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1898 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1899 if (error) {
1900 aprint_error_dev(sc->sc_c.sc_dev,
1901 "unable to map cbd DMA memory, error = %d\n",
1902 error);
1903 goto bad1;
1904 }
1905 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1906 BUS_DMA_NOWAIT, &newcbd->xferdma);
1907 if (error) {
1908 aprint_error_dev(sc->sc_c.sc_dev,
1909 "unable to create cbd DMA map, error = %d\n", error);
1910 goto bad1;
1911 }
1912 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1913 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1914 if (error) {
1915 aprint_error_dev(sc->sc_c.sc_dev,
1916 "unable to load cbd DMA map, error = %d\n", error);
1917 goto bad0;
1918 }
1919 #ifdef DEBUG
1920 printf("%s: alloc newcdb at PHY addr 0x%lx\n",
1921 device_xname(sc->sc_c.sc_dev),
1922 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1923 #endif
1924 for (i = 0; i < SIOP_NCMDPB; i++) {
1925 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1926 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1927 &newcbd->cmds[i].cmd_c.dmamap_data);
1928 if (error) {
1929 aprint_error_dev(sc->sc_c.sc_dev,
1930 "unable to create data DMA map for cbd: "
1931 "error %d\n", error);
1932 goto bad0;
1933 }
1934 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1935 sizeof(struct scsipi_generic), 1,
1936 sizeof(struct scsipi_generic), 0,
1937 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1938 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1939 if (error) {
1940 aprint_error_dev(sc->sc_c.sc_dev,
1941 "unable to create cmd DMA map for cbd %d\n", error);
1942 goto bad0;
1943 }
1944 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1945 newcbd->cmds[i].esiop_cbdp = newcbd;
1946 xfer = &newcbd->xfers[i];
1947 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1948 memset(newcbd->cmds[i].cmd_tables, 0,
1949 sizeof(struct esiop_xfer));
1950 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1951 i * sizeof(struct esiop_xfer);
1952 newcbd->cmds[i].cmd_c.dsa = dsa;
1953 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
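/*
 * The message and status buffers live inside the xfer structure
 * itself, so the script table entries are simply fixed offsets
 * from the structure's DSA.
 */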
1954 xfer->siop_tables.t_msgout.count = htole32(1);
1955 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1956 xfer->siop_tables.t_msgin.count = htole32(1);
1957 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1958 offsetof(struct siop_common_xfer, msg_in));
1959 xfer->siop_tables.t_extmsgin.count = htole32(2);
1960 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1961 offsetof(struct siop_common_xfer, msg_in) + 1);
1962 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1963 offsetof(struct siop_common_xfer, msg_in) + 3);
1964 xfer->siop_tables.t_status.count = htole32(1);
1965 xfer->siop_tables.t_status.addr = htole32(dsa +
1966 offsetof(struct siop_common_xfer, status));
1967
1968 s = splbio();
1969 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1970 splx(s);
1971 #ifdef SIOP_DEBUG
1972 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1973 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1974 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1975 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1976 #endif
1977 }
1978 s = splbio();
1979 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1980 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1981 splx(s);
1982 return;
1983 bad0:
1984 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1985 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1986 bad1:
1987 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1988 bad2:
1989 free(newcbd->cmds, M_DEVBUF);
1990 bad3:
1991 free(newcbd, M_DEVBUF);
1992 }
1993
1994 void
1995 esiop_moretagtbl(struct esiop_softc *sc)
1996 {
1997 int error, i, j, s;
1998 bus_dma_segment_t seg;
1999 int rseg;
2000 struct esiop_dsatblblk *newtblblk;
2001 struct esiop_dsatbl *newtbls;
2002 uint32_t *tbls;
2003
2004 /* allocate a new list head */
2005 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2006 M_DEVBUF, M_NOWAIT|M_ZERO);
2007 if (newtblblk == NULL) {
2008 aprint_error_dev(sc->sc_c.sc_dev,
2009 "can't allocate memory for tag DSA table block\n");
2010 return;
2011 }
2012
2013 /* allocate tbl list */
2014 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2015 M_DEVBUF, M_NOWAIT|M_ZERO);
2016 if (newtbls == NULL) {
2017 aprint_error_dev(sc->sc_c.sc_dev,
2018 "can't allocate memory for command descriptors\n");
2019 goto bad3;
2020 }
2021 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2022 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2023 if (error) {
2024 aprint_error_dev(sc->sc_c.sc_dev,
2025 "unable to allocate tbl DMA memory, error = %d\n", error);
2026 goto bad2;
2027 }
2028 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2029 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2030 if (error) {
2031 aprint_error_dev(sc->sc_c.sc_dev,
2032 "unable to map tbls DMA memory, error = %d\n", error);
2033 goto bad1;
2034 }
2035 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2036 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2037 if (error) {
2038 aprint_error_dev(sc->sc_c.sc_dev,
2039 "unable to create tbl DMA map, error = %d\n", error);
2040 goto bad1;
2041 }
2042 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2043 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2044 if (error) {
2045 aprint_error_dev(sc->sc_c.sc_dev,
2046 "unable to load tbl DMA map, error = %d\n", error);
2047 goto bad0;
2048 }
2049 #ifdef DEBUG
2050 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2051 device_xname(sc->sc_c.sc_dev),
2052 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2053 #endif
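/*
 * Carve the page into ESIOP_NTPB tables of ESIOP_NTAG 32-bit DSA
 * entries each and put them on the free list.
 */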
2054 for (i = 0; i < ESIOP_NTPB; i++) {
2055 newtbls[i].tblblk = newtblblk;
2056 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2057 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(uint32_t);
2058 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2059 newtbls[i].tbl_offset;
2060 for (j = 0; j < ESIOP_NTAG; j++)
2061 newtbls[i].tbl[j] = j;
2062 s = splbio();
2063 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2064 splx(s);
2065 }
2066 s = splbio();
2067 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2068 splx(s);
2069 return;
2070 bad0:
2071 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2072 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2073 bad1:
2074 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2075 bad2:
2076 free(newtbls, M_DEVBUF);
2077 bad3:
2078 free(newtblblk, M_DEVBUF);
2079 }
2080
2081 void
2082 esiop_update_scntl3(struct esiop_softc *sc,
2083 struct siop_common_target *_siop_target)
2084 {
2085 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2086
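/*
 * The first word of the per-target table is the select ID, which
 * also encodes the negotiated scntl3/sxfer values; rewrite it so
 * the SCRIPT picks up the new transfer parameters.
 */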
2087 esiop_script_write(sc, esiop_target->lun_table_offset,
2088 esiop_target->target_c.id);
2089 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2090 }
2091
2092 void
2093 esiop_add_dev(struct esiop_softc *sc, int target, int lun)
2094 {
2095 struct esiop_target *esiop_target =
2096 (struct esiop_target *)sc->sc_c.targets[target];
2097 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2098
2099 if (esiop_lun->lun_tagtbl != NULL)
2100 return; /* already allocated */
2101
2102 /* we need a tag DSA table */
2103 esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2104 if (esiop_lun->lun_tagtbl == NULL) {
2105 esiop_moretagtbl(sc);
2106 esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2107 if (esiop_lun->lun_tagtbl == NULL) {
2108 /* no resources, run untagged */
2109 esiop_target->target_c.flags &= ~TARF_TAG;
2110 return;
2111 }
2112 }
2113 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2114 /* Update LUN DSA table */
2115 esiop_script_write(sc, esiop_target->lun_table_offset +
2116 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2117 esiop_lun->lun_tagtbl->tbl_dsa);
2118 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2119 }
2120
2121 void
2122 esiop_del_dev(struct esiop_softc *sc, int target, int lun)
2123 {
2124 struct esiop_target *esiop_target;
2125
2126 #ifdef SIOP_DEBUG
2127 printf("%s:%d:%d: free lun sw entry\n",
2128 device_xname(sc->sc_c.sc_dev), target, lun);
2129 #endif
2130 if (sc->sc_c.targets[target] == NULL)
2131 return;
2132 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2133 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2134 esiop_target->esiop_lun[lun] = NULL;
2135 }
2136
2137 void
2138 esiop_target_register(struct esiop_softc *sc, uint32_t target)
2139 {
2140 struct esiop_target *esiop_target =
2141 (struct esiop_target *)sc->sc_c.targets[target];
2142 struct esiop_lun *esiop_lun;
2143 int lun;
2144
2145 /* get a DSA table for this target */
2146 esiop_target->lun_table_offset = sc->sc_free_offset;
2147 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
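/*
 * Table layout: one 32-bit word holding the target ID, followed by
 * two words per LUN (untagged-command DSA and tag-table DSA).
 */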
2148 #ifdef SIOP_DEBUG
2149 printf("%s: lun table for target %d offset %d free offset %d\n",
2150 device_xname(sc->sc_c.sc_dev), target,
2151 esiop_target->lun_table_offset,
2152 sc->sc_free_offset);
2153 #endif
2154 /* the first 32-bit word is the ID (for select) */
2155 esiop_script_write(sc, esiop_target->lun_table_offset,
2156 esiop_target->target_c.id);
2157 /* Record this table in the target DSA table */
2158 esiop_script_write(sc,
2159 sc->sc_target_table_offset + target,
2160 (esiop_target->lun_table_offset * sizeof(uint32_t)) +
2161 sc->sc_c.sc_scriptaddr);
2162 /* if we have a tag table, register it */
2163 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2164 esiop_lun = esiop_target->esiop_lun[lun];
2165 if (esiop_lun == NULL)
2166 continue;
2167 if (esiop_lun->lun_tagtbl)
2168 esiop_script_write(sc, esiop_target->lun_table_offset +
2169 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2170 esiop_lun->lun_tagtbl->tbl_dsa);
2171 }
2172 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2173 }
2174
2175 #ifdef SIOP_STATS
2176 void
2177 esiop_printstats(void)
2178 {
2179
2180 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2181 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2182 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2183 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2184 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2185 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2186 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2187 }
2188 #endif
2189