1 /* $NetBSD: esiop.c,v 1.53 2010/05/02 17:37:52 jakllsch Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.53 2010/05/02 17:37:52 jakllsch Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/kernel.h>
39
40 #include <uvm/uvm_extern.h>
41
42 #include <machine/endian.h>
43 #include <sys/bus.h>
44
45 #include <dev/microcode/siop/esiop.out>
46
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsi_message.h>
49 #include <dev/scsipi/scsipi_all.h>
50
51 #include <dev/scsipi/scsiconf.h>
52
53 #include <dev/ic/siopreg.h>
54 #include <dev/ic/siopvar_common.h>
55 #include <dev/ic/esiopvar.h>
56
57 #include "opt_siop.h"
58
59 #ifndef DEBUG
60 #undef DEBUG
61 #endif
62 /*
63 #define SIOP_DEBUG
64 #define SIOP_DEBUG_DR
65 #define SIOP_DEBUG_INTR
66 #define SIOP_DEBUG_SCHED
67 #define DUMP_SCRIPT
68 */
69
70 #define SIOP_STATS
71
72 #ifndef SIOP_DEFAULT_TARGET
73 #define SIOP_DEFAULT_TARGET 7
74 #endif
75
76 /* number of cmd descriptors per block */
77 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
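/*
 * Illustration (assumed figures, not from the original source): with 4 kB
 * pages and a 256-byte struct esiop_xfer this would work out to 16 command
 * descriptors per allocation; the real values depend on the platform page
 * size and on the actual size of struct esiop_xfer.
 */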
78
79 void esiop_reset(struct esiop_softc *);
80 void esiop_checkdone(struct esiop_softc *);
81 void esiop_handle_reset(struct esiop_softc *);
82 void esiop_scsicmd_end(struct esiop_cmd *, int);
83 void esiop_unqueue(struct esiop_softc *, int, int);
84 int esiop_handle_qtag_reject(struct esiop_cmd *);
85 static void esiop_start(struct esiop_softc *, struct esiop_cmd *);
86 void esiop_timeout(void *);
87 void esiop_scsipi_request(struct scsipi_channel *,
88 scsipi_adapter_req_t, void *);
89 void esiop_dump_script(struct esiop_softc *);
90 void esiop_morecbd(struct esiop_softc *);
91 void esiop_moretagtbl(struct esiop_softc *);
92 void siop_add_reselsw(struct esiop_softc *, int);
93 void esiop_target_register(struct esiop_softc *, uint32_t);
94
95 void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
96
97 #ifdef SIOP_STATS
98 static int esiop_stat_intr = 0;
99 static int esiop_stat_intr_shortxfer = 0;
100 static int esiop_stat_intr_sdp = 0;
101 static int esiop_stat_intr_done = 0;
102 static int esiop_stat_intr_xferdisc = 0;
103 static int esiop_stat_intr_lunresel = 0;
104 static int esiop_stat_intr_qfull = 0;
105 void esiop_printstats(void);
106 #define INCSTAT(x) x++
107 #else
108 #define INCSTAT(x)
109 #endif
110
111 static inline void esiop_script_sync(struct esiop_softc *, int);
112 static inline void
113 esiop_script_sync(struct esiop_softc *sc, int ops)
114 {
115
116 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
117 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
118 PAGE_SIZE, ops);
119 }
120
121 static inline uint32_t esiop_script_read(struct esiop_softc *, u_int);
122 static inline uint32_t
123 esiop_script_read(struct esiop_softc *sc, u_int offset)
124 {
125
126 if (sc->sc_c.features & SF_CHIP_RAM) {
127 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
128 offset * 4);
129 } else {
130 return le32toh(sc->sc_c.sc_script[offset]);
131 }
132 }
133
134 static inline void esiop_script_write(struct esiop_softc *, u_int,
135 uint32_t);
136 static inline void
137 esiop_script_write(struct esiop_softc *sc, u_int offset, uint32_t val)
138 {
139
140 if (sc->sc_c.features & SF_CHIP_RAM) {
141 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
142 offset * 4, val);
143 } else {
144 sc->sc_c.sc_script[offset] = htole32(val);
145 }
146 }
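/*
 * The three helpers above hide where the SCRIPT actually lives: either in
 * on-chip RAM (SF_CHIP_RAM, accessed through bus_space) or in host memory
 * shared with the chip by DMA (accessed through sc_script, bracketed by
 * esiop_script_sync() calls).  Offsets are always counted in 32-bit script
 * words, not bytes.
 */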
147
148 void
149 esiop_attach(struct esiop_softc *sc)
150 {
151 struct esiop_dsatbl *tagtbl_donering;
152
153 if (siop_common_attach(&sc->sc_c) != 0)
154 return;
155
156 TAILQ_INIT(&sc->free_list);
157 TAILQ_INIT(&sc->cmds);
158 TAILQ_INIT(&sc->free_tagtbl);
159 TAILQ_INIT(&sc->tag_tblblk);
160 sc->sc_currschedslot = 0;
161 #ifdef SIOP_DEBUG
162 aprint_debug_dev(sc->sc_c.sc_dev,
163 "script size = %d, PHY addr=0x%x, VIRT=%p\n",
164 (int)sizeof(esiop_script),
165 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
166 #endif
167
168 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
169 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
170
171 /*
172 * Get space for the CMD done slot. For this we use a tag table entry:
173 * it's the same size, and reusing one avoids wasting 3/4 of a page.
174 */
175 #ifdef DIAGNOSTIC
176 if (ESIOP_NTAG != A_ndone_slots) {
177 aprint_error_dev(sc->sc_c.sc_dev,
178 "size of tag DSA table different from the done ring\n");
179 return;
180 }
181 #endif
182 esiop_moretagtbl(sc);
183 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
184 if (tagtbl_donering == NULL) {
185 aprint_error_dev(sc->sc_c.sc_dev,
186 "no memory for command done ring\n");
187 return;
188 }
189 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
190 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
191 sc->sc_done_offset = tagtbl_donering->tbl_offset;
192 sc->sc_done_slot = &tagtbl_donering->tbl[0];
193
194 /* Do a bus reset, so that devices fall back to narrow/async */
195 siop_resetbus(&sc->sc_c);
196 /*
197 * esiop_reset() will reset the chip, thus clearing pending interrupts
198 */
199 esiop_reset(sc);
200 #ifdef DUMP_SCRIPT
201 esiop_dump_script(sc);
202 #endif
203
204 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
205 }
206
207 void
208 esiop_reset(struct esiop_softc *sc)
209 {
210 int i, j;
211 uint32_t addr;
212 uint32_t msgin_addr, sem_addr;
213
214 siop_common_reset(&sc->sc_c);
215
216 /*
217 * We copy the script at the beginning of RAM. It is followed by
218 * 4 bytes for the message-in buffer and 4 bytes for the semaphore.
219 */
220 sc->sc_free_offset = __arraycount(esiop_script);
221 msgin_addr =
222 sc->sc_free_offset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
223 sc->sc_free_offset += 1;
224 sc->sc_semoffset = sc->sc_free_offset;
225 sem_addr =
226 sc->sc_semoffset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
227 sc->sc_free_offset += 1;
228 /* then we have the scheduler ring */
229 sc->sc_shedoffset = sc->sc_free_offset;
230 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
231 /* then the targets DSA table */
232 sc->sc_target_table_offset = sc->sc_free_offset;
233 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
234 /* copy and patch the script */
235 if (sc->sc_c.features & SF_CHIP_RAM) {
236 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
237 esiop_script,
238 __arraycount(esiop_script));
239 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
240 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
241 E_tlq_offset_Used[j] * 4,
242 sizeof(struct siop_common_xfer));
243 }
244 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
245 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
246 E_saved_offset_offset_Used[j] * 4,
247 sizeof(struct siop_common_xfer) + 4);
248 }
249 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
250 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
251 E_abs_msgin2_Used[j] * 4, msgin_addr);
252 }
253 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
254 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
255 E_abs_sem_Used[j] * 4, sem_addr);
256 }
257
258 if (sc->sc_c.features & SF_CHIP_LED0) {
259 bus_space_write_region_4(sc->sc_c.sc_ramt,
260 sc->sc_c.sc_ramh,
261 Ent_led_on1, esiop_led_on,
262 __arraycount(esiop_led_on));
263 bus_space_write_region_4(sc->sc_c.sc_ramt,
264 sc->sc_c.sc_ramh,
265 Ent_led_on2, esiop_led_on,
266 __arraycount(esiop_led_on));
267 bus_space_write_region_4(sc->sc_c.sc_ramt,
268 sc->sc_c.sc_ramh,
269 Ent_led_off, esiop_led_off,
270 __arraycount(esiop_led_off));
271 }
272 } else {
273 for (j = 0; j < __arraycount(esiop_script); j++) {
274 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
275 }
276 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
277 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
278 htole32(sizeof(struct siop_common_xfer));
279 }
280 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
281 sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
282 htole32(sizeof(struct siop_common_xfer) + 4);
283 }
284 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
285 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
286 htole32(msgin_addr);
287 }
288 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
289 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
290 htole32(sem_addr);
291 }
292
293 if (sc->sc_c.features & SF_CHIP_LED0) {
294 for (j = 0; j < __arraycount(esiop_led_on); j++)
295 sc->sc_c.sc_script[
296 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
297 ] = htole32(esiop_led_on[j]);
298 for (j = 0; j < __arraycount(esiop_led_on); j++)
299 sc->sc_c.sc_script[
300 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
301 ] = htole32(esiop_led_on[j]);
302 for (j = 0; j < __arraycount(esiop_led_off); j++)
303 sc->sc_c.sc_script[
304 Ent_led_off / sizeof(esiop_led_off[0]) + j
305 ] = htole32(esiop_led_off[j]);
306 }
307 }
308 /* get base of scheduler ring */
309 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(uint32_t);
310 /* init scheduler */
311 for (i = 0; i < A_ncmd_slots; i++) {
312 esiop_script_write(sc,
313 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
314 }
315 sc->sc_currschedslot = 0;
316 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
317 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
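/*
 * As set up above: SCRATCHE0 holds the index of the scheduler slot the
 * SCRIPT will look at next, and SCRATCHD holds the bus address of that
 * slot; both are kept in sync by the SCRIPT and by the selection-timeout
 * handler in esiop_intr().
 */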
318 /*
319 * 0x78000000 is a 'move data8 to reg'. data8 is the second
320 * octet, reg offset is the third.
321 */
322 esiop_script_write(sc, Ent_cmdr0 / 4,
323 0x78640000 | ((addr & 0x000000ff) << 8));
324 esiop_script_write(sc, Ent_cmdr1 / 4,
325 0x78650000 | ((addr & 0x0000ff00) ));
326 esiop_script_write(sc, Ent_cmdr2 / 4,
327 0x78660000 | ((addr & 0x00ff0000) >> 8));
328 esiop_script_write(sc, Ent_cmdr3 / 4,
329 0x78670000 | ((addr & 0xff000000) >> 16));
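/*
 * Worked example (illustrative only): if addr were 0x12345678, the four
 * patched instructions would read "move 0x78 to reg 0x64", "move 0x56 to
 * reg 0x65", "move 0x34 to reg 0x66" and "move 0x12 to reg 0x67", i.e.
 * the scheduler ring base ends up in SCRATCHD (matching the
 * SIOP_SCRATCHD write above), least significant byte first.
 */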
330 /* done ring */
331 for (i = 0; i < A_ndone_slots; i++)
332 sc->sc_done_slot[i] = 0;
333 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
334 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
335 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
336 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
337 sc->sc_currdoneslot = 0;
338 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
339 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
340 esiop_script_write(sc, Ent_doner0 / 4,
341 0x786c0000 | ((addr & 0x000000ff) << 8));
342 esiop_script_write(sc, Ent_doner1 / 4,
343 0x786d0000 | ((addr & 0x0000ff00) ));
344 esiop_script_write(sc, Ent_doner2 / 4,
345 0x786e0000 | ((addr & 0x00ff0000) >> 8));
346 esiop_script_write(sc, Ent_doner3 / 4,
347 0x786f0000 | ((addr & 0xff000000) >> 16));
348
349 /* set flags */
350 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
351 /* write pointer of base of target DSA table */
352 addr = (sc->sc_target_table_offset * sizeof(uint32_t)) +
353 sc->sc_c.sc_scriptaddr;
354 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
355 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
356 ((addr & 0x000000ff) << 8));
357 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
358 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
359 ((addr & 0x0000ff00) ));
360 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
361 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
362 ((addr & 0x00ff0000) >> 8));
363 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
364 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
365 ((addr & 0xff000000) >> 16));
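/*
 * Same byte-at-a-time patching as for the ring pointers above, but here
 * the existing instruction words are OR-ed with the address bytes; this
 * presumably relies on the data8 fields being zero in the assembled
 * script.
 */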
366 #ifdef SIOP_DEBUG
367 printf("%s: target table offset %d free offset %d\n",
368 device_xname(sc->sc_c.sc_dev), sc->sc_target_table_offset,
369 sc->sc_free_offset);
370 #endif
371
372 /* register existing targets */
373 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
374 if (sc->sc_c.targets[i])
375 esiop_target_register(sc, i);
376 }
377 /* start script */
378 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
379 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
380 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
381 }
382 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
383 sc->sc_c.sc_scriptaddr + Ent_reselect);
384 }
385
386 #if 0
387 #define CALL_SCRIPT(ent) do { \
388 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
389 esiop_cmd->cmd_c.dsa, \
390 sc->sc_c.sc_scriptaddr + ent); \
391 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
392 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
393 } while (/* CONSTCOND */0)
394 #else
395 #define CALL_SCRIPT(ent) do { \
396 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
397 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
398 } while (/* CONSTCOND */0)
399 #endif
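/*
 * CALL_SCRIPT() (re)starts the SCRIPTS processor by writing the bus
 * address of the chosen entry point to DSP; the debug variant above
 * additionally logs the DSA and DSP values.
 */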
400
401 int
402 esiop_intr(void *v)
403 {
404 struct esiop_softc *sc = v;
405 struct esiop_target *esiop_target;
406 struct esiop_cmd *esiop_cmd;
407 struct esiop_lun *esiop_lun;
408 struct scsipi_xfer *xs;
409 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
410 uint32_t irqcode;
411 int need_reset = 0;
412 int offset, target, lun, tag;
413 uint32_t tflags;
414 uint32_t addr;
415 int freetarget = 0;
416 int slot;
417 int retval = 0;
418
419 again:
420 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
421 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
422 return retval;
423 }
424 retval = 1;
425 INCSTAT(esiop_stat_intr);
426 esiop_checkdone(sc);
427 if (istat & ISTAT_INTF) {
428 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
429 SIOP_ISTAT, ISTAT_INTF);
430 goto again;
431 }
432
433 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
434 (ISTAT_DIP | ISTAT_ABRT)) {
435 /* clear abort */
436 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
437 SIOP_ISTAT, 0);
438 }
439
440 /* get CMD from T/L/Q */
441 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
442 SIOP_SCRATCHC);
443 #ifdef SIOP_DEBUG_INTR
444 printf("interrupt, istat=0x%x tflags=0x%x "
445 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
446 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
447 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
448 SIOP_DSP) -
449 sc->sc_c.sc_scriptaddr));
450 #endif
451 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
452 if (target >= sc->sc_c.sc_chan.chan_ntargets) target = -1;
453 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
454 if (lun >= sc->sc_c.sc_chan.chan_nluns) lun = -1;
455 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
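/*
 * The T/L/Q word read back from SCRATCHC mirrors the tlq field built in
 * esiop_scsipi_request(): flag bits in byte 0, target in byte 1, LUN in
 * byte 2 and tag in byte 3.
 */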
456
457 if (target >= 0 && lun >= 0) {
458 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
459 if (esiop_target == NULL) {
460 printf("esiop_target (target %d) not valid\n", target);
461 goto none;
462 }
463 esiop_lun = esiop_target->esiop_lun[lun];
464 if (esiop_lun == NULL) {
465 printf("esiop_lun (target %d lun %d) not valid\n",
466 target, lun);
467 goto none;
468 }
469 esiop_cmd =
470 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
471 if (esiop_cmd == NULL) {
472 printf("esiop_cmd (target %d lun %d tag %d)"
473 " not valid\n",
474 target, lun, tag);
475 goto none;
476 }
477 xs = esiop_cmd->cmd_c.xs;
478 #ifdef DIAGNOSTIC
479 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
480 printf("esiop_cmd (target %d lun %d) "
481 "not active (%d)\n", target, lun,
482 esiop_cmd->cmd_c.status);
483 goto none;
484 }
485 #endif
486 esiop_table_sync(esiop_cmd,
487 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
488 } else {
489 none:
490 xs = NULL;
491 esiop_target = NULL;
492 esiop_lun = NULL;
493 esiop_cmd = NULL;
494 }
495 if (istat & ISTAT_DIP) {
496 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
497 SIOP_DSTAT);
498 if (dstat & DSTAT_ABRT) {
499 /* was probably generated by a bus reset IOCTL */
500 if ((dstat & DSTAT_DFE) == 0)
501 siop_clearfifo(&sc->sc_c);
502 goto reset;
503 }
504 if (dstat & DSTAT_SSI) {
505 printf("single step dsp 0x%08x dsa 0x08%x\n",
506 (int)(bus_space_read_4(sc->sc_c.sc_rt,
507 sc->sc_c.sc_rh, SIOP_DSP) -
508 sc->sc_c.sc_scriptaddr),
509 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
510 SIOP_DSA));
511 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
512 (istat & ISTAT_SIP) == 0) {
513 bus_space_write_1(sc->sc_c.sc_rt,
514 sc->sc_c.sc_rh, SIOP_DCNTL,
515 bus_space_read_1(sc->sc_c.sc_rt,
516 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
517 }
518 return 1;
519 }
520
521 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
522 printf("%s: DMA IRQ:", device_xname(sc->sc_c.sc_dev));
523 if (dstat & DSTAT_IID)
524 printf(" Illegal instruction");
525 if (dstat & DSTAT_BF)
526 printf(" bus fault");
527 if (dstat & DSTAT_MDPE)
528 printf(" parity");
529 if (dstat & DSTAT_DFE)
530 printf(" DMA fifo empty");
531 else
532 siop_clearfifo(&sc->sc_c);
533 printf(", DSP=0x%x DSA=0x%x: ",
534 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
535 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
536 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
537 if (esiop_cmd)
538 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
539 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
540 le32toh(esiop_cmd->cmd_tables->status));
541 else
542 printf(" current T/L/Q invalid\n");
543 need_reset = 1;
544 }
545 }
546 if (istat & ISTAT_SIP) {
547 if (istat & ISTAT_DIP)
548 delay(10);
549 /*
550 * Can't read sist0 and sist1 with separate accesses without
551 * inserting a delay, so read both with a single 16-bit read.
552 */
553 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
554 SIOP_SIST0);
555 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
556 SIOP_SSTAT1);
557 #ifdef SIOP_DEBUG_INTR
558 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
559 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
560 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
561 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
562 SIOP_DSP) -
563 sc->sc_c.sc_scriptaddr));
564 #endif
565 if (sist & SIST0_RST) {
566 esiop_handle_reset(sc);
567 /* no table to flush here */
568 return 1;
569 }
570 if (sist & SIST0_SGE) {
571 if (esiop_cmd)
572 scsipi_printaddr(xs->xs_periph);
573 else
574 printf("%s:", device_xname(sc->sc_c.sc_dev));
575 printf("scsi gross error\n");
576 if (esiop_target)
577 esiop_target->target_c.flags &= ~TARF_DT;
578 #ifdef DEBUG
579 printf("DSA=0x%x DSP=0x%lx\n",
580 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
581 SIOP_DSA),
582 (u_long)(bus_space_read_4(sc->sc_c.sc_rt,
583 sc->sc_c.sc_rh, SIOP_DSP) -
584 sc->sc_c.sc_scriptaddr));
585 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
586 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
587 SIOP_SDID),
588 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
589 SIOP_SCNTL3),
590 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
591 SIOP_SXFER),
592 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
593 SIOP_SCNTL4));
594
595 #endif
596 goto reset;
597 }
598 if ((sist & SIST0_MA) && need_reset == 0) {
599 if (esiop_cmd) {
600 int scratchc0;
601 dstat = bus_space_read_1(sc->sc_c.sc_rt,
602 sc->sc_c.sc_rh, SIOP_DSTAT);
603 /*
604 * first restore DSA, in case we were in a S/G
605 * operation.
606 */
607 bus_space_write_4(sc->sc_c.sc_rt,
608 sc->sc_c.sc_rh,
609 SIOP_DSA, esiop_cmd->cmd_c.dsa);
610 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
611 sc->sc_c.sc_rh, SIOP_SCRATCHC);
612 switch (sstat1 & SSTAT1_PHASE_MASK) {
613 case SSTAT1_PHASE_STATUS:
614 /*
615 * the previous phase may be aborted for any reason
616 * (for example, the target has less data to
617 * transfer than requested). Compute the resid and
618 * just go to status; the command should
619 * terminate.
620 */
621 INCSTAT(esiop_stat_intr_shortxfer);
622 if (scratchc0 & A_f_c_data)
623 siop_ma(&esiop_cmd->cmd_c);
624 else if ((dstat & DSTAT_DFE) == 0)
625 siop_clearfifo(&sc->sc_c);
626 CALL_SCRIPT(Ent_status);
627 return 1;
628 case SSTAT1_PHASE_MSGIN:
629 /*
630 * target may be ready to disconnect
631 * Compute resid which would be used later
632 * if a save data pointer is needed.
633 */
634 INCSTAT(esiop_stat_intr_xferdisc);
635 if (scratchc0 & A_f_c_data)
636 siop_ma(&esiop_cmd->cmd_c);
637 else if ((dstat & DSTAT_DFE) == 0)
638 siop_clearfifo(&sc->sc_c);
639 bus_space_write_1(sc->sc_c.sc_rt,
640 sc->sc_c.sc_rh, SIOP_SCRATCHC,
641 scratchc0 & ~A_f_c_data);
642 CALL_SCRIPT(Ent_msgin);
643 return 1;
644 }
645 aprint_error_dev(sc->sc_c.sc_dev,
646 "unexpected phase mismatch %d\n",
647 sstat1 & SSTAT1_PHASE_MASK);
648 } else {
649 aprint_error_dev(sc->sc_c.sc_dev,
650 "phase mismatch without command\n");
651 }
652 need_reset = 1;
653 }
654 if (sist & SIST0_PAR) {
655 /* parity error, reset */
656 if (esiop_cmd)
657 scsipi_printaddr(xs->xs_periph);
658 else
659 printf("%s:", device_xname(sc->sc_c.sc_dev));
660 printf("parity error\n");
661 if (esiop_target)
662 esiop_target->target_c.flags &= ~TARF_DT;
663 goto reset;
664 }
665 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
666 /*
667 * selection timeout: assume there's no device there.
668 * We also have to update the ring pointer ourselves.
669 */
670 slot = bus_space_read_1(sc->sc_c.sc_rt,
671 sc->sc_c.sc_rh, SIOP_SCRATCHE);
672 esiop_script_sync(sc,
673 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
674 #ifdef SIOP_DEBUG_SCHED
675 printf("sel timeout target %d, slot %d\n",
676 target, slot);
677 #endif
678 /*
679 * mark this slot as free, and advance to next slot
680 */
681 esiop_script_write(sc,
682 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
683 A_f_cmd_free);
684 addr = bus_space_read_4(sc->sc_c.sc_rt,
685 sc->sc_c.sc_rh, SIOP_SCRATCHD);
686 if (slot < (A_ncmd_slots - 1)) {
687 bus_space_write_1(sc->sc_c.sc_rt,
688 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
689 addr = addr + sizeof(struct esiop_slot);
690 } else {
691 bus_space_write_1(sc->sc_c.sc_rt,
692 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
693 addr = sc->sc_c.sc_scriptaddr +
694 sc->sc_shedoffset * sizeof(uint32_t);
695 }
696 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
697 SIOP_SCRATCHD, addr);
698 esiop_script_sync(sc,
699 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
700 if (esiop_cmd) {
701 esiop_cmd->cmd_c.status = CMDST_DONE;
702 xs->error = XS_SELTIMEOUT;
703 freetarget = 1;
704 goto end;
705 } else {
706 printf("%s: selection timeout without "
707 "command, target %d (sdid 0x%x), "
708 "slot %d\n",
709 device_xname(sc->sc_c.sc_dev), target,
710 bus_space_read_1(sc->sc_c.sc_rt,
711 sc->sc_c.sc_rh, SIOP_SDID), slot);
712 need_reset = 1;
713 }
714 }
715 if (sist & SIST0_UDC) {
716 /*
717 * unexpected disconnect. Usually the target signals
718 * a fatal condition this way. Attempt to get sense.
719 */
720 if (esiop_cmd) {
721 esiop_cmd->cmd_tables->status =
722 htole32(SCSI_CHECK);
723 goto end;
724 }
725 aprint_error_dev(sc->sc_c.sc_dev,
726 "unexpected disconnect without command\n");
727 goto reset;
728 }
729 if (sist & (SIST1_SBMC << 8)) {
730 /* SCSI bus mode change */
731 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
732 goto reset;
733 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
734 /*
735 * we have a script interrupt, it will
736 * restart the script.
737 */
738 goto scintr;
739 }
740 /*
741 * else we have to restart it ourselves, at the
742 * interrupted instruction.
743 */
744 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
745 SIOP_DSP,
746 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
747 SIOP_DSP) - 8);
748 return 1;
749 }
750 /* Else it's an unhandled exception (for now). */
751 aprint_error_dev(sc->sc_c.sc_dev,
752 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
753 "DSA=0x%x DSP=0x%x\n", sist,
754 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
755 SIOP_SSTAT1),
756 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
757 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
758 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
759 if (esiop_cmd) {
760 esiop_cmd->cmd_c.status = CMDST_DONE;
761 xs->error = XS_SELTIMEOUT;
762 goto end;
763 }
764 need_reset = 1;
765 }
766 if (need_reset) {
767 reset:
768 /* fatal error, reset the bus */
769 siop_resetbus(&sc->sc_c);
770 /* no table to flush here */
771 return 1;
772 }
773
774 scintr:
775 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
776 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
777 SIOP_DSPS);
778 #ifdef SIOP_DEBUG_INTR
779 printf("script interrupt 0x%x\n", irqcode);
780 #endif
781 /*
782 * a missing command, or an inactive one, is only valid for a
783 * reselect interrupt
784 */
785 if ((irqcode & 0x80) == 0) {
786 if (esiop_cmd == NULL) {
787 aprint_error_dev(sc->sc_c.sc_dev,
788 "script interrupt (0x%x) with invalid DSA !!!\n",
789 irqcode);
790 goto reset;
791 }
792 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
793 aprint_error_dev(sc->sc_c.sc_dev,
794 "command with invalid status "
795 "(IRQ code 0x%x current status %d) !\n",
796 irqcode, esiop_cmd->cmd_c.status);
797 xs = NULL;
798 }
799 }
800 switch(irqcode) {
801 case A_int_err:
802 printf("error, DSP=0x%x\n",
803 (int)(bus_space_read_4(sc->sc_c.sc_rt,
804 sc->sc_c.sc_rh, SIOP_DSP) -
805 sc->sc_c.sc_scriptaddr));
806 if (xs) {
807 xs->error = XS_SELTIMEOUT;
808 goto end;
809 } else {
810 goto reset;
811 }
812 case A_int_msgin:
813 {
814 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
815 sc->sc_c.sc_rh, SIOP_SFBR);
816 if (msgin == MSG_MESSAGE_REJECT) {
817 int msg, extmsg;
818 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
819 /*
820 * message was part of an identify +
821 * something else. Identify shouldn't
822 * have been rejected.
823 */
824 msg =
825 esiop_cmd->cmd_tables->msg_out[1];
826 extmsg =
827 esiop_cmd->cmd_tables->msg_out[3];
828 } else {
829 msg =
830 esiop_cmd->cmd_tables->msg_out[0];
831 extmsg =
832 esiop_cmd->cmd_tables->msg_out[2];
833 }
834 if (msg == MSG_MESSAGE_REJECT) {
835 /* MSG_REJECT for a MSG_REJECT !*/
836 if (xs)
837 scsipi_printaddr(xs->xs_periph);
838 else
839 printf("%s: ", device_xname(
840 sc->sc_c.sc_dev));
841 printf("our reject message was "
842 "rejected\n");
843 goto reset;
844 }
845 if (msg == MSG_EXTENDED &&
846 extmsg == MSG_EXT_WDTR) {
847 /* WDTR rejected, initiate sync */
848 if ((esiop_target->target_c.flags &
849 TARF_SYNC) == 0) {
850 esiop_target->target_c.status =
851 TARST_OK;
852 siop_update_xfer_mode(&sc->sc_c,
853 target);
854 /* no table to flush here */
855 CALL_SCRIPT(Ent_msgin_ack);
856 return 1;
857 }
858 esiop_target->target_c.status =
859 TARST_SYNC_NEG;
860 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
861 sc->sc_c.st_minsync,
862 sc->sc_c.maxoff);
863 esiop_table_sync(esiop_cmd,
864 BUS_DMASYNC_PREREAD |
865 BUS_DMASYNC_PREWRITE);
866 CALL_SCRIPT(Ent_send_msgout);
867 return 1;
868 } else if (msg == MSG_EXTENDED &&
869 extmsg == MSG_EXT_SDTR) {
870 /* sync rejected */
871 esiop_target->target_c.offset = 0;
872 esiop_target->target_c.period = 0;
873 esiop_target->target_c.status =
874 TARST_OK;
875 siop_update_xfer_mode(&sc->sc_c,
876 target);
877 /* no table to flush here */
878 CALL_SCRIPT(Ent_msgin_ack);
879 return 1;
880 } else if (msg == MSG_EXTENDED &&
881 extmsg == MSG_EXT_PPR) {
882 /* PPR rejected */
883 esiop_target->target_c.offset = 0;
884 esiop_target->target_c.period = 0;
885 esiop_target->target_c.status =
886 TARST_OK;
887 siop_update_xfer_mode(&sc->sc_c,
888 target);
889 /* no table to flush here */
890 CALL_SCRIPT(Ent_msgin_ack);
891 return 1;
892 } else if (msg == MSG_SIMPLE_Q_TAG ||
893 msg == MSG_HEAD_OF_Q_TAG ||
894 msg == MSG_ORDERED_Q_TAG) {
895 if (esiop_handle_qtag_reject(
896 esiop_cmd) == -1)
897 goto reset;
898 CALL_SCRIPT(Ent_msgin_ack);
899 return 1;
900 }
901 if (xs)
902 scsipi_printaddr(xs->xs_periph);
903 else
904 printf("%s: ",
905 device_xname(sc->sc_c.sc_dev));
906 if (msg == MSG_EXTENDED) {
907 printf("scsi message reject, extended "
908 "message sent was 0x%x\n", extmsg);
909 } else {
910 printf("scsi message reject, message "
911 "sent was 0x%x\n", msg);
912 }
913 /* no table to flush here */
914 CALL_SCRIPT(Ent_msgin_ack);
915 return 1;
916 }
917 if (msgin == MSG_IGN_WIDE_RESIDUE) {
918 /* use the extmsgdata table to get the second byte */
919 esiop_cmd->cmd_tables->t_extmsgdata.count =
920 htole32(1);
921 esiop_table_sync(esiop_cmd,
922 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
923 CALL_SCRIPT(Ent_get_extmsgdata);
924 return 1;
925 }
926 if (xs)
927 scsipi_printaddr(xs->xs_periph);
928 else
929 printf("%s: ", device_xname(sc->sc_c.sc_dev));
930 printf("unhandled message 0x%x\n", msgin);
931 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
932 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
933 esiop_table_sync(esiop_cmd,
934 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
935 CALL_SCRIPT(Ent_send_msgout);
936 return 1;
937 }
938 case A_int_extmsgin:
939 #ifdef SIOP_DEBUG_INTR
940 printf("extended message: msg 0x%x len %d\n",
941 esiop_cmd->cmd_tables->msg_in[2],
942 esiop_cmd->cmd_tables->msg_in[1]);
943 #endif
944 if (esiop_cmd->cmd_tables->msg_in[1] >
945 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
946 aprint_error_dev(sc->sc_c.sc_dev,
947 "extended message too big (%d)\n",
948 esiop_cmd->cmd_tables->msg_in[1]);
949 esiop_cmd->cmd_tables->t_extmsgdata.count =
950 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
951 esiop_table_sync(esiop_cmd,
952 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
953 CALL_SCRIPT(Ent_get_extmsgdata);
954 return 1;
955 case A_int_extmsgdata:
956 #ifdef SIOP_DEBUG_INTR
957 {
958 int i;
959 printf("extended message: 0x%x, data:",
960 esiop_cmd->cmd_tables->msg_in[2]);
961 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
962 i++)
963 printf(" 0x%x",
964 esiop_cmd->cmd_tables->msg_in[i]);
965 printf("\n");
966 }
967 #endif
968 if (esiop_cmd->cmd_tables->msg_in[0] ==
969 MSG_IGN_WIDE_RESIDUE) {
970 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
971 if (esiop_cmd->cmd_tables->msg_in[3] != 1)
972 printf("MSG_IGN_WIDE_RESIDUE: "
973 "bad len %d\n",
974 esiop_cmd->cmd_tables->msg_in[3]);
975 switch (siop_iwr(&esiop_cmd->cmd_c)) {
976 case SIOP_NEG_MSGOUT:
977 esiop_table_sync(esiop_cmd,
978 BUS_DMASYNC_PREREAD |
979 BUS_DMASYNC_PREWRITE);
980 CALL_SCRIPT(Ent_send_msgout);
981 return 1;
982 case SIOP_NEG_ACK:
983 CALL_SCRIPT(Ent_msgin_ack);
984 return 1;
985 default:
986 panic("invalid retval from "
987 "siop_iwr()");
988 }
989 return 1;
990 }
991 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
992 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
993 case SIOP_NEG_MSGOUT:
994 esiop_update_scntl3(sc,
995 esiop_cmd->cmd_c.siop_target);
996 esiop_table_sync(esiop_cmd,
997 BUS_DMASYNC_PREREAD |
998 BUS_DMASYNC_PREWRITE);
999 CALL_SCRIPT(Ent_send_msgout);
1000 return 1;
1001 case SIOP_NEG_ACK:
1002 esiop_update_scntl3(sc,
1003 esiop_cmd->cmd_c.siop_target);
1004 CALL_SCRIPT(Ent_msgin_ack);
1005 return 1;
1006 default:
1007 panic("invalid retval from "
1008 "siop_wdtr_neg()");
1009 }
1010 return 1;
1011 }
1012 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1013 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1014 case SIOP_NEG_MSGOUT:
1015 esiop_update_scntl3(sc,
1016 esiop_cmd->cmd_c.siop_target);
1017 esiop_table_sync(esiop_cmd,
1018 BUS_DMASYNC_PREREAD |
1019 BUS_DMASYNC_PREWRITE);
1020 CALL_SCRIPT(Ent_send_msgout);
1021 return 1;
1022 case SIOP_NEG_ACK:
1023 esiop_update_scntl3(sc,
1024 esiop_cmd->cmd_c.siop_target);
1025 CALL_SCRIPT(Ent_msgin_ack);
1026 return 1;
1027 default:
1028 panic("invalid retval from "
1029 "siop_wdtr_neg()");
1030 }
1031 return 1;
1032 }
1033 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1034 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1035 case SIOP_NEG_MSGOUT:
1036 esiop_update_scntl3(sc,
1037 esiop_cmd->cmd_c.siop_target);
1038 esiop_table_sync(esiop_cmd,
1039 BUS_DMASYNC_PREREAD |
1040 BUS_DMASYNC_PREWRITE);
1041 CALL_SCRIPT(Ent_send_msgout);
1042 return 1;
1043 case SIOP_NEG_ACK:
1044 esiop_update_scntl3(sc,
1045 esiop_cmd->cmd_c.siop_target);
1046 CALL_SCRIPT(Ent_msgin_ack);
1047 return 1;
1048 default:
1049 panic("invalid retval from "
1050 "siop_wdtr_neg()");
1051 }
1052 return 1;
1053 }
1054 /* send a message reject */
1055 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1056 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1057 esiop_table_sync(esiop_cmd,
1058 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1059 CALL_SCRIPT(Ent_send_msgout);
1060 return 1;
1061 case A_int_disc:
1062 INCSTAT(esiop_stat_intr_sdp);
1063 offset = bus_space_read_1(sc->sc_c.sc_rt,
1064 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1065 #ifdef SIOP_DEBUG_DR
1066 printf("disconnect offset %d\n", offset);
1067 #endif
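/*
 * siop_sdp() saves the current data pointer (the scatter/gather offset
 * passed in) so the transfer can resume after reselection; saved_offset
 * is then reset to the SIOP_NOOFFSET sentinel below.
 */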
1068 siop_sdp(&esiop_cmd->cmd_c, offset);
1069 /* we start again with no offset */
1070 ESIOP_XFER(esiop_cmd, saved_offset) =
1071 htole32(SIOP_NOOFFSET);
1072 esiop_table_sync(esiop_cmd,
1073 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1074 CALL_SCRIPT(Ent_script_sched);
1075 return 1;
1076 case A_int_resfail:
1077 printf("reselect failed\n");
1078 CALL_SCRIPT(Ent_script_sched);
1079 return 1;
1080 case A_int_done:
1081 if (xs == NULL) {
1082 printf("%s: done without command\n",
1083 device_xname(sc->sc_c.sc_dev));
1084 CALL_SCRIPT(Ent_script_sched);
1085 return 1;
1086 }
1087 #ifdef SIOP_DEBUG_INTR
1088 printf("done, DSA=0x%lx target id 0x%x last msg "
1089 "in=0x%x status=0x%x\n",
1090 (u_long)esiop_cmd->cmd_c.dsa,
1091 le32toh(esiop_cmd->cmd_tables->id),
1092 esiop_cmd->cmd_tables->msg_in[0],
1093 le32toh(esiop_cmd->cmd_tables->status));
1094 #endif
1095 INCSTAT(esiop_stat_intr_done);
1096 esiop_cmd->cmd_c.status = CMDST_DONE;
1097 goto end;
1098 default:
1099 printf("unknown irqcode %x\n", irqcode);
1100 if (xs) {
1101 xs->error = XS_SELTIMEOUT;
1102 goto end;
1103 }
1104 goto reset;
1105 }
1106 return 1;
1107 }
1108 /* We should never get here */
1109 panic("esiop_intr: I shouldn't be here!");
1110
1111 end:
1112 /*
1113 * restart the script now if the command completed properly.
1114 * Otherwise wait for esiop_scsicmd_end(), as we may need to clean up
1115 * the queue
1116 */
1117 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1118 #ifdef SIOP_DEBUG_INTR
1119 printf("esiop_intr end: status %d\n", xs->status);
1120 #endif
1121 if (tag >= 0)
1122 esiop_lun->tactive[tag] = NULL;
1123 else
1124 esiop_lun->active = NULL;
1125 offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1126 SIOP_SCRATCHA + 1);
1127 /*
1128 * if we got a disconnect between the last data phase
1129 * and the status phase, offset will be 0. In this
1130 * case, cmd_tables->saved_offset will have the proper value
1131 * if it got updated by the controller
1132 */
1133 if (offset == 0 &&
1134 ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1135 offset =
1136 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1137
1138 esiop_scsicmd_end(esiop_cmd, offset);
1139 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1140 esiop_del_dev(sc, target, lun);
1141 CALL_SCRIPT(Ent_script_sched);
1142 return 1;
1143 }
1144
1145 void
1146 esiop_scsicmd_end(struct esiop_cmd *esiop_cmd, int offset)
1147 {
1148 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1149 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1150
1151 siop_update_resid(&esiop_cmd->cmd_c, offset);
1152
1153 switch(xs->status) {
1154 case SCSI_OK:
1155 xs->error = XS_NOERROR;
1156 break;
1157 case SCSI_BUSY:
1158 xs->error = XS_BUSY;
1159 break;
1160 case SCSI_CHECK:
1161 xs->error = XS_BUSY;
1162 /* remove commands in the queue and scheduler */
1163 esiop_unqueue(sc, xs->xs_periph->periph_target,
1164 xs->xs_periph->periph_lun);
1165 break;
1166 case SCSI_QUEUE_FULL:
1167 INCSTAT(esiop_stat_intr_qfull);
1168 #ifdef SIOP_DEBUG
1169 printf("%s:%d:%d: queue full (tag %d)\n",
1170 device_xname(sc->sc_c.sc_dev),
1171 xs->xs_periph->periph_target,
1172 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1173 #endif
1174 xs->error = XS_BUSY;
1175 break;
1176 case SCSI_SIOP_NOCHECK:
1177 /*
1178 * don't check status, xs->error is already valid
1179 */
1180 break;
1181 case SCSI_SIOP_NOSTATUS:
1182 /*
1183 * the status byte was not updated, cmd was
1184 * aborted
1185 */
1186 xs->error = XS_SELTIMEOUT;
1187 break;
1188 default:
1189 scsipi_printaddr(xs->xs_periph);
1190 printf("invalid status code %d\n", xs->status);
1191 xs->error = XS_DRIVER_STUFFUP;
1192 }
1193 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1194 bus_dmamap_sync(sc->sc_c.sc_dmat,
1195 esiop_cmd->cmd_c.dmamap_data, 0,
1196 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1197 (xs->xs_control & XS_CTL_DATA_IN) ?
1198 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1199 bus_dmamap_unload(sc->sc_c.sc_dmat,
1200 esiop_cmd->cmd_c.dmamap_data);
1201 }
1202 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1203 if ((xs->xs_control & XS_CTL_POLL) == 0)
1204 callout_stop(&xs->xs_callout);
1205 esiop_cmd->cmd_c.status = CMDST_FREE;
1206 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1207 #if 0
1208 if (xs->resid != 0)
1209 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1210 #endif
1211 scsipi_done (xs);
1212 }
1213
1214 void
1215 esiop_checkdone(struct esiop_softc *sc)
1216 {
1217 int target, lun, tag;
1218 struct esiop_target *esiop_target;
1219 struct esiop_lun *esiop_lun;
1220 struct esiop_cmd *esiop_cmd;
1221 uint32_t slot;
1222 int needsync = 0;
1223 int status;
1224 uint32_t sem, offset;
1225
1226 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1227 sem = esiop_script_read(sc, sc->sc_semoffset);
1228 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1229 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1230 /*
1231 * at least one command has been started,
1232 * so we should have free slots now
1233 */
1234 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1235 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1236 }
1237 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1238
1239 if ((sem & A_sem_done) == 0) {
1240 /* no pending done command */
1241 return;
1242 }
1243
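/*
 * The done ring protocol, as used below: the SCRIPT posts the T/L/Q word
 * of each completed command into sc_done_slot[] and raises sem_done; the
 * host consumes entries until it finds a zero slot, clearing each slot
 * as it goes.
 */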
1244 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1245 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
1246 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1247 next:
1248 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1249 if (needsync)
1250 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1251 sc->sc_done_offset,
1252 A_ndone_slots * sizeof(uint32_t),
1253 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1254 return;
1255 }
1256
1257 needsync = 1;
1258
1259 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1260 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1261 sc->sc_currdoneslot += 1;
1262 if (sc->sc_currdoneslot == A_ndone_slots)
1263 sc->sc_currdoneslot = 0;
1264
1265 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1266 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1267 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1268
1269 esiop_target = (target >= 0) ?
1270 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1271 if (esiop_target == NULL) {
1272 printf("esiop_target (target %d) not valid\n", target);
1273 goto next;
1274 }
1275 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1276 if (esiop_lun == NULL) {
1277 printf("esiop_lun (target %d lun %d) not valid\n",
1278 target, lun);
1279 goto next;
1280 }
1281 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1282 if (esiop_cmd == NULL) {
1283 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1284 target, lun, tag);
1285 goto next;
1286 }
1287
1288 esiop_table_sync(esiop_cmd,
1289 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1290 status = le32toh(esiop_cmd->cmd_tables->status);
1291 #ifdef DIAGNOSTIC
1292 if (status != SCSI_OK) {
1293 printf("command for T/L/Q %d/%d/%d status %d\n",
1294 target, lun, tag, status);
1295 goto next;
1296 }
1297
1298 #endif
1299 /* Ok, this command has been handled */
1300 esiop_cmd->cmd_c.xs->status = status;
1301 if (tag >= 0)
1302 esiop_lun->tactive[tag] = NULL;
1303 else
1304 esiop_lun->active = NULL;
1305 /*
1306 * scratcha may have been saved in saved_offset by the script;
1307 * fetch the offset from it
1308 */
1309 offset = 0;
1310 if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1311 offset =
1312 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1313 esiop_scsicmd_end(esiop_cmd, offset);
1314 goto next;
1315 }
1316
1317 void
1318 esiop_unqueue(struct esiop_softc *sc, int target, int lun)
1319 {
1320 int slot, tag;
1321 uint32_t slotdsa;
1322 struct esiop_cmd *esiop_cmd;
1323 struct esiop_lun *esiop_lun =
1324 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1325
1326 /* first make sure to read valid data */
1327 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1328
1329 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1330 /* look for commands in the scheduler, not yet started */
1331 if (esiop_lun->tactive[tag] == NULL)
1332 continue;
1333 esiop_cmd = esiop_lun->tactive[tag];
1334 for (slot = 0; slot < A_ncmd_slots; slot++) {
1335 slotdsa = esiop_script_read(sc,
1336 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1337 /* if the slot has any flag, it won't match the DSA */
1338 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1339 /* Mark this slot as ignore */
1340 esiop_script_write(sc,
1341 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1342 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1343 /* ask to requeue */
1344 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1345 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1346 esiop_lun->tactive[tag] = NULL;
1347 esiop_scsicmd_end(esiop_cmd, 0);
1348 break;
1349 }
1350 }
1351 }
1352 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1353 }
1354
1355 /*
1356 * handle a rejected queue tag message: the command will run untagged,
1357 * so we have to adjust the reselect (DSA) tables.
1358 */
1359
1360
1361 int
1362 esiop_handle_qtag_reject(struct esiop_cmd *esiop_cmd)
1363 {
1364 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1365 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1366 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1367 int tag = esiop_cmd->cmd_tables->msg_out[2];
1368 struct esiop_target *esiop_target =
1369 (struct esiop_target*)sc->sc_c.targets[target];
1370 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1371
1372 #ifdef SIOP_DEBUG
1373 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1374 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1375 esiop_cmd->cmd_c.tag, esiop_cmd->cmd_c.status);
1376 #endif
1377
1378 if (esiop_lun->active != NULL) {
1379 aprint_error_dev(sc->sc_c.sc_dev,
1380 "untagged command already running for target %d "
1381 "lun %d (status %d)\n",
1382 target, lun, esiop_lun->active->cmd_c.status);
1383 return -1;
1384 }
1385 /* clear tag slot */
1386 esiop_lun->tactive[tag] = NULL;
1387 /* add command to non-tagged slot */
1388 esiop_lun->active = esiop_cmd;
1389 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1390 esiop_cmd->cmd_c.tag = -1;
1391 /* update DSA table */
1392 esiop_script_write(sc, esiop_target->lun_table_offset +
1393 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1394 esiop_cmd->cmd_c.dsa);
1395 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1396 return 0;
1397 }
1398
1399 /*
1400 * handle a bus reset: reset the chip, unqueue all active commands,
1401 * reset all target structs and report the loss to the upper layer.
1402 * As the upper layer may requeue immediately, we first process the
1403 * commands that already completed (esiop_checkdone()).
1404 */
1405 void
1406 esiop_handle_reset(struct esiop_softc *sc)
1407 {
1408 struct esiop_cmd *esiop_cmd;
1409 struct esiop_lun *esiop_lun;
1410 int target, lun, tag;
1411 /*
1412 * scsi bus reset. reset the chip and restart
1413 * the queue. Need to clean up all active commands
1414 */
1415 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1416 /* stop, reset and restart the chip */
1417 esiop_reset(sc);
1418
1419 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1420 /* chip has been reset, all slots are free now */
1421 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1422 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1423 }
1424 /*
1425 * Process all commands: first the completed commands, then the
1426 * commands still being executed
1427 */
1428 esiop_checkdone(sc);
1429 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1430 struct esiop_target *esiop_target =
1431 (struct esiop_target *)sc->sc_c.targets[target];
1432 if (esiop_target == NULL)
1433 continue;
1434 for (lun = 0; lun < 8; lun++) {
1435 esiop_lun = esiop_target->esiop_lun[lun];
1436 if (esiop_lun == NULL)
1437 continue;
1438 for (tag = -1; tag <
1439 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1440 ESIOP_NTAG : 0);
1441 tag++) {
1442 if (tag >= 0)
1443 esiop_cmd = esiop_lun->tactive[tag];
1444 else
1445 esiop_cmd = esiop_lun->active;
1446 if (esiop_cmd == NULL)
1447 continue;
1448 scsipi_printaddr(
1449 esiop_cmd->cmd_c.xs->xs_periph);
1450 printf("command with tag id %d reset\n", tag);
1451 esiop_cmd->cmd_c.xs->error =
1452 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1453 XS_TIMEOUT : XS_RESET;
1454 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1455 if (tag >= 0)
1456 esiop_lun->tactive[tag] = NULL;
1457 else
1458 esiop_lun->active = NULL;
1459 esiop_cmd->cmd_c.status = CMDST_DONE;
1460 esiop_scsicmd_end(esiop_cmd, 0);
1461 }
1462 }
1463 sc->sc_c.targets[target]->status = TARST_ASYNC;
1464 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1465 sc->sc_c.targets[target]->period =
1466 sc->sc_c.targets[target]->offset = 0;
1467 siop_update_xfer_mode(&sc->sc_c, target);
1468 }
1469
1470 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1471 }
1472
1473 void
1474 esiop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1475 void *arg)
1476 {
1477 struct scsipi_xfer *xs;
1478 struct scsipi_periph *periph;
1479 struct esiop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1480 struct esiop_cmd *esiop_cmd;
1481 struct esiop_target *esiop_target;
1482 int s, error, i;
1483 int target;
1484 int lun;
1485
1486 switch (req) {
1487 case ADAPTER_REQ_RUN_XFER:
1488 xs = arg;
1489 periph = xs->xs_periph;
1490 target = periph->periph_target;
1491 lun = periph->periph_lun;
1492
1493 s = splbio();
1494 /*
1495 * first check if there are pending completed commands;
1496 * this can free some resources (ring slots, for example).
1497 * Guard with SCF_CHAN_ADAPTREQ to avoid recursion.
1498 */
1499 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1500 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1501 esiop_checkdone(sc);
1502 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1503 }
1504 #ifdef SIOP_DEBUG_SCHED
1505 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1506 xs->xs_tag_type, xs->xs_tag_id);
1507 #endif
1508 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1509 if (esiop_cmd == NULL) {
1510 xs->error = XS_RESOURCE_SHORTAGE;
1511 scsipi_done(xs);
1512 splx(s);
1513 return;
1514 }
1515 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1516 #ifdef DIAGNOSTIC
1517 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1518 panic("siop_scsicmd: new cmd not free");
1519 #endif
1520 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1521 if (esiop_target == NULL) {
1522 #ifdef SIOP_DEBUG
1523 printf("%s: alloc siop_target for target %d\n",
1524 device_xname(sc->sc_c.sc_dev), target);
1525 #endif
1526 sc->sc_c.targets[target] =
1527 malloc(sizeof(struct esiop_target),
1528 M_DEVBUF, M_NOWAIT | M_ZERO);
1529 if (sc->sc_c.targets[target] == NULL) {
1530 aprint_error_dev(sc->sc_c.sc_dev,
1531 "can't malloc memory for "
1532 "target %d\n",
1533 target);
1534 xs->error = XS_RESOURCE_SHORTAGE;
1535 scsipi_done(xs);
1536 TAILQ_INSERT_TAIL(&sc->free_list,
1537 esiop_cmd, next);
1538 splx(s);
1539 return;
1540 }
1541 esiop_target =
1542 (struct esiop_target*)sc->sc_c.targets[target];
1543 esiop_target->target_c.status = TARST_PROBING;
1544 esiop_target->target_c.flags = 0;
1545 esiop_target->target_c.id =
1546 sc->sc_c.clock_div << 24; /* scntl3 */
1547 esiop_target->target_c.id |= target << 16; /* id */
1548 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1549
1550 for (i=0; i < 8; i++)
1551 esiop_target->esiop_lun[i] = NULL;
1552 esiop_target_register(sc, target);
1553 }
1554 if (esiop_target->esiop_lun[lun] == NULL) {
1555 esiop_target->esiop_lun[lun] =
1556 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1557 M_NOWAIT|M_ZERO);
1558 if (esiop_target->esiop_lun[lun] == NULL) {
1559 aprint_error_dev(sc->sc_c.sc_dev,
1560 "can't alloc esiop_lun for "
1561 "target %d lun %d\n",
1562 target, lun);
1563 xs->error = XS_RESOURCE_SHORTAGE;
1564 scsipi_done(xs);
1565 TAILQ_INSERT_TAIL(&sc->free_list,
1566 esiop_cmd, next);
1567 splx(s);
1568 return;
1569 }
1570 }
1571 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1572 esiop_cmd->cmd_c.xs = xs;
1573 esiop_cmd->cmd_c.flags = 0;
1574 esiop_cmd->cmd_c.status = CMDST_READY;
1575
1576 /* load the DMA maps */
1577 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1578 esiop_cmd->cmd_c.dmamap_cmd,
1579 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1580 if (error) {
1581 aprint_error_dev(sc->sc_c.sc_dev,
1582 "unable to load cmd DMA map: %d\n",
1583 error);
1584 xs->error = (error == EAGAIN) ?
1585 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1586 scsipi_done(xs);
1587 esiop_cmd->cmd_c.status = CMDST_FREE;
1588 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1589 splx(s);
1590 return;
1591 }
1592 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1593 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1594 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1595 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1596 ((xs->xs_control & XS_CTL_DATA_IN) ?
1597 BUS_DMA_READ : BUS_DMA_WRITE));
1598 if (error) {
1599 aprint_error_dev(sc->sc_c.sc_dev,
1600 "unable to load data DMA map: %d\n",
1601 error);
1602 xs->error = (error == EAGAIN) ?
1603 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1604 scsipi_done(xs);
1605 bus_dmamap_unload(sc->sc_c.sc_dmat,
1606 esiop_cmd->cmd_c.dmamap_cmd);
1607 esiop_cmd->cmd_c.status = CMDST_FREE;
1608 TAILQ_INSERT_TAIL(&sc->free_list,
1609 esiop_cmd, next);
1610 splx(s);
1611 return;
1612 }
1613 bus_dmamap_sync(sc->sc_c.sc_dmat,
1614 esiop_cmd->cmd_c.dmamap_data, 0,
1615 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1616 (xs->xs_control & XS_CTL_DATA_IN) ?
1617 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1618 }
1619 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1620 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1621 BUS_DMASYNC_PREWRITE);
1622
1623 if (xs->xs_tag_type)
1624 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1625 else
1626 esiop_cmd->cmd_c.tag = -1;
1627 siop_setuptables(&esiop_cmd->cmd_c);
1628 ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
1629 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1630 ESIOP_XFER(esiop_cmd, tlq) |=
1631 htole32((target << 8) | (lun << 16));
1632 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1633 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1634 ESIOP_XFER(esiop_cmd, tlq) |=
1635 htole32(esiop_cmd->cmd_c.tag << 24);
1636 }
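/*
 * Example of the resulting tlq word (illustrative): target 2, LUN 0,
 * tag 5 would give
 * (A_f_c_target | A_f_c_lun | A_f_c_tag) | (2 << 8) | (0 << 16) | (5 << 24),
 * which is what esiop_intr() and the done ring decode later.
 */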
1637
1638 esiop_table_sync(esiop_cmd,
1639 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1640 esiop_start(sc, esiop_cmd);
1641 if (xs->xs_control & XS_CTL_POLL) {
1642 /* poll for command completion */
1643 while ((xs->xs_status & XS_STS_DONE) == 0) {
1644 delay(1000);
1645 esiop_intr(sc);
1646 }
1647 }
1648 splx(s);
1649 return;
1650
1651 case ADAPTER_REQ_GROW_RESOURCES:
1652 #ifdef SIOP_DEBUG
1653 printf("%s grow resources (%d)\n",
1654 device_xname(sc->sc_c.sc_dev),
1655 sc->sc_c.sc_adapt.adapt_openings);
1656 #endif
1657 esiop_morecbd(sc);
1658 return;
1659
1660 case ADAPTER_REQ_SET_XFER_MODE:
1661 {
1662 struct scsipi_xfer_mode *xm = arg;
1663 if (sc->sc_c.targets[xm->xm_target] == NULL)
1664 return;
1665 s = splbio();
1666 if (xm->xm_mode & PERIPH_CAP_TQING) {
1667 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1668 /* allocate tag tables for this device */
1669 for (lun = 0;
1670 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1671 if (scsipi_lookup_periph(chan,
1672 xm->xm_target, lun) != NULL)
1673 esiop_add_dev(sc, xm->xm_target, lun);
1674 }
1675 }
1676 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1677 (sc->sc_c.features & SF_BUS_WIDE))
1678 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1679 if (xm->xm_mode & PERIPH_CAP_SYNC)
1680 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1681 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1682 (sc->sc_c.features & SF_CHIP_DT))
1683 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1684 if ((xm->xm_mode &
1685 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1686 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1687 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1688
1689 splx(s);
1690 }
1691 }
1692 }
1693
1694 static void
1695 esiop_start(struct esiop_softc *sc, struct esiop_cmd *esiop_cmd)
1696 {
1697 struct esiop_lun *esiop_lun;
1698 struct esiop_target *esiop_target;
1699 int timeout;
1700 int target, lun, slot;
1701
1702 /*
1703 * first make sure to read valid data
1704 */
1705 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1706
1707 /*
1708 * We use a circular queue here. sc->sc_currschedslot points to a
1709 * free slot, unless we have filled the queue. Check this.
1710 */
1711 slot = sc->sc_currschedslot;
1712 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1713 A_f_cmd_free) == 0) {
1714 /*
1715 * no more free slots, no need to continue. Freeze the queue
1716 * and requeue this command.
1717 */
1718 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1719 sc->sc_flags |= SCF_CHAN_NOSLOT;
1720 esiop_script_write(sc, sc->sc_semoffset,
1721 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1722 esiop_script_sync(sc,
1723 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1724 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1725 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1726 esiop_scsicmd_end(esiop_cmd, 0);
1727 return;
1728 }
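/*
 * The queue is thawed again from esiop_checkdone() once the SCRIPT has
 * started at least one command (sem_start set), which means a scheduler
 * slot has been freed.
 */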
1729 /* OK, we can use this slot */
1730
1731 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1732 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1733 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1734 esiop_lun = esiop_target->esiop_lun[lun];
1735 /* if non-tagged command active, panic: this shouldn't happen */
1736 if (esiop_lun->active != NULL) {
1737 panic("esiop_start: tagged cmd while untagged running");
1738 }
1739 #ifdef DIAGNOSTIC
1740 /* sanity check the tag if needed */
1741 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1742 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1743 esiop_cmd->cmd_c.tag < 0) {
1744 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1745 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1746 panic("esiop_start: invalid tag id");
1747 }
1748 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1749 panic("esiop_start: tag not free");
1750 }
1751 #endif
1752 #ifdef SIOP_DEBUG_SCHED
1753 printf("using slot %d for DSA 0x%lx\n", slot,
1754 (u_long)esiop_cmd->cmd_c.dsa);
1755 #endif
1756 /* mark command as active */
1757 if (esiop_cmd->cmd_c.status == CMDST_READY)
1758 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1759 else
1760 panic("esiop_start: bad status");
1761 /* DSA table for reselect */
1762 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1763 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1764 /* DSA table for reselect */
1765 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1766 htole32(esiop_cmd->cmd_c.dsa);
1767 bus_dmamap_sync(sc->sc_c.sc_dmat,
1768 esiop_lun->lun_tagtbl->tblblk->blkmap,
1769 esiop_lun->lun_tagtbl->tbl_offset,
1770 sizeof(uint32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1771 } else {
1772 esiop_lun->active = esiop_cmd;
1773 esiop_script_write(sc,
1774 esiop_target->lun_table_offset +
1775 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1776 esiop_cmd->cmd_c.dsa);
1777 }
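/*
 * Either way the command's DSA is now reachable from the reselect
 * tables: through the per-LUN tag table for tagged commands, or through
 * the target's LUN table entry for untagged ones.
 */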
1778 /* scheduler slot: DSA */
1779 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1780 esiop_cmd->cmd_c.dsa);
1781 /* make sure SCRIPT processor will read valid data */
1782 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1783 /* handle timeout */
1784 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1785 		/* start expire timer */
1786 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1787 if (timeout == 0)
1788 timeout = 1;
1789 		callout_reset(&esiop_cmd->cmd_c.xs->xs_callout,
1790 timeout, esiop_timeout, esiop_cmd);
1791 }
1792 /* Signal script it has some work to do */
1793 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1794 SIOP_ISTAT, ISTAT_SIGP);
1795 /* update the current slot, and wait for IRQ */
1796 sc->sc_currschedslot++;
1797 if (sc->sc_currschedslot >= A_ncmd_slots)
1798 sc->sc_currschedslot = 0;
1799 }
1800
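/*
 * Callout handler armed by esiop_start(): a command exceeded its timeout.
 * Print some diagnostics, reset the SCSI bus and flag the command with
 * CMDFL_TIMEOUT; the command itself is completed later, from the interrupt
 * generated by the bus reset.
 */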
1801 void
1802 esiop_timeout(void *v)
1803 {
1804 struct esiop_cmd *esiop_cmd = v;
1805 struct esiop_softc *sc =
1806 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1807 int s;
1808 #ifdef SIOP_DEBUG
1809 int slot, slotdsa;
1810 #endif
1811
1812 s = splbio();
1813 esiop_table_sync(esiop_cmd,
1814 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1815 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1816 #ifdef SIOP_DEBUG
1817 printf("command timeout (status %d)\n",
1818 le32toh(esiop_cmd->cmd_tables->status));
1819
1820 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1821 for (slot = 0; slot < A_ncmd_slots; slot++) {
1822 slotdsa = esiop_script_read(sc,
1823 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1824 if ((slotdsa & 0x01) == 0)
1825 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1826 }
1827 printf("istat 0x%x ",
1828 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1829 printf("DSP 0x%lx DSA 0x%x\n",
1830 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP)
1831 - sc->sc_c.sc_scriptaddr),
1832 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1833 (void)bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1834 printf("istat 0x%x\n",
1835 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1836 #else
1837 printf("command timeout, CDB: ");
1838 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1839 printf("\n");
1840 #endif
1841 /* reset the scsi bus */
1842 siop_resetbus(&sc->sc_c);
1843
1844 /* deactivate callout */
1845 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1846 	/*
1847 	 * mark the command as timed out and just return;
1848 	 * the bus reset will generate an interrupt,
1849 	 * which will be handled in esiop_intr()
1850 	 */
1851 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1852 splx(s);
1853 }
1854
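/*
 * Debug helper: dump the first page of the script, two 32-bit words per
 * line.  Memory-to-memory move instructions (top bits 110) carry a third
 * word, which is printed on the same line.
 */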
1855 void
1856 esiop_dump_script(struct esiop_softc *sc)
1857 {
1858 int i;
1859
1860 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1861 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1862 le32toh(sc->sc_c.sc_script[i]),
1863 le32toh(sc->sc_c.sc_script[i + 1]));
1864 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1865 0xc0000000) {
1866 i++;
1867 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i + 1]));
1868 }
1869 printf("\n");
1870 }
1871 }
1872
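/*
 * Grow the pool of command descriptors: allocate one DMA-safe page holding
 * SIOP_NCMDPB esiop_xfer structures, create the per-command DMA maps, set
 * up the message-in/out and status table entries to point back into the
 * descriptor, and put the new commands on the free list, crediting the
 * adapter openings accordingly.  On failure the partial allocation is torn
 * down and the pool is simply not grown.
 */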
1873 void
1874 esiop_morecbd(struct esiop_softc *sc)
1875 {
1876 int error, i, s;
1877 bus_dma_segment_t seg;
1878 int rseg;
1879 struct esiop_cbd *newcbd;
1880 struct esiop_xfer *xfer;
1881 bus_addr_t dsa;
1882
1883 /* allocate a new list head */
1884 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1885 if (newcbd == NULL) {
1886 aprint_error_dev(sc->sc_c.sc_dev,
1887 "can't allocate memory for command descriptors "
1888 "head\n");
1889 return;
1890 }
1891
1892 /* allocate cmd list */
1893 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1894 M_DEVBUF, M_NOWAIT|M_ZERO);
1895 if (newcbd->cmds == NULL) {
1896 aprint_error_dev(sc->sc_c.sc_dev,
1897 "can't allocate memory for command descriptors\n");
1898 goto bad3;
1899 }
1900 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1901 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1902 if (error) {
1903 aprint_error_dev(sc->sc_c.sc_dev,
1904 "unable to allocate cbd DMA memory, error = %d\n",
1905 error);
1906 goto bad2;
1907 }
1908 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1909 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1910 if (error) {
1911 aprint_error_dev(sc->sc_c.sc_dev,
1912 "unable to map cbd DMA memory, error = %d\n",
1913 error);
1914 goto bad2;
1915 }
1916 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1917 BUS_DMA_NOWAIT, &newcbd->xferdma);
1918 if (error) {
1919 aprint_error_dev(sc->sc_c.sc_dev,
1920 "unable to create cbd DMA map, error = %d\n", error);
1921 goto bad1;
1922 }
1923 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1924 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1925 if (error) {
1926 aprint_error_dev(sc->sc_c.sc_dev,
1927 "unable to load cbd DMA map, error = %d\n", error);
1928 goto bad0;
1929 }
1930 #ifdef DEBUG
1931 	aprint_debug_dev(sc->sc_c.sc_dev, "alloc newcbd at PHY addr 0x%lx\n",
1932 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1933 #endif
1934 for (i = 0; i < SIOP_NCMDPB; i++) {
1935 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1936 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1937 &newcbd->cmds[i].cmd_c.dmamap_data);
1938 if (error) {
1939 aprint_error_dev(sc->sc_c.sc_dev,
1940 "unable to create data DMA map for cbd: "
1941 "error %d\n", error);
1942 goto bad0;
1943 }
1944 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1945 sizeof(struct scsipi_generic), 1,
1946 sizeof(struct scsipi_generic), 0,
1947 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1948 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1949 if (error) {
1950 aprint_error_dev(sc->sc_c.sc_dev,
1951 			    "unable to create cmd DMA map for cbd: error %d\n", error);
1952 goto bad0;
1953 }
1954 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1955 newcbd->cmds[i].esiop_cbdp = newcbd;
1956 xfer = &newcbd->xfers[i];
1957 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1958 memset(newcbd->cmds[i].cmd_tables, 0,
1959 sizeof(struct esiop_xfer));
1960 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1961 i * sizeof(struct esiop_xfer);
1962 newcbd->cmds[i].cmd_c.dsa = dsa;
1963 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1964 		xfer->siop_tables.t_msgout.count = htole32(1);
1965 		xfer->siop_tables.t_msgout.addr = htole32(dsa);
1966 		xfer->siop_tables.t_msgin.count = htole32(1);
1967 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1968 offsetof(struct siop_common_xfer, msg_in));
1969 		xfer->siop_tables.t_extmsgin.count = htole32(2);
1970 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1971 offsetof(struct siop_common_xfer, msg_in) + 1);
1972 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1973 offsetof(struct siop_common_xfer, msg_in) + 3);
1974 		xfer->siop_tables.t_status.count = htole32(1);
1975 xfer->siop_tables.t_status.addr = htole32(dsa +
1976 offsetof(struct siop_common_xfer, status));
1977
1978 s = splbio();
1979 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1980 splx(s);
1981 #ifdef SIOP_DEBUG
1982 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1983 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1984 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1985 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1986 #endif
1987 }
1988 s = splbio();
1989 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1990 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1991 splx(s);
1992 return;
1993 bad0:
1994 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1995 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1996 bad1:
1997 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1998 bad2:
1999 free(newcbd->cmds, M_DEVBUF);
2000 bad3:
2001 free(newcbd, M_DEVBUF);
2002 }
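
#if 0
/*
 * Illustrative sketch only, kept under #if 0 and not compiled: how a
 * caller would pull a command descriptor off the free list, growing the
 * pool with esiop_morecbd() when it runs dry.  The function name
 * esiop_get_cmd_sketch is hypothetical and does not exist in this driver.
 */
static struct esiop_cmd *
esiop_get_cmd_sketch(struct esiop_softc *sc)
{
	struct esiop_cmd *esiop_cmd;
	int s;

	s = splbio();
	esiop_cmd = TAILQ_FIRST(&sc->free_list);
	if (esiop_cmd == NULL) {
		/* no free descriptor: try to allocate a new block of them */
		esiop_morecbd(sc);
		esiop_cmd = TAILQ_FIRST(&sc->free_list);
	}
	if (esiop_cmd != NULL)
		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
	splx(s);
	return esiop_cmd;
}
#endif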
2003
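/*
 * Grow the pool of tag DSA tables: allocate one DMA-safe page and carve it
 * into ESIOP_NTPB tables of ESIOP_NTAG 32-bit DSA entries each.  The tables
 * are queued on free_tagtbl for esiop_add_dev() to hand out; the backing
 * block is kept on tag_tblblk.
 */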
2004 void
2005 esiop_moretagtbl(struct esiop_softc *sc)
2006 {
2007 int error, i, j, s;
2008 bus_dma_segment_t seg;
2009 int rseg;
2010 struct esiop_dsatblblk *newtblblk;
2011 struct esiop_dsatbl *newtbls;
2012 uint32_t *tbls;
2013
2014 /* allocate a new list head */
2015 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2016 M_DEVBUF, M_NOWAIT|M_ZERO);
2017 if (newtblblk == NULL) {
2018 aprint_error_dev(sc->sc_c.sc_dev,
2019 "can't allocate memory for tag DSA table block\n");
2020 return;
2021 }
2022
2023 /* allocate tbl list */
2024 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2025 M_DEVBUF, M_NOWAIT|M_ZERO);
2026 if (newtbls == NULL) {
2027 aprint_error_dev(sc->sc_c.sc_dev,
2028 		    "can't allocate memory for tag DSA tables\n");
2029 goto bad3;
2030 }
2031 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2032 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2033 if (error) {
2034 aprint_error_dev(sc->sc_c.sc_dev,
2035 "unable to allocate tbl DMA memory, error = %d\n", error);
2036 goto bad2;
2037 }
2038 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2039 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2040 if (error) {
2041 aprint_error_dev(sc->sc_c.sc_dev,
2042 "unable to map tbls DMA memory, error = %d\n", error);
2043 goto bad2;
2044 }
2045 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2046 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2047 if (error) {
2048 aprint_error_dev(sc->sc_c.sc_dev,
2049 "unable to create tbl DMA map, error = %d\n", error);
2050 goto bad1;
2051 }
2052 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2053 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2054 if (error) {
2055 aprint_error_dev(sc->sc_c.sc_dev,
2056 "unable to load tbl DMA map, error = %d\n", error);
2057 goto bad0;
2058 }
2059 #ifdef DEBUG
2060 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2061 device_xname(sc->sc_c.sc_dev),
2062 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2063 #endif
2064 for (i = 0; i < ESIOP_NTPB; i++) {
2065 newtbls[i].tblblk = newtblblk;
2066 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2067 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(uint32_t);
2068 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2069 newtbls[i].tbl_offset;
2070 for (j = 0; j < ESIOP_NTAG; j++)
2071 newtbls[i].tbl[j] = j;
2072 s = splbio();
2073 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2074 splx(s);
2075 }
2076 s = splbio();
2077 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2078 splx(s);
2079 return;
2080 bad0:
2081 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2082 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2083 bad1:
2084 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2085 bad2:
2086 free(newtbls, M_DEVBUF);
2087 bad3:
2088 free(newtblblk, M_DEVBUF);
2089 }
2090
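/*
 * The first word of a target's lun table holds target_c.id, which packs
 * the SCSI id together with the negotiated scntl3/sxfer transfer
 * parameters.  Rewrite it (and sync the script) so that the script
 * processor loads the new values on the next (re)select.
 */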
2091 void
2092 esiop_update_scntl3(struct esiop_softc *sc,
2093 struct siop_common_target *_siop_target)
2094 {
2095 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2096
2097 esiop_script_write(sc, esiop_target->lun_table_offset,
2098 esiop_target->target_c.id);
2099 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2100 }
2101
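/*
 * Make sure a LUN that is going to use tagged queuing has a tag DSA table:
 * take one from free_tagtbl (growing the pool if needed) and record its
 * bus address in the LUN's entry of the target's lun table.  If no table
 * can be allocated, fall back to untagged operation for the whole target.
 */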
2102 void
2103 esiop_add_dev(struct esiop_softc *sc, int target, int lun)
2104 {
2105 struct esiop_target *esiop_target =
2106 (struct esiop_target *)sc->sc_c.targets[target];
2107 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2108
2109 if (esiop_lun->lun_tagtbl != NULL)
2110 return; /* already allocated */
2111
2112 /* we need a tag DSA table */
2113 	esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2114 if (esiop_lun->lun_tagtbl == NULL) {
2115 esiop_moretagtbl(sc);
2116 		esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2117 if (esiop_lun->lun_tagtbl == NULL) {
2118 /* no resources, run untagged */
2119 esiop_target->target_c.flags &= ~TARF_TAG;
2120 return;
2121 }
2122 }
2123 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2124 /* Update LUN DSA table */
2125 esiop_script_write(sc, esiop_target->lun_table_offset +
2126 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2127 esiop_lun->lun_tagtbl->tbl_dsa);
2128 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2129 }
2130
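/*
 * Forget the per-LUN software state when a device goes away.
 */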
2131 void
2132 esiop_del_dev(struct esiop_softc *sc, int target, int lun)
2133 {
2134 struct esiop_target *esiop_target;
2135
2136 #ifdef SIOP_DEBUG
2137 printf("%s:%d:%d: free lun sw entry\n",
2138 device_xname(sc->sc_c.sc_dev), target, lun);
2139 #endif
2140 if (sc->sc_c.targets[target] == NULL)
2141 return;
2142 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2143 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2144 esiop_target->esiop_lun[lun] = NULL;
2145 }
2146
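/*
 * Allocate the per-target lun table from free script RAM (two words per
 * LUN plus two header words, the first of which is the target id word),
 * record the table's bus address in the global target table so the script
 * can locate it at reselect time, and re-register any tag DSA tables the
 * LUNs already have.
 */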
2147 void
2148 esiop_target_register(struct esiop_softc *sc, uint32_t target)
2149 {
2150 struct esiop_target *esiop_target =
2151 (struct esiop_target *)sc->sc_c.targets[target];
2152 struct esiop_lun *esiop_lun;
2153 int lun;
2154
2155 /* get a DSA table for this target */
2156 esiop_target->lun_table_offset = sc->sc_free_offset;
2157 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2158 #ifdef SIOP_DEBUG
2159 printf("%s: lun table for target %d offset %d free offset %d\n",
2160 device_xname(sc->sc_c.sc_dev), target,
2161 esiop_target->lun_table_offset,
2162 sc->sc_free_offset);
2163 #endif
2164 	/* the first word of the table is the ID (for select) */
2165 esiop_script_write(sc, esiop_target->lun_table_offset,
2166 esiop_target->target_c.id);
2167 /* Record this table in the target DSA table */
2168 esiop_script_write(sc,
2169 sc->sc_target_table_offset + target,
2170 (esiop_target->lun_table_offset * sizeof(uint32_t)) +
2171 sc->sc_c.sc_scriptaddr);
2172 /* if we have a tag table, register it */
2173 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2174 esiop_lun = esiop_target->esiop_lun[lun];
2175 if (esiop_lun == NULL)
2176 continue;
2177 if (esiop_lun->lun_tagtbl)
2178 esiop_script_write(sc, esiop_target->lun_table_offset +
2179 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2180 esiop_lun->lun_tagtbl->tbl_dsa);
2181 }
2182 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2183 }
2184
2185 #ifdef SIOP_STATS
2186 void
2187 esiop_printstats(void)
2188 {
2189
2190 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2191 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2192 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2193 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2194 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2195 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2196 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2197 }
2198 #endif
2199