    1 /* $NetBSD: esiop.c,v 1.42 2008/04/08 12:07:26 cegger Exp $ */
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.42 2008/04/08 12:07:26 cegger Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #include <machine/endian.h>
48 #include <sys/bus.h>
49
50 #include <dev/microcode/siop/esiop.out>
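/*
 * esiop.out is the assembled SCRIPTS microcode: it provides the
 * esiop_script[] array, the Ent_* entry point offsets and the
 * E_*_Used patch tables used below to copy and relocate the script.
 */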
51
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55
56 #include <dev/scsipi/scsiconf.h>
57
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
61
62 #include "opt_siop.h"
63
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 /*
68 #define SIOP_DEBUG
69 #define SIOP_DEBUG_DR
70 #define SIOP_DEBUG_INTR
71 #define SIOP_DEBUG_SCHED
72 #define DUMP_SCRIPT
73 */
74
75 #define SIOP_STATS
76
77 #ifndef SIOP_DEFAULT_TARGET
78 #define SIOP_DEFAULT_TARGET 7
79 #endif
80
81 /* number of cmd descriptors per block */
82 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
83
84 void esiop_reset(struct esiop_softc *);
85 void esiop_checkdone(struct esiop_softc *);
86 void esiop_handle_reset(struct esiop_softc *);
87 void esiop_scsicmd_end(struct esiop_cmd *, int);
88 void esiop_unqueue(struct esiop_softc *, int, int);
89 int esiop_handle_qtag_reject(struct esiop_cmd *);
90 static void esiop_start(struct esiop_softc *, struct esiop_cmd *);
91 void esiop_timeout(void *);
92 void esiop_scsipi_request(struct scsipi_channel *,
93 scsipi_adapter_req_t, void *);
94 void esiop_dump_script(struct esiop_softc *);
95 void esiop_morecbd(struct esiop_softc *);
96 void esiop_moretagtbl(struct esiop_softc *);
97 void siop_add_reselsw(struct esiop_softc *, int);
98 void esiop_target_register(struct esiop_softc *, u_int32_t);
99
100 void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
101
102 #ifdef SIOP_STATS
103 static int esiop_stat_intr = 0;
104 static int esiop_stat_intr_shortxfer = 0;
105 static int esiop_stat_intr_sdp = 0;
106 static int esiop_stat_intr_done = 0;
107 static int esiop_stat_intr_xferdisc = 0;
108 static int esiop_stat_intr_lunresel = 0;
109 static int esiop_stat_intr_qfull = 0;
110 void esiop_printstats(void);
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
115
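/*
 * Accessors for the SCRIPTS program. Depending on SF_CHIP_RAM the
 * script lives either in on-chip RAM (accessed through bus_space)
 * or in host memory (accessed through sc_script, with explicit
 * byte swapping and DMA map syncs).
 */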
116 static inline void esiop_script_sync(struct esiop_softc *, int);
117 static inline void
118 esiop_script_sync(sc, ops)
119 struct esiop_softc *sc;
120 int ops;
121 {
122 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
123 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
124 PAGE_SIZE, ops);
125 }
126
127 static inline u_int32_t esiop_script_read(struct esiop_softc *, u_int);
128 static inline u_int32_t
129 esiop_script_read(sc, offset)
130 struct esiop_softc *sc;
131 u_int offset;
132 {
133 if (sc->sc_c.features & SF_CHIP_RAM) {
134 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
135 offset * 4);
136 } else {
137 return le32toh(sc->sc_c.sc_script[offset]);
138 }
139 }
140
141 static inline void esiop_script_write(struct esiop_softc *, u_int,
142 u_int32_t);
143 static inline void
144 esiop_script_write(sc, offset, val)
145 struct esiop_softc *sc;
146 u_int offset;
147 u_int32_t val;
148 {
149 if (sc->sc_c.features & SF_CHIP_RAM) {
150 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
151 offset * 4, val);
152 } else {
153 sc->sc_c.sc_script[offset] = htole32(val);
154 }
155 }
156
157 void
158 esiop_attach(sc)
159 struct esiop_softc *sc;
160 {
161 struct esiop_dsatbl *tagtbl_donering;
162
163 if (siop_common_attach(&sc->sc_c) != 0 )
164 return;
165
166 TAILQ_INIT(&sc->free_list);
167 TAILQ_INIT(&sc->cmds);
168 TAILQ_INIT(&sc->free_tagtbl);
169 TAILQ_INIT(&sc->tag_tblblk);
170 sc->sc_currschedslot = 0;
171 #ifdef SIOP_DEBUG
172 aprint_debug_dev(&sc->sc_c.sc_dev, "script size = %d, PHY addr=0x%x, VIRT=%p\n",
173 (int)sizeof(esiop_script),
174 (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
175 #endif
176
177 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
178 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
179
180 /*
181 * get space for the CMD done slot. For this we use a tag table entry.
182 * It's the same size and allows us to not waste 3/4 of a page
183 */
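/*
 * The done ring is A_ndone_slots 32-bit entries; the DIAGNOSTIC check
 * below verifies it really is the same size as a tag DSA table
 * (ESIOP_NTAG entries), since we borrow one of those for it.
 */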
184 #ifdef DIAGNOSTIC
185 if (ESIOP_NTAG != A_ndone_slots) {
186 aprint_error_dev(&sc->sc_c.sc_dev, "size of tag DSA table different from the done"
187 " ring\n");
188 return;
189 }
190 #endif
191 esiop_moretagtbl(sc);
192 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
193 if (tagtbl_donering == NULL) {
194 aprint_error_dev(&sc->sc_c.sc_dev, "no memory for command done ring\n");
195 return;
196 }
197 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
198 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
199 sc->sc_done_offset = tagtbl_donering->tbl_offset;
200 sc->sc_done_slot = &tagtbl_donering->tbl[0];
201
202 /* Do a bus reset, so that devices fall back to narrow/async */
203 siop_resetbus(&sc->sc_c);
204 /*
  205 * esiop_reset() will reset the chip, thus clearing pending interrupts
206 */
207 esiop_reset(sc);
208 #ifdef DUMP_SCRIPT
209 esiop_dump_script(sc);
210 #endif
211
212 config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
213 }
214
215 void
216 esiop_reset(sc)
217 struct esiop_softc *sc;
218 {
219 int i, j;
220 u_int32_t addr;
221 u_int32_t msgin_addr, sem_addr;
222
223 siop_common_reset(&sc->sc_c);
224
225 /*
  226 * we copy the script to the beginning of RAM. Then there are 4 bytes
  227 * for message in, and 4 bytes for the semaphore
228 */
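/*
 * A sketch of the resulting layout (in 32-bit words), as set up by the
 * offset computations below:
 *   [0 .. end of script]          SCRIPTS program
 *   [sc_free_offset]              msgin buffer (1 word)
 *   [sc_semoffset]                semaphore (1 word)
 *   [sc_shedoffset ...]           scheduler ring, A_ncmd_slots * CMD_SLOTSIZE
 *   [sc_target_table_offset ...]  per-target DSA table, chan_ntargets words
 */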
229 sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
230 msgin_addr =
231 sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
232 sc->sc_free_offset += 1;
233 sc->sc_semoffset = sc->sc_free_offset;
234 sem_addr =
235 sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
236 sc->sc_free_offset += 1;
237 /* then we have the scheduler ring */
238 sc->sc_shedoffset = sc->sc_free_offset;
239 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
240 /* then the targets DSA table */
241 sc->sc_target_table_offset = sc->sc_free_offset;
242 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
243 /* copy and patch the script */
244 if (sc->sc_c.features & SF_CHIP_RAM) {
245 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
246 esiop_script,
247 sizeof(esiop_script) / sizeof(esiop_script[0]));
248 for (j = 0; j <
249 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
250 j++) {
251 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
252 E_tlq_offset_Used[j] * 4,
253 sizeof(struct siop_common_xfer));
254 }
255 for (j = 0; j <
256 (sizeof(E_saved_offset_offset_Used) /
257 sizeof(E_saved_offset_offset_Used[0]));
258 j++) {
259 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
260 E_saved_offset_offset_Used[j] * 4,
261 sizeof(struct siop_common_xfer) + 4);
262 }
263 for (j = 0; j <
264 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
265 j++) {
266 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
267 E_abs_msgin2_Used[j] * 4, msgin_addr);
268 }
269 for (j = 0; j <
270 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
271 j++) {
272 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
273 E_abs_sem_Used[j] * 4, sem_addr);
274 }
275
276 if (sc->sc_c.features & SF_CHIP_LED0) {
277 bus_space_write_region_4(sc->sc_c.sc_ramt,
278 sc->sc_c.sc_ramh,
279 Ent_led_on1, esiop_led_on,
280 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
281 bus_space_write_region_4(sc->sc_c.sc_ramt,
282 sc->sc_c.sc_ramh,
283 Ent_led_on2, esiop_led_on,
284 sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
285 bus_space_write_region_4(sc->sc_c.sc_ramt,
286 sc->sc_c.sc_ramh,
287 Ent_led_off, esiop_led_off,
288 sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
289 }
290 } else {
291 for (j = 0;
292 j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
293 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
294 }
295 for (j = 0; j <
296 (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
297 j++) {
298 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
299 htole32(sizeof(struct siop_common_xfer));
300 }
301 for (j = 0; j <
302 (sizeof(E_saved_offset_offset_Used) /
303 sizeof(E_saved_offset_offset_Used[0]));
304 j++) {
305 sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
306 htole32(sizeof(struct siop_common_xfer) + 4);
307 }
308 for (j = 0; j <
309 (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
310 j++) {
311 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
312 htole32(msgin_addr);
313 }
314 for (j = 0; j <
315 (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
316 j++) {
317 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
318 htole32(sem_addr);
319 }
320
321 if (sc->sc_c.features & SF_CHIP_LED0) {
322 for (j = 0; j < (sizeof(esiop_led_on) /
323 sizeof(esiop_led_on[0])); j++)
324 sc->sc_c.sc_script[
325 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
326 ] = htole32(esiop_led_on[j]);
327 for (j = 0; j < (sizeof(esiop_led_on) /
328 sizeof(esiop_led_on[0])); j++)
329 sc->sc_c.sc_script[
330 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
331 ] = htole32(esiop_led_on[j]);
332 for (j = 0; j < (sizeof(esiop_led_off) /
333 sizeof(esiop_led_off[0])); j++)
334 sc->sc_c.sc_script[
335 Ent_led_off / sizeof(esiop_led_off[0]) + j
336 ] = htole32(esiop_led_off[j]);
337 }
338 }
339 /* get base of scheduler ring */
340 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
341 /* init scheduler */
342 for (i = 0; i < A_ncmd_slots; i++) {
343 esiop_script_write(sc,
344 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
345 }
346 sc->sc_currschedslot = 0;
347 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
348 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
349 /*
350 * 0x78000000 is a 'move data8 to reg'. data8 is the second
351 * octet, reg offset is the third.
352 */
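/*
 * For example, 0x78640000 | ((addr & 0xff) << 8) assembles to
 * "MOVE addr[7:0] TO register 0x64" (presumably byte 0 of SCRATCHD,
 * matching the SIOP_SCRATCHD write above). The four patched
 * instructions below store the 32-bit scheduler ring base address
 * into that register, one byte per instruction, when the script
 * executes them.
 */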
353 esiop_script_write(sc, Ent_cmdr0 / 4,
354 0x78640000 | ((addr & 0x000000ff) << 8));
355 esiop_script_write(sc, Ent_cmdr1 / 4,
356 0x78650000 | ((addr & 0x0000ff00) ));
357 esiop_script_write(sc, Ent_cmdr2 / 4,
358 0x78660000 | ((addr & 0x00ff0000) >> 8));
359 esiop_script_write(sc, Ent_cmdr3 / 4,
360 0x78670000 | ((addr & 0xff000000) >> 16));
361 /* done ring */
362 for (i = 0; i < A_ndone_slots; i++)
363 sc->sc_done_slot[i] = 0;
364 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
365 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
366 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
367 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
368 sc->sc_currdoneslot = 0;
369 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
370 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
371 esiop_script_write(sc, Ent_doner0 / 4,
372 0x786c0000 | ((addr & 0x000000ff) << 8));
373 esiop_script_write(sc, Ent_doner1 / 4,
374 0x786d0000 | ((addr & 0x0000ff00) ));
375 esiop_script_write(sc, Ent_doner2 / 4,
376 0x786e0000 | ((addr & 0x00ff0000) >> 8));
377 esiop_script_write(sc, Ent_doner3 / 4,
378 0x786f0000 | ((addr & 0xff000000) >> 16));
379
380 /* set flags */
381 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
382 /* write pointer of base of target DSA table */
383 addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
384 sc->sc_c.sc_scriptaddr;
385 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
386 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
387 ((addr & 0x000000ff) << 8));
388 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
389 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
390 ((addr & 0x0000ff00) ));
391 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
392 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
393 ((addr & 0x00ff0000) >> 8));
394 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
395 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
396 ((addr & 0xff000000) >> 16));
397 #ifdef SIOP_DEBUG
398 printf("%s: target table offset %d free offset %d\n",
399 device_xname(&sc->sc_c.sc_dev), sc->sc_target_table_offset,
400 sc->sc_free_offset);
401 #endif
402
403 /* register existing targets */
404 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
405 if (sc->sc_c.targets[i])
406 esiop_target_register(sc, i);
407 }
408 /* start script */
409 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
410 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
411 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
412 }
413 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
414 sc->sc_c.sc_scriptaddr + Ent_reselect);
415 }
416
417 #if 0
418 #define CALL_SCRIPT(ent) do {\
419 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
420 esiop_cmd->cmd_c.dsa, \
421 sc->sc_c.sc_scriptaddr + ent); \
422 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
423 } while (0)
424 #else
425 #define CALL_SCRIPT(ent) do {\
426 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
427 } while (0)
428 #endif
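/*
 * CALL_SCRIPT kicks the SCRIPTS processor: writing an entry address
 * to DSP (re)starts execution of the script at that point.
 */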
429
430 int
431 esiop_intr(v)
432 void *v;
433 {
434 struct esiop_softc *sc = v;
435 struct esiop_target *esiop_target;
436 struct esiop_cmd *esiop_cmd;
437 struct esiop_lun *esiop_lun;
438 struct scsipi_xfer *xs;
439 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
440 u_int32_t irqcode;
441 int need_reset = 0;
442 int offset, target, lun, tag;
443 u_int32_t tflags;
444 u_int32_t addr;
445 int freetarget = 0;
446 int slot;
447 int retval = 0;
448
449 again:
450 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
451 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
452 return retval;
453 }
454 retval = 1;
455 INCSTAT(esiop_stat_intr);
456 esiop_checkdone(sc);
457 if (istat & ISTAT_INTF) {
458 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
459 SIOP_ISTAT, ISTAT_INTF);
460 goto again;
461 }
462
463 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
464 (ISTAT_DIP | ISTAT_ABRT)) {
465 /* clear abort */
466 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
467 SIOP_ISTAT, 0);
468 }
469
470 /* get CMD from T/L/Q */
471 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
472 SIOP_SCRATCHC);
473 #ifdef SIOP_DEBUG_INTR
474 printf("interrupt, istat=0x%x tflags=0x%x "
475 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
476 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
477 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
478 SIOP_DSP) -
479 sc->sc_c.sc_scriptaddr));
480 #endif
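/*
 * tflags is a copy of SCRATCHC, which the script keeps up to date with
 * the current T/L/Q context: the A_f_c_target/_lun/_tag flags say which
 * fields are valid, the target id is in bits 8-15, the lun in bits
 * 16-23 and the tag in bits 24-31 (the same layout built into the
 * tlq word in esiop_scsipi_request()).
 */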
481 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
482 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
483 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
484 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
485 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
486
487 if (target >= 0 && lun >= 0) {
488 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
489 if (esiop_target == NULL) {
490 printf("esiop_target (target %d) not valid\n", target);
491 goto none;
492 }
493 esiop_lun = esiop_target->esiop_lun[lun];
494 if (esiop_lun == NULL) {
495 printf("esiop_lun (target %d lun %d) not valid\n",
496 target, lun);
497 goto none;
498 }
499 esiop_cmd =
500 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
501 if (esiop_cmd == NULL) {
502 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
503 target, lun, tag);
504 goto none;
505 }
506 xs = esiop_cmd->cmd_c.xs;
507 #ifdef DIAGNOSTIC
508 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
509 printf("esiop_cmd (target %d lun %d) "
510 "not active (%d)\n", target, lun,
511 esiop_cmd->cmd_c.status);
512 goto none;
513 }
514 #endif
515 esiop_table_sync(esiop_cmd,
516 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
517 } else {
518 none:
519 xs = NULL;
520 esiop_target = NULL;
521 esiop_lun = NULL;
522 esiop_cmd = NULL;
523 }
524 if (istat & ISTAT_DIP) {
525 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
526 SIOP_DSTAT);
527 if (dstat & DSTAT_ABRT) {
528 /* was probably generated by a bus reset IOCTL */
529 if ((dstat & DSTAT_DFE) == 0)
530 siop_clearfifo(&sc->sc_c);
531 goto reset;
532 }
533 if (dstat & DSTAT_SSI) {
534 printf("single step dsp 0x%08x dsa 0x08%x\n",
535 (int)(bus_space_read_4(sc->sc_c.sc_rt,
536 sc->sc_c.sc_rh, SIOP_DSP) -
537 sc->sc_c.sc_scriptaddr),
538 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
539 SIOP_DSA));
540 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
541 (istat & ISTAT_SIP) == 0) {
542 bus_space_write_1(sc->sc_c.sc_rt,
543 sc->sc_c.sc_rh, SIOP_DCNTL,
544 bus_space_read_1(sc->sc_c.sc_rt,
545 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
546 }
547 return 1;
548 }
549
550 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
551 printf("%s: DMA IRQ:", device_xname(&sc->sc_c.sc_dev));
552 if (dstat & DSTAT_IID)
553 printf(" Illegal instruction");
554 if (dstat & DSTAT_BF)
555 printf(" bus fault");
556 if (dstat & DSTAT_MDPE)
557 printf(" parity");
558 if (dstat & DSTAT_DFE)
559 printf(" DMA fifo empty");
560 else
561 siop_clearfifo(&sc->sc_c);
562 printf(", DSP=0x%x DSA=0x%x: ",
563 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
564 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
565 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
566 if (esiop_cmd)
567 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
568 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
569 le32toh(esiop_cmd->cmd_tables->status));
570 else
571 printf(" current T/L/Q invalid\n");
572 need_reset = 1;
573 }
574 }
575 if (istat & ISTAT_SIP) {
576 if (istat & ISTAT_DIP)
577 delay(10);
578 /*
  579 * Can't read sist0 & sist1 independently without inserting a
  580 * delay; a single 16-bit read fetches both
581 */
582 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
583 SIOP_SIST0);
584 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
585 SIOP_SSTAT1);
586 #ifdef SIOP_DEBUG_INTR
587 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
588 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
589 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
590 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
591 SIOP_DSP) -
592 sc->sc_c.sc_scriptaddr));
593 #endif
594 if (sist & SIST0_RST) {
595 esiop_handle_reset(sc);
596 /* no table to flush here */
597 return 1;
598 }
599 if (sist & SIST0_SGE) {
600 if (esiop_cmd)
601 scsipi_printaddr(xs->xs_periph);
602 else
603 printf("%s:", device_xname(&sc->sc_c.sc_dev));
604 printf("scsi gross error\n");
605 if (esiop_target)
606 esiop_target->target_c.flags &= ~TARF_DT;
607 #ifdef DEBUG
608 printf("DSA=0x%x DSP=0x%lx\n",
609 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
610 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
611 SIOP_DSP) -
612 sc->sc_c.sc_scriptaddr));
613 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
614 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SDID),
615 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL3),
616 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SXFER),
617 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL4));
618
619 #endif
620 goto reset;
621 }
622 if ((sist & SIST0_MA) && need_reset == 0) {
623 if (esiop_cmd) {
624 int scratchc0;
625 dstat = bus_space_read_1(sc->sc_c.sc_rt,
626 sc->sc_c.sc_rh, SIOP_DSTAT);
627 /*
628 * first restore DSA, in case we were in a S/G
629 * operation.
630 */
631 bus_space_write_4(sc->sc_c.sc_rt,
632 sc->sc_c.sc_rh,
633 SIOP_DSA, esiop_cmd->cmd_c.dsa);
634 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
635 sc->sc_c.sc_rh, SIOP_SCRATCHC);
636 switch (sstat1 & SSTAT1_PHASE_MASK) {
637 case SSTAT1_PHASE_STATUS:
638 /*
639 * previous phase may be aborted for any reason
640 * ( for example, the target has less data to
641 * transfer than requested). Compute resid and
642 * just go to status, the command should
643 * terminate.
644 */
645 INCSTAT(esiop_stat_intr_shortxfer);
646 if (scratchc0 & A_f_c_data)
647 siop_ma(&esiop_cmd->cmd_c);
648 else if ((dstat & DSTAT_DFE) == 0)
649 siop_clearfifo(&sc->sc_c);
650 CALL_SCRIPT(Ent_status);
651 return 1;
652 case SSTAT1_PHASE_MSGIN:
653 /*
654 * target may be ready to disconnect
655 * Compute resid which would be used later
656 * if a save data pointer is needed.
657 */
658 INCSTAT(esiop_stat_intr_xferdisc);
659 if (scratchc0 & A_f_c_data)
660 siop_ma(&esiop_cmd->cmd_c);
661 else if ((dstat & DSTAT_DFE) == 0)
662 siop_clearfifo(&sc->sc_c);
663 bus_space_write_1(sc->sc_c.sc_rt,
664 sc->sc_c.sc_rh, SIOP_SCRATCHC,
665 scratchc0 & ~A_f_c_data);
666 CALL_SCRIPT(Ent_msgin);
667 return 1;
668 }
669 aprint_error_dev(&sc->sc_c.sc_dev, "unexpected phase mismatch %d\n",
670 sstat1 & SSTAT1_PHASE_MASK);
671 } else {
672 aprint_error_dev(&sc->sc_c.sc_dev, "phase mismatch without command\n");
673 }
674 need_reset = 1;
675 }
676 if (sist & SIST0_PAR) {
677 /* parity error, reset */
678 if (esiop_cmd)
679 scsipi_printaddr(xs->xs_periph);
680 else
681 printf("%s:", device_xname(&sc->sc_c.sc_dev));
682 printf("parity error\n");
683 if (esiop_target)
684 esiop_target->target_c.flags &= ~TARF_DT;
685 goto reset;
686 }
687 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
688 /*
689 * selection time out, assume there's no device here
  690 * We also have to update the ring pointer ourselves
691 */
692 slot = bus_space_read_1(sc->sc_c.sc_rt,
693 sc->sc_c.sc_rh, SIOP_SCRATCHE);
694 esiop_script_sync(sc,
695 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
696 #ifdef SIOP_DEBUG_SCHED
697 printf("sel timeout target %d, slot %d\n", target, slot);
698 #endif
699 /*
700 * mark this slot as free, and advance to next slot
701 */
702 esiop_script_write(sc,
703 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
704 A_f_cmd_free);
705 addr = bus_space_read_4(sc->sc_c.sc_rt,
706 sc->sc_c.sc_rh, SIOP_SCRATCHD);
707 if (slot < (A_ncmd_slots - 1)) {
708 bus_space_write_1(sc->sc_c.sc_rt,
709 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
710 addr = addr + sizeof(struct esiop_slot);
711 } else {
712 bus_space_write_1(sc->sc_c.sc_rt,
713 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
714 addr = sc->sc_c.sc_scriptaddr +
715 sc->sc_shedoffset * sizeof(u_int32_t);
716 }
717 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
718 SIOP_SCRATCHD, addr);
719 esiop_script_sync(sc,
720 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
721 if (esiop_cmd) {
722 esiop_cmd->cmd_c.status = CMDST_DONE;
723 xs->error = XS_SELTIMEOUT;
724 freetarget = 1;
725 goto end;
726 } else {
727 printf("%s: selection timeout without "
728 "command, target %d (sdid 0x%x), "
729 "slot %d\n",
730 device_xname(&sc->sc_c.sc_dev), target,
731 bus_space_read_1(sc->sc_c.sc_rt,
732 sc->sc_c.sc_rh, SIOP_SDID), slot);
733 need_reset = 1;
734 }
735 }
736 if (sist & SIST0_UDC) {
737 /*
738 * unexpected disconnect. Usually the target signals
739 * a fatal condition this way. Attempt to get sense.
740 */
741 if (esiop_cmd) {
742 esiop_cmd->cmd_tables->status =
743 htole32(SCSI_CHECK);
744 goto end;
745 }
746 aprint_error_dev(&sc->sc_c.sc_dev, "unexpected disconnect without "
747 "command\n");
748 goto reset;
749 }
750 if (sist & (SIST1_SBMC << 8)) {
751 /* SCSI bus mode change */
752 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
753 goto reset;
754 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
755 /*
756 * we have a script interrupt, it will
757 * restart the script.
758 */
759 goto scintr;
760 }
761 /*
  762 * else we have to restart it ourselves, at the
763 * interrupted instruction.
764 */
765 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
766 SIOP_DSP,
767 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
768 SIOP_DSP) - 8);
769 return 1;
770 }
771 /* Else it's an unhandled exception (for now). */
772 aprint_error_dev(&sc->sc_c.sc_dev, "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
773 "DSA=0x%x DSP=0x%x\n", sist,
774 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
775 SIOP_SSTAT1),
776 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
777 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
778 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
779 if (esiop_cmd) {
780 esiop_cmd->cmd_c.status = CMDST_DONE;
781 xs->error = XS_SELTIMEOUT;
782 goto end;
783 }
784 need_reset = 1;
785 }
786 if (need_reset) {
787 reset:
788 /* fatal error, reset the bus */
789 siop_resetbus(&sc->sc_c);
790 /* no table to flush here */
791 return 1;
792 }
793
794 scintr:
795 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
796 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
797 SIOP_DSPS);
798 #ifdef SIOP_DEBUG_INTR
799 printf("script interrupt 0x%x\n", irqcode);
800 #endif
801 /*
  802 * no command, or an inactive command, is only valid for a
  803 * reselect interrupt
804 */
805 if ((irqcode & 0x80) == 0) {
806 if (esiop_cmd == NULL) {
807 aprint_error_dev(&sc->sc_c.sc_dev,
808 "script interrupt (0x%x) with invalid DSA !!!\n",
809 irqcode);
810 goto reset;
811 }
812 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
813 aprint_error_dev(&sc->sc_c.sc_dev, "command with invalid status "
814 "(IRQ code 0x%x current status %d) !\n",
815 irqcode, esiop_cmd->cmd_c.status);
816 xs = NULL;
817 }
818 }
819 switch(irqcode) {
820 case A_int_err:
821 printf("error, DSP=0x%x\n",
822 (int)(bus_space_read_4(sc->sc_c.sc_rt,
823 sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
824 if (xs) {
825 xs->error = XS_SELTIMEOUT;
826 goto end;
827 } else {
828 goto reset;
829 }
830 case A_int_msgin:
831 {
832 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
833 sc->sc_c.sc_rh, SIOP_SFBR);
834 if (msgin == MSG_MESSAGE_REJECT) {
835 int msg, extmsg;
836 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
837 /*
  838 * message was part of an identify +
839 * something else. Identify shouldn't
840 * have been rejected.
841 */
842 msg =
843 esiop_cmd->cmd_tables->msg_out[1];
844 extmsg =
845 esiop_cmd->cmd_tables->msg_out[3];
846 } else {
847 msg =
848 esiop_cmd->cmd_tables->msg_out[0];
849 extmsg =
850 esiop_cmd->cmd_tables->msg_out[2];
851 }
852 if (msg == MSG_MESSAGE_REJECT) {
853 /* MSG_REJECT for a MSG_REJECT !*/
854 if (xs)
855 scsipi_printaddr(xs->xs_periph);
856 else
857 printf("%s: ",
858 device_xname(&sc->sc_c.sc_dev));
859 printf("our reject message was "
860 "rejected\n");
861 goto reset;
862 }
863 if (msg == MSG_EXTENDED &&
864 extmsg == MSG_EXT_WDTR) {
865 /* WDTR rejected, initiate sync */
866 if ((esiop_target->target_c.flags &
867 TARF_SYNC) == 0) {
868 esiop_target->target_c.status =
869 TARST_OK;
870 siop_update_xfer_mode(&sc->sc_c,
871 target);
872 /* no table to flush here */
873 CALL_SCRIPT(Ent_msgin_ack);
874 return 1;
875 }
876 esiop_target->target_c.status =
877 TARST_SYNC_NEG;
878 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
879 sc->sc_c.st_minsync,
880 sc->sc_c.maxoff);
881 esiop_table_sync(esiop_cmd,
882 BUS_DMASYNC_PREREAD |
883 BUS_DMASYNC_PREWRITE);
884 CALL_SCRIPT(Ent_send_msgout);
885 return 1;
886 } else if (msg == MSG_EXTENDED &&
887 extmsg == MSG_EXT_SDTR) {
888 /* sync rejected */
889 esiop_target->target_c.offset = 0;
890 esiop_target->target_c.period = 0;
891 esiop_target->target_c.status =
892 TARST_OK;
893 siop_update_xfer_mode(&sc->sc_c,
894 target);
895 /* no table to flush here */
896 CALL_SCRIPT(Ent_msgin_ack);
897 return 1;
898 } else if (msg == MSG_EXTENDED &&
899 extmsg == MSG_EXT_PPR) {
900 /* PPR rejected */
901 esiop_target->target_c.offset = 0;
902 esiop_target->target_c.period = 0;
903 esiop_target->target_c.status =
904 TARST_OK;
905 siop_update_xfer_mode(&sc->sc_c,
906 target);
907 /* no table to flush here */
908 CALL_SCRIPT(Ent_msgin_ack);
909 return 1;
910 } else if (msg == MSG_SIMPLE_Q_TAG ||
911 msg == MSG_HEAD_OF_Q_TAG ||
912 msg == MSG_ORDERED_Q_TAG) {
913 if (esiop_handle_qtag_reject(
914 esiop_cmd) == -1)
915 goto reset;
916 CALL_SCRIPT(Ent_msgin_ack);
917 return 1;
918 }
919 if (xs)
920 scsipi_printaddr(xs->xs_periph);
921 else
922 printf("%s: ",
923 device_xname(&sc->sc_c.sc_dev));
924 if (msg == MSG_EXTENDED) {
925 printf("scsi message reject, extended "
926 "message sent was 0x%x\n", extmsg);
927 } else {
928 printf("scsi message reject, message "
929 "sent was 0x%x\n", msg);
930 }
931 /* no table to flush here */
932 CALL_SCRIPT(Ent_msgin_ack);
933 return 1;
934 }
935 if (msgin == MSG_IGN_WIDE_RESIDUE) {
936 /* use the extmsgdata table to get the second byte */
937 esiop_cmd->cmd_tables->t_extmsgdata.count =
938 htole32(1);
939 esiop_table_sync(esiop_cmd,
940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
941 CALL_SCRIPT(Ent_get_extmsgdata);
942 return 1;
943 }
944 if (xs)
945 scsipi_printaddr(xs->xs_periph);
946 else
947 printf("%s: ", device_xname(&sc->sc_c.sc_dev));
948 printf("unhandled message 0x%x\n", msgin);
949 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
950 esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
951 esiop_table_sync(esiop_cmd,
952 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
953 CALL_SCRIPT(Ent_send_msgout);
954 return 1;
955 }
956 case A_int_extmsgin:
957 #ifdef SIOP_DEBUG_INTR
958 printf("extended message: msg 0x%x len %d\n",
959 esiop_cmd->cmd_tables->msg_in[2],
960 esiop_cmd->cmd_tables->msg_in[1]);
961 #endif
962 if (esiop_cmd->cmd_tables->msg_in[1] >
963 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
964 aprint_error_dev(&sc->sc_c.sc_dev, "extended message too big (%d)\n",
965 esiop_cmd->cmd_tables->msg_in[1]);
966 esiop_cmd->cmd_tables->t_extmsgdata.count =
967 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
968 esiop_table_sync(esiop_cmd,
969 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
970 CALL_SCRIPT(Ent_get_extmsgdata);
971 return 1;
972 case A_int_extmsgdata:
973 #ifdef SIOP_DEBUG_INTR
974 {
975 int i;
976 printf("extended message: 0x%x, data:",
977 esiop_cmd->cmd_tables->msg_in[2]);
978 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
979 i++)
980 printf(" 0x%x",
981 esiop_cmd->cmd_tables->msg_in[i]);
982 printf("\n");
983 }
984 #endif
985 if (esiop_cmd->cmd_tables->msg_in[0] ==
986 MSG_IGN_WIDE_RESIDUE) {
987 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
988 if (esiop_cmd->cmd_tables->msg_in[3] != 1)
989 printf("MSG_IGN_WIDE_RESIDUE: "
990 "bad len %d\n",
991 esiop_cmd->cmd_tables->msg_in[3]);
992 switch (siop_iwr(&esiop_cmd->cmd_c)) {
993 case SIOP_NEG_MSGOUT:
994 esiop_table_sync(esiop_cmd,
995 BUS_DMASYNC_PREREAD |
996 BUS_DMASYNC_PREWRITE);
997 CALL_SCRIPT(Ent_send_msgout);
998 return 1;
999 case SIOP_NEG_ACK:
1000 CALL_SCRIPT(Ent_msgin_ack);
1001 return 1;
1002 default:
1003 panic("invalid retval from "
1004 "siop_iwr()");
1005 }
1006 return 1;
1007 }
1008 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
1009 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
1010 case SIOP_NEG_MSGOUT:
1011 esiop_update_scntl3(sc,
1012 esiop_cmd->cmd_c.siop_target);
1013 esiop_table_sync(esiop_cmd,
1014 BUS_DMASYNC_PREREAD |
1015 BUS_DMASYNC_PREWRITE);
1016 CALL_SCRIPT(Ent_send_msgout);
1017 return 1;
1018 case SIOP_NEG_ACK:
1019 esiop_update_scntl3(sc,
1020 esiop_cmd->cmd_c.siop_target);
1021 CALL_SCRIPT(Ent_msgin_ack);
1022 return 1;
1023 default:
1024 panic("invalid retval from "
1025 "siop_wdtr_neg()");
1026 }
1027 return 1;
1028 }
1029 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1030 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1031 case SIOP_NEG_MSGOUT:
1032 esiop_update_scntl3(sc,
1033 esiop_cmd->cmd_c.siop_target);
1034 esiop_table_sync(esiop_cmd,
1035 BUS_DMASYNC_PREREAD |
1036 BUS_DMASYNC_PREWRITE);
1037 CALL_SCRIPT(Ent_send_msgout);
1038 return 1;
1039 case SIOP_NEG_ACK:
1040 esiop_update_scntl3(sc,
1041 esiop_cmd->cmd_c.siop_target);
1042 CALL_SCRIPT(Ent_msgin_ack);
1043 return 1;
1044 default:
1045 panic("invalid retval from "
1046 "siop_wdtr_neg()");
1047 }
1048 return 1;
1049 }
1050 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1051 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1052 case SIOP_NEG_MSGOUT:
1053 esiop_update_scntl3(sc,
1054 esiop_cmd->cmd_c.siop_target);
1055 esiop_table_sync(esiop_cmd,
1056 BUS_DMASYNC_PREREAD |
1057 BUS_DMASYNC_PREWRITE);
1058 CALL_SCRIPT(Ent_send_msgout);
1059 return 1;
1060 case SIOP_NEG_ACK:
1061 esiop_update_scntl3(sc,
1062 esiop_cmd->cmd_c.siop_target);
1063 CALL_SCRIPT(Ent_msgin_ack);
1064 return 1;
1065 default:
1066 panic("invalid retval from "
1067 "siop_wdtr_neg()");
1068 }
1069 return 1;
1070 }
1071 /* send a message reject */
1072 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1073 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1074 esiop_table_sync(esiop_cmd,
1075 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1076 CALL_SCRIPT(Ent_send_msgout);
1077 return 1;
1078 case A_int_disc:
1079 INCSTAT(esiop_stat_intr_sdp);
1080 offset = bus_space_read_1(sc->sc_c.sc_rt,
1081 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1082 #ifdef SIOP_DEBUG_DR
1083 printf("disconnect offset %d\n", offset);
1084 #endif
1085 siop_sdp(&esiop_cmd->cmd_c, offset);
1086 /* we start again with no offset */
1087 ESIOP_XFER(esiop_cmd, saved_offset) =
1088 htole32(SIOP_NOOFFSET);
1089 esiop_table_sync(esiop_cmd,
1090 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1091 CALL_SCRIPT(Ent_script_sched);
1092 return 1;
1093 case A_int_resfail:
1094 printf("reselect failed\n");
1095 CALL_SCRIPT(Ent_script_sched);
1096 return 1;
1097 case A_int_done:
1098 if (xs == NULL) {
1099 printf("%s: done without command\n",
1100 device_xname(&sc->sc_c.sc_dev));
1101 CALL_SCRIPT(Ent_script_sched);
1102 return 1;
1103 }
1104 #ifdef SIOP_DEBUG_INTR
1105 printf("done, DSA=0x%lx target id 0x%x last msg "
1106 "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
1107 le32toh(esiop_cmd->cmd_tables->id),
1108 esiop_cmd->cmd_tables->msg_in[0],
1109 le32toh(esiop_cmd->cmd_tables->status));
1110 #endif
1111 INCSTAT(esiop_stat_intr_done);
1112 esiop_cmd->cmd_c.status = CMDST_DONE;
1113 goto end;
1114 default:
1115 printf("unknown irqcode %x\n", irqcode);
1116 if (xs) {
1117 xs->error = XS_SELTIMEOUT;
1118 goto end;
1119 }
1120 goto reset;
1121 }
1122 return 1;
1123 }
 1124 /* We should never get here */
 1125 panic("esiop_intr: I shouldn't be here!");
1126
1127 end:
1128 /*
1129 * restart the script now if command completed properly
 1130 * Otherwise wait for esiop_scsicmd_end(), we may need to clean up the
1131 * queue
1132 */
1133 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1134 #ifdef SIOP_DEBUG_INTR
1135 printf("esiop_intr end: status %d\n", xs->status);
1136 #endif
1137 if (tag >= 0)
1138 esiop_lun->tactive[tag] = NULL;
1139 else
1140 esiop_lun->active = NULL;
1141 offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1142 SIOP_SCRATCHA + 1);
1143 /*
1144 * if we got a disconnect between the last data phase
1145 * and the status phase, offset will be 0. In this
1146 * case, cmd_tables->saved_offset will have the proper value
1147 * if it got updated by the controller
1148 */
1149 if (offset == 0 &&
1150 ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1151 offset =
1152 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1153
1154 esiop_scsicmd_end(esiop_cmd, offset);
1155 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1156 esiop_del_dev(sc, target, lun);
1157 CALL_SCRIPT(Ent_script_sched);
1158 return 1;
1159 }
1160
1161 void
1162 esiop_scsicmd_end(esiop_cmd, offset)
1163 struct esiop_cmd *esiop_cmd;
1164 int offset;
1165 {
1166 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1167 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1168
1169 siop_update_resid(&esiop_cmd->cmd_c, offset);
1170
1171 switch(xs->status) {
1172 case SCSI_OK:
1173 xs->error = XS_NOERROR;
1174 break;
1175 case SCSI_BUSY:
1176 xs->error = XS_BUSY;
1177 break;
1178 case SCSI_CHECK:
1179 xs->error = XS_BUSY;
1180 /* remove commands in the queue and scheduler */
1181 esiop_unqueue(sc, xs->xs_periph->periph_target,
1182 xs->xs_periph->periph_lun);
1183 break;
1184 case SCSI_QUEUE_FULL:
1185 INCSTAT(esiop_stat_intr_qfull);
1186 #ifdef SIOP_DEBUG
1187 printf("%s:%d:%d: queue full (tag %d)\n",
1188 device_xname(&sc->sc_c.sc_dev),
1189 xs->xs_periph->periph_target,
1190 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1191 #endif
1192 xs->error = XS_BUSY;
1193 break;
1194 case SCSI_SIOP_NOCHECK:
1195 /*
1196 * don't check status, xs->error is already valid
1197 */
1198 break;
1199 case SCSI_SIOP_NOSTATUS:
1200 /*
1201 * the status byte was not updated, cmd was
1202 * aborted
1203 */
1204 xs->error = XS_SELTIMEOUT;
1205 break;
1206 default:
1207 scsipi_printaddr(xs->xs_periph);
1208 printf("invalid status code %d\n", xs->status);
1209 xs->error = XS_DRIVER_STUFFUP;
1210 }
1211 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1212 bus_dmamap_sync(sc->sc_c.sc_dmat,
1213 esiop_cmd->cmd_c.dmamap_data, 0,
1214 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1215 (xs->xs_control & XS_CTL_DATA_IN) ?
1216 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1217 bus_dmamap_unload(sc->sc_c.sc_dmat,
1218 esiop_cmd->cmd_c.dmamap_data);
1219 }
1220 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1221 if ((xs->xs_control & XS_CTL_POLL) == 0)
1222 callout_stop(&xs->xs_callout);
1223 esiop_cmd->cmd_c.status = CMDST_FREE;
1224 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1225 #if 0
1226 if (xs->resid != 0)
1227 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1228 #endif
1229 scsipi_done (xs);
1230 }
1231
1232 void
1233 esiop_checkdone(sc)
1234 struct esiop_softc *sc;
1235 {
1236 int target, lun, tag;
1237 struct esiop_target *esiop_target;
1238 struct esiop_lun *esiop_lun;
1239 struct esiop_cmd *esiop_cmd;
1240 u_int32_t slot;
1241 int needsync = 0;
1242 int status;
1243 u_int32_t sem, offset;
1244
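/*
 * The semaphore word is shared with the SCRIPTS program: the script
 * sets A_sem_done when it has appended entries to the done ring, and
 * A_sem_start when it has started a command from the scheduler ring
 * (see esiop_start(), which clears A_sem_start when it freezes the
 * channel). We clear A_sem_done before scanning, presumably so that
 * entries appended while we scan set it again and get picked up on
 * the next call.
 */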
1245 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1246 sem = esiop_script_read(sc, sc->sc_semoffset);
1247 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1248 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1249 /*
 1250 * at least one command has been started,
 1251 * so we should have free slots now
1252 */
1253 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1254 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1255 }
1256 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1257
1258 if ((sem & A_sem_done) == 0) {
1259 /* no pending done command */
1260 return;
1261 }
1262
1263 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1264 sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
1265 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1266 next:
1267 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1268 if (needsync)
1269 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1270 sc->sc_done_offset,
1271 A_ndone_slots * sizeof(u_int32_t),
1272 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1273 return;
1274 }
1275
1276 needsync = 1;
1277
1278 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1279 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1280 sc->sc_currdoneslot += 1;
1281 if (sc->sc_currdoneslot == A_ndone_slots)
1282 sc->sc_currdoneslot = 0;
1283
1284 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1285 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1286 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1287
1288 esiop_target = (target >= 0) ?
1289 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1290 if (esiop_target == NULL) {
1291 printf("esiop_target (target %d) not valid\n", target);
1292 goto next;
1293 }
1294 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1295 if (esiop_lun == NULL) {
1296 printf("esiop_lun (target %d lun %d) not valid\n",
1297 target, lun);
1298 goto next;
1299 }
1300 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1301 if (esiop_cmd == NULL) {
1302 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1303 target, lun, tag);
1304 goto next;
1305 }
1306
1307 esiop_table_sync(esiop_cmd,
1308 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1309 status = le32toh(esiop_cmd->cmd_tables->status);
1310 #ifdef DIAGNOSTIC
1311 if (status != SCSI_OK) {
1312 printf("command for T/L/Q %d/%d/%d status %d\n",
1313 target, lun, tag, status);
1314 goto next;
1315 }
1316
1317 #endif
1318 /* Ok, this command has been handled */
1319 esiop_cmd->cmd_c.xs->status = status;
1320 if (tag >= 0)
1321 esiop_lun->tactive[tag] = NULL;
1322 else
1323 esiop_lun->active = NULL;
1324 /*
 1325 * scratcha may have been saved in saved_offset by the script;
 1326 * fetch the offset from it
1327 */
1328 offset = 0;
1329 if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1330 offset =
1331 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1332 esiop_scsicmd_end(esiop_cmd, offset);
1333 goto next;
1334 }
1335
1336 void
1337 esiop_unqueue(sc, target, lun)
1338 struct esiop_softc *sc;
1339 int target;
1340 int lun;
1341 {
1342 int slot, tag;
1343 u_int32_t slotdsa;
1344 struct esiop_cmd *esiop_cmd;
1345 struct esiop_lun *esiop_lun =
1346 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1347
1348 /* first make sure to read valid data */
1349 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1350
1351 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1352 /* look for commands in the scheduler, not yet started */
1353 if (esiop_lun->tactive[tag] == NULL)
1354 continue;
1355 esiop_cmd = esiop_lun->tactive[tag];
1356 for (slot = 0; slot < A_ncmd_slots; slot++) {
1357 slotdsa = esiop_script_read(sc,
1358 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1359 /* if the slot has any flag, it won't match the DSA */
1360 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1361 /* Mark this slot as ignore */
1362 esiop_script_write(sc,
1363 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1364 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1365 /* ask to requeue */
1366 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1367 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1368 esiop_lun->tactive[tag] = NULL;
1369 esiop_scsicmd_end(esiop_cmd, 0);
1370 break;
1371 }
1372 }
1373 }
1374 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1375 }
1376
1377 /*
 1378 * handle a rejected queue tag message: the command will run untagged,
 1379 * so we have to adjust the reselect script.
1380 */
1381
1382
1383 int
1384 esiop_handle_qtag_reject(esiop_cmd)
1385 struct esiop_cmd *esiop_cmd;
1386 {
1387 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1388 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1389 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1390 int tag = esiop_cmd->cmd_tables->msg_out[2];
1391 struct esiop_target *esiop_target =
1392 (struct esiop_target*)sc->sc_c.targets[target];
1393 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1394
1395 #ifdef SIOP_DEBUG
1396 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1397 device_xname(&sc->sc_c.sc_dev), target, lun, tag, esiop_cmd->cmd_c.tag,
1398 esiop_cmd->cmd_c.status);
1399 #endif
1400
1401 if (esiop_lun->active != NULL) {
1402 aprint_error_dev(&sc->sc_c.sc_dev, "untagged command already running for target %d "
1403 "lun %d (status %d)\n",
1404 target, lun, esiop_lun->active->cmd_c.status);
1405 return -1;
1406 }
1407 /* clear tag slot */
1408 esiop_lun->tactive[tag] = NULL;
1409 /* add command to non-tagged slot */
1410 esiop_lun->active = esiop_cmd;
1411 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1412 esiop_cmd->cmd_c.tag = -1;
1413 /* update DSA table */
1414 esiop_script_write(sc, esiop_target->lun_table_offset +
1415 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1416 esiop_cmd->cmd_c.dsa);
1417 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1418 return 0;
1419 }
1420
1421 /*
1422 * handle a bus reset: reset chip, unqueue all active commands, free all
1423 * target struct and report lossage to upper layer.
 1424 * As the upper layer may requeue immediately we have to first store
1425 * all active commands in a temporary queue.
1426 */
1427 void
1428 esiop_handle_reset(sc)
1429 struct esiop_softc *sc;
1430 {
1431 struct esiop_cmd *esiop_cmd;
1432 struct esiop_lun *esiop_lun;
1433 int target, lun, tag;
1434 /*
1435 * scsi bus reset. reset the chip and restart
1436 * the queue. Need to clean up all active commands
1437 */
1438 printf("%s: scsi bus reset\n", device_xname(&sc->sc_c.sc_dev));
1439 /* stop, reset and restart the chip */
1440 esiop_reset(sc);
1441
1442 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1443 /* chip has been reset, all slots are free now */
1444 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1445 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1446 }
1447 /*
 1448 * Process all commands: first the completed commands, then the
 1449 * commands still being executed
1450 */
1451 esiop_checkdone(sc);
1452 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1453 target++) {
1454 struct esiop_target *esiop_target =
1455 (struct esiop_target *)sc->sc_c.targets[target];
1456 if (esiop_target == NULL)
1457 continue;
1458 for (lun = 0; lun < 8; lun++) {
1459 esiop_lun = esiop_target->esiop_lun[lun];
1460 if (esiop_lun == NULL)
1461 continue;
1462 for (tag = -1; tag <
1463 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1464 ESIOP_NTAG : 0);
1465 tag++) {
1466 if (tag >= 0)
1467 esiop_cmd = esiop_lun->tactive[tag];
1468 else
1469 esiop_cmd = esiop_lun->active;
1470 if (esiop_cmd == NULL)
1471 continue;
1472 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1473 printf("command with tag id %d reset\n", tag);
1474 esiop_cmd->cmd_c.xs->error =
1475 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1476 XS_TIMEOUT : XS_RESET;
1477 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1478 if (tag >= 0)
1479 esiop_lun->tactive[tag] = NULL;
1480 else
1481 esiop_lun->active = NULL;
1482 esiop_cmd->cmd_c.status = CMDST_DONE;
1483 esiop_scsicmd_end(esiop_cmd, 0);
1484 }
1485 }
1486 sc->sc_c.targets[target]->status = TARST_ASYNC;
1487 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1488 sc->sc_c.targets[target]->period =
1489 sc->sc_c.targets[target]->offset = 0;
1490 siop_update_xfer_mode(&sc->sc_c, target);
1491 }
1492
1493 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1494 }
1495
1496 void
1497 esiop_scsipi_request(chan, req, arg)
1498 struct scsipi_channel *chan;
1499 scsipi_adapter_req_t req;
1500 void *arg;
1501 {
1502 struct scsipi_xfer *xs;
1503 struct scsipi_periph *periph;
1504 struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1505 struct esiop_cmd *esiop_cmd;
1506 struct esiop_target *esiop_target;
1507 int s, error, i;
1508 int target;
1509 int lun;
1510
1511 switch (req) {
1512 case ADAPTER_REQ_RUN_XFER:
1513 xs = arg;
1514 periph = xs->xs_periph;
1515 target = periph->periph_target;
1516 lun = periph->periph_lun;
1517
1518 s = splbio();
1519 /*
 1520 * first check if there are pending completed commands.
 1521 * This can free us some resources (in the rings for example).
 1522 * We have to lock it to avoid recursion.
1523 */
1524 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1525 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1526 esiop_checkdone(sc);
1527 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1528 }
1529 #ifdef SIOP_DEBUG_SCHED
1530 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1531 xs->xs_tag_type, xs->xs_tag_id);
1532 #endif
1533 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1534 if (esiop_cmd == NULL) {
1535 xs->error = XS_RESOURCE_SHORTAGE;
1536 scsipi_done(xs);
1537 splx(s);
1538 return;
1539 }
1540 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1541 #ifdef DIAGNOSTIC
1542 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1543 panic("siop_scsicmd: new cmd not free");
1544 #endif
1545 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1546 if (esiop_target == NULL) {
1547 #ifdef SIOP_DEBUG
1548 printf("%s: alloc siop_target for target %d\n",
1549 device_xname(&sc->sc_c.sc_dev), target);
1550 #endif
1551 sc->sc_c.targets[target] =
1552 malloc(sizeof(struct esiop_target),
1553 M_DEVBUF, M_NOWAIT | M_ZERO);
1554 if (sc->sc_c.targets[target] == NULL) {
1555 aprint_error_dev(&sc->sc_c.sc_dev, "can't malloc memory for "
1556 "target %d\n", target);
1557 xs->error = XS_RESOURCE_SHORTAGE;
1558 scsipi_done(xs);
1559 splx(s);
1560 return;
1561 }
1562 esiop_target =
1563 (struct esiop_target*)sc->sc_c.targets[target];
1564 esiop_target->target_c.status = TARST_PROBING;
1565 esiop_target->target_c.flags = 0;
1566 esiop_target->target_c.id =
1567 sc->sc_c.clock_div << 24; /* scntl3 */
1568 esiop_target->target_c.id |= target << 16; /* id */
1569 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1570
1571 for (i=0; i < 8; i++)
1572 esiop_target->esiop_lun[i] = NULL;
1573 esiop_target_register(sc, target);
1574 }
1575 if (esiop_target->esiop_lun[lun] == NULL) {
1576 esiop_target->esiop_lun[lun] =
1577 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1578 M_NOWAIT|M_ZERO);
1579 if (esiop_target->esiop_lun[lun] == NULL) {
1580 aprint_error_dev(&sc->sc_c.sc_dev, "can't alloc esiop_lun for "
1581 "target %d lun %d\n",
1582 target, lun);
1583 xs->error = XS_RESOURCE_SHORTAGE;
1584 scsipi_done(xs);
1585 splx(s);
1586 return;
1587 }
1588 }
1589 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1590 esiop_cmd->cmd_c.xs = xs;
1591 esiop_cmd->cmd_c.flags = 0;
1592 esiop_cmd->cmd_c.status = CMDST_READY;
1593
1594 /* load the DMA maps */
1595 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1596 esiop_cmd->cmd_c.dmamap_cmd,
1597 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1598 if (error) {
1599 aprint_error_dev(&sc->sc_c.sc_dev, "unable to load cmd DMA map: %d\n",
1600 error);
1601 xs->error = XS_DRIVER_STUFFUP;
1602 scsipi_done(xs);
1603 splx(s);
1604 return;
1605 }
1606 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1607 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1608 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1609 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1610 ((xs->xs_control & XS_CTL_DATA_IN) ?
1611 BUS_DMA_READ : BUS_DMA_WRITE));
1612 if (error) {
1613 aprint_error_dev(&sc->sc_c.sc_dev, "unable to load cmd DMA map: %d",
1614 error);
1615 xs->error = XS_DRIVER_STUFFUP;
1616 scsipi_done(xs);
1617 bus_dmamap_unload(sc->sc_c.sc_dmat,
1618 esiop_cmd->cmd_c.dmamap_cmd);
1619 splx(s);
1620 return;
1621 }
1622 bus_dmamap_sync(sc->sc_c.sc_dmat,
1623 esiop_cmd->cmd_c.dmamap_data, 0,
1624 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1625 (xs->xs_control & XS_CTL_DATA_IN) ?
1626 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1627 }
1628 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1629 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1630 BUS_DMASYNC_PREWRITE);
1631
1632 if (xs->xs_tag_type)
1633 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1634 else
1635 esiop_cmd->cmd_c.tag = -1;
1636 siop_setuptables(&esiop_cmd->cmd_c);
1637 ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
1638 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1639 ESIOP_XFER(esiop_cmd, tlq) |=
1640 htole32((target << 8) | (lun << 16));
1641 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1642 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1643 ESIOP_XFER(esiop_cmd, tlq) |=
1644 htole32(esiop_cmd->cmd_c.tag << 24);
1645 }
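/*
 * tlq now uses the same layout as the SCRATCHC word decoded in
 * esiop_intr() (flags in byte 0, target/lun/tag in bytes 1-3);
 * presumably the script copies it into SCRATCHC so the command can
 * be identified again at interrupt time.
 */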
1646
1647 esiop_table_sync(esiop_cmd,
1648 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1649 esiop_start(sc, esiop_cmd);
1650 if (xs->xs_control & XS_CTL_POLL) {
1651 /* poll for command completion */
1652 while ((xs->xs_status & XS_STS_DONE) == 0) {
1653 delay(1000);
1654 esiop_intr(sc);
1655 }
1656 }
1657 splx(s);
1658 return;
1659
1660 case ADAPTER_REQ_GROW_RESOURCES:
1661 #ifdef SIOP_DEBUG
1662 printf("%s grow resources (%d)\n", device_xname(&sc->sc_c.sc_dev),
1663 sc->sc_c.sc_adapt.adapt_openings);
1664 #endif
1665 esiop_morecbd(sc);
1666 return;
1667
1668 case ADAPTER_REQ_SET_XFER_MODE:
1669 {
1670 struct scsipi_xfer_mode *xm = arg;
1671 if (sc->sc_c.targets[xm->xm_target] == NULL)
1672 return;
1673 s = splbio();
1674 if (xm->xm_mode & PERIPH_CAP_TQING) {
1675 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1676 /* allocate tag tables for this device */
1677 for (lun = 0;
1678 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1679 if (scsipi_lookup_periph(chan,
1680 xm->xm_target, lun) != NULL)
1681 esiop_add_dev(sc, xm->xm_target, lun);
1682 }
1683 }
1684 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1685 (sc->sc_c.features & SF_BUS_WIDE))
1686 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1687 if (xm->xm_mode & PERIPH_CAP_SYNC)
1688 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1689 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1690 (sc->sc_c.features & SF_CHIP_DT))
1691 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1692 if ((xm->xm_mode &
1693 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1694 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1695 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1696
1697 splx(s);
1698 }
1699 }
1700 }
1701
1702 static void
1703 esiop_start(sc, esiop_cmd)
1704 struct esiop_softc *sc;
1705 struct esiop_cmd *esiop_cmd;
1706 {
1707 struct esiop_lun *esiop_lun;
1708 struct esiop_target *esiop_target;
1709 int timeout;
1710 int target, lun, slot;
1711
1712 /*
1713 * first make sure to read valid data
1714 */
1715 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1716
1717 /*
1718 * We use a circular queue here. sc->sc_currschedslot points to a
1719 * free slot, unless we have filled the queue. Check this.
1720 */
1721 slot = sc->sc_currschedslot;
1722 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1723 A_f_cmd_free) == 0) {
1724 /*
1725 * no more free slot, no need to continue. freeze the queue
1726 * and requeue this command.
1727 */
1728 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1729 sc->sc_flags |= SCF_CHAN_NOSLOT;
1730 esiop_script_write(sc, sc->sc_semoffset,
1731 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1732 esiop_script_sync(sc,
1733 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1734 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1735 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1736 esiop_scsicmd_end(esiop_cmd, 0);
1737 return;
1738 }
1739 /* OK, we can use this slot */
1740
1741 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1742 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1743 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1744 esiop_lun = esiop_target->esiop_lun[lun];
1745 /* if non-tagged command active, panic: this shouldn't happen */
1746 if (esiop_lun->active != NULL) {
1747 panic("esiop_start: tagged cmd while untagged running");
1748 }
1749 #ifdef DIAGNOSTIC
1750 /* sanity check the tag if needed */
1751 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1752 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1753 esiop_cmd->cmd_c.tag < 0) {
1754 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1755 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1756 panic("esiop_start: invalid tag id");
1757 }
1758 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1759 panic("esiop_start: tag not free");
1760 }
1761 #endif
1762 #ifdef SIOP_DEBUG_SCHED
1763 printf("using slot %d for DSA 0x%lx\n", slot,
1764 (u_long)esiop_cmd->cmd_c.dsa);
1765 #endif
1766 /* mark command as active */
1767 if (esiop_cmd->cmd_c.status == CMDST_READY)
1768 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1769 else
1770 panic("esiop_start: bad status");
 1771 /* make the command visible for reselection via the DSA tables */
1772 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1773 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1774 /* DSA table for reselect */
1775 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1776 htole32(esiop_cmd->cmd_c.dsa);
1777 bus_dmamap_sync(sc->sc_c.sc_dmat,
1778 esiop_lun->lun_tagtbl->tblblk->blkmap,
1779 esiop_lun->lun_tagtbl->tbl_offset,
1780 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1781 } else {
1782 esiop_lun->active = esiop_cmd;
1783 esiop_script_write(sc,
1784 esiop_target->lun_table_offset +
1785 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1786 esiop_cmd->cmd_c.dsa);
1787 }
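	/*
	 * A tagged command publishes its DSA in the per-LUN tag table,
	 * indexed by tag number, which is what the reselect code looks up
	 * once the tag message has been received.  An untagged command is
	 * the only one outstanding on the LUN, so its DSA goes straight
	 * into this target's LUN table entry.
	 */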
1788 /* scheduler slot: DSA */
1789 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1790 esiop_cmd->cmd_c.dsa);
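	/*
	 * The DSA is at least 32-bit aligned, so storing it here also clears
	 * the slot's low bit (A_f_cmd_free, the same bit the free-slot test
	 * above and the debug dump in esiop_timeout() look at), marking the
	 * slot busy in the same write.
	 */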
1791 /* make sure SCRIPT processor will read valid data */
1792 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1793 /* handle timeout */
1794 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1795		/* start expire timer */
1796 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1797 if (timeout == 0)
1798 timeout = 1;
1799 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1800 timeout, esiop_timeout, esiop_cmd);
1801 }
1802 /* Signal script it has some work to do */
1803 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1804 SIOP_ISTAT, ISTAT_SIGP);
1805 /* update the current slot, and wait for IRQ */
1806 sc->sc_currschedslot++;
1807 if (sc->sc_currschedslot >= A_ncmd_slots)
1808 sc->sc_currschedslot = 0;
1809 return;
1810 }
1811
1812 void
1813 esiop_timeout(v)
1814 void *v;
1815 {
1816 struct esiop_cmd *esiop_cmd = v;
1817 struct esiop_softc *sc =
1818 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1819 int s;
1820 #ifdef SIOP_DEBUG
1821 int slot, slotdsa;
1822 #endif
1823
1824 s = splbio();
1825 esiop_table_sync(esiop_cmd,
1826 BUS_DMASYNC_POSTREAD |
1827 BUS_DMASYNC_POSTWRITE);
1828 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1829 #ifdef SIOP_DEBUG
1830 printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1831
1832 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1833 for (slot = 0; slot < A_ncmd_slots; slot++) {
1834 slotdsa = esiop_script_read(sc,
1835 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1836 if ((slotdsa & 0x01) == 0)
1837 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1838 }
1839 printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1840 printf("DSP 0x%lx DSA 0x%x\n",
1841 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1842 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1843 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1844 printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1845 #else
1846 printf("command timeout, CDB: ");
1847 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1848 printf("\n");
1849 #endif
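	/*
	 * The timed-out command may still own the bus (e.g. hung in a data
	 * phase), so the only safe recovery is a full SCSI bus reset; every
	 * active command will then be terminated from the interrupt handler.
	 */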
1850 /* reset the scsi bus */
1851 siop_resetbus(&sc->sc_c);
1852
1853 /* deactivate callout */
1854 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1855	/*
1856	 * mark the command as timed out and just return;
1857	 * the bus reset will generate an interrupt,
1858	 * which will be handled in esiop_intr()
1859	 */
1860 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1861 splx(s);
1862 return;
1863
1864 }
1865
1866 void
1867 esiop_dump_script(sc)
1868 struct esiop_softc *sc;
1869 {
1870 int i;
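	/*
	 * SCRIPTS instructions are normally two 32-bit words; memory-to-memory
	 * move instructions (top bits 110) carry a third word, which is why
	 * the loop below occasionally prints an extra word and skips ahead.
	 */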
1871 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1872 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1873 le32toh(sc->sc_c.sc_script[i]),
1874 le32toh(sc->sc_c.sc_script[i+1]));
1875 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1876 0xc0000000) {
1877 i++;
1878 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1879 }
1880 printf("\n");
1881 }
1882 }
1883
1884 void
1885 esiop_morecbd(sc)
1886 struct esiop_softc *sc;
1887 {
1888 int error, i, s;
1889 bus_dma_segment_t seg;
1890 int rseg;
1891 struct esiop_cbd *newcbd;
1892 struct esiop_xfer *xfer;
1893 bus_addr_t dsa;
1894
1895 /* allocate a new list head */
1896 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1897 if (newcbd == NULL) {
1898 aprint_error_dev(&sc->sc_c.sc_dev, "can't allocate memory for command descriptors "
1899 "head\n");
1900 return;
1901 }
1902
1903 /* allocate cmd list */
1904 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1905 M_DEVBUF, M_NOWAIT|M_ZERO);
1906 if (newcbd->cmds == NULL) {
1907 aprint_error_dev(&sc->sc_c.sc_dev, "can't allocate memory for command descriptors\n");
1908 goto bad3;
1909 }
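	/*
	 * Get one page of DMA-safe memory for the descriptor block: allocate
	 * the physical segment, map it into kernel virtual space, then create
	 * and load a DMA map so we learn the bus address the chip will use
	 * as the commands' DSA.
	 */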
1910 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1911 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1912 if (error) {
1913 aprint_error_dev(&sc->sc_c.sc_dev, "unable to allocate cbd DMA memory, error = %d\n",
1914 error);
1915 goto bad2;
1916 }
1917 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1918 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1919 if (error) {
1920 aprint_error_dev(&sc->sc_c.sc_dev, "unable to map cbd DMA memory, error = %d\n",
1921 error);
1922		goto bad1;
1923 }
1924 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1925 BUS_DMA_NOWAIT, &newcbd->xferdma);
1926 if (error) {
1927 aprint_error_dev(&sc->sc_c.sc_dev, "unable to create cbd DMA map, error = %d\n", error);
1928 goto bad1;
1929 }
1930 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1931 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1932 if (error) {
1933 aprint_error_dev(&sc->sc_c.sc_dev, "unable to load cbd DMA map, error = %d\n", error);
1934 goto bad0;
1935 }
1936 #ifdef DEBUG
1937	printf("%s: alloc newcbd at PHY addr 0x%lx\n", device_xname(&sc->sc_c.sc_dev),
1938 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1939 #endif
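	/*
	 * The page holds SIOP_NCMDPB struct esiop_xfer.  For each of them,
	 * create a data DMA map (up to SIOP_NSG segments) and a
	 * single-segment map for the CDB, then compute the command's DSA
	 * and seed the table-indirect entries with addresses inside the
	 * descriptor.
	 */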
1940 for (i = 0; i < SIOP_NCMDPB; i++) {
1941 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1942 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1943 &newcbd->cmds[i].cmd_c.dmamap_data);
1944 if (error) {
1945 aprint_error_dev(&sc->sc_c.sc_dev, "unable to create data DMA map for cbd: "
1946 "error %d\n", error);
1947 goto bad0;
1948 }
1949 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1950 sizeof(struct scsipi_generic), 1,
1951 sizeof(struct scsipi_generic), 0,
1952 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1953 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1954 if (error) {
1955			aprint_error_dev(&sc->sc_c.sc_dev, "unable to create cmd DMA map for cbd, error = %d\n", error);
1956 goto bad0;
1957 }
1958 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1959 newcbd->cmds[i].esiop_cbdp = newcbd;
1960 xfer = &newcbd->xfers[i];
1961 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1962 memset(newcbd->cmds[i].cmd_tables, 0,
1963 sizeof(struct esiop_xfer));
1964 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1965 i * sizeof(struct esiop_xfer);
1966 newcbd->cmds[i].cmd_c.dsa = dsa;
1967 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1968 xfer->siop_tables.t_msgout.count= htole32(1);
1969 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1970 xfer->siop_tables.t_msgin.count= htole32(1);
1971 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1972 offsetof(struct siop_common_xfer, msg_in));
1973 xfer->siop_tables.t_extmsgin.count= htole32(2);
1974 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1975 offsetof(struct siop_common_xfer, msg_in) + 1);
1976 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1977 offsetof(struct siop_common_xfer, msg_in) + 3);
1978 xfer->siop_tables.t_status.count= htole32(1);
1979 xfer->siop_tables.t_status.addr = htole32(dsa +
1980 offsetof(struct siop_common_xfer, status));
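		/*
		 * Each table entry is a count/address pair the chip uses for
		 * table-indirect moves into this command's siop_common_xfer:
		 *   msg_in[0]    <- t_msgin      (first message byte)
		 *   msg_in[1..2] <- t_extmsgin   (extended msg length + code)
		 *   msg_in[3..]  <- t_extmsgdata (extended msg payload;
		 *                                 count filled in later)
		 */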
1981
1982 s = splbio();
1983 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1984 splx(s);
1985 #ifdef SIOP_DEBUG
1986 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1987 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1988 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1989 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1990 #endif
1991 }
1992 s = splbio();
1993 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1994 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1995 splx(s);
1996 return;
1997 bad0:
1998 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1999 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
2000 bad1:
2001 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2002 bad2:
2003 free(newcbd->cmds, M_DEVBUF);
2004 bad3:
2005 free(newcbd, M_DEVBUF);
2006 return;
2007 }
2008
2009 void
2010 esiop_moretagtbl(sc)
2011 struct esiop_softc *sc;
2012 {
2013 int error, i, j, s;
2014 bus_dma_segment_t seg;
2015 int rseg;
2016 struct esiop_dsatblblk *newtblblk;
2017 struct esiop_dsatbl *newtbls;
2018 u_int32_t *tbls;
2019
2020 /* allocate a new list head */
2021 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2022 M_DEVBUF, M_NOWAIT|M_ZERO);
2023 if (newtblblk == NULL) {
2024 aprint_error_dev(&sc->sc_c.sc_dev, "can't allocate memory for tag DSA table block\n");
2025 return;
2026 }
2027
2028 /* allocate tbl list */
2029 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2030 M_DEVBUF, M_NOWAIT|M_ZERO);
2031 if (newtbls == NULL) {
2032		aprint_error_dev(&sc->sc_c.sc_dev, "can't allocate memory for tag DSA tables\n");
2033 goto bad3;
2034 }
2035 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2036 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2037 if (error) {
2038 aprint_error_dev(&sc->sc_c.sc_dev, "unable to allocate tbl DMA memory, error = %d\n", error);
2039 goto bad2;
2040 }
2041 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2042 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2043 if (error) {
2044 aprint_error_dev(&sc->sc_c.sc_dev, "unable to map tbls DMA memory, error = %d\n", error);
2045		goto bad1;
2046 }
2047 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2048 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2049 if (error) {
2050 aprint_error_dev(&sc->sc_c.sc_dev, "unable to create tbl DMA map, error = %d\n", error);
2051 goto bad1;
2052 }
2053 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2054 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2055 if (error) {
2056 aprint_error_dev(&sc->sc_c.sc_dev, "unable to load tbl DMA map, error = %d\n", error);
2057 goto bad0;
2058 }
2059 #ifdef DEBUG
2060 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2061 device_xname(&sc->sc_c.sc_dev),
2062 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2063 #endif
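	/*
	 * Carve the page into ESIOP_NTPB tag tables of ESIOP_NTAG 32-bit
	 * DSA slots each.  tbl/tbl_offset are the kernel view, tbl_dsa the
	 * bus address handed to the script.  Entries are initialized to
	 * their tag number, presumably as an easily recognizable placeholder
	 * until a real DSA is stored.
	 */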
2064 for (i = 0; i < ESIOP_NTPB; i++) {
2065 newtbls[i].tblblk = newtblblk;
2066 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2067 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2068 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2069 newtbls[i].tbl_offset;
2070 for (j = 0; j < ESIOP_NTAG; j++)
2071 newtbls[i].tbl[j] = j;
2072 s = splbio();
2073 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2074 splx(s);
2075 }
2076 s = splbio();
2077 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2078 splx(s);
2079 return;
2080 bad0:
2081 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2082 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2083 bad1:
2084 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2085 bad2:
2086 free(newtbls, M_DEVBUF);
2087 bad3:
2088 free(newtblblk, M_DEVBUF);
2089 return;
2090 }
2091
2092 void
2093 esiop_update_scntl3(sc, _siop_target)
2094 struct esiop_softc *sc;
2095 struct siop_common_target *_siop_target;
2096 {
2097 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
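	/*
	 * The first word of the per-target LUN table is the target's id
	 * word which, in the siop drivers, also carries the SCNTL3/SXFER
	 * values programmed at (re)select time -- hence the function name;
	 * renegotiating sync/wide therefore means rewriting that word in
	 * the script.
	 */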
2098 esiop_script_write(sc, esiop_target->lun_table_offset,
2099 esiop_target->target_c.id);
2100 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2101 }
2102
2103 void
2104 esiop_add_dev(sc, target, lun)
2105 struct esiop_softc *sc;
2106 int target;
2107 int lun;
2108 {
2109 struct esiop_target *esiop_target =
2110 (struct esiop_target *)sc->sc_c.targets[target];
2111 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2112
2113 if (esiop_lun->lun_tagtbl != NULL)
2114 return; /* already allocated */
2115
2116 /* we need a tag DSA table */
2117 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2118 if (esiop_lun->lun_tagtbl == NULL) {
2119 esiop_moretagtbl(sc);
2120 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2121 if (esiop_lun->lun_tagtbl == NULL) {
2122 /* no resources, run untagged */
2123 esiop_target->target_c.flags &= ~TARF_TAG;
2124 return;
2125 }
2126 }
2127 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2128 /* Update LUN DSA table */
2129 esiop_script_write(sc, esiop_target->lun_table_offset +
2130 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2131 esiop_lun->lun_tagtbl->tbl_dsa);
2132 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2133 }
2134
2135 void
2136 esiop_del_dev(sc, target, lun)
2137 struct esiop_softc *sc;
2138 int target;
2139 int lun;
2140 {
2141 struct esiop_target *esiop_target;
2142 #ifdef SIOP_DEBUG
2143 printf("%s:%d:%d: free lun sw entry\n",
2144 device_xname(&sc->sc_c.sc_dev), target, lun);
2145 #endif
2146 if (sc->sc_c.targets[target] == NULL)
2147 return;
2148 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2149 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2150 esiop_target->esiop_lun[lun] = NULL;
2151 }
2152
2153 void
2154 esiop_target_register(sc, target)
2155 struct esiop_softc *sc;
2156 u_int32_t target;
2157 {
2158 struct esiop_target *esiop_target =
2159 (struct esiop_target *)sc->sc_c.targets[target];
2160 struct esiop_lun *esiop_lun;
2161 int lun;
2162
2163 /* get a DSA table for this target */
2164 esiop_target->lun_table_offset = sc->sc_free_offset;
2165 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2166 #ifdef SIOP_DEBUG
2167 printf("%s: lun table for target %d offset %d free offset %d\n",
2168 device_xname(&sc->sc_c.sc_dev), target, esiop_target->lun_table_offset,
2169 sc->sc_free_offset);
2170 #endif
2171	/* the first 32-bit word is the target ID (for select) */
2172 esiop_script_write(sc, esiop_target->lun_table_offset,
2173 esiop_target->target_c.id);
2174 /* Record this table in the target DSA table */
2175 esiop_script_write(sc,
2176 sc->sc_target_table_offset + target,
2177 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2178 sc->sc_c.sc_scriptaddr);
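	/*
	 * The global target table entry is the bus address of this target's
	 * LUN table: the word offset converted to bytes plus
	 * sc_c.sc_scriptaddr.  This lets the reselect code find the per-LUN
	 * DSA tables from the SCSI id alone.
	 */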
2179 /* if we have a tag table, register it */
2180 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2181 esiop_lun = esiop_target->esiop_lun[lun];
2182 if (esiop_lun == NULL)
2183 continue;
2184 if (esiop_lun->lun_tagtbl)
2185 esiop_script_write(sc, esiop_target->lun_table_offset +
2186 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2187 esiop_lun->lun_tagtbl->tbl_dsa);
2188 }
2189 esiop_script_sync(sc,
2190 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2191 }
2192
2193 #ifdef SIOP_STATS
2194 void
2195 esiop_printstats()
2196 {
2197 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2198 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2199 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2200 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2201 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2202 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2203 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2204 }
2205 #endif
2206