/*	$NetBSD: esiop.c,v 1.40 2007/09/30 11:59:41 martin Exp $	*/

/*
 * Copyright (c) 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.40 2007/09/30 11:59:41 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/microcode/siop/esiop.out>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>
#include <dev/ic/esiopvar.h>

#include "opt_siop.h"

#ifndef DEBUG
#undef DEBUG
#endif
/*
#define SIOP_DEBUG
#define SIOP_DEBUG_DR
#define SIOP_DEBUG_INTR
#define SIOP_DEBUG_SCHED
#define DUMP_SCRIPT
*/

#define SIOP_STATS

#ifndef SIOP_DEFAULT_TARGET
#define SIOP_DEFAULT_TARGET 7
#endif

/* number of cmd descriptors per block */
#define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))

void	esiop_reset(struct esiop_softc *);
void	esiop_checkdone(struct esiop_softc *);
void	esiop_handle_reset(struct esiop_softc *);
void	esiop_scsicmd_end(struct esiop_cmd *, int);
void	esiop_unqueue(struct esiop_softc *, int, int);
int	esiop_handle_qtag_reject(struct esiop_cmd *);
static void	esiop_start(struct esiop_softc *, struct esiop_cmd *);
void	esiop_timeout(void *);
void	esiop_scsipi_request(struct scsipi_channel *,
		scsipi_adapter_req_t, void *);
void	esiop_dump_script(struct esiop_softc *);
void	esiop_morecbd(struct esiop_softc *);
void	esiop_moretagtbl(struct esiop_softc *);
void	siop_add_reselsw(struct esiop_softc *, int);
void	esiop_target_register(struct esiop_softc *, u_int32_t);

void	esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);

#ifdef SIOP_STATS
static int esiop_stat_intr = 0;
static int esiop_stat_intr_shortxfer = 0;
static int esiop_stat_intr_sdp = 0;
static int esiop_stat_intr_done = 0;
static int esiop_stat_intr_xferdisc = 0;
static int esiop_stat_intr_lunresel = 0;
static int esiop_stat_intr_qfull = 0;
void esiop_printstats(void);
#define INCSTAT(x) x++
#else
#define INCSTAT(x)
#endif

static inline void esiop_script_sync(struct esiop_softc *, int);
static inline void
esiop_script_sync(sc, ops)
	struct esiop_softc *sc;
	int ops;
{
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, ops);
}

static inline u_int32_t esiop_script_read(struct esiop_softc *, u_int);
static inline u_int32_t
esiop_script_read(sc, offset)
	struct esiop_softc *sc;
	u_int offset;
{
	if (sc->sc_c.features & SF_CHIP_RAM) {
		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4);
	} else {
		return le32toh(sc->sc_c.sc_script[offset]);
	}
}

static inline void esiop_script_write(struct esiop_softc *, u_int,
	u_int32_t);
static inline void
esiop_script_write(sc, offset, val)
	struct esiop_softc *sc;
	u_int offset;
	u_int32_t val;
{
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
		    offset * 4, val);
	} else {
		sc->sc_c.sc_script[offset] = htole32(val);
	}
}

void
esiop_attach(sc)
	struct esiop_softc *sc;
{
	struct esiop_dsatbl *tagtbl_donering;

	if (siop_common_attach(&sc->sc_c) != 0 )
		return;

	TAILQ_INIT(&sc->free_list);
	TAILQ_INIT(&sc->cmds);
	TAILQ_INIT(&sc->free_tagtbl);
	TAILQ_INIT(&sc->tag_tblblk);
	sc->sc_currschedslot = 0;
#ifdef SIOP_DEBUG
	aprint_debug("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
	    sc->sc_c.sc_dev.dv_xname, (int)sizeof(esiop_script),
	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
#endif

	sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
	sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;

	/*
	 * get space for the CMD done slot. For this we use a tag table entry.
	 * It's the same size and allows us to not waste 3/4 of a page
	 */
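	/*
	 * (The done ring is one 32-bit slot per tag; reusing a tag table
	 * entry for it only works if A_ndone_slots matches ESIOP_NTAG,
	 * which the DIAGNOSTIC check below verifies.)
	 */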
#ifdef DIAGNOSTIC
	if (ESIOP_NTAG != A_ndone_slots) {
		aprint_error("%s: size of tag DSA table different from the done"
		    " ring\n", sc->sc_c.sc_dev.dv_xname);
		return;
	}
#endif
	esiop_moretagtbl(sc);
	tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
	if (tagtbl_donering == NULL) {
		aprint_error("%s: no memory for command done ring\n",
		    sc->sc_c.sc_dev.dv_xname);
		return;
	}
	TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
	sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
	sc->sc_done_offset = tagtbl_donering->tbl_offset;
	sc->sc_done_slot = &tagtbl_donering->tbl[0];

	/* Do a bus reset, so that devices fall back to narrow/async */
	siop_resetbus(&sc->sc_c);
	/*
	 * esiop_reset() will reset the chip, thus clearing pending interrupts
	 */
	esiop_reset(sc);
#ifdef DUMP_SCRIPT
	esiop_dump_script(sc);
#endif

	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
}

void
esiop_reset(sc)
	struct esiop_softc *sc;
{
	int i, j;
	u_int32_t addr;
	u_int32_t msgin_addr, sem_addr;

	siop_common_reset(&sc->sc_c);

	/*
	 * we copy the script at the beginning of RAM. Then there are 4 bytes
	 * for messages in, and 4 bytes for the semaphore
	 */
	sc->sc_free_offset = sizeof(esiop_script) / sizeof(esiop_script[0]);
	msgin_addr =
	    sc->sc_free_offset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 1;
	sc->sc_semoffset = sc->sc_free_offset;
	sem_addr =
	    sc->sc_semoffset * sizeof(u_int32_t) + sc->sc_c.sc_scriptaddr;
	sc->sc_free_offset += 1;
	/* then we have the scheduler ring */
	sc->sc_shedoffset = sc->sc_free_offset;
	sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
	/* then the targets DSA table */
	sc->sc_target_table_offset = sc->sc_free_offset;
	sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
	/* copy and patch the script */
	if (sc->sc_c.features & SF_CHIP_RAM) {
		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
		    esiop_script,
		    sizeof(esiop_script) / sizeof(esiop_script[0]));
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_tlq_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer));
		}
		for (j = 0; j <
		    (sizeof(E_saved_offset_offset_Used) /
		    sizeof(E_saved_offset_offset_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_saved_offset_offset_Used[j] * 4,
			    sizeof(struct siop_common_xfer) + 4);
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_msgin2_Used[j] * 4, msgin_addr);
		}
		for (j = 0; j <
		    (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
		    j++) {
			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
			    E_abs_sem_Used[j] * 4, sem_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on1, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_on2, esiop_led_on,
			    sizeof(esiop_led_on) / sizeof(esiop_led_on[0]));
			bus_space_write_region_4(sc->sc_c.sc_ramt,
			    sc->sc_c.sc_ramh,
			    Ent_led_off, esiop_led_off,
			    sizeof(esiop_led_off) / sizeof(esiop_led_off[0]));
		}
	} else {
		for (j = 0;
		    j < (sizeof(esiop_script) / sizeof(esiop_script[0])); j++) {
			sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
		}
		for (j = 0; j <
		    (sizeof(E_tlq_offset_Used) / sizeof(E_tlq_offset_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer));
		}
		for (j = 0; j <
		    (sizeof(E_saved_offset_offset_Used) /
		    sizeof(E_saved_offset_offset_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
			    htole32(sizeof(struct siop_common_xfer) + 4);
		}
		for (j = 0; j <
		    (sizeof(E_abs_msgin2_Used) / sizeof(E_abs_msgin2_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
			    htole32(msgin_addr);
		}
		for (j = 0; j <
		    (sizeof(E_abs_sem_Used) / sizeof(E_abs_sem_Used[0]));
		    j++) {
			sc->sc_c.sc_script[E_abs_sem_Used[j]] =
			    htole32(sem_addr);
		}

		if (sc->sc_c.features & SF_CHIP_LED0) {
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on1 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_on) /
			    sizeof(esiop_led_on[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_on2 / sizeof(esiop_led_on[0]) + j
				    ] = htole32(esiop_led_on[j]);
			for (j = 0; j < (sizeof(esiop_led_off) /
			    sizeof(esiop_led_off[0])); j++)
				sc->sc_c.sc_script[
				    Ent_led_off / sizeof(esiop_led_off[0]) + j
				    ] = htole32(esiop_led_off[j]);
		}
	}
	/* get base of scheduler ring */
	addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(u_int32_t);
	/* init scheduler */
	for (i = 0; i < A_ncmd_slots; i++) {
		esiop_script_write(sc,
		    sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
	}
	sc->sc_currschedslot = 0;
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
	/*
	 * 0x78000000 is a 'move data8 to reg'. data8 is the second
	 * octet, reg offset is the third.
	 */
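	/*
	 * The four writes below patch the scheduler ring base address,
	 * byte by byte, into these 'move data8 to reg' instructions,
	 * presumably so the script can reload its ring pointer (kept in
	 * the scratch register written just above) when the ring wraps.
	 * The done ring base is patched the same way further down.
	 */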
	esiop_script_write(sc, Ent_cmdr0 / 4,
	    0x78640000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_cmdr1 / 4,
	    0x78650000 | ((addr & 0x0000ff00) ));
	esiop_script_write(sc, Ent_cmdr2 / 4,
	    0x78660000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_cmdr3 / 4,
	    0x78670000 | ((addr & 0xff000000) >> 16));
	/* done ring */
	for (i = 0; i < A_ndone_slots; i++)
		sc->sc_done_slot[i] = 0;
	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
	    sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
	sc->sc_currdoneslot = 0;
	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
	esiop_script_write(sc, Ent_doner0 / 4,
	    0x786c0000 | ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, Ent_doner1 / 4,
	    0x786d0000 | ((addr & 0x0000ff00) ));
	esiop_script_write(sc, Ent_doner2 / 4,
	    0x786e0000 | ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, Ent_doner3 / 4,
	    0x786f0000 | ((addr & 0xff000000) >> 16));

	/* set flags */
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
	/* write pointer of base of target DSA table */
	addr = (sc->sc_target_table_offset * sizeof(u_int32_t)) +
	    sc->sc_c.sc_scriptaddr;
	esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
	    ((addr & 0x000000ff) << 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
	    ((addr & 0x0000ff00) ));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
	    ((addr & 0x00ff0000) >> 8));
	esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
	    esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
	    ((addr & 0xff000000) >> 16));
#ifdef SIOP_DEBUG
	printf("%s: target table offset %d free offset %d\n",
	    sc->sc_c.sc_dev.dv_xname, sc->sc_target_table_offset,
	    sc->sc_free_offset);
#endif

	/* register existing targets */
	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
		if (sc->sc_c.targets[i])
			esiop_target_register(sc, i);
	}
	/* start script */
	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
	    sc->sc_c.sc_scriptaddr + Ent_reselect);
}

#if 0
#define CALL_SCRIPT(ent) do {\
	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
	    esiop_cmd->cmd_c.dsa, \
	    sc->sc_c.sc_scriptaddr + ent); \
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#else
#define CALL_SCRIPT(ent) do {\
	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
} while (0)
#endif

int
esiop_intr(v)
	void *v;
{
	struct esiop_softc *sc = v;
	struct esiop_target *esiop_target;
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	struct scsipi_xfer *xs;
	int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
	u_int32_t irqcode;
	int need_reset = 0;
	int offset, target, lun, tag;
	u_int32_t tflags;
	u_int32_t addr;
	int freetarget = 0;
	int slot;
	int retval = 0;

again:
	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
		return retval;
	}
	retval = 1;
	INCSTAT(esiop_stat_intr);
	esiop_checkdone(sc);
	if (istat & ISTAT_INTF) {
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, ISTAT_INTF);
		goto again;
	}

	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
	    (ISTAT_DIP | ISTAT_ABRT)) {
		/* clear abort */
		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_ISTAT, 0);
	}

	/* get CMD from T/L/Q */
	tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_SCRATCHC);
#ifdef SIOP_DEBUG_INTR
	printf("interrupt, istat=0x%x tflags=0x%x "
	    "DSA=0x%x DSP=0x%lx\n", istat, tflags,
	    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
	    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_DSP) -
	    sc->sc_c.sc_scriptaddr));
#endif
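	/*
	 * SCRATCHC holds the current command's T/L/Q word: flags in byte 0,
	 * target in byte 1, lun in byte 2 and tag in byte 3, hence the
	 * shifts below.
	 */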
	target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
	if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
	lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
	if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
	tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;

	if (target >= 0 && lun >= 0) {
		esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL) {
			printf("esiop_target (target %d) not valid\n", target);
			goto none;
		}
		esiop_lun = esiop_target->esiop_lun[lun];
		if (esiop_lun == NULL) {
			printf("esiop_lun (target %d lun %d) not valid\n",
			    target, lun);
			goto none;
		}
		esiop_cmd =
		    (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
		if (esiop_cmd == NULL) {
			printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
			    target, lun, tag);
			goto none;
		}
		xs = esiop_cmd->cmd_c.xs;
#ifdef DIAGNOSTIC
		if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
			printf("esiop_cmd (target %d lun %d) "
			    "not active (%d)\n", target, lun,
			    esiop_cmd->cmd_c.status);
			goto none;
		}
#endif
		esiop_table_sync(esiop_cmd,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} else {
none:
		xs = NULL;
		esiop_target = NULL;
		esiop_lun = NULL;
		esiop_cmd = NULL;
	}
	if (istat & ISTAT_DIP) {
		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSTAT);
		if (dstat & DSTAT_ABRT) {
			/* was probably generated by a bus reset IOCTL */
			if ((dstat & DSTAT_DFE) == 0)
				siop_clearfifo(&sc->sc_c);
			goto reset;
		}
		if (dstat & DSTAT_SSI) {
			printf("single step dsp 0x%08x dsa 0x%08x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSA));
			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
			    (istat & ISTAT_SIP) == 0) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
			}
			return 1;
		}

		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
			printf("%s: DMA IRQ:", sc->sc_c.sc_dev.dv_xname);
			if (dstat & DSTAT_IID)
				printf(" Illegal instruction");
			if (dstat & DSTAT_BF)
				printf(" bus fault");
			if (dstat & DSTAT_MDPE)
				printf(" parity");
			if (dstat & DSTAT_DFE)
				printf(" DMA fifo empty");
			else
				siop_clearfifo(&sc->sc_c);
			printf(", DSP=0x%x DSA=0x%x: ",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) - sc->sc_c.sc_scriptaddr),
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
			if (esiop_cmd)
				printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
				    target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
				    le32toh(esiop_cmd->cmd_tables->status));
			else
				printf(" current T/L/Q invalid\n");
			need_reset = 1;
		}
	}
	if (istat & ISTAT_SIP) {
		if (istat & ISTAT_DIP)
			delay(10);
		/*
		 * Can't read sist0 & sist1 independently, or we have to
		 * insert a delay
		 */
		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SIST0);
		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1);
#ifdef SIOP_DEBUG_INTR
		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSP) -
		    sc->sc_c.sc_scriptaddr));
#endif
		if (sist & SIST0_RST) {
			esiop_handle_reset(sc);
			/* no table to flush here */
			return 1;
		}
		if (sist & SIST0_SGE) {
			if (esiop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", sc->sc_c.sc_dev.dv_xname);
			printf("scsi gross error\n");
			if (esiop_target)
				esiop_target->target_c.flags &= ~TARF_DT;
#ifdef DEBUG
			printf("DSA=0x%x DSP=0x%lx\n",
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
			    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) -
			    sc->sc_c.sc_scriptaddr));
			printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SDID),
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL3),
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SXFER),
			    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCNTL4));

#endif
			goto reset;
		}
		if ((sist & SIST0_MA) && need_reset == 0) {
			if (esiop_cmd) {
				int scratchc0;
				dstat = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_DSTAT);
				/*
				 * first restore DSA, in case we were in a S/G
				 * operation.
				 */
				bus_space_write_4(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh,
				    SIOP_DSA, esiop_cmd->cmd_c.dsa);
				scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHC);
				switch (sstat1 & SSTAT1_PHASE_MASK) {
				case SSTAT1_PHASE_STATUS:
				/*
				 * previous phase may be aborted for any reason
				 * (for example, the target has less data to
				 * transfer than requested). Compute resid and
				 * just go to status, the command should
				 * terminate.
				 */
					INCSTAT(esiop_stat_intr_shortxfer);
					if (scratchc0 & A_f_c_data)
						siop_ma(&esiop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					CALL_SCRIPT(Ent_status);
					return 1;
				case SSTAT1_PHASE_MSGIN:
				/*
				 * target may be ready to disconnect
				 * Compute resid which would be used later
				 * if a save data pointer is needed.
				 */
					INCSTAT(esiop_stat_intr_xferdisc);
					if (scratchc0 & A_f_c_data)
						siop_ma(&esiop_cmd->cmd_c);
					else if ((dstat & DSTAT_DFE) == 0)
						siop_clearfifo(&sc->sc_c);
					bus_space_write_1(sc->sc_c.sc_rt,
					    sc->sc_c.sc_rh, SIOP_SCRATCHC,
					    scratchc0 & ~A_f_c_data);
					CALL_SCRIPT(Ent_msgin);
					return 1;
				}
				printf("%s: unexpected phase mismatch %d\n",
				    sc->sc_c.sc_dev.dv_xname,
				    sstat1 & SSTAT1_PHASE_MASK);
			} else {
				printf("%s: phase mismatch without command\n",
				    sc->sc_c.sc_dev.dv_xname);
			}
			need_reset = 1;
		}
		if (sist & SIST0_PAR) {
			/* parity error, reset */
			if (esiop_cmd)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s:", sc->sc_c.sc_dev.dv_xname);
			printf("parity error\n");
			if (esiop_target)
				esiop_target->target_c.flags &= ~TARF_DT;
			goto reset;
		}
		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
			/*
			 * selection time out, assume there's no device here
			 * We also have to update the ring pointer ourselves
			 */
			slot = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHE);
			esiop_script_sync(sc,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef SIOP_DEBUG_SCHED
			printf("sel timeout target %d, slot %d\n", target, slot);
#endif
			/*
			 * mark this slot as free, and advance to next slot
			 */
			esiop_script_write(sc,
			    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
			    A_f_cmd_free);
			addr = bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHD);
			if (slot < (A_ncmd_slots - 1)) {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
				addr = addr + sizeof(struct esiop_slot);
			} else {
				bus_space_write_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
				addr = sc->sc_c.sc_scriptaddr +
				    sc->sc_shedoffset * sizeof(u_int32_t);
			}
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_SCRATCHD, addr);
			esiop_script_sync(sc,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			if (esiop_cmd) {
				esiop_cmd->cmd_c.status = CMDST_DONE;
				xs->error = XS_SELTIMEOUT;
				freetarget = 1;
				goto end;
			} else {
				printf("%s: selection timeout without "
				    "command, target %d (sdid 0x%x), "
				    "slot %d\n",
				    sc->sc_c.sc_dev.dv_xname, target,
				    bus_space_read_1(sc->sc_c.sc_rt,
				    sc->sc_c.sc_rh, SIOP_SDID), slot);
				need_reset = 1;
			}
		}
		if (sist & SIST0_UDC) {
			/*
			 * unexpected disconnect. Usually the target signals
			 * a fatal condition this way. Attempt to get sense.
			 */
			if (esiop_cmd) {
				esiop_cmd->cmd_tables->status =
				    htole32(SCSI_CHECK);
				goto end;
			}
			printf("%s: unexpected disconnect without "
			    "command\n", sc->sc_c.sc_dev.dv_xname);
			goto reset;
		}
		if (sist & (SIST1_SBMC << 8)) {
			/* SCSI bus mode change */
			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
				goto reset;
			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
				/*
				 * we have a script interrupt, it will
				 * restart the script.
				 */
				goto scintr;
			}
			/*
			 * else we have to restart it ourselves, at the
			 * interrupted instruction.
			 */
			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP,
			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
			    SIOP_DSP) - 8);
			return 1;
		}
		/* Else it's an unhandled exception (for now). */
		printf("%s: unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
		    "DSA=0x%x DSP=0x%x\n", sc->sc_c.sc_dev.dv_xname, sist,
		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_SSTAT1),
		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSP) - sc->sc_c.sc_scriptaddr));
		if (esiop_cmd) {
			esiop_cmd->cmd_c.status = CMDST_DONE;
			xs->error = XS_SELTIMEOUT;
			goto end;
		}
		need_reset = 1;
	}
	if (need_reset) {
reset:
		/* fatal error, reset the bus */
		siop_resetbus(&sc->sc_c);
		/* no table to flush here */
		return 1;
	}

scintr:
	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
		    SIOP_DSPS);
#ifdef SIOP_DEBUG_INTR
		printf("script interrupt 0x%x\n", irqcode);
#endif
		/*
		 * no command, or an inactive command, is only valid for a
		 * reselect interrupt
		 */
		if ((irqcode & 0x80) == 0) {
			if (esiop_cmd == NULL) {
				printf(
				    "%s: script interrupt (0x%x) with invalid DSA !!!\n",
				    sc->sc_c.sc_dev.dv_xname, irqcode);
				goto reset;
			}
			if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
				printf("%s: command with invalid status "
				    "(IRQ code 0x%x current status %d) !\n",
				    sc->sc_c.sc_dev.dv_xname,
				    irqcode, esiop_cmd->cmd_c.status);
				xs = NULL;
			}
		}
		switch(irqcode) {
		case A_int_err:
			printf("error, DSP=0x%x\n",
			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			} else {
				goto reset;
			}
		case A_int_msgin:
		{
			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SFBR);
			if (msgin == MSG_MESSAGE_REJECT) {
				int msg, extmsg;
				if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
					/*
					 * message was part of an identify +
					 * something else. Identify shouldn't
					 * have been rejected.
					 */
					msg =
					    esiop_cmd->cmd_tables->msg_out[1];
					extmsg =
					    esiop_cmd->cmd_tables->msg_out[3];
				} else {
					msg =
					    esiop_cmd->cmd_tables->msg_out[0];
					extmsg =
					    esiop_cmd->cmd_tables->msg_out[2];
				}
				if (msg == MSG_MESSAGE_REJECT) {
					/* MSG_REJECT for a MSG_REJECT !*/
					if (xs)
						scsipi_printaddr(xs->xs_periph);
					else
						printf("%s: ",
						    sc->sc_c.sc_dev.dv_xname);
					printf("our reject message was "
					    "rejected\n");
					goto reset;
				}
				if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_WDTR) {
					/* WDTR rejected, initiate sync */
					if ((esiop_target->target_c.flags &
					    TARF_SYNC) == 0) {
						esiop_target->target_c.status =
						    TARST_OK;
						siop_update_xfer_mode(&sc->sc_c,
						    target);
						/* no table to flush here */
						CALL_SCRIPT(Ent_msgin_ack);
						return 1;
					}
					esiop_target->target_c.status =
					    TARST_SYNC_NEG;
					siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
					    sc->sc_c.st_minsync,
					    sc->sc_c.maxoff);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_SDTR) {
					/* sync rejected */
					esiop_target->target_c.offset = 0;
					esiop_target->target_c.period = 0;
					esiop_target->target_c.status =
					    TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_EXTENDED &&
				    extmsg == MSG_EXT_PPR) {
					/* PPR rejected */
					esiop_target->target_c.offset = 0;
					esiop_target->target_c.period = 0;
					esiop_target->target_c.status =
					    TARST_OK;
					siop_update_xfer_mode(&sc->sc_c,
					    target);
					/* no table to flush here */
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				} else if (msg == MSG_SIMPLE_Q_TAG ||
				    msg == MSG_HEAD_OF_Q_TAG ||
				    msg == MSG_ORDERED_Q_TAG) {
					if (esiop_handle_qtag_reject(
					    esiop_cmd) == -1)
						goto reset;
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				}
				if (xs)
					scsipi_printaddr(xs->xs_periph);
				else
					printf("%s: ",
					    sc->sc_c.sc_dev.dv_xname);
				if (msg == MSG_EXTENDED) {
					printf("scsi message reject, extended "
					    "message sent was 0x%x\n", extmsg);
				} else {
					printf("scsi message reject, message "
					    "sent was 0x%x\n", msg);
				}
				/* no table to flush here */
				CALL_SCRIPT(Ent_msgin_ack);
				return 1;
			}
			if (msgin == MSG_IGN_WIDE_RESIDUE) {
				/* use the extmsgdata table to get the second byte */
				esiop_cmd->cmd_tables->t_extmsgdata.count =
				    htole32(1);
				esiop_table_sync(esiop_cmd,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				CALL_SCRIPT(Ent_get_extmsgdata);
				return 1;
			}
			if (xs)
				scsipi_printaddr(xs->xs_periph);
			else
				printf("%s: ", sc->sc_c.sc_dev.dv_xname);
			printf("unhandled message 0x%x\n", msgin);
			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			esiop_cmd->cmd_tables->t_msgout.count= htole32(1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		}
		case A_int_extmsgin:
#ifdef SIOP_DEBUG_INTR
			printf("extended message: msg 0x%x len %d\n",
			    esiop_cmd->cmd_tables->msg_in[2],
			    esiop_cmd->cmd_tables->msg_in[1]);
#endif
			if (esiop_cmd->cmd_tables->msg_in[1] >
			    sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
				printf("%s: extended message too big (%d)\n",
				    sc->sc_c.sc_dev.dv_xname,
				    esiop_cmd->cmd_tables->msg_in[1]);
			esiop_cmd->cmd_tables->t_extmsgdata.count =
			    htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_get_extmsgdata);
			return 1;
		case A_int_extmsgdata:
#ifdef SIOP_DEBUG_INTR
			{
			int i;
			printf("extended message: 0x%x, data:",
			    esiop_cmd->cmd_tables->msg_in[2]);
			for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
			    i++)
				printf(" 0x%x",
				    esiop_cmd->cmd_tables->msg_in[i]);
			printf("\n");
			}
#endif
			if (esiop_cmd->cmd_tables->msg_in[0] ==
			    MSG_IGN_WIDE_RESIDUE) {
				/* we got the second byte of MSG_IGN_WIDE_RESIDUE */
				if (esiop_cmd->cmd_tables->msg_in[3] != 1)
					printf("MSG_IGN_WIDE_RESIDUE: "
					    "bad len %d\n",
					    esiop_cmd->cmd_tables->msg_in[3]);
				switch (siop_iwr(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_iwr()");
				}
				return 1;
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
				switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_ppr_neg()");
				}
				return 1;
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
				switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_wdtr_neg()");
				}
				return 1;
			}
			if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
				switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
				case SIOP_NEG_MSGOUT:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					esiop_table_sync(esiop_cmd,
					    BUS_DMASYNC_PREREAD |
					    BUS_DMASYNC_PREWRITE);
					CALL_SCRIPT(Ent_send_msgout);
					return 1;
				case SIOP_NEG_ACK:
					esiop_update_scntl3(sc,
					    esiop_cmd->cmd_c.siop_target);
					CALL_SCRIPT(Ent_msgin_ack);
					return 1;
				default:
					panic("invalid retval from "
					    "siop_sdtr_neg()");
				}
				return 1;
			}
			/* send a message reject */
			esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
			esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_send_msgout);
			return 1;
		case A_int_disc:
			INCSTAT(esiop_stat_intr_sdp);
			offset = bus_space_read_1(sc->sc_c.sc_rt,
			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
#ifdef SIOP_DEBUG_DR
			printf("disconnect offset %d\n", offset);
#endif
			siop_sdp(&esiop_cmd->cmd_c, offset);
			/* we start again with no offset */
			ESIOP_XFER(esiop_cmd, saved_offset) =
			    htole32(SIOP_NOOFFSET);
			esiop_table_sync(esiop_cmd,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_resfail:
			printf("reselect failed\n");
			CALL_SCRIPT(Ent_script_sched);
			return 1;
		case A_int_done:
			if (xs == NULL) {
				printf("%s: done without command\n",
				    sc->sc_c.sc_dev.dv_xname);
				CALL_SCRIPT(Ent_script_sched);
				return 1;
			}
#ifdef SIOP_DEBUG_INTR
			printf("done, DSA=0x%lx target id 0x%x last msg "
			    "in=0x%x status=0x%x\n", (u_long)esiop_cmd->cmd_c.dsa,
			    le32toh(esiop_cmd->cmd_tables->id),
			    esiop_cmd->cmd_tables->msg_in[0],
			    le32toh(esiop_cmd->cmd_tables->status));
#endif
			INCSTAT(esiop_stat_intr_done);
			esiop_cmd->cmd_c.status = CMDST_DONE;
			goto end;
		default:
			printf("unknown irqcode %x\n", irqcode);
			if (xs) {
				xs->error = XS_SELTIMEOUT;
				goto end;
			}
			goto reset;
		}
		return 1;
	}
	/* We just shouldn't get there */
	panic("siop_intr: I shouldn't be there !");

end:
	/*
	 * restart the script now if command completed properly.
	 * Otherwise wait for siop_scsicmd_end(), we may need to cleanup the
	 * queue
	 */
	xs->status = le32toh(esiop_cmd->cmd_tables->status);
#ifdef SIOP_DEBUG_INTR
	printf("esiop_intr end: status %d\n", xs->status);
#endif
	if (tag >= 0)
		esiop_lun->tactive[tag] = NULL;
	else
		esiop_lun->active = NULL;
	offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
	    SIOP_SCRATCHA + 1);
	/*
	 * if we got a disconnect between the last data phase
	 * and the status phase, offset will be 0. In this
	 * case, cmd_tables->saved_offset will have the proper value
	 * if it got updated by the controller
	 */
	if (offset == 0 &&
	    ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
		offset =
		    (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;

	esiop_scsicmd_end(esiop_cmd, offset);
	if (freetarget && esiop_target->target_c.status == TARST_PROBING)
		esiop_del_dev(sc, target, lun);
	CALL_SCRIPT(Ent_script_sched);
	return 1;
}

void
esiop_scsicmd_end(esiop_cmd, offset)
	struct esiop_cmd *esiop_cmd;
	int offset;
{
	struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;

	siop_update_resid(&esiop_cmd->cmd_c, offset);

	switch(xs->status) {
	case SCSI_OK:
		xs->error = XS_NOERROR;
		break;
	case SCSI_BUSY:
		xs->error = XS_BUSY;
		break;
	case SCSI_CHECK:
		xs->error = XS_BUSY;
		/* remove commands in the queue and scheduler */
		esiop_unqueue(sc, xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
		break;
	case SCSI_QUEUE_FULL:
		INCSTAT(esiop_stat_intr_qfull);
#ifdef SIOP_DEBUG
		printf("%s:%d:%d: queue full (tag %d)\n",
		    sc->sc_c.sc_dev.dv_xname,
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
#endif
		xs->error = XS_BUSY;
		break;
	case SCSI_SIOP_NOCHECK:
		/*
		 * don't check status, xs->error is already valid
		 */
		break;
	case SCSI_SIOP_NOSTATUS:
		/*
		 * the status byte was not updated, cmd was
		 * aborted
		 */
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		scsipi_printaddr(xs->xs_periph);
		printf("invalid status code %d\n", xs->status);
		xs->error = XS_DRIVER_STUFFUP;
	}
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data, 0,
		    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_data);
	}
	bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
	if ((xs->xs_control & XS_CTL_POLL) == 0)
		callout_stop(&xs->xs_callout);
	esiop_cmd->cmd_c.status = CMDST_FREE;
	TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
#if 0
	if (xs->resid != 0)
		printf("resid %d datalen %d\n", xs->resid, xs->datalen);
#endif
	scsipi_done (xs);
}

void
esiop_checkdone(sc)
	struct esiop_softc *sc;
{
	int target, lun, tag;
	struct esiop_target *esiop_target;
	struct esiop_lun *esiop_lun;
	struct esiop_cmd *esiop_cmd;
	u_int32_t slot;
	int needsync = 0;
	int status;
	u_int32_t sem, offset;

	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	sem = esiop_script_read(sc, sc->sc_semoffset);
	esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
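	/*
	 * The semaphore word is shared with the script: A_sem_done is
	 * (presumably) set by the script when it appends an entry to the
	 * done ring, A_sem_start when it picks up a new command from the
	 * scheduler ring.  Clear the done bit before scanning the ring.
	 */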
	if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
		/*
		 * at least one command has been started,
		 * so we should have free slots now
		 */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if ((sem & A_sem_done) == 0) {
		/* no pending done command */
		return;
	}

	bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
	    sc->sc_done_offset, A_ndone_slots * sizeof(u_int32_t),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
next:
	if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
		if (needsync)
			bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
			    sc->sc_done_offset,
			    A_ndone_slots * sizeof(u_int32_t),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		return;
	}

	needsync = 1;

	slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
	sc->sc_done_slot[sc->sc_currdoneslot] = 0;
	sc->sc_currdoneslot += 1;
	if (sc->sc_currdoneslot == A_ndone_slots)
		sc->sc_currdoneslot = 0;

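	/* done ring entries use the same flags/target/lun/tag layout as SCRATCHC */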
	target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
	lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
	tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;

	esiop_target = (target >= 0) ?
	    (struct esiop_target *)sc->sc_c.targets[target] : NULL;
	if (esiop_target == NULL) {
		printf("esiop_target (target %d) not valid\n", target);
		goto next;
	}
	esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
	if (esiop_lun == NULL) {
		printf("esiop_lun (target %d lun %d) not valid\n",
		    target, lun);
		goto next;
	}
	esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
	if (esiop_cmd == NULL) {
		printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
		    target, lun, tag);
		goto next;
	}

	esiop_table_sync(esiop_cmd,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	status = le32toh(esiop_cmd->cmd_tables->status);
#ifdef DIAGNOSTIC
	if (status != SCSI_OK) {
		printf("command for T/L/Q %d/%d/%d status %d\n",
		    target, lun, tag, status);
		goto next;
	}

#endif
	/* Ok, this command has been handled */
	esiop_cmd->cmd_c.xs->status = status;
	if (tag >= 0)
		esiop_lun->tactive[tag] = NULL;
	else
		esiop_lun->active = NULL;
	/*
	 * scratcha may have been saved in saved_offset by the script;
	 * fetch the offset from it
	 */
	offset = 0;
	if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
		offset =
		    (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
	esiop_scsicmd_end(esiop_cmd, offset);
	goto next;
}

void
esiop_unqueue(sc, target, lun)
	struct esiop_softc *sc;
	int target;
	int lun;
{
	int slot, tag;
	u_int32_t slotdsa;
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun =
	    ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];

	/* first make sure to read valid data */
	esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (tag = 0; tag < ESIOP_NTAG; tag++) {
		/* look for commands in the scheduler, not yet started */
		if (esiop_lun->tactive[tag] == NULL)
			continue;
		esiop_cmd = esiop_lun->tactive[tag];
		for (slot = 0; slot < A_ncmd_slots; slot++) {
			slotdsa = esiop_script_read(sc,
			    sc->sc_shedoffset + slot * CMD_SLOTSIZE);
			/* if the slot has any flag, it won't match the DSA */
			if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
				/* Mark this slot as ignore */
				esiop_script_write(sc,
				    sc->sc_shedoffset + slot * CMD_SLOTSIZE,
				    esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
				/* ask to requeue */
				esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				esiop_lun->tactive[tag] = NULL;
				esiop_scsicmd_end(esiop_cmd, 0);
				break;
			}
		}
	}
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * handle a rejected queue tag message: the command will run untagged,
 * we have to adjust the reselect script.
 */


int
esiop_handle_qtag_reject(esiop_cmd)
	struct esiop_cmd *esiop_cmd;
{
	struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
	int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
	int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
	int tag = esiop_cmd->cmd_tables->msg_out[2];
	struct esiop_target *esiop_target =
	    (struct esiop_target*)sc->sc_c.targets[target];
	struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];

#ifdef SIOP_DEBUG
	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
	    sc->sc_c.sc_dev.dv_xname, target, lun, tag, esiop_cmd->cmd_c.tag,
	    esiop_cmd->cmd_c.status);
#endif

	if (esiop_lun->active != NULL) {
		printf("%s: untagged command already running for target %d "
		    "lun %d (status %d)\n", sc->sc_c.sc_dev.dv_xname,
		    target, lun, esiop_lun->active->cmd_c.status);
		return -1;
	}
	/* clear tag slot */
	esiop_lun->tactive[tag] = NULL;
	/* add command to non-tagged slot */
	esiop_lun->active = esiop_cmd;
	esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
	esiop_cmd->cmd_c.tag = -1;
	/* update DSA table */
	esiop_script_write(sc, esiop_target->lun_table_offset +
	    lun * 2 + A_target_luntbl / sizeof(u_int32_t),
	    esiop_cmd->cmd_c.dsa);
	esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return 0;
}

/*
 * handle a bus reset: reset chip, unqueue all active commands, free all
 * target struct and report lossage to upper layer.
 * As the upper layer may requeue immediately we have to first store
 * all active commands in a temporary queue.
 */
void
esiop_handle_reset(sc)
	struct esiop_softc *sc;
{
	struct esiop_cmd *esiop_cmd;
	struct esiop_lun *esiop_lun;
	int target, lun, tag;
	/*
	 * scsi bus reset. reset the chip and restart
	 * the queue. Need to clean up all active commands
	 */
	printf("%s: scsi bus reset\n", sc->sc_c.sc_dev.dv_xname);
	/* stop, reset and restart the chip */
	esiop_reset(sc);

	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
		/* chip has been reset, all slots are free now */
		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
	}
	/*
	 * Process all commands: first completed commands, then commands
	 * still being executed
	 */
	esiop_checkdone(sc);
	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
	    target++) {
		struct esiop_target *esiop_target =
		    (struct esiop_target *)sc->sc_c.targets[target];
		if (esiop_target == NULL)
			continue;
		for (lun = 0; lun < 8; lun++) {
			esiop_lun = esiop_target->esiop_lun[lun];
			if (esiop_lun == NULL)
				continue;
			for (tag = -1; tag <
			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
			    ESIOP_NTAG : 0);
			    tag++) {
				if (tag >= 0)
					esiop_cmd = esiop_lun->tactive[tag];
				else
					esiop_cmd = esiop_lun->active;
				if (esiop_cmd == NULL)
					continue;
				scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
				printf("command with tag id %d reset\n", tag);
				esiop_cmd->cmd_c.xs->error =
				    (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
				    XS_TIMEOUT : XS_RESET;
				esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
				if (tag >= 0)
					esiop_lun->tactive[tag] = NULL;
				else
					esiop_lun->active = NULL;
				esiop_cmd->cmd_c.status = CMDST_DONE;
				esiop_scsicmd_end(esiop_cmd, 0);
			}
		}
		sc->sc_c.targets[target]->status = TARST_ASYNC;
		sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
		sc->sc_c.targets[target]->period =
		    sc->sc_c.targets[target]->offset = 0;
		siop_update_xfer_mode(&sc->sc_c, target);
	}

	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
}

void
esiop_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct esiop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
	struct esiop_cmd *esiop_cmd;
	struct esiop_target *esiop_target;
	int s, error, i;
	int target;
	int lun;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		target = periph->periph_target;
		lun = periph->periph_lun;

		s = splbio();
		/*
		 * first check if there are pending completed commands.
		 * this can free us some resources (in the rings for example).
		 * we have to lock it to avoid recursion.
		 */
		if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
			sc->sc_flags |= SCF_CHAN_ADAPTREQ;
			esiop_checkdone(sc);
			sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
		}
#ifdef SIOP_DEBUG_SCHED
		printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
		    xs->xs_tag_type, xs->xs_tag_id);
#endif
		esiop_cmd = TAILQ_FIRST(&sc->free_list);
		if (esiop_cmd == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			splx(s);
			return;
		}
		TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
#ifdef DIAGNOSTIC
		if (esiop_cmd->cmd_c.status != CMDST_FREE)
			panic("siop_scsicmd: new cmd not free");
#endif
		esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
		if (esiop_target == NULL) {
#ifdef SIOP_DEBUG
			printf("%s: alloc siop_target for target %d\n",
			    sc->sc_c.sc_dev.dv_xname, target);
#endif
			sc->sc_c.targets[target] =
			    malloc(sizeof(struct esiop_target),
				M_DEVBUF, M_NOWAIT | M_ZERO);
			if (sc->sc_c.targets[target] == NULL) {
				printf("%s: can't malloc memory for "
				    "target %d\n", sc->sc_c.sc_dev.dv_xname,
				    target);
				xs->error = XS_RESOURCE_SHORTAGE;
				scsipi_done(xs);
				splx(s);
				return;
			}
			esiop_target =
			    (struct esiop_target*)sc->sc_c.targets[target];
			esiop_target->target_c.status = TARST_PROBING;
			esiop_target->target_c.flags = 0;
			esiop_target->target_c.id =
			    sc->sc_c.clock_div << 24; /* scntl3 */
			esiop_target->target_c.id |= target << 16; /* id */
			/* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */

			for (i = 0; i < 8; i++)
				esiop_target->esiop_lun[i] = NULL;
			esiop_target_register(sc, target);
		}
		if (esiop_target->esiop_lun[lun] == NULL) {
			esiop_target->esiop_lun[lun] =
			    malloc(sizeof(struct esiop_lun), M_DEVBUF,
				M_NOWAIT|M_ZERO);
			if (esiop_target->esiop_lun[lun] == NULL) {
				printf("%s: can't alloc esiop_lun for "
				    "target %d lun %d\n",
				    sc->sc_c.sc_dev.dv_xname, target, lun);
				xs->error = XS_RESOURCE_SHORTAGE;
				scsipi_done(xs);
				splx(s);
				return;
			}
		}
		esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
		esiop_cmd->cmd_c.xs = xs;
		esiop_cmd->cmd_c.flags = 0;
		esiop_cmd->cmd_c.status = CMDST_READY;

		/* load the DMA maps */
		error = bus_dmamap_load(sc->sc_c.sc_dmat,
		    esiop_cmd->cmd_c.dmamap_cmd,
		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to load cmd DMA map: %d\n",
			    sc->sc_c.sc_dev.dv_xname, error);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			splx(s);
			return;
		}
		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
			error = bus_dmamap_load(sc->sc_c.sc_dmat,
			    esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			     BUS_DMA_READ : BUS_DMA_WRITE));
			if (error) {
				printf("%s: unable to load data DMA map: %d",
				    sc->sc_c.sc_dev.dv_xname, error);
				xs->error = XS_DRIVER_STUFFUP;
				scsipi_done(xs);
				bus_dmamap_unload(sc->sc_c.sc_dmat,
				    esiop_cmd->cmd_c.dmamap_cmd);
				splx(s);
				return;
			}
			bus_dmamap_sync(sc->sc_c.sc_dmat,
			    esiop_cmd->cmd_c.dmamap_data, 0,
			    esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
		bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
		    0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (xs->xs_tag_type)
			esiop_cmd->cmd_c.tag = xs->xs_tag_id;
		else
			esiop_cmd->cmd_c.tag = -1;
		siop_setuptables(&esiop_cmd->cmd_c);
		ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
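		/* T/L/Q word for the script: flags in byte 0, then target, lun, tag */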
1644 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1645 ESIOP_XFER(esiop_cmd, tlq) |=
1646 htole32((target << 8) | (lun << 16));
1647 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1648 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1649 ESIOP_XFER(esiop_cmd, tlq) |=
1650 htole32(esiop_cmd->cmd_c.tag << 24);
1651 }
1652
1653 esiop_table_sync(esiop_cmd,
1654 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1655 esiop_start(sc, esiop_cmd);
1656 if (xs->xs_control & XS_CTL_POLL) {
1657 /* poll for command completion */
1658 while ((xs->xs_status & XS_STS_DONE) == 0) {
1659 delay(1000);
1660 esiop_intr(sc);
1661 }
1662 }
1663 splx(s);
1664 return;
1665
1666 case ADAPTER_REQ_GROW_RESOURCES:
1667 #ifdef SIOP_DEBUG
1668 printf("%s grow resources (%d)\n", sc->sc_c.sc_dev.dv_xname,
1669 sc->sc_c.sc_adapt.adapt_openings);
1670 #endif
1671 esiop_morecbd(sc);
1672 return;
1673
1674 case ADAPTER_REQ_SET_XFER_MODE:
1675 {
1676 struct scsipi_xfer_mode *xm = arg;
1677 if (sc->sc_c.targets[xm->xm_target] == NULL)
1678 return;
1679 s = splbio();
1680 if (xm->xm_mode & PERIPH_CAP_TQING) {
1681 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1682 /* allocate tag tables for this device */
1683 for (lun = 0;
1684 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1685 if (scsipi_lookup_periph(chan,
1686 xm->xm_target, lun) != NULL)
1687 esiop_add_dev(sc, xm->xm_target, lun);
1688 }
1689 }
1690 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1691 (sc->sc_c.features & SF_BUS_WIDE))
1692 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1693 if (xm->xm_mode & PERIPH_CAP_SYNC)
1694 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1695 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1696 (sc->sc_c.features & SF_CHIP_DT))
1697 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1698 if ((xm->xm_mode &
1699 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1700 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1701 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1702
1703 splx(s);
1704 }
1705 }
1706 }
1707
1708 static void
1709 esiop_start(sc, esiop_cmd)
1710 struct esiop_softc *sc;
1711 struct esiop_cmd *esiop_cmd;
1712 {
1713 struct esiop_lun *esiop_lun;
1714 struct esiop_target *esiop_target;
1715 int timeout;
1716 int target, lun, slot;
1717
1718 /*
1719 * first make sure to read valid data
1720 */
1721 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1722
1723 /*
1724 * We use a circular queue here. sc->sc_currschedslot points to a
1725 * free slot, unless we have filled the queue. Check this.
1726 */
1727 slot = sc->sc_currschedslot;
1728 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1729 A_f_cmd_free) == 0) {
1730 /*
1731 * no more free slot, no need to continue. freeze the queue
1732 * and requeue this command.
1733 */
1734 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1735 sc->sc_flags |= SCF_CHAN_NOSLOT;
1736 esiop_script_write(sc, sc->sc_semoffset,
1737 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1738 esiop_script_sync(sc,
1739 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1740 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1741 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1742 esiop_scsicmd_end(esiop_cmd, 0);
1743 return;
1744 }
1745 /* OK, we can use this slot */
1746
1747 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1748 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1749 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1750 esiop_lun = esiop_target->esiop_lun[lun];
1751 /* if non-tagged command active, panic: this shouldn't happen */
1752 if (esiop_lun->active != NULL) {
1753 panic("esiop_start: tagged cmd while untagged running");
1754 }
1755 #ifdef DIAGNOSTIC
1756 /* sanity check the tag if needed */
1757 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1758 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1759 esiop_cmd->cmd_c.tag < 0) {
1760 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1761 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1762 panic("esiop_start: invalid tag id");
1763 }
1764 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1765 panic("esiop_start: tag not free");
1766 }
1767 #endif
1768 #ifdef SIOP_DEBUG_SCHED
1769 printf("using slot %d for DSA 0x%lx\n", slot,
1770 (u_long)esiop_cmd->cmd_c.dsa);
1771 #endif
1772 /* mark command as active */
1773 if (esiop_cmd->cmd_c.status == CMDST_READY)
1774 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1775 else
1776 panic("esiop_start: bad status");
1777 /* DSA table for reselect */
1778 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1779 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1780 /* DSA table for reselect */
1781 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1782 htole32(esiop_cmd->cmd_c.dsa);
1783 bus_dmamap_sync(sc->sc_c.sc_dmat,
1784 esiop_lun->lun_tagtbl->tblblk->blkmap,
1785 esiop_lun->lun_tagtbl->tbl_offset,
1786 sizeof(u_int32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1787 } else {
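		/*
		 * untagged: remember the command as the lun's active command
		 * and store its DSA in the lun table entry used at reselect.
		 */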
1788 esiop_lun->active = esiop_cmd;
1789 esiop_script_write(sc,
1790 esiop_target->lun_table_offset +
1791 lun * 2 + A_target_luntbl / sizeof(u_int32_t),
1792 esiop_cmd->cmd_c.dsa);
1793 }
1794 /* scheduler slot: DSA */
1795 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1796 esiop_cmd->cmd_c.dsa);
1797 /* make sure SCRIPT processor will read valid data */
1798 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1799 /* handle timeout */
1800 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1801 		/* start expire timer */
1802 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1803 if (timeout == 0)
1804 timeout = 1;
1805 		callout_reset(&esiop_cmd->cmd_c.xs->xs_callout,
1806 timeout, esiop_timeout, esiop_cmd);
1807 }
1808 /* Signal script it has some work to do */
1809 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1810 SIOP_ISTAT, ISTAT_SIGP);
1811 /* update the current slot, and wait for IRQ */
1812 sc->sc_currschedslot++;
1813 if (sc->sc_currschedslot >= A_ncmd_slots)
1814 sc->sc_currschedslot = 0;
1815 return;
1816 }
1817
1818 void
1819 esiop_timeout(v)
1820 void *v;
1821 {
1822 struct esiop_cmd *esiop_cmd = v;
1823 struct esiop_softc *sc =
1824 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1825 int s;
1826 #ifdef SIOP_DEBUG
1827 int slot, slotdsa;
1828 #endif
1829
1830 s = splbio();
1831 esiop_table_sync(esiop_cmd,
1832 BUS_DMASYNC_POSTREAD |
1833 BUS_DMASYNC_POSTWRITE);
1834 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1835 #ifdef SIOP_DEBUG
1836 printf("command timeout (status %d)\n", le32toh(esiop_cmd->cmd_tables->status));
1837
1838 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1839 for (slot = 0; slot < A_ncmd_slots; slot++) {
1840 slotdsa = esiop_script_read(sc,
1841 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1842 if ((slotdsa & 0x01) == 0)
1843 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1844 }
1845 printf("istat 0x%x ", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1846 printf("DSP 0x%lx DSA 0x%x\n",
1847 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr),
1848 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1849 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1850 printf("istat 0x%x\n", bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1851 #else
1852 printf("command timeout, CDB: ");
1853 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1854 printf("\n");
1855 #endif
1856 /* reset the scsi bus */
1857 siop_resetbus(&sc->sc_c);
1858
1859 /* deactivate callout */
1860 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1861 /*
1862 	 * mark the command as having timed out and just return;
1863 	 * the bus reset will generate an interrupt,
1864 	 * which will be handled in esiop_intr()
1865 */
1866 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1867 splx(s);
1868 return;
1869
1870 }
1871
1872 void
1873 esiop_dump_script(sc)
1874 struct esiop_softc *sc;
1875 {
1876 int i;
1877 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1878 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1879 le32toh(sc->sc_c.sc_script[i]),
1880 le32toh(sc->sc_c.sc_script[i+1]));
1881 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1882 0xc0000000) {
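			/*
			 * memory-to-memory move instructions are three
			 * words long; print the extra word.
			 */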
1883 i++;
1884 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i+1]));
1885 }
1886 printf("\n");
1887 }
1888 }
1889
1890 void
1891 esiop_morecbd(sc)
1892 struct esiop_softc *sc;
1893 {
1894 int error, i, s;
1895 bus_dma_segment_t seg;
1896 int rseg;
1897 struct esiop_cbd *newcbd;
1898 struct esiop_xfer *xfer;
1899 bus_addr_t dsa;
1900
1901 /* allocate a new list head */
1902 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1903 if (newcbd == NULL) {
1904 printf("%s: can't allocate memory for command descriptors "
1905 "head\n", sc->sc_c.sc_dev.dv_xname);
1906 return;
1907 }
1908
1909 /* allocate cmd list */
1910 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1911 M_DEVBUF, M_NOWAIT|M_ZERO);
1912 if (newcbd->cmds == NULL) {
1913 printf("%s: can't allocate memory for command descriptors\n",
1914 sc->sc_c.sc_dev.dv_xname);
1915 goto bad3;
1916 }
1917 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1918 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1919 if (error) {
1920 printf("%s: unable to allocate cbd DMA memory, error = %d\n",
1921 sc->sc_c.sc_dev.dv_xname, error);
1922 goto bad2;
1923 }
1924 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1925 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1926 if (error) {
1927 printf("%s: unable to map cbd DMA memory, error = %d\n",
1928 sc->sc_c.sc_dev.dv_xname, error);
1929 goto bad2;
1930 }
1931 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1932 BUS_DMA_NOWAIT, &newcbd->xferdma);
1933 if (error) {
1934 printf("%s: unable to create cbd DMA map, error = %d\n",
1935 sc->sc_c.sc_dev.dv_xname, error);
1936 goto bad1;
1937 }
1938 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1939 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1940 if (error) {
1941 printf("%s: unable to load cbd DMA map, error = %d\n",
1942 sc->sc_c.sc_dev.dv_xname, error);
1943 goto bad0;
1944 }
1945 #ifdef DEBUG
1946 printf("%s: alloc newcdb at PHY addr 0x%lx\n", sc->sc_c.sc_dev.dv_xname,
1947 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1948 #endif
1949 for (i = 0; i < SIOP_NCMDPB; i++) {
1950 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1951 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1952 &newcbd->cmds[i].cmd_c.dmamap_data);
1953 if (error) {
1954 printf("%s: unable to create data DMA map for cbd: "
1955 "error %d\n",
1956 sc->sc_c.sc_dev.dv_xname, error);
1957 goto bad0;
1958 }
1959 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1960 sizeof(struct scsipi_generic), 1,
1961 sizeof(struct scsipi_generic), 0,
1962 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1963 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1964 if (error) {
1965 printf("%s: unable to create cmd DMA map for cbd %d\n",
1966 sc->sc_c.sc_dev.dv_xname, error);
1967 goto bad0;
1968 }
1969 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1970 newcbd->cmds[i].esiop_cbdp = newcbd;
1971 xfer = &newcbd->xfers[i];
1972 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1973 memset(newcbd->cmds[i].cmd_tables, 0,
1974 sizeof(struct esiop_xfer));
1975 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1976 i * sizeof(struct esiop_xfer);
1977 newcbd->cmds[i].cmd_c.dsa = dsa;
1978 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
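		/*
		 * The message and status buffers live in the xfer structure
		 * itself, so the script table entries are given DSA-relative
		 * bus addresses computed from 'dsa' above.
		 */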
1979 		xfer->siop_tables.t_msgout.count = htole32(1);
1980 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1981 		xfer->siop_tables.t_msgin.count = htole32(1);
1982 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1983 offsetof(struct siop_common_xfer, msg_in));
1984 		xfer->siop_tables.t_extmsgin.count = htole32(2);
1985 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1986 offsetof(struct siop_common_xfer, msg_in) + 1);
1987 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1988 offsetof(struct siop_common_xfer, msg_in) + 3);
1989 		xfer->siop_tables.t_status.count = htole32(1);
1990 xfer->siop_tables.t_status.addr = htole32(dsa +
1991 offsetof(struct siop_common_xfer, status));
1992
1993 s = splbio();
1994 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1995 splx(s);
1996 #ifdef SIOP_DEBUG
1997 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1998 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1999 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
2000 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
2001 #endif
2002 }
2003 s = splbio();
2004 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
2005 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
2006 splx(s);
2007 return;
2008 bad0:
2009 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
2010 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
2011 bad1:
2012 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2013 bad2:
2014 free(newcbd->cmds, M_DEVBUF);
2015 bad3:
2016 free(newcbd, M_DEVBUF);
2017 return;
2018 }
2019
2020 void
2021 esiop_moretagtbl(sc)
2022 struct esiop_softc *sc;
2023 {
2024 int error, i, j, s;
2025 bus_dma_segment_t seg;
2026 int rseg;
2027 struct esiop_dsatblblk *newtblblk;
2028 struct esiop_dsatbl *newtbls;
2029 u_int32_t *tbls;
2030
2031 /* allocate a new list head */
2032 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2033 M_DEVBUF, M_NOWAIT|M_ZERO);
2034 if (newtblblk == NULL) {
2035 printf("%s: can't allocate memory for tag DSA table block\n",
2036 sc->sc_c.sc_dev.dv_xname);
2037 return;
2038 }
2039
2040 /* allocate tbl list */
2041 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2042 M_DEVBUF, M_NOWAIT|M_ZERO);
2043 if (newtbls == NULL) {
2044 printf("%s: can't allocate memory for command descriptors\n",
2045 sc->sc_c.sc_dev.dv_xname);
2046 goto bad3;
2047 }
2048 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2049 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2050 if (error) {
2051 printf("%s: unable to allocate tbl DMA memory, error = %d\n",
2052 sc->sc_c.sc_dev.dv_xname, error);
2053 goto bad2;
2054 }
2055 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2056 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2057 if (error) {
2058 printf("%s: unable to map tbls DMA memory, error = %d\n",
2059 sc->sc_c.sc_dev.dv_xname, error);
2060 goto bad2;
2061 }
2062 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2063 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2064 if (error) {
2065 printf("%s: unable to create tbl DMA map, error = %d\n",
2066 sc->sc_c.sc_dev.dv_xname, error);
2067 goto bad1;
2068 }
2069 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2070 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2071 if (error) {
2072 printf("%s: unable to load tbl DMA map, error = %d\n",
2073 sc->sc_c.sc_dev.dv_xname, error);
2074 goto bad0;
2075 }
2076 #ifdef DEBUG
2077 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2078 sc->sc_c.sc_dev.dv_xname,
2079 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2080 #endif
2081 for (i = 0; i < ESIOP_NTPB; i++) {
2082 newtbls[i].tblblk = newtblblk;
2083 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2084 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(u_int32_t);
2085 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2086 newtbls[i].tbl_offset;
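		/*
		 * Fill the table with the tag numbers; esiop_start()
		 * overwrites an entry with the command's DSA when the
		 * corresponding tag is allocated.
		 */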
2087 for (j = 0; j < ESIOP_NTAG; j++)
2088 newtbls[i].tbl[j] = j;
2089 s = splbio();
2090 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2091 splx(s);
2092 }
2093 s = splbio();
2094 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2095 splx(s);
2096 return;
2097 bad0:
2098 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2099 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2100 bad1:
2101 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2102 bad2:
2103 free(newtbls, M_DEVBUF);
2104 bad3:
2105 free(newtblblk, M_DEVBUF);
2106 return;
2107 }
2108
2109 void
2110 esiop_update_scntl3(sc, _siop_target)
2111 struct esiop_softc *sc;
2112 struct siop_common_target *_siop_target;
2113 {
2114 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
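	/*
	 * The target's id word also encodes the scntl3/sxfer values
	 * negotiated with the target (maintained by the siop_common code);
	 * rewrite the first word of the target's lun table so the next
	 * (re)select uses the new values.
	 */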
2115 esiop_script_write(sc, esiop_target->lun_table_offset,
2116 esiop_target->target_c.id);
2117 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2118 }
2119
2120 void
2121 esiop_add_dev(sc, target, lun)
2122 struct esiop_softc *sc;
2123 int target;
2124 int lun;
2125 {
2126 struct esiop_target *esiop_target =
2127 (struct esiop_target *)sc->sc_c.targets[target];
2128 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2129
2130 if (esiop_lun->lun_tagtbl != NULL)
2131 return; /* already allocated */
2132
2133 /* we need a tag DSA table */
2134 	esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2135 if (esiop_lun->lun_tagtbl == NULL) {
2136 esiop_moretagtbl(sc);
2137 		esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2138 if (esiop_lun->lun_tagtbl == NULL) {
2139 /* no resources, run untagged */
2140 esiop_target->target_c.flags &= ~TARF_TAG;
2141 return;
2142 }
2143 }
2144 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2145 /* Update LUN DSA table */
2146 esiop_script_write(sc, esiop_target->lun_table_offset +
2147 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2148 esiop_lun->lun_tagtbl->tbl_dsa);
2149 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2150 }
2151
2152 void
2153 esiop_del_dev(sc, target, lun)
2154 struct esiop_softc *sc;
2155 int target;
2156 int lun;
2157 {
2158 struct esiop_target *esiop_target;
2159 #ifdef SIOP_DEBUG
2160 printf("%s:%d:%d: free lun sw entry\n",
2161 sc->sc_c.sc_dev.dv_xname, target, lun);
2162 #endif
2163 if (sc->sc_c.targets[target] == NULL)
2164 return;
2165 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2166 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2167 esiop_target->esiop_lun[lun] = NULL;
2168 }
2169
2170 void
2171 esiop_target_register(sc, target)
2172 struct esiop_softc *sc;
2173 u_int32_t target;
2174 {
2175 struct esiop_target *esiop_target =
2176 (struct esiop_target *)sc->sc_c.targets[target];
2177 struct esiop_lun *esiop_lun;
2178 int lun;
2179
2180 /* get a DSA table for this target */
2181 esiop_target->lun_table_offset = sc->sc_free_offset;
2182 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
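	/*
	 * The table is chan_nluns * 2 + 2 script words: two per lun
	 * (untagged DSA and tag table pointer), with the first word of the
	 * table holding the target ID written below.
	 */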
2183 #ifdef SIOP_DEBUG
2184 printf("%s: lun table for target %d offset %d free offset %d\n",
2185 sc->sc_c.sc_dev.dv_xname, target, esiop_target->lun_table_offset,
2186 sc->sc_free_offset);
2187 #endif
2188 	/* first 32-bit word is the ID (for select) */
2189 esiop_script_write(sc, esiop_target->lun_table_offset,
2190 esiop_target->target_c.id);
2191 /* Record this table in the target DSA table */
2192 esiop_script_write(sc,
2193 sc->sc_target_table_offset + target,
2194 (esiop_target->lun_table_offset * sizeof(u_int32_t)) +
2195 sc->sc_c.sc_scriptaddr);
2196 /* if we have a tag table, register it */
2197 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2198 esiop_lun = esiop_target->esiop_lun[lun];
2199 if (esiop_lun == NULL)
2200 continue;
2201 if (esiop_lun->lun_tagtbl)
2202 esiop_script_write(sc, esiop_target->lun_table_offset +
2203 lun * 2 + A_target_luntbl_tag / sizeof(u_int32_t),
2204 esiop_lun->lun_tagtbl->tbl_dsa);
2205 }
2206 esiop_script_sync(sc,
2207 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2208 }
2209
2210 #ifdef SIOP_STATS
2211 void
2212 esiop_printstats()
2213 {
2214 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2215 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2216 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2217 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2218 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2219 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2220 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2221 }
2222 #endif
2223