1	/*	$NetBSD: esiop.c,v 1.54 2010/09/09 14:50:25 jakllsch Exp $	*/
2
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.54 2010/09/09 14:50:25 jakllsch Exp $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/device.h>
36 #include <sys/malloc.h>
37 #include <sys/buf.h>
38 #include <sys/kernel.h>
39
40 #include <uvm/uvm_extern.h>
41
42 #include <machine/endian.h>
43 #include <sys/bus.h>
44
45 #include <dev/microcode/siop/esiop.out>
46
47 #include <dev/scsipi/scsi_all.h>
48 #include <dev/scsipi/scsi_message.h>
49 #include <dev/scsipi/scsipi_all.h>
50
51 #include <dev/scsipi/scsiconf.h>
52
53 #include <dev/ic/siopreg.h>
54 #include <dev/ic/siopvar_common.h>
55 #include <dev/ic/esiopvar.h>
56
57 #include "opt_siop.h"
58
59 /*
60 #define SIOP_DEBUG
61 #define SIOP_DEBUG_DR
62 #define SIOP_DEBUG_INTR
63 #define SIOP_DEBUG_SCHED
64 #define SIOP_DUMP_SCRIPT
65 */
66
67 #define SIOP_STATS
68
69 #ifndef SIOP_DEFAULT_TARGET
70 #define SIOP_DEFAULT_TARGET 7
71 #endif
72
73 /* number of cmd descriptors per block */
74 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
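/*
 * Command descriptors are allocated in blocks by esiop_morecbd(); this is
 * the number of esiop_xfer structures that fit in one PAGE_SIZE block.
 */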
75
76 void esiop_reset(struct esiop_softc *);
77 void esiop_checkdone(struct esiop_softc *);
78 void esiop_handle_reset(struct esiop_softc *);
79 void esiop_scsicmd_end(struct esiop_cmd *, int);
80 void esiop_unqueue(struct esiop_softc *, int, int);
81 int esiop_handle_qtag_reject(struct esiop_cmd *);
82 static void esiop_start(struct esiop_softc *, struct esiop_cmd *);
83 void esiop_timeout(void *);
84 void esiop_scsipi_request(struct scsipi_channel *,
85 scsipi_adapter_req_t, void *);
86 void esiop_dump_script(struct esiop_softc *);
87 void esiop_morecbd(struct esiop_softc *);
88 void esiop_moretagtbl(struct esiop_softc *);
89 void siop_add_reselsw(struct esiop_softc *, int);
90 void esiop_target_register(struct esiop_softc *, uint32_t);
91
92 void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
93
94 #ifdef SIOP_STATS
95 static int esiop_stat_intr = 0;
96 static int esiop_stat_intr_shortxfer = 0;
97 static int esiop_stat_intr_sdp = 0;
98 static int esiop_stat_intr_done = 0;
99 static int esiop_stat_intr_xferdisc = 0;
100 static int esiop_stat_intr_lunresel = 0;
101 static int esiop_stat_intr_qfull = 0;
102 void esiop_printstats(void);
103 #define INCSTAT(x) x++
104 #else
105 #define INCSTAT(x)
106 #endif
107
108 static inline void esiop_script_sync(struct esiop_softc *, int);
109 static inline void
110 esiop_script_sync(struct esiop_softc *sc, int ops)
111 {
112
113 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
114 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
115 PAGE_SIZE, ops);
116 }
117
118 static inline uint32_t esiop_script_read(struct esiop_softc *, u_int);
119 static inline uint32_t
120 esiop_script_read(struct esiop_softc *sc, u_int offset)
121 {
122
123 if (sc->sc_c.features & SF_CHIP_RAM) {
124 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
125 offset * 4);
126 } else {
127 return le32toh(sc->sc_c.sc_script[offset]);
128 }
129 }
130
131 static inline void esiop_script_write(struct esiop_softc *, u_int,
132 uint32_t);
133 static inline void
134 esiop_script_write(struct esiop_softc *sc, u_int offset, uint32_t val)
135 {
136
137 if (sc->sc_c.features & SF_CHIP_RAM) {
138 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
139 offset * 4, val);
140 } else {
141 sc->sc_c.sc_script[offset] = htole32(val);
142 }
143 }
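/*
 * The accessors above hide whether the script lives in on-chip RAM
 * (accessed through bus_space) or in host memory (accessed directly,
 * with explicit DMA syncs done by esiop_script_sync()).  A typical
 * read-modify-write patch of a single script word is then:
 *
 *	esiop_script_write(sc, off, esiop_script_read(sc, off) | bits);
 *
 * as done below when patching the target table address into the script.
 */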
144
145 void
146 esiop_attach(struct esiop_softc *sc)
147 {
148 struct esiop_dsatbl *tagtbl_donering;
149
150 if (siop_common_attach(&sc->sc_c) != 0 )
151 return;
152
153 TAILQ_INIT(&sc->free_list);
154 TAILQ_INIT(&sc->cmds);
155 TAILQ_INIT(&sc->free_tagtbl);
156 TAILQ_INIT(&sc->tag_tblblk);
157 sc->sc_currschedslot = 0;
158 #ifdef SIOP_DEBUG
159 aprint_debug_dev(sc->sc_c.sc_dev,
160 "script size = %d, PHY addr=0x%x, VIRT=%p\n",
161 (int)sizeof(esiop_script),
162 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
163 #endif
164
165 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
166 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
167
168 /*
169	 * Get space for the CMD done ring. For this we use a tag table entry;
170	 * it's the same size and avoids wasting 3/4 of a page.
171 */
172 #ifdef DIAGNOSTIC
173 if (ESIOP_NTAG != A_ndone_slots) {
174 aprint_error_dev(sc->sc_c.sc_dev,
175 "size of tag DSA table different from the done ring\n");
176 return;
177 }
178 #endif
179 esiop_moretagtbl(sc);
180 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
181 if (tagtbl_donering == NULL) {
182 aprint_error_dev(sc->sc_c.sc_dev,
183 "no memory for command done ring\n");
184 return;
185 }
186 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
187 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
188 sc->sc_done_offset = tagtbl_donering->tbl_offset;
189 sc->sc_done_slot = &tagtbl_donering->tbl[0];
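	/*
	 * The done ring is an array of A_ndone_slots 32-bit words, one per
	 * completed command.  The script stores the T/L/Q word of each
	 * command it completes in the next slot; esiop_checkdone() scans
	 * the ring, decodes target/lun/tag and finishes the commands.
	 */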
190
191 /* Do a bus reset, so that devices fall back to narrow/async */
192 siop_resetbus(&sc->sc_c);
193 /*
194	 * esiop_reset() will reset the chip, thus clearing pending interrupts
195 */
196 esiop_reset(sc);
197 #ifdef SIOP_DUMP_SCRIPT
198 esiop_dump_script(sc);
199 #endif
200
201 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
202 }
203
204 void
205 esiop_reset(struct esiop_softc *sc)
206 {
207 int i, j;
208 uint32_t addr;
209 uint32_t msgin_addr, sem_addr;
210
211 siop_common_reset(&sc->sc_c);
212
213 /*
214	 * We copy the script at the beginning of RAM. It is followed by 4 bytes
215	 * for messages in and 4 bytes for a semaphore.
216 */
217 sc->sc_free_offset = __arraycount(esiop_script);
218 msgin_addr =
219 sc->sc_free_offset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
220 sc->sc_free_offset += 1;
221 sc->sc_semoffset = sc->sc_free_offset;
222 sem_addr =
223 sc->sc_semoffset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
224 sc->sc_free_offset += 1;
225 /* then we have the scheduler ring */
226 sc->sc_shedoffset = sc->sc_free_offset;
227 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
228 /* then the targets DSA table */
229 sc->sc_target_table_offset = sc->sc_free_offset;
230 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
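	/*
	 * At this point the script memory (on-chip RAM or the script DMA
	 * page) is laid out roughly as follows, in 32-bit words:
	 *
	 *	[ esiop_script ]			the script itself
	 *	[ 1 word ]				msgin buffer
	 *	[ 1 word ]				semaphore
	 *	[ A_ncmd_slots * CMD_SLOTSIZE words ]	scheduler ring
	 *	[ chan_ntargets words ]			target DSA table
	 */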
231 /* copy and patch the script */
232 if (sc->sc_c.features & SF_CHIP_RAM) {
233 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
234 esiop_script,
235 __arraycount(esiop_script));
236 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
237 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
238 E_tlq_offset_Used[j] * 4,
239 sizeof(struct siop_common_xfer));
240 }
241 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
242 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
243 E_saved_offset_offset_Used[j] * 4,
244 sizeof(struct siop_common_xfer) + 4);
245 }
246 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
247 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
248 E_abs_msgin2_Used[j] * 4, msgin_addr);
249 }
250 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
251 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
252 E_abs_sem_Used[j] * 4, sem_addr);
253 }
254
255 if (sc->sc_c.features & SF_CHIP_LED0) {
256 bus_space_write_region_4(sc->sc_c.sc_ramt,
257 sc->sc_c.sc_ramh,
258 Ent_led_on1, esiop_led_on,
259 __arraycount(esiop_led_on));
260 bus_space_write_region_4(sc->sc_c.sc_ramt,
261 sc->sc_c.sc_ramh,
262 Ent_led_on2, esiop_led_on,
263 __arraycount(esiop_led_on));
264 bus_space_write_region_4(sc->sc_c.sc_ramt,
265 sc->sc_c.sc_ramh,
266 Ent_led_off, esiop_led_off,
267 __arraycount(esiop_led_off));
268 }
269 } else {
270 for (j = 0; j < __arraycount(esiop_script); j++) {
271 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
272 }
273 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
274 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
275 htole32(sizeof(struct siop_common_xfer));
276 }
277 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
278 sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
279 htole32(sizeof(struct siop_common_xfer) + 4);
280 }
281 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
282 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
283 htole32(msgin_addr);
284 }
285 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
286 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
287 htole32(sem_addr);
288 }
289
290 if (sc->sc_c.features & SF_CHIP_LED0) {
291 for (j = 0; j < __arraycount(esiop_led_on); j++)
292 sc->sc_c.sc_script[
293 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
294 ] = htole32(esiop_led_on[j]);
295 for (j = 0; j < __arraycount(esiop_led_on); j++)
296 sc->sc_c.sc_script[
297 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
298 ] = htole32(esiop_led_on[j]);
299 for (j = 0; j < __arraycount(esiop_led_off); j++)
300 sc->sc_c.sc_script[
301 Ent_led_off / sizeof(esiop_led_off[0]) + j
302 ] = htole32(esiop_led_off[j]);
303 }
304 }
305 /* get base of scheduler ring */
306 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(uint32_t);
307 /* init scheduler */
308 for (i = 0; i < A_ncmd_slots; i++) {
309 esiop_script_write(sc,
310 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
311 }
312 sc->sc_currschedslot = 0;
313 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
314 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
315 /*
316 * 0x78000000 is a 'move data8 to reg'. data8 is the second
317 * octet, reg offset is the third.
318 */
319 esiop_script_write(sc, Ent_cmdr0 / 4,
320 0x78640000 | ((addr & 0x000000ff) << 8));
321 esiop_script_write(sc, Ent_cmdr1 / 4,
322 0x78650000 | ((addr & 0x0000ff00) ));
323 esiop_script_write(sc, Ent_cmdr2 / 4,
324 0x78660000 | ((addr & 0x00ff0000) >> 8));
325 esiop_script_write(sc, Ent_cmdr3 / 4,
326 0x78670000 | ((addr & 0xff000000) >> 16));
327 /* done ring */
328 for (i = 0; i < A_ndone_slots; i++)
329 sc->sc_done_slot[i] = 0;
330 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
331 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
332 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
333 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
334 sc->sc_currdoneslot = 0;
335 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
336 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
337 esiop_script_write(sc, Ent_doner0 / 4,
338 0x786c0000 | ((addr & 0x000000ff) << 8));
339 esiop_script_write(sc, Ent_doner1 / 4,
340 0x786d0000 | ((addr & 0x0000ff00) ));
341 esiop_script_write(sc, Ent_doner2 / 4,
342 0x786e0000 | ((addr & 0x00ff0000) >> 8));
343 esiop_script_write(sc, Ent_doner3 / 4,
344 0x786f0000 | ((addr & 0xff000000) >> 16));
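	/*
	 * The eight writes above patch "move data8 to reg" instructions:
	 * opcode 0x78 in the top byte, the register offset in bits 16-23
	 * (0x64-0x67 and 0x6c-0x6f here, presumably the four bytes of
	 * SCRATCHD and SCRATCHF) and the data byte in bits 8-15.  When
	 * executed they reload the scheduler ring base into SCRATCHD and
	 * the done ring base into SCRATCHF, matching the initial values
	 * written to SIOP_SCRATCHD and SIOP_SCRATCHF above.
	 */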
345
346 /* set flags */
347 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
348 /* write pointer of base of target DSA table */
349 addr = (sc->sc_target_table_offset * sizeof(uint32_t)) +
350 sc->sc_c.sc_scriptaddr;
351 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
352 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
353 ((addr & 0x000000ff) << 8));
354 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
355 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
356 ((addr & 0x0000ff00) ));
357 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
358 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
359 ((addr & 0x00ff0000) >> 8));
360 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
361 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
362 ((addr & 0xff000000) >> 16));
363 #ifdef SIOP_DEBUG
364 printf("%s: target table offset %d free offset %d\n",
365 device_xname(sc->sc_c.sc_dev), sc->sc_target_table_offset,
366 sc->sc_free_offset);
367 #endif
368
369 /* register existing targets */
370 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
371 if (sc->sc_c.targets[i])
372 esiop_target_register(sc, i);
373 }
374 /* start script */
375 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
376 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
377 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
378 }
379 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
380 sc->sc_c.sc_scriptaddr + Ent_reselect);
381 }
382
383 #if 0
384 #define CALL_SCRIPT(ent) do { \
385 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
386 esiop_cmd->cmd_c.dsa, \
387 sc->sc_c.sc_scriptaddr + ent); \
388 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
389 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
390 } while (/* CONSTCOND */0)
391 #else
392 #define CALL_SCRIPT(ent) do { \
393 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
394 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
395 } while (/* CONSTCOND */0)
396 #endif
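/*
 * CALL_SCRIPT() (re)starts the SCRIPTS processor by writing the physical
 * address of the requested entry point (script base plus entry offset) to
 * the DSP register; the chip then fetches and executes from that address.
 */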
397
398 int
399 esiop_intr(void *v)
400 {
401 struct esiop_softc *sc = v;
402 struct esiop_target *esiop_target;
403 struct esiop_cmd *esiop_cmd;
404 struct esiop_lun *esiop_lun;
405 struct scsipi_xfer *xs;
406 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
407 uint32_t irqcode;
408 int need_reset = 0;
409 int offset, target, lun, tag;
410 uint32_t tflags;
411 uint32_t addr;
412 int freetarget = 0;
413 int slot;
414 int retval = 0;
415
416 again:
417 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
418 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
419 return retval;
420 }
421 retval = 1;
422 INCSTAT(esiop_stat_intr);
423 esiop_checkdone(sc);
424 if (istat & ISTAT_INTF) {
425 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
426 SIOP_ISTAT, ISTAT_INTF);
427 goto again;
428 }
429
430 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
431 (ISTAT_DIP | ISTAT_ABRT)) {
432 /* clear abort */
433 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
434 SIOP_ISTAT, 0);
435 }
436
437 /* get CMD from T/L/Q */
438 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
439 SIOP_SCRATCHC);
440 #ifdef SIOP_DEBUG_INTR
441 printf("interrupt, istat=0x%x tflags=0x%x "
442 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
443 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
444 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
445 SIOP_DSP) -
446 sc->sc_c.sc_scriptaddr));
447 #endif
448 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
449 if (target > sc->sc_c.sc_chan.chan_ntargets) target = -1;
450 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
451 if (lun > sc->sc_c.sc_chan.chan_nluns) lun = -1;
452 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
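	/*
	 * The T/L/Q word read from SCRATCHC carries flag bits (A_f_c_target,
	 * A_f_c_lun, A_f_c_tag) and, when the corresponding flag is set, the
	 * target id in bits 8-15, the lun in bits 16-23 and the tag in
	 * bits 24-31, as decoded above.
	 */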
453
454 if (target >= 0 && lun >= 0) {
455 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
456 if (esiop_target == NULL) {
457 printf("esiop_target (target %d) not valid\n", target);
458 goto none;
459 }
460 esiop_lun = esiop_target->esiop_lun[lun];
461 if (esiop_lun == NULL) {
462 printf("esiop_lun (target %d lun %d) not valid\n",
463 target, lun);
464 goto none;
465 }
466 esiop_cmd =
467 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
468 if (esiop_cmd == NULL) {
469 printf("esiop_cmd (target %d lun %d tag %d)"
470 " not valid\n",
471 target, lun, tag);
472 goto none;
473 }
474 xs = esiop_cmd->cmd_c.xs;
475 #ifdef DIAGNOSTIC
476 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
477 printf("esiop_cmd (target %d lun %d) "
478 "not active (%d)\n", target, lun,
479 esiop_cmd->cmd_c.status);
480 goto none;
481 }
482 #endif
483 esiop_table_sync(esiop_cmd,
484 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
485 } else {
486 none:
487 xs = NULL;
488 esiop_target = NULL;
489 esiop_lun = NULL;
490 esiop_cmd = NULL;
491 }
492 if (istat & ISTAT_DIP) {
493 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
494 SIOP_DSTAT);
495 if (dstat & DSTAT_ABRT) {
496 /* was probably generated by a bus reset IOCTL */
497 if ((dstat & DSTAT_DFE) == 0)
498 siop_clearfifo(&sc->sc_c);
499 goto reset;
500 }
501 if (dstat & DSTAT_SSI) {
502			printf("single step dsp 0x%08x dsa 0x%08x\n",
503 (int)(bus_space_read_4(sc->sc_c.sc_rt,
504 sc->sc_c.sc_rh, SIOP_DSP) -
505 sc->sc_c.sc_scriptaddr),
506 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
507 SIOP_DSA));
508 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
509 (istat & ISTAT_SIP) == 0) {
510 bus_space_write_1(sc->sc_c.sc_rt,
511 sc->sc_c.sc_rh, SIOP_DCNTL,
512 bus_space_read_1(sc->sc_c.sc_rt,
513 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
514 }
515 return 1;
516 }
517
518 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
519 printf("%s: DMA IRQ:", device_xname(sc->sc_c.sc_dev));
520 if (dstat & DSTAT_IID)
521 printf(" Illegal instruction");
522 if (dstat & DSTAT_BF)
523 printf(" bus fault");
524 if (dstat & DSTAT_MDPE)
525 printf(" parity");
526 if (dstat & DSTAT_DFE)
527 printf(" DMA fifo empty");
528 else
529 siop_clearfifo(&sc->sc_c);
530 printf(", DSP=0x%x DSA=0x%x: ",
531 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
532 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
533 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
534 if (esiop_cmd)
535 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
536 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
537 le32toh(esiop_cmd->cmd_tables->status));
538 else
539 printf(" current T/L/Q invalid\n");
540 need_reset = 1;
541 }
542 }
543 if (istat & ISTAT_SIP) {
544 if (istat & ISTAT_DIP)
545 delay(10);
546 /*
547		 * Can't read sist0 & sist1 independently, or we would have
548		 * to insert a delay between the two reads
549 */
550 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
551 SIOP_SIST0);
552 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
553 SIOP_SSTAT1);
554 #ifdef SIOP_DEBUG_INTR
555 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
556 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
557 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
558 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
559 SIOP_DSP) -
560 sc->sc_c.sc_scriptaddr));
561 #endif
562 if (sist & SIST0_RST) {
563 esiop_handle_reset(sc);
564 /* no table to flush here */
565 return 1;
566 }
567 if (sist & SIST0_SGE) {
568 if (esiop_cmd)
569 scsipi_printaddr(xs->xs_periph);
570 else
571 printf("%s:", device_xname(sc->sc_c.sc_dev));
572 printf("scsi gross error\n");
573 if (esiop_target)
574 esiop_target->target_c.flags &= ~TARF_DT;
575 #ifdef SIOP_DEBUG
576 printf("DSA=0x%x DSP=0x%lx\n",
577 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
578 SIOP_DSA),
579 (u_long)(bus_space_read_4(sc->sc_c.sc_rt,
580 sc->sc_c.sc_rh, SIOP_DSP) -
581 sc->sc_c.sc_scriptaddr));
582 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
583 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
584 SIOP_SDID),
585 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586 SIOP_SCNTL3),
587 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
588 SIOP_SXFER),
589 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
590 SIOP_SCNTL4));
591
592 #endif
593 goto reset;
594 }
595 if ((sist & SIST0_MA) && need_reset == 0) {
596 if (esiop_cmd) {
597 int scratchc0;
598 dstat = bus_space_read_1(sc->sc_c.sc_rt,
599 sc->sc_c.sc_rh, SIOP_DSTAT);
600 /*
601				 * first restore DSA, in case we were in an S/G
602 * operation.
603 */
604 bus_space_write_4(sc->sc_c.sc_rt,
605 sc->sc_c.sc_rh,
606 SIOP_DSA, esiop_cmd->cmd_c.dsa);
607 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
608 sc->sc_c.sc_rh, SIOP_SCRATCHC);
609 switch (sstat1 & SSTAT1_PHASE_MASK) {
610 case SSTAT1_PHASE_STATUS:
611 /*
612				 * The previous phase may be aborted for any reason
613				 * (for example, the target has less data to
614				 * transfer than requested). Compute the resid and
615				 * just go to status; the command should
616				 * terminate.
617 */
618 INCSTAT(esiop_stat_intr_shortxfer);
619 if (scratchc0 & A_f_c_data)
620 siop_ma(&esiop_cmd->cmd_c);
621 else if ((dstat & DSTAT_DFE) == 0)
622 siop_clearfifo(&sc->sc_c);
623 CALL_SCRIPT(Ent_status);
624 return 1;
625 case SSTAT1_PHASE_MSGIN:
626 /*
627				 * The target may be ready to disconnect.
628				 * Compute the resid, which will be used later
629				 * if a save data pointers message is received.
630 */
631 INCSTAT(esiop_stat_intr_xferdisc);
632 if (scratchc0 & A_f_c_data)
633 siop_ma(&esiop_cmd->cmd_c);
634 else if ((dstat & DSTAT_DFE) == 0)
635 siop_clearfifo(&sc->sc_c);
636 bus_space_write_1(sc->sc_c.sc_rt,
637 sc->sc_c.sc_rh, SIOP_SCRATCHC,
638 scratchc0 & ~A_f_c_data);
639 CALL_SCRIPT(Ent_msgin);
640 return 1;
641 }
642 aprint_error_dev(sc->sc_c.sc_dev,
643 "unexpected phase mismatch %d\n",
644 sstat1 & SSTAT1_PHASE_MASK);
645 } else {
646 aprint_error_dev(sc->sc_c.sc_dev,
647 "phase mismatch without command\n");
648 }
649 need_reset = 1;
650 }
651 if (sist & SIST0_PAR) {
652 /* parity error, reset */
653 if (esiop_cmd)
654 scsipi_printaddr(xs->xs_periph);
655 else
656 printf("%s:", device_xname(sc->sc_c.sc_dev));
657 printf("parity error\n");
658 if (esiop_target)
659 esiop_target->target_c.flags &= ~TARF_DT;
660 goto reset;
661 }
662 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
663 /*
664			 * Selection timeout: assume there's no device here.
665			 * We also have to update the ring pointer ourselves.
666 */
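			/*
			 * The recovery below mirrors what the script would
			 * normally do: mark the current slot free again,
			 * advance the slot index kept in SCRATCHE and the
			 * slot address kept in SCRATCHD, and wrap both back
			 * to the start of the ring after the last slot.
			 */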
667 slot = bus_space_read_1(sc->sc_c.sc_rt,
668 sc->sc_c.sc_rh, SIOP_SCRATCHE);
669 esiop_script_sync(sc,
670 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
671 #ifdef SIOP_DEBUG_SCHED
672 printf("sel timeout target %d, slot %d\n",
673 target, slot);
674 #endif
675 /*
676 * mark this slot as free, and advance to next slot
677 */
678 esiop_script_write(sc,
679 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
680 A_f_cmd_free);
681 addr = bus_space_read_4(sc->sc_c.sc_rt,
682 sc->sc_c.sc_rh, SIOP_SCRATCHD);
683 if (slot < (A_ncmd_slots - 1)) {
684 bus_space_write_1(sc->sc_c.sc_rt,
685 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
686 addr = addr + sizeof(struct esiop_slot);
687 } else {
688 bus_space_write_1(sc->sc_c.sc_rt,
689 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
690 addr = sc->sc_c.sc_scriptaddr +
691 sc->sc_shedoffset * sizeof(uint32_t);
692 }
693 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
694 SIOP_SCRATCHD, addr);
695 esiop_script_sync(sc,
696 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
697 if (esiop_cmd) {
698 esiop_cmd->cmd_c.status = CMDST_DONE;
699 xs->error = XS_SELTIMEOUT;
700 freetarget = 1;
701 goto end;
702 } else {
703 printf("%s: selection timeout without "
704 "command, target %d (sdid 0x%x), "
705 "slot %d\n",
706 device_xname(sc->sc_c.sc_dev), target,
707 bus_space_read_1(sc->sc_c.sc_rt,
708 sc->sc_c.sc_rh, SIOP_SDID), slot);
709 need_reset = 1;
710 }
711 }
712 if (sist & SIST0_UDC) {
713 /*
714 * unexpected disconnect. Usually the target signals
715 * a fatal condition this way. Attempt to get sense.
716 */
717 if (esiop_cmd) {
718 esiop_cmd->cmd_tables->status =
719 htole32(SCSI_CHECK);
720 goto end;
721 }
722 aprint_error_dev(sc->sc_c.sc_dev,
723 "unexpected disconnect without command\n");
724 goto reset;
725 }
726 if (sist & (SIST1_SBMC << 8)) {
727 /* SCSI bus mode change */
728 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
729 goto reset;
730 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
731 /*
732 * we have a script interrupt, it will
733 * restart the script.
734 */
735 goto scintr;
736 }
737 /*
738			 * else we have to restart it ourselves, at the
739 * interrupted instruction.
740 */
741 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
742 SIOP_DSP,
743 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
744 SIOP_DSP) - 8);
745 return 1;
746 }
747 /* Else it's an unhandled exception (for now). */
748 aprint_error_dev(sc->sc_c.sc_dev,
749 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
750 "DSA=0x%x DSP=0x%x\n", sist,
751 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
752 SIOP_SSTAT1),
753 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
754 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
755 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
756 if (esiop_cmd) {
757 esiop_cmd->cmd_c.status = CMDST_DONE;
758 xs->error = XS_SELTIMEOUT;
759 goto end;
760 }
761 need_reset = 1;
762 }
763 if (need_reset) {
764 reset:
765 /* fatal error, reset the bus */
766 siop_resetbus(&sc->sc_c);
767 /* no table to flush here */
768 return 1;
769 }
770
771 scintr:
772 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
773 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
774 SIOP_DSPS);
775 #ifdef SIOP_DEBUG_INTR
776 printf("script interrupt 0x%x\n", irqcode);
777 #endif
778 /*
779		 * a missing command, or an inactive command, is only valid
780		 * for a reselect interrupt
781 */
782 if ((irqcode & 0x80) == 0) {
783 if (esiop_cmd == NULL) {
784 aprint_error_dev(sc->sc_c.sc_dev,
785 "script interrupt (0x%x) with invalid DSA !!!\n",
786 irqcode);
787 goto reset;
788 }
789 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
790 aprint_error_dev(sc->sc_c.sc_dev,
791 "command with invalid status "
792 "(IRQ code 0x%x current status %d) !\n",
793 irqcode, esiop_cmd->cmd_c.status);
794 xs = NULL;
795 }
796 }
797		switch (irqcode) {
798 case A_int_err:
799 printf("error, DSP=0x%x\n",
800 (int)(bus_space_read_4(sc->sc_c.sc_rt,
801 sc->sc_c.sc_rh, SIOP_DSP) -
802 sc->sc_c.sc_scriptaddr));
803 if (xs) {
804 xs->error = XS_SELTIMEOUT;
805 goto end;
806 } else {
807 goto reset;
808 }
809 case A_int_msgin:
810 {
811 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
812 sc->sc_c.sc_rh, SIOP_SFBR);
813 if (msgin == MSG_MESSAGE_REJECT) {
814 int msg, extmsg;
815 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
816 /*
817					 * message was part of an identify +
818 * something else. Identify shouldn't
819 * have been rejected.
820 */
821 msg =
822 esiop_cmd->cmd_tables->msg_out[1];
823 extmsg =
824 esiop_cmd->cmd_tables->msg_out[3];
825 } else {
826 msg =
827 esiop_cmd->cmd_tables->msg_out[0];
828 extmsg =
829 esiop_cmd->cmd_tables->msg_out[2];
830 }
831 if (msg == MSG_MESSAGE_REJECT) {
832 /* MSG_REJECT for a MSG_REJECT !*/
833 if (xs)
834 scsipi_printaddr(xs->xs_periph);
835 else
836 printf("%s: ", device_xname(
837 sc->sc_c.sc_dev));
838 printf("our reject message was "
839 "rejected\n");
840 goto reset;
841 }
842 if (msg == MSG_EXTENDED &&
843 extmsg == MSG_EXT_WDTR) {
844 /* WDTR rejected, initiate sync */
845 if ((esiop_target->target_c.flags &
846 TARF_SYNC) == 0) {
847 esiop_target->target_c.status =
848 TARST_OK;
849 siop_update_xfer_mode(&sc->sc_c,
850 target);
851 /* no table to flush here */
852 CALL_SCRIPT(Ent_msgin_ack);
853 return 1;
854 }
855 esiop_target->target_c.status =
856 TARST_SYNC_NEG;
857 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
858 sc->sc_c.st_minsync,
859 sc->sc_c.maxoff);
860 esiop_table_sync(esiop_cmd,
861 BUS_DMASYNC_PREREAD |
862 BUS_DMASYNC_PREWRITE);
863 CALL_SCRIPT(Ent_send_msgout);
864 return 1;
865 } else if (msg == MSG_EXTENDED &&
866 extmsg == MSG_EXT_SDTR) {
867 /* sync rejected */
868 esiop_target->target_c.offset = 0;
869 esiop_target->target_c.period = 0;
870 esiop_target->target_c.status =
871 TARST_OK;
872 siop_update_xfer_mode(&sc->sc_c,
873 target);
874 /* no table to flush here */
875 CALL_SCRIPT(Ent_msgin_ack);
876 return 1;
877 } else if (msg == MSG_EXTENDED &&
878 extmsg == MSG_EXT_PPR) {
879 /* PPR rejected */
880 esiop_target->target_c.offset = 0;
881 esiop_target->target_c.period = 0;
882 esiop_target->target_c.status =
883 TARST_OK;
884 siop_update_xfer_mode(&sc->sc_c,
885 target);
886 /* no table to flush here */
887 CALL_SCRIPT(Ent_msgin_ack);
888 return 1;
889 } else if (msg == MSG_SIMPLE_Q_TAG ||
890 msg == MSG_HEAD_OF_Q_TAG ||
891 msg == MSG_ORDERED_Q_TAG) {
892 if (esiop_handle_qtag_reject(
893 esiop_cmd) == -1)
894 goto reset;
895 CALL_SCRIPT(Ent_msgin_ack);
896 return 1;
897 }
898 if (xs)
899 scsipi_printaddr(xs->xs_periph);
900 else
901 printf("%s: ",
902 device_xname(sc->sc_c.sc_dev));
903 if (msg == MSG_EXTENDED) {
904 printf("scsi message reject, extended "
905 "message sent was 0x%x\n", extmsg);
906 } else {
907 printf("scsi message reject, message "
908 "sent was 0x%x\n", msg);
909 }
910 /* no table to flush here */
911 CALL_SCRIPT(Ent_msgin_ack);
912 return 1;
913 }
914 if (msgin == MSG_IGN_WIDE_RESIDUE) {
915 /* use the extmsgdata table to get the second byte */
916 esiop_cmd->cmd_tables->t_extmsgdata.count =
917 htole32(1);
918 esiop_table_sync(esiop_cmd,
919 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
920 CALL_SCRIPT(Ent_get_extmsgdata);
921 return 1;
922 }
923 if (xs)
924 scsipi_printaddr(xs->xs_periph);
925 else
926 printf("%s: ", device_xname(sc->sc_c.sc_dev));
927 printf("unhandled message 0x%x\n", msgin);
928 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
929				esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
930 esiop_table_sync(esiop_cmd,
931 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
932 CALL_SCRIPT(Ent_send_msgout);
933 return 1;
934 }
935 case A_int_extmsgin:
936 #ifdef SIOP_DEBUG_INTR
937 printf("extended message: msg 0x%x len %d\n",
938 esiop_cmd->cmd_tables->msg_in[2],
939 esiop_cmd->cmd_tables->msg_in[1]);
940 #endif
941 if (esiop_cmd->cmd_tables->msg_in[1] >
942 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
943 aprint_error_dev(sc->sc_c.sc_dev,
944 "extended message too big (%d)\n",
945 esiop_cmd->cmd_tables->msg_in[1]);
946 esiop_cmd->cmd_tables->t_extmsgdata.count =
947 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
948 esiop_table_sync(esiop_cmd,
949 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
950 CALL_SCRIPT(Ent_get_extmsgdata);
951 return 1;
952 case A_int_extmsgdata:
953 #ifdef SIOP_DEBUG_INTR
954 {
955 int i;
956 printf("extended message: 0x%x, data:",
957 esiop_cmd->cmd_tables->msg_in[2]);
958 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
959 i++)
960 printf(" 0x%x",
961 esiop_cmd->cmd_tables->msg_in[i]);
962 printf("\n");
963 }
964 #endif
965 if (esiop_cmd->cmd_tables->msg_in[0] ==
966 MSG_IGN_WIDE_RESIDUE) {
967 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
968 if (esiop_cmd->cmd_tables->msg_in[3] != 1)
969 printf("MSG_IGN_WIDE_RESIDUE: "
970 "bad len %d\n",
971 esiop_cmd->cmd_tables->msg_in[3]);
972 switch (siop_iwr(&esiop_cmd->cmd_c)) {
973 case SIOP_NEG_MSGOUT:
974 esiop_table_sync(esiop_cmd,
975 BUS_DMASYNC_PREREAD |
976 BUS_DMASYNC_PREWRITE);
977 CALL_SCRIPT(Ent_send_msgout);
978 return 1;
979 case SIOP_NEG_ACK:
980 CALL_SCRIPT(Ent_msgin_ack);
981 return 1;
982 default:
983 panic("invalid retval from "
984 "siop_iwr()");
985 }
986 return 1;
987 }
988 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
989 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
990 case SIOP_NEG_MSGOUT:
991 esiop_update_scntl3(sc,
992 esiop_cmd->cmd_c.siop_target);
993 esiop_table_sync(esiop_cmd,
994 BUS_DMASYNC_PREREAD |
995 BUS_DMASYNC_PREWRITE);
996 CALL_SCRIPT(Ent_send_msgout);
997 return 1;
998 case SIOP_NEG_ACK:
999 esiop_update_scntl3(sc,
1000 esiop_cmd->cmd_c.siop_target);
1001 CALL_SCRIPT(Ent_msgin_ack);
1002 return 1;
1003 default:
1004 panic("invalid retval from "
1005					    "siop_ppr_neg()");
1006 }
1007 return 1;
1008 }
1009 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1010 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1011 case SIOP_NEG_MSGOUT:
1012 esiop_update_scntl3(sc,
1013 esiop_cmd->cmd_c.siop_target);
1014 esiop_table_sync(esiop_cmd,
1015 BUS_DMASYNC_PREREAD |
1016 BUS_DMASYNC_PREWRITE);
1017 CALL_SCRIPT(Ent_send_msgout);
1018 return 1;
1019 case SIOP_NEG_ACK:
1020 esiop_update_scntl3(sc,
1021 esiop_cmd->cmd_c.siop_target);
1022 CALL_SCRIPT(Ent_msgin_ack);
1023 return 1;
1024 default:
1025 panic("invalid retval from "
1026 "siop_wdtr_neg()");
1027 }
1028 return 1;
1029 }
1030 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1031 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1032 case SIOP_NEG_MSGOUT:
1033 esiop_update_scntl3(sc,
1034 esiop_cmd->cmd_c.siop_target);
1035 esiop_table_sync(esiop_cmd,
1036 BUS_DMASYNC_PREREAD |
1037 BUS_DMASYNC_PREWRITE);
1038 CALL_SCRIPT(Ent_send_msgout);
1039 return 1;
1040 case SIOP_NEG_ACK:
1041 esiop_update_scntl3(sc,
1042 esiop_cmd->cmd_c.siop_target);
1043 CALL_SCRIPT(Ent_msgin_ack);
1044 return 1;
1045 default:
1046 panic("invalid retval from "
1047					    "siop_sdtr_neg()");
1048 }
1049 return 1;
1050 }
1051 /* send a message reject */
1052 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1053 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1054 esiop_table_sync(esiop_cmd,
1055 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1056 CALL_SCRIPT(Ent_send_msgout);
1057 return 1;
1058 case A_int_disc:
1059 INCSTAT(esiop_stat_intr_sdp);
1060 offset = bus_space_read_1(sc->sc_c.sc_rt,
1061 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1062 #ifdef SIOP_DEBUG_DR
1063 printf("disconnect offset %d\n", offset);
1064 #endif
1065 siop_sdp(&esiop_cmd->cmd_c, offset);
1066 /* we start again with no offset */
1067 ESIOP_XFER(esiop_cmd, saved_offset) =
1068 htole32(SIOP_NOOFFSET);
1069 esiop_table_sync(esiop_cmd,
1070 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1071 CALL_SCRIPT(Ent_script_sched);
1072 return 1;
1073 case A_int_resfail:
1074 printf("reselect failed\n");
1075 CALL_SCRIPT(Ent_script_sched);
1076 return 1;
1077 case A_int_done:
1078 if (xs == NULL) {
1079 printf("%s: done without command\n",
1080 device_xname(sc->sc_c.sc_dev));
1081 CALL_SCRIPT(Ent_script_sched);
1082 return 1;
1083 }
1084 #ifdef SIOP_DEBUG_INTR
1085 printf("done, DSA=0x%lx target id 0x%x last msg "
1086 "in=0x%x status=0x%x\n",
1087 (u_long)esiop_cmd->cmd_c.dsa,
1088 le32toh(esiop_cmd->cmd_tables->id),
1089 esiop_cmd->cmd_tables->msg_in[0],
1090 le32toh(esiop_cmd->cmd_tables->status));
1091 #endif
1092 INCSTAT(esiop_stat_intr_done);
1093 esiop_cmd->cmd_c.status = CMDST_DONE;
1094 goto end;
1095 default:
1096 printf("unknown irqcode %x\n", irqcode);
1097 if (xs) {
1098 xs->error = XS_SELTIMEOUT;
1099 goto end;
1100 }
1101 goto reset;
1102 }
1103 return 1;
1104 }
1105	/* We just shouldn't get here */
1106	panic("esiop_intr: I shouldn't be there !");
1107
1108 end:
1109 /*
1110	 * Restart the script now if the command completed properly.
1111	 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up
1112	 * the queue
1113 */
1114 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1115 #ifdef SIOP_DEBUG_INTR
1116 printf("esiop_intr end: status %d\n", xs->status);
1117 #endif
1118 if (tag >= 0)
1119 esiop_lun->tactive[tag] = NULL;
1120 else
1121 esiop_lun->active = NULL;
1122 offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1123 SIOP_SCRATCHA + 1);
1124 /*
1125 * if we got a disconnect between the last data phase
1126 * and the status phase, offset will be 0. In this
1127 * case, cmd_tables->saved_offset will have the proper value
1128 * if it got updated by the controller
1129 */
1130 if (offset == 0 &&
1131 ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1132 offset =
1133 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
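	/*
	 * saved_offset, when valid, holds a copy of the SCRATCHA register,
	 * so the S/G offset is taken from bits 8-15, just as it is read
	 * from SCRATCHA + 1 above.
	 */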
1134
1135 esiop_scsicmd_end(esiop_cmd, offset);
1136 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1137 esiop_del_dev(sc, target, lun);
1138 CALL_SCRIPT(Ent_script_sched);
1139 return 1;
1140 }
1141
1142 void
1143 esiop_scsicmd_end(struct esiop_cmd *esiop_cmd, int offset)
1144 {
1145 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1146 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1147
1148 siop_update_resid(&esiop_cmd->cmd_c, offset);
1149
1150	switch (xs->status) {
1151 case SCSI_OK:
1152 xs->error = XS_NOERROR;
1153 break;
1154 case SCSI_BUSY:
1155 xs->error = XS_BUSY;
1156 break;
1157 case SCSI_CHECK:
1158 xs->error = XS_BUSY;
1159 /* remove commands in the queue and scheduler */
1160 esiop_unqueue(sc, xs->xs_periph->periph_target,
1161 xs->xs_periph->periph_lun);
1162 break;
1163 case SCSI_QUEUE_FULL:
1164 INCSTAT(esiop_stat_intr_qfull);
1165 #ifdef SIOP_DEBUG
1166 printf("%s:%d:%d: queue full (tag %d)\n",
1167 device_xname(sc->sc_c.sc_dev),
1168 xs->xs_periph->periph_target,
1169 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1170 #endif
1171 xs->error = XS_BUSY;
1172 break;
1173 case SCSI_SIOP_NOCHECK:
1174 /*
1175 * don't check status, xs->error is already valid
1176 */
1177 break;
1178 case SCSI_SIOP_NOSTATUS:
1179 /*
1180 * the status byte was not updated, cmd was
1181 * aborted
1182 */
1183 xs->error = XS_SELTIMEOUT;
1184 break;
1185 default:
1186 scsipi_printaddr(xs->xs_periph);
1187 printf("invalid status code %d\n", xs->status);
1188 xs->error = XS_DRIVER_STUFFUP;
1189 }
1190 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1191 bus_dmamap_sync(sc->sc_c.sc_dmat,
1192 esiop_cmd->cmd_c.dmamap_data, 0,
1193 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1194 (xs->xs_control & XS_CTL_DATA_IN) ?
1195 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1196 bus_dmamap_unload(sc->sc_c.sc_dmat,
1197 esiop_cmd->cmd_c.dmamap_data);
1198 }
1199 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1200 if ((xs->xs_control & XS_CTL_POLL) == 0)
1201 callout_stop(&xs->xs_callout);
1202 esiop_cmd->cmd_c.status = CMDST_FREE;
1203 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1204 #if 0
1205 if (xs->resid != 0)
1206 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1207 #endif
1208 scsipi_done (xs);
1209 }
1210
1211 void
1212 esiop_checkdone(struct esiop_softc *sc)
1213 {
1214 int target, lun, tag;
1215 struct esiop_target *esiop_target;
1216 struct esiop_lun *esiop_lun;
1217 struct esiop_cmd *esiop_cmd;
1218 uint32_t slot;
1219 int needsync = 0;
1220 int status;
1221 uint32_t sem, offset;
1222
1223 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1224 sem = esiop_script_read(sc, sc->sc_semoffset);
1225 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1226 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1227 /*
1228		 * at least one command has been started,
1229 * so we should have free slots now
1230 */
1231 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1232 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1233 }
1234 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1235
1236 if ((sem & A_sem_done) == 0) {
1237 /* no pending done command */
1238 return;
1239 }
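	/*
	 * The semaphore word is shared with the script: A_sem_done is set
	 * when the script has queued a completed command in the done ring,
	 * and A_sem_start when it has started a command from the scheduler
	 * ring.  A_sem_done was cleared above; now walk the done ring until
	 * an empty (zero) slot is found.
	 */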
1240
1241 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1242 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
1243 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1244 next:
1245 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1246 if (needsync)
1247 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1248 sc->sc_done_offset,
1249 A_ndone_slots * sizeof(uint32_t),
1250 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1251 return;
1252 }
1253
1254 needsync = 1;
1255
1256 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1257 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1258 sc->sc_currdoneslot += 1;
1259 if (sc->sc_currdoneslot == A_ndone_slots)
1260 sc->sc_currdoneslot = 0;
1261
1262 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1263 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1264 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1265
1266 esiop_target = (target >= 0) ?
1267 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1268 if (esiop_target == NULL) {
1269 printf("esiop_target (target %d) not valid\n", target);
1270 goto next;
1271 }
1272 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1273 if (esiop_lun == NULL) {
1274 printf("esiop_lun (target %d lun %d) not valid\n",
1275 target, lun);
1276 goto next;
1277 }
1278 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1279 if (esiop_cmd == NULL) {
1280 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1281 target, lun, tag);
1282 goto next;
1283 }
1284
1285 esiop_table_sync(esiop_cmd,
1286 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1287 status = le32toh(esiop_cmd->cmd_tables->status);
1288 #ifdef DIAGNOSTIC
1289 if (status != SCSI_OK) {
1290 printf("command for T/L/Q %d/%d/%d status %d\n",
1291 target, lun, tag, status);
1292 goto next;
1293 }
1294
1295 #endif
1296 /* Ok, this command has been handled */
1297 esiop_cmd->cmd_c.xs->status = status;
1298 if (tag >= 0)
1299 esiop_lun->tactive[tag] = NULL;
1300 else
1301 esiop_lun->active = NULL;
1302 /*
1303	 * SCRATCHA may have been saved in saved_offset by the script;
1304	 * fetch the offset from it
1305 */
1306 offset = 0;
1307 if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1308 offset =
1309 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1310 esiop_scsicmd_end(esiop_cmd, offset);
1311 goto next;
1312 }
1313
1314 void
1315 esiop_unqueue(struct esiop_softc *sc, int target, int lun)
1316 {
1317 int slot, tag;
1318 uint32_t slotdsa;
1319 struct esiop_cmd *esiop_cmd;
1320 struct esiop_lun *esiop_lun =
1321 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1322
1323 /* first make sure to read valid data */
1324 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1325
1326 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1327 /* look for commands in the scheduler, not yet started */
1328 if (esiop_lun->tactive[tag] == NULL)
1329 continue;
1330 esiop_cmd = esiop_lun->tactive[tag];
1331 for (slot = 0; slot < A_ncmd_slots; slot++) {
1332 slotdsa = esiop_script_read(sc,
1333 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1334 /* if the slot has any flag, it won't match the DSA */
1335 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1336 /* Mark this slot as ignore */
1337 esiop_script_write(sc,
1338 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1339 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1340 /* ask to requeue */
1341 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1342 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1343 esiop_lun->tactive[tag] = NULL;
1344 esiop_scsicmd_end(esiop_cmd, 0);
1345 break;
1346 }
1347 }
1348 }
1349 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1350 }
1351
1352 /*
1353	 * Handle a rejected queue tag message: the command will run untagged,
1354	 * so we have to adjust the reselect script (DSA table).
1355 */
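/*
 * The fix-up below clears the command's entry in the tag table, makes it
 * the lun's untagged active command, and rewrites the per-lun entry of the
 * target DSA table so that a reselect finds the command's DSA directly.
 */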
1356
1357
1358 int
1359 esiop_handle_qtag_reject(struct esiop_cmd *esiop_cmd)
1360 {
1361 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1362 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1363 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1364 int tag = esiop_cmd->cmd_tables->msg_out[2];
1365 struct esiop_target *esiop_target =
1366 (struct esiop_target*)sc->sc_c.targets[target];
1367 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1368
1369 #ifdef SIOP_DEBUG
1370 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1371 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1372 esiop_cmd->cmd_c.tag, esiop_cmd->cmd_c.status);
1373 #endif
1374
1375 if (esiop_lun->active != NULL) {
1376 aprint_error_dev(sc->sc_c.sc_dev,
1377 "untagged command already running for target %d "
1378 "lun %d (status %d)\n",
1379 target, lun, esiop_lun->active->cmd_c.status);
1380 return -1;
1381 }
1382 /* clear tag slot */
1383 esiop_lun->tactive[tag] = NULL;
1384 /* add command to non-tagged slot */
1385 esiop_lun->active = esiop_cmd;
1386 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1387 esiop_cmd->cmd_c.tag = -1;
1388 /* update DSA table */
1389 esiop_script_write(sc, esiop_target->lun_table_offset +
1390 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1391 esiop_cmd->cmd_c.dsa);
1392 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1393 return 0;
1394 }
1395
1396 /*
1397	 * Handle a bus reset: reset the chip, unqueue all active commands, free all
1398	 * target structs and report lossage to the upper layer.
1399	 * As the upper layer may requeue immediately, we have to first store
1400	 * all active commands in a temporary queue.
1401 */
1402 void
1403 esiop_handle_reset(struct esiop_softc *sc)
1404 {
1405 struct esiop_cmd *esiop_cmd;
1406 struct esiop_lun *esiop_lun;
1407 int target, lun, tag;
1408 /*
1409 * scsi bus reset. reset the chip and restart
1410 * the queue. Need to clean up all active commands
1411 */
1412 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1413 /* stop, reset and restart the chip */
1414 esiop_reset(sc);
1415
1416 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1417 /* chip has been reset, all slots are free now */
1418 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1419 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1420 }
1421 /*
1422	 * Process all commands: first completed commands, then commands
1423 * being executed
1424 */
1425 esiop_checkdone(sc);
1426 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1427 struct esiop_target *esiop_target =
1428 (struct esiop_target *)sc->sc_c.targets[target];
1429 if (esiop_target == NULL)
1430 continue;
1431 for (lun = 0; lun < 8; lun++) {
1432 esiop_lun = esiop_target->esiop_lun[lun];
1433 if (esiop_lun == NULL)
1434 continue;
1435 for (tag = -1; tag <
1436 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1437 ESIOP_NTAG : 0);
1438 tag++) {
1439 if (tag >= 0)
1440 esiop_cmd = esiop_lun->tactive[tag];
1441 else
1442 esiop_cmd = esiop_lun->active;
1443 if (esiop_cmd == NULL)
1444 continue;
1445 scsipi_printaddr(
1446 esiop_cmd->cmd_c.xs->xs_periph);
1447 printf("command with tag id %d reset\n", tag);
1448 esiop_cmd->cmd_c.xs->error =
1449 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1450 XS_TIMEOUT : XS_RESET;
1451 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1452 if (tag >= 0)
1453 esiop_lun->tactive[tag] = NULL;
1454 else
1455 esiop_lun->active = NULL;
1456 esiop_cmd->cmd_c.status = CMDST_DONE;
1457 esiop_scsicmd_end(esiop_cmd, 0);
1458 }
1459 }
1460 sc->sc_c.targets[target]->status = TARST_ASYNC;
1461 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1462 sc->sc_c.targets[target]->period =
1463 sc->sc_c.targets[target]->offset = 0;
1464 siop_update_xfer_mode(&sc->sc_c, target);
1465 }
1466
1467 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1468 }
1469
1470 void
1471 esiop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1472 void *arg)
1473 {
1474 struct scsipi_xfer *xs;
1475 struct scsipi_periph *periph;
1476 struct esiop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1477 struct esiop_cmd *esiop_cmd;
1478 struct esiop_target *esiop_target;
1479 int s, error, i;
1480 int target;
1481 int lun;
1482
1483 switch (req) {
1484 case ADAPTER_REQ_RUN_XFER:
1485 xs = arg;
1486 periph = xs->xs_periph;
1487 target = periph->periph_target;
1488 lun = periph->periph_lun;
1489
1490 s = splbio();
1491 /*
1492		 * First check if there are pending completed commands;
1493		 * this can free us some resources (in the rings, for example).
1494		 * We have to guard against recursion here.
1495 */
1496 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1497 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1498 esiop_checkdone(sc);
1499 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1500 }
1501 #ifdef SIOP_DEBUG_SCHED
1502 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1503 xs->xs_tag_type, xs->xs_tag_id);
1504 #endif
1505 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1506 if (esiop_cmd == NULL) {
1507 xs->error = XS_RESOURCE_SHORTAGE;
1508 scsipi_done(xs);
1509 splx(s);
1510 return;
1511 }
1512 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1513 #ifdef DIAGNOSTIC
1514 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1515 panic("siop_scsicmd: new cmd not free");
1516 #endif
1517 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1518 if (esiop_target == NULL) {
1519 #ifdef SIOP_DEBUG
1520 printf("%s: alloc siop_target for target %d\n",
1521 device_xname(sc->sc_c.sc_dev), target);
1522 #endif
1523 sc->sc_c.targets[target] =
1524 malloc(sizeof(struct esiop_target),
1525 M_DEVBUF, M_NOWAIT | M_ZERO);
1526 if (sc->sc_c.targets[target] == NULL) {
1527 aprint_error_dev(sc->sc_c.sc_dev,
1528 "can't malloc memory for "
1529 "target %d\n",
1530 target);
1531 xs->error = XS_RESOURCE_SHORTAGE;
1532 scsipi_done(xs);
1533 TAILQ_INSERT_TAIL(&sc->free_list,
1534 esiop_cmd, next);
1535 splx(s);
1536 return;
1537 }
1538 esiop_target =
1539 (struct esiop_target*)sc->sc_c.targets[target];
1540 esiop_target->target_c.status = TARST_PROBING;
1541 esiop_target->target_c.flags = 0;
1542 esiop_target->target_c.id =
1543 sc->sc_c.clock_div << 24; /* scntl3 */
1544 esiop_target->target_c.id |= target << 16; /* id */
1545 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1546
1547 for (i=0; i < 8; i++)
1548 esiop_target->esiop_lun[i] = NULL;
1549 esiop_target_register(sc, target);
1550 }
1551 if (esiop_target->esiop_lun[lun] == NULL) {
1552 esiop_target->esiop_lun[lun] =
1553 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1554 M_NOWAIT|M_ZERO);
1555 if (esiop_target->esiop_lun[lun] == NULL) {
1556 aprint_error_dev(sc->sc_c.sc_dev,
1557 "can't alloc esiop_lun for "
1558 "target %d lun %d\n",
1559 target, lun);
1560 xs->error = XS_RESOURCE_SHORTAGE;
1561 scsipi_done(xs);
1562 TAILQ_INSERT_TAIL(&sc->free_list,
1563 esiop_cmd, next);
1564 splx(s);
1565 return;
1566 }
1567 }
1568 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1569 esiop_cmd->cmd_c.xs = xs;
1570 esiop_cmd->cmd_c.flags = 0;
1571 esiop_cmd->cmd_c.status = CMDST_READY;
1572
1573 /* load the DMA maps */
1574 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1575 esiop_cmd->cmd_c.dmamap_cmd,
1576 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1577 if (error) {
1578 aprint_error_dev(sc->sc_c.sc_dev,
1579 "unable to load cmd DMA map: %d\n",
1580 error);
1581 xs->error = (error == EAGAIN) ?
1582 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1583 scsipi_done(xs);
1584 esiop_cmd->cmd_c.status = CMDST_FREE;
1585 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1586 splx(s);
1587 return;
1588 }
1589 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1590 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1591 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1592 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1593 ((xs->xs_control & XS_CTL_DATA_IN) ?
1594 BUS_DMA_READ : BUS_DMA_WRITE));
1595 if (error) {
1596 aprint_error_dev(sc->sc_c.sc_dev,
1597 "unable to load data DMA map: %d\n",
1598 error);
1599 xs->error = (error == EAGAIN) ?
1600 XS_RESOURCE_SHORTAGE : XS_DRIVER_STUFFUP;
1601 scsipi_done(xs);
1602 bus_dmamap_unload(sc->sc_c.sc_dmat,
1603 esiop_cmd->cmd_c.dmamap_cmd);
1604 esiop_cmd->cmd_c.status = CMDST_FREE;
1605 TAILQ_INSERT_TAIL(&sc->free_list,
1606 esiop_cmd, next);
1607 splx(s);
1608 return;
1609 }
1610 bus_dmamap_sync(sc->sc_c.sc_dmat,
1611 esiop_cmd->cmd_c.dmamap_data, 0,
1612 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1613 (xs->xs_control & XS_CTL_DATA_IN) ?
1614 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1615 }
1616 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1617 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1618 BUS_DMASYNC_PREWRITE);
1619
1620 if (xs->xs_tag_type)
1621 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1622 else
1623 esiop_cmd->cmd_c.tag = -1;
1624 siop_setuptables(&esiop_cmd->cmd_c);
1625 ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
1626 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1627 ESIOP_XFER(esiop_cmd, tlq) |=
1628 htole32((target << 8) | (lun << 16));
1629 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1630 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1631 ESIOP_XFER(esiop_cmd, tlq) |=
1632 htole32(esiop_cmd->cmd_c.tag << 24);
1633 }
1634
1635 esiop_table_sync(esiop_cmd,
1636 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1637 esiop_start(sc, esiop_cmd);
1638 if (xs->xs_control & XS_CTL_POLL) {
1639 /* poll for command completion */
1640 while ((xs->xs_status & XS_STS_DONE) == 0) {
1641 delay(1000);
1642 esiop_intr(sc);
1643 }
1644 }
1645 splx(s);
1646 return;
1647
1648 case ADAPTER_REQ_GROW_RESOURCES:
1649 #ifdef SIOP_DEBUG
1650 printf("%s grow resources (%d)\n",
1651 device_xname(sc->sc_c.sc_dev),
1652 sc->sc_c.sc_adapt.adapt_openings);
1653 #endif
1654 esiop_morecbd(sc);
1655 return;
1656
1657 case ADAPTER_REQ_SET_XFER_MODE:
1658 {
1659 struct scsipi_xfer_mode *xm = arg;
1660 if (sc->sc_c.targets[xm->xm_target] == NULL)
1661 return;
1662 s = splbio();
1663 if (xm->xm_mode & PERIPH_CAP_TQING) {
1664 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1665 /* allocate tag tables for this device */
1666 for (lun = 0;
1667 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1668 if (scsipi_lookup_periph(chan,
1669 xm->xm_target, lun) != NULL)
1670 esiop_add_dev(sc, xm->xm_target, lun);
1671 }
1672 }
1673 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1674 (sc->sc_c.features & SF_BUS_WIDE))
1675 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1676 if (xm->xm_mode & PERIPH_CAP_SYNC)
1677 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1678 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1679 (sc->sc_c.features & SF_CHIP_DT))
1680 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1681 if ((xm->xm_mode &
1682 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1683 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1684 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1685
1686 splx(s);
1687 }
1688 }
1689 }
1690
1691 static void
1692 esiop_start(struct esiop_softc *sc, struct esiop_cmd *esiop_cmd)
1693 {
1694 struct esiop_lun *esiop_lun;
1695 struct esiop_target *esiop_target;
1696 int timeout;
1697 int target, lun, slot;
1698
1699 /*
1700 * first make sure to read valid data
1701 */
1702 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1703
1704 /*
1705 * We use a circular queue here. sc->sc_currschedslot points to a
1706 * free slot, unless we have filled the queue. Check this.
1707 */
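	/*
	 * A free slot contains A_f_cmd_free; a busy one holds the DSA of the
	 * queued command.  To start a command we store its DSA in the current
	 * slot, sync the script, raise ISTAT_SIGP so the script looks at the
	 * ring, and advance sc_currschedslot modulo A_ncmd_slots.
	 */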
1708 slot = sc->sc_currschedslot;
1709 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1710 A_f_cmd_free) == 0) {
1711 /*
1712		 * no more free slots, no need to continue. Freeze the queue
1713 * and requeue this command.
1714 */
1715 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1716 sc->sc_flags |= SCF_CHAN_NOSLOT;
1717 esiop_script_write(sc, sc->sc_semoffset,
1718 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1719 esiop_script_sync(sc,
1720 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1721 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1722 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1723 esiop_scsicmd_end(esiop_cmd, 0);
1724 return;
1725 }
1726 /* OK, we can use this slot */
1727
1728 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1729 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1730 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1731 esiop_lun = esiop_target->esiop_lun[lun];
1732 /* if non-tagged command active, panic: this shouldn't happen */
1733 if (esiop_lun->active != NULL) {
1734 panic("esiop_start: tagged cmd while untagged running");
1735 }
1736 #ifdef DIAGNOSTIC
1737 /* sanity check the tag if needed */
1738 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1739 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1740 esiop_cmd->cmd_c.tag < 0) {
1741 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1742 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1743 panic("esiop_start: invalid tag id");
1744 }
1745 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1746 panic("esiop_start: tag not free");
1747 }
1748 #endif
1749 #ifdef SIOP_DEBUG_SCHED
1750 printf("using slot %d for DSA 0x%lx\n", slot,
1751 (u_long)esiop_cmd->cmd_c.dsa);
1752 #endif
1753 /* mark command as active */
1754 if (esiop_cmd->cmd_c.status == CMDST_READY)
1755 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1756 else
1757 panic("esiop_start: bad status");
1758 /* DSA table for reselect */
1759 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1760 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1761 /* DSA table for reselect */
1762 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1763 htole32(esiop_cmd->cmd_c.dsa);
1764 bus_dmamap_sync(sc->sc_c.sc_dmat,
1765 esiop_lun->lun_tagtbl->tblblk->blkmap,
1766 esiop_lun->lun_tagtbl->tbl_offset,
1767 sizeof(uint32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1768 } else {
1769 esiop_lun->active = esiop_cmd;
1770 esiop_script_write(sc,
1771 esiop_target->lun_table_offset +
1772 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1773 esiop_cmd->cmd_c.dsa);
1774 }
1775 /* scheduler slot: DSA */
1776 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1777 esiop_cmd->cmd_c.dsa);
1778 /* make sure SCRIPT processor will read valid data */
1779 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1780 /* handle timeout */
1781 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1782 /* start expire timer */
1783 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1784 if (timeout == 0)
1785 timeout = 1;
1786 callout_reset(&esiop_cmd->cmd_c.xs->xs_callout,
1787 timeout, esiop_timeout, esiop_cmd);
1788 }
1789 /* Signal script it has some work to do */
1790 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1791 SIOP_ISTAT, ISTAT_SIGP);
1792 /* update the current slot, and wait for IRQ */
1793 sc->sc_currschedslot++;
1794 if (sc->sc_currschedslot >= A_ncmd_slots)
1795 sc->sc_currschedslot = 0;
1796 }
1797
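/*
 * esiop_timeout: command timeout handler.  Print some diagnostics,
 * reset the SCSI bus and flag the command with CMDFL_TIMEOUT; the
 * bus-reset interrupt then completes the command in the interrupt
 * handler.
 */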
1798 void
1799 esiop_timeout(void *v)
1800 {
1801 struct esiop_cmd *esiop_cmd = v;
1802 struct esiop_softc *sc =
1803 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1804 int s;
1805 #ifdef SIOP_DEBUG
1806 int slot, slotdsa;
1807 #endif
1808
1809 s = splbio();
1810 esiop_table_sync(esiop_cmd,
1811 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1812 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1813 #ifdef SIOP_DEBUG
1814 printf("command timeout (status %d)\n",
1815 le32toh(esiop_cmd->cmd_tables->status));
1816
1817 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1818 for (slot = 0; slot < A_ncmd_slots; slot++) {
1819 slotdsa = esiop_script_read(sc,
1820 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1821 if ((slotdsa & 0x01) == 0)
1822 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1823 }
1824 printf("istat 0x%x ",
1825 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1826 printf("DSP 0x%lx DSA 0x%x\n",
1827 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP)
1828 - sc->sc_c.sc_scriptaddr),
1829 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
1830 (void)bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1831 printf("istat 0x%x\n",
1832 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1833 #else
1834 printf("command timeout, CDB: ");
1835 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1836 printf("\n");
1837 #endif
1838 /* reset the scsi bus */
1839 siop_resetbus(&sc->sc_c);
1840
1841 /* deactivate callout */
1842 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1843 /*
1844 * mark the command as timed out and just return;
1845 * the bus reset will generate an interrupt,
1846 * which will be handled in esiop_intr()
1847 */
1848 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1849 splx(s);
1850 }
1851
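/*
 * esiop_dump_script: debug helper dumping the script RAM two words per
 * line; memory-move instructions (top three opcode bits 110) carry a
 * third word, printed on the same line.
 */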
1852 void
1853 esiop_dump_script(struct esiop_softc *sc)
1854 {
1855 int i;
1856
1857 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1858 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1859 esiop_script_read(sc, i),
1860 esiop_script_read(sc, i + 1));
1861 if ((esiop_script_read(sc, i) & 0xe0000000) == 0xc0000000) {
1862 i++;
1863 printf(" 0x%08x", esiop_script_read(sc, i + 1));
1864 }
1865 printf("\n");
1866 }
1867 }
1868
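/*
 * esiop_morecbd: grow the command descriptor pool.  Allocate one page
 * of esiop_xfer tables (SIOP_NCMDPB commands per page) and the matching
 * DMA maps, preset the fixed table entries, put the new commands on the
 * free list and credit adapt_openings accordingly.
 */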
1869 void
1870 esiop_morecbd(struct esiop_softc *sc)
1871 {
1872 int error, i, s;
1873 bus_dma_segment_t seg;
1874 int rseg;
1875 struct esiop_cbd *newcbd;
1876 struct esiop_xfer *xfer;
1877 bus_addr_t dsa;
1878
1879 /* allocate a new list head */
1880 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1881 if (newcbd == NULL) {
1882 aprint_error_dev(sc->sc_c.sc_dev,
1883 "can't allocate memory for command descriptors "
1884 "head\n");
1885 return;
1886 }
1887
1888 /* allocate cmd list */
1889 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1890 M_DEVBUF, M_NOWAIT|M_ZERO);
1891 if (newcbd->cmds == NULL) {
1892 aprint_error_dev(sc->sc_c.sc_dev,
1893 "can't allocate memory for command descriptors\n");
1894 goto bad3;
1895 }
1896 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1897 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1898 if (error) {
1899 aprint_error_dev(sc->sc_c.sc_dev,
1900 "unable to allocate cbd DMA memory, error = %d\n",
1901 error);
1902 goto bad2;
1903 }
1904 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1905 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1906 if (error) {
1907 aprint_error_dev(sc->sc_c.sc_dev,
1908 "unable to map cbd DMA memory, error = %d\n",
1909 error);
1910 goto bad2;
1911 }
1912 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1913 BUS_DMA_NOWAIT, &newcbd->xferdma);
1914 if (error) {
1915 aprint_error_dev(sc->sc_c.sc_dev,
1916 "unable to create cbd DMA map, error = %d\n", error);
1917 goto bad1;
1918 }
1919 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1920 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1921 if (error) {
1922 aprint_error_dev(sc->sc_c.sc_dev,
1923 "unable to load cbd DMA map, error = %d\n", error);
1924 goto bad0;
1925 }
1926 #ifdef SIOP_DEBUG
1927 aprint_debug_dev(sc->sc_c.sc_dev, "alloc newcbd at PHY addr 0x%lx\n",
1928 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1929 #endif
1930 for (i = 0; i < SIOP_NCMDPB; i++) {
1931 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1932 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1933 &newcbd->cmds[i].cmd_c.dmamap_data);
1934 if (error) {
1935 aprint_error_dev(sc->sc_c.sc_dev,
1936 "unable to create data DMA map for cbd: "
1937 "error %d\n", error);
1938 goto bad0;
1939 }
1940 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1941 sizeof(struct scsipi_generic), 1,
1942 sizeof(struct scsipi_generic), 0,
1943 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1944 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1945 if (error) {
1946 aprint_error_dev(sc->sc_c.sc_dev,
1947 "unable to create cmd DMA map for cbd %d\n", error);
1948 goto bad0;
1949 }
1950 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1951 newcbd->cmds[i].esiop_cbdp = newcbd;
1952 xfer = &newcbd->xfers[i];
1953 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1954 memset(newcbd->cmds[i].cmd_tables, 0,
1955 sizeof(struct esiop_xfer));
1956 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1957 i * sizeof(struct esiop_xfer);
1958 newcbd->cmds[i].cmd_c.dsa = dsa;
1959 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
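/*
 * Preset the table entries that never change: the message-out,
 * message-in, extended-message and status buffers all live inside
 * this command's siop_common_xfer, so their bus addresses are
 * computed from the command's DSA.
 */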
1960 xfer->siop_tables.t_msgout.count = htole32(1);
1961 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1962 xfer->siop_tables.t_msgin.count = htole32(1);
1963 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1964 offsetof(struct siop_common_xfer, msg_in));
1965 xfer->siop_tables.t_extmsgin.count = htole32(2);
1966 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1967 offsetof(struct siop_common_xfer, msg_in) + 1);
1968 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1969 offsetof(struct siop_common_xfer, msg_in) + 3);
1970 xfer->siop_tables.t_status.count = htole32(1);
1971 xfer->siop_tables.t_status.addr = htole32(dsa +
1972 offsetof(struct siop_common_xfer, status));
1973
1974 s = splbio();
1975 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1976 splx(s);
1977 #ifdef SIOP_DEBUG
1978 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1979 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1980 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1981 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1982 #endif
1983 }
1984 s = splbio();
1985 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1986 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1987 splx(s);
1988 return;
1989 bad0:
1990 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1991 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1992 bad1:
1993 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1994 bad2:
1995 free(newcbd->cmds, M_DEVBUF);
1996 bad3:
1997 free(newcbd, M_DEVBUF);
1998 }
1999
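/*
 * esiop_moretagtbl: grow the pool of tag DSA tables.  Allocate one page
 * and carve it into ESIOP_NTPB tables of ESIOP_NTAG entries each, then
 * put them on free_tagtbl for esiop_add_dev() to hand out.
 */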
2000 void
2001 esiop_moretagtbl(struct esiop_softc *sc)
2002 {
2003 int error, i, j, s;
2004 bus_dma_segment_t seg;
2005 int rseg;
2006 struct esiop_dsatblblk *newtblblk;
2007 struct esiop_dsatbl *newtbls;
2008 uint32_t *tbls;
2009
2010 /* allocate a new list head */
2011 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2012 M_DEVBUF, M_NOWAIT|M_ZERO);
2013 if (newtblblk == NULL) {
2014 aprint_error_dev(sc->sc_c.sc_dev,
2015 "can't allocate memory for tag DSA table block\n");
2016 return;
2017 }
2018
2019 /* allocate tbl list */
2020 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2021 M_DEVBUF, M_NOWAIT|M_ZERO);
2022 if (newtbls == NULL) {
2023 aprint_error_dev(sc->sc_c.sc_dev,
2024 "can't allocate memory for command descriptors\n");
2025 goto bad3;
2026 }
2027 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2028 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2029 if (error) {
2030 aprint_error_dev(sc->sc_c.sc_dev,
2031 "unable to allocate tbl DMA memory, error = %d\n", error);
2032 goto bad2;
2033 }
2034 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2035 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2036 if (error) {
2037 aprint_error_dev(sc->sc_c.sc_dev,
2038 "unable to map tbls DMA memory, error = %d\n", error);
2039 goto bad2;
2040 }
2041 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2042 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2043 if (error) {
2044 aprint_error_dev(sc->sc_c.sc_dev,
2045 "unable to create tbl DMA map, error = %d\n", error);
2046 goto bad1;
2047 }
2048 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2049 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2050 if (error) {
2051 aprint_error_dev(sc->sc_c.sc_dev,
2052 "unable to load tbl DMA map, error = %d\n", error);
2053 goto bad0;
2054 }
2055 #ifdef SIOP_DEBUG
2056 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2057 device_xname(sc->sc_c.sc_dev),
2058 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2059 #endif
2060 for (i = 0; i < ESIOP_NTPB; i++) {
2061 newtbls[i].tblblk = newtblblk;
2062 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2063 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(uint32_t);
2064 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2065 newtbls[i].tbl_offset;
2066 for (j = 0; j < ESIOP_NTAG; j++)
2067 newtbls[i].tbl[j] = j;
2068 s = splbio();
2069 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2070 splx(s);
2071 }
2072 s = splbio();
2073 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2074 splx(s);
2075 return;
2076 bad0:
2077 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2078 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2079 bad1:
2080 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2081 bad2:
2082 free(newtbls, M_DEVBUF);
2083 bad3:
2084 free(newtblblk, M_DEVBUF);
2085 }
2086
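/*
 * esiop_update_scntl3: after a transfer mode change, rewrite the ID
 * word at the head of the target's LUN table so the script selects the
 * target with the updated settings recorded in target_c.id.
 */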
2087 void
2088 esiop_update_scntl3(struct esiop_softc *sc,
2089 struct siop_common_target *_siop_target)
2090 {
2091 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2092
2093 esiop_script_write(sc, esiop_target->lun_table_offset,
2094 esiop_target->target_c.id);
2095 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2096 }
2097
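/*
 * esiop_add_dev: set up tagged queuing for a LUN.  Assign it a tag DSA
 * table from the free list (allocating more if needed) and record the
 * table's bus address in the target's LUN table so the script can look
 * up tagged commands on reselection; if no table can be obtained the
 * target falls back to untagged operation.
 */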
2098 void
2099 esiop_add_dev(struct esiop_softc *sc, int target, int lun)
2100 {
2101 struct esiop_target *esiop_target =
2102 (struct esiop_target *)sc->sc_c.targets[target];
2103 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2104
2105 if (esiop_lun->lun_tagtbl != NULL)
2106 return; /* already allocated */
2107
2108 /* we need a tag DSA table */
2109 esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2110 if (esiop_lun->lun_tagtbl == NULL) {
2111 esiop_moretagtbl(sc);
2112 esiop_lun->lun_tagtbl = TAILQ_FIRST(&sc->free_tagtbl);
2113 if (esiop_lun->lun_tagtbl == NULL) {
2114 /* no resources, run untagged */
2115 esiop_target->target_c.flags &= ~TARF_TAG;
2116 return;
2117 }
2118 }
2119 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2120 /* Update LUN DSA table */
2121 esiop_script_write(sc, esiop_target->lun_table_offset +
2122 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2123 esiop_lun->lun_tagtbl->tbl_dsa);
2124 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2125 }
2126
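/*
 * esiop_del_dev: free the per-LUN software state when a device goes
 * away.
 */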
2127 void
2128 esiop_del_dev(struct esiop_softc *sc, int target, int lun)
2129 {
2130 struct esiop_target *esiop_target;
2131
2132 #ifdef SIOP_DEBUG
2133 printf("%s:%d:%d: free lun sw entry\n",
2134 device_xname(sc->sc_c.sc_dev), target, lun);
2135 #endif
2136 if (sc->sc_c.targets[target] == NULL)
2137 return;
2138 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2139 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2140 esiop_target->esiop_lun[lun] = NULL;
2141 }
2142
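/*
 * esiop_target_register: carve a LUN table for this target out of the
 * free script RAM area, store the target's ID word at its head, point
 * the global target table entry at it, and re-register any tag DSA
 * tables already assigned to its LUNs.
 */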
2143 void
2144 esiop_target_register(struct esiop_softc *sc, uint32_t target)
2145 {
2146 struct esiop_target *esiop_target =
2147 (struct esiop_target *)sc->sc_c.targets[target];
2148 struct esiop_lun *esiop_lun;
2149 int lun;
2150
2151 /* get a DSA table for this target */
2152 esiop_target->lun_table_offset = sc->sc_free_offset;
2153 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2154 #ifdef SIOP_DEBUG
2155 printf("%s: lun table for target %d offset %d free offset %d\n",
2156 device_xname(sc->sc_c.sc_dev), target,
2157 esiop_target->lun_table_offset,
2158 sc->sc_free_offset);
2159 #endif
2160 /* the first 32-bit word is the target ID (for select) */
2161 esiop_script_write(sc, esiop_target->lun_table_offset,
2162 esiop_target->target_c.id);
2163 /* Record this table in the target DSA table */
2164 esiop_script_write(sc,
2165 sc->sc_target_table_offset + target,
2166 (esiop_target->lun_table_offset * sizeof(uint32_t)) +
2167 sc->sc_c.sc_scriptaddr);
2168 /* if we have a tag table, register it */
2169 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2170 esiop_lun = esiop_target->esiop_lun[lun];
2171 if (esiop_lun == NULL)
2172 continue;
2173 if (esiop_lun->lun_tagtbl)
2174 esiop_script_write(sc, esiop_target->lun_table_offset +
2175 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2176 esiop_lun->lun_tagtbl->tbl_dsa);
2177 }
2178 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2179 }
2180
2181 #ifdef SIOP_STATS
2182 void
2183 esiop_printstats(void)
2184 {
2185
2186 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2187 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2188 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2189 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2190 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2191 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2192 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2193 }
2194 #endif
2195