1 /*	$NetBSD: iha.c,v 1.38 2008/04/12 08:21:19 tsutsui Exp $ */
2
3 /*-
4 * Device driver for the INI-9XXXU/UW or INIC-940/950 PCI SCSI Controller.
5 *
6 * Written for 386bsd and FreeBSD by
7  *	Winston Hung <winstonh@initio.com>
8 *
9 * Copyright (c) 1997-1999 Initio Corp.
10 * Copyright (c) 2000, 2001 Ken Westerback
11 * Copyright (c) 2001, 2002 Izumi Tsutsui
12 * All rights reserved.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer,
19 * without modification, immediately at the beginning of the file.
20 * 2. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 /*
37  * Ported to NetBSD by Izumi Tsutsui <tsutsui@ceres.dti.ne.jp> from OpenBSD:
38 * $OpenBSD: iha.c,v 1.3 2001/02/20 00:47:33 krw Exp $
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: iha.c,v 1.38 2008/04/12 08:21:19 tsutsui Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/buf.h>
48 #include <sys/device.h>
49 #include <sys/malloc.h>
50
51 #include <uvm/uvm_extern.h>
52
53 #include <sys/bus.h>
54 #include <sys/intr.h>
55
56 #include <dev/scsipi/scsi_spc.h>
57 #include <dev/scsipi/scsi_all.h>
58 #include <dev/scsipi/scsipi_all.h>
59 #include <dev/scsipi/scsiconf.h>
60 #include <dev/scsipi/scsi_message.h>
61
62 #include <dev/ic/ihareg.h>
63 #include <dev/ic/ihavar.h>
64
65 /*
66 * SCSI Rate Table, indexed by FLAG_SCSI_RATE field of
67 * tcs flags.
68 */
69 static const uint8_t iha_rate_tbl[] = {
70 /* fast 20 */
71 	/* period in nanoseconds divided by 4 */
72 12, /* 50ns, 20M */
73 18, /* 75ns, 13.3M */
74 25, /* 100ns, 10M */
75 31, /* 125ns, 8M */
76 37, /* 150ns, 6.6M */
77 43, /* 175ns, 5.7M */
78 50, /* 200ns, 5M */
79 62 /* 250ns, 4M */
80 };
81 #define IHA_MAX_PERIOD 62
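/*
 * For example, a FLAG_SCSI_RATE value of 0 selects the first entry (12,
 * i.e. a period of about 12 * 4 = 48ns, the 50ns / 20MHz Fast-20 rate),
 * while IHA_MAX_PERIOD caps negotiated synchronous periods at 250ns (4M).
 */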
82
83 #ifdef notused
84 static uint16_t eeprom_default[EEPROM_SIZE] = {
85 /* -- Header ------------------------------------ */
86 /* signature */
87 EEP_SIGNATURE,
88 /* size, revision */
89 EEP_WORD(EEPROM_SIZE * 2, 0x01),
90 /* -- Host Adapter Structure -------------------- */
91 /* model */
92 0x0095,
93 /* model info, number of channel */
94 EEP_WORD(0x00, 1),
95 /* BIOS config */
96 EEP_BIOSCFG_DEFAULT,
97 /* host adapter config */
98 0,
99
100 /* -- eeprom_adapter[0] ------------------------------- */
101 /* ID, adapter config 1 */
102 EEP_WORD(7, CFG_DEFAULT),
103 /* adapter config 2, number of targets */
104 EEP_WORD(0x00, 8),
105 /* target flags */
106 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
107 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
108 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
109 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
110 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
111 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
112 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
113 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
114
115 /* -- eeprom_adapter[1] ------------------------------- */
116 /* ID, adapter config 1 */
117 EEP_WORD(7, CFG_DEFAULT),
118 /* adapter config 2, number of targets */
119 EEP_WORD(0x00, 8),
120 /* target flags */
121 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
122 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
123 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
124 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
125 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
126 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
127 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
128 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
129 /* reserved[5] */
130 0, 0, 0, 0, 0,
131 /* checksum */
132 0
133 };
134 #endif
135
136 static void iha_append_free_scb(struct iha_softc *, struct iha_scb *);
137 static void iha_append_done_scb(struct iha_softc *, struct iha_scb *, uint8_t);
138 static inline struct iha_scb *iha_pop_done_scb(struct iha_softc *);
139
140 static struct iha_scb *iha_find_pend_scb(struct iha_softc *);
141 static inline void iha_append_pend_scb(struct iha_softc *, struct iha_scb *);
142 static inline void iha_push_pend_scb(struct iha_softc *, struct iha_scb *);
143 static inline void iha_del_pend_scb(struct iha_softc *, struct iha_scb *);
144 static inline void iha_mark_busy_scb(struct iha_scb *);
145
146 static inline void iha_set_ssig(struct iha_softc *, uint8_t, uint8_t);
147
148 static int iha_alloc_sglist(struct iha_softc *);
149
150 static void iha_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
151 void *);
152 static void iha_update_xfer_mode(struct iha_softc *, int);
153
154 static void iha_reset_scsi_bus(struct iha_softc *);
155 static void iha_reset_chip(struct iha_softc *);
156 static void iha_reset_dma(struct iha_softc *);
157 static void iha_reset_tcs(struct tcs *, uint8_t);
158
159 static void iha_main(struct iha_softc *);
160 static void iha_scsi(struct iha_softc *);
161 static void iha_select(struct iha_softc *, struct iha_scb *, uint8_t);
162 static int iha_wait(struct iha_softc *, uint8_t);
163
164 static void iha_exec_scb(struct iha_softc *, struct iha_scb *);
165 static void iha_done_scb(struct iha_softc *, struct iha_scb *);
166 static int iha_push_sense_request(struct iha_softc *, struct iha_scb *);
167
168 static void iha_timeout(void *);
169 static void iha_abort_xs(struct iha_softc *, struct scsipi_xfer *, uint8_t);
170 static uint8_t iha_data_over_run(struct iha_scb *);
171
172 static int iha_next_state(struct iha_softc *);
173 static int iha_state_1(struct iha_softc *);
174 static int iha_state_2(struct iha_softc *);
175 static int iha_state_3(struct iha_softc *);
176 static int iha_state_4(struct iha_softc *);
177 static int iha_state_5(struct iha_softc *);
178 static int iha_state_6(struct iha_softc *);
179 static int iha_state_8(struct iha_softc *);
180
181 static int iha_xfer_data(struct iha_softc *, struct iha_scb *, int);
182 static int iha_xpad_in(struct iha_softc *);
183 static int iha_xpad_out(struct iha_softc *);
184
185 static int iha_status_msg(struct iha_softc *);
186 static void iha_busfree(struct iha_softc *);
187 static int iha_resel(struct iha_softc *);
188
189 static int iha_msgin(struct iha_softc *);
190 static int iha_msgin_extended(struct iha_softc *);
191 static int iha_msgin_sdtr(struct iha_softc *);
192 static int iha_msgin_ignore_wid_resid(struct iha_softc *);
193
194 static int iha_msgout(struct iha_softc *, uint8_t);
195 static void iha_msgout_abort(struct iha_softc *, uint8_t);
196 static int iha_msgout_reject(struct iha_softc *);
197 static int iha_msgout_extended(struct iha_softc *);
198 static int iha_msgout_wdtr(struct iha_softc *);
199 static int iha_msgout_sdtr(struct iha_softc *);
200
201 static void iha_wide_done(struct iha_softc *);
202 static void iha_sync_done(struct iha_softc *);
203
204 static void iha_bad_seq(struct iha_softc *);
205
206 static void iha_read_eeprom(struct iha_softc *, struct iha_eeprom *);
207 static int iha_se2_rd_all(struct iha_softc *, uint16_t *);
208 static void iha_se2_instr(struct iha_softc *, int);
209 static uint16_t iha_se2_rd(struct iha_softc *, int);
210 #ifdef notused
211 static void iha_se2_update_all(struct iha_softc *);
212 static void iha_se2_wr(struct iha_softc *, int, uint16_t);
213 #endif
214
215 /*
216 * iha_append_free_scb - append the supplied SCB to the tail of the
217 * sc_freescb queue after clearing and resetting
218 * everything possible.
219 */
220 static void
221 iha_append_free_scb(struct iha_softc *sc, struct iha_scb *scb)
222 {
223 int s;
224
225 s = splbio();
226
227 if (scb == sc->sc_actscb)
228 sc->sc_actscb = NULL;
229
230 scb->status = STATUS_QUEUED;
231 scb->ha_stat = HOST_OK;
232 scb->ta_stat = SCSI_OK;
233
234 scb->nextstat = 0;
235 scb->scb_tagmsg = 0;
236
237 scb->xs = NULL;
238 scb->tcs = NULL;
239
240 /*
241 * scb_tagid, sg_addr, sglist
242 * SCB_SensePtr are set at initialization
243 * and never change
244 */
245
246 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
247
248 splx(s);
249 }
250
251 static void
252 iha_append_done_scb(struct iha_softc *sc, struct iha_scb *scb, uint8_t hastat)
253 {
254 struct tcs *tcs;
255 int s;
256
257 s = splbio();
258
259 if (scb->xs != NULL)
260 callout_stop(&scb->xs->xs_callout);
261
262 if (scb == sc->sc_actscb)
263 sc->sc_actscb = NULL;
264
265 tcs = scb->tcs;
266
267 if (scb->scb_tagmsg != 0) {
268 if (tcs->tagcnt)
269 tcs->tagcnt--;
270 } else if (tcs->ntagscb == scb)
271 tcs->ntagscb = NULL;
272
273 scb->status = STATUS_QUEUED;
274 scb->ha_stat = hastat;
275
276 TAILQ_INSERT_TAIL(&sc->sc_donescb, scb, chain);
277
278 splx(s);
279 }
280
281 static inline struct iha_scb *
282 iha_pop_done_scb(struct iha_softc *sc)
283 {
284 struct iha_scb *scb;
285 int s;
286
287 s = splbio();
288
289 scb = TAILQ_FIRST(&sc->sc_donescb);
290
291 if (scb != NULL) {
292 scb->status = STATUS_RENT;
293 TAILQ_REMOVE(&sc->sc_donescb, scb, chain);
294 }
295
296 splx(s);
297
298 return (scb);
299 }
300
301 /*
302 * iha_find_pend_scb - scan the pending queue for a SCB that can be
303 * processed immediately. Return NULL if none found
304 * and a pointer to the SCB if one is found. If there
305 * is an active SCB, return NULL!
306 */
307 static struct iha_scb *
308 iha_find_pend_scb(struct iha_softc *sc)
309 {
310 struct iha_scb *scb;
311 struct tcs *tcs;
312 int s;
313
314 s = splbio();
315
316 if (sc->sc_actscb != NULL)
317 scb = NULL;
318
319 else
320 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) {
321 if ((scb->xs->xs_control & XS_CTL_RESET) != 0)
322 /* ALWAYS willing to reset a device */
323 break;
324
325 tcs = scb->tcs;
326
327 if ((scb->scb_tagmsg) != 0) {
328 /*
329 			 * A tagged I/O. OK to start if no
330 * non-tagged I/O is active on the same
331 * target
332 */
333 if (tcs->ntagscb == NULL)
334 break;
335
336 } else if (scb->cmd[0] == SCSI_REQUEST_SENSE) {
337 /*
338 * OK to do a non-tagged request sense
339 * even if a non-tagged I/O has been
340 * started, 'cuz we don't allow any
341 * disconnect during a request sense op
342 */
343 break;
344
345 } else if (tcs->tagcnt == 0) {
346 /*
347 * No tagged I/O active on this target,
348 * ok to start a non-tagged one if one
349 * is not already active
350 */
351 if (tcs->ntagscb == NULL)
352 break;
353 }
354 }
355
356 splx(s);
357
358 return (scb);
359 }
360
361 static inline void
362 iha_append_pend_scb(struct iha_softc *sc, struct iha_scb *scb)
363 {
364 /* ASSUMPTION: only called within a splbio()/splx() pair */
365
366 if (scb == sc->sc_actscb)
367 sc->sc_actscb = NULL;
368
369 scb->status = STATUS_QUEUED;
370
371 TAILQ_INSERT_TAIL(&sc->sc_pendscb, scb, chain);
372 }
373
374 static inline void
375 iha_push_pend_scb(struct iha_softc *sc, struct iha_scb *scb)
376 {
377 int s;
378
379 s = splbio();
380
381 if (scb == sc->sc_actscb)
382 sc->sc_actscb = NULL;
383
384 scb->status = STATUS_QUEUED;
385
386 TAILQ_INSERT_HEAD(&sc->sc_pendscb, scb, chain);
387
388 splx(s);
389 }
390
391 /*
392 * iha_del_pend_scb - remove scb from sc_pendscb
393 */
394 static inline void
395 iha_del_pend_scb(struct iha_softc *sc, struct iha_scb *scb)
396 {
397 int s;
398
399 s = splbio();
400
401 TAILQ_REMOVE(&sc->sc_pendscb, scb, chain);
402
403 splx(s);
404 }
405
406 static inline void
407 iha_mark_busy_scb(struct iha_scb *scb)
408 {
409 int s;
410
411 s = splbio();
412
413 scb->status = STATUS_BUSY;
414
415 if (scb->scb_tagmsg == 0)
416 scb->tcs->ntagscb = scb;
417 else
418 scb->tcs->tagcnt++;
419
420 splx(s);
421 }
422
423 /*
424 * iha_set_ssig - read the current scsi signal mask, then write a new
425 * one which turns off/on the specified signals.
426 */
427 static inline void
428 iha_set_ssig(struct iha_softc *sc, uint8_t offsigs, uint8_t onsigs)
429 {
430 bus_space_tag_t iot = sc->sc_iot;
431 bus_space_handle_t ioh = sc->sc_ioh;
432 uint8_t currsigs;
433
434 currsigs = bus_space_read_1(iot, ioh, TUL_SSIGI);
435 bus_space_write_1(iot, ioh, TUL_SSIGO, (currsigs & ~offsigs) | onsigs);
436 }
437
438 /*
439 * iha_intr - the interrupt service routine for the iha driver
440 */
441 int
442 iha_intr(void *arg)
443 {
444 bus_space_tag_t iot;
445 bus_space_handle_t ioh;
446 struct iha_softc *sc;
447 int s;
448
449 sc = (struct iha_softc *)arg;
450 iot = sc->sc_iot;
451 ioh = sc->sc_ioh;
452
453 if ((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
454 return (0);
455
456 s = splbio(); /* XXX - Or are interrupts off when ISR's are called? */
457
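	/*
	 * sc_semaph serializes entry into iha_main(); iha_exec_scb() does
	 * the same dance, so a command being started will not re-enter the
	 * main loop while the interrupt path is already running it.
	 */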
458 if (sc->sc_semaph != SEMAPH_IN_MAIN) {
459 /* XXX - need these inside a splbio()/splx()? */
460 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
461 sc->sc_semaph = SEMAPH_IN_MAIN;
462
463 iha_main(sc);
464
465 sc->sc_semaph = ~SEMAPH_IN_MAIN;
466 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
467 }
468
469 splx(s);
470
471 return (1);
472 }
473
474 void
475 iha_attach(struct iha_softc *sc)
476 {
477 bus_space_tag_t iot = sc->sc_iot;
478 bus_space_handle_t ioh = sc->sc_ioh;
479 struct iha_scb *scb;
480 struct iha_eeprom eeprom;
481 struct eeprom_adapter *conf;
482 int i, error, reg;
483
484 iha_read_eeprom(sc, &eeprom);
485
486 conf = &eeprom.adapter[0];
487
488 /*
489 * fill in the rest of the iha_softc fields
490 */
491 sc->sc_id = CFG_ID(conf->config1);
492 sc->sc_semaph = ~SEMAPH_IN_MAIN;
493 sc->sc_status0 = 0;
494 sc->sc_actscb = NULL;
495
496 TAILQ_INIT(&sc->sc_freescb);
497 TAILQ_INIT(&sc->sc_pendscb);
498 TAILQ_INIT(&sc->sc_donescb);
499 error = iha_alloc_sglist(sc);
500 if (error != 0) {
501 aprint_error_dev(sc->sc_dev, "cannot allocate sglist\n");
502 return;
503 }
504
505 sc->sc_scb = malloc(sizeof(struct iha_scb) * IHA_MAX_SCB,
506 M_DEVBUF, M_NOWAIT|M_ZERO);
507 if (sc->sc_scb == NULL) {
508 aprint_error_dev(sc->sc_dev, "cannot allocate SCB\n");
509 return;
510 }
511
512 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) {
513 scb->scb_tagid = i;
514 scb->sgoffset = IHA_SG_SIZE * i;
515 scb->sglist = sc->sc_sglist + IHA_MAX_SG_ENTRIES * i;
516 scb->sg_addr =
517 sc->sc_dmamap->dm_segs[0].ds_addr + scb->sgoffset;
518
519 error = bus_dmamap_create(sc->sc_dmat,
520 MAXPHYS, IHA_MAX_SG_ENTRIES, MAXPHYS, 0,
521 BUS_DMA_NOWAIT, &scb->dmap);
522
523 if (error != 0) {
524 aprint_error_dev(sc->sc_dev,
525 "couldn't create SCB DMA map, error = %d\n",
526 error);
527 return;
528 }
529 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
530 }
531
532 /* Mask all the interrupts */
533 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
534
535 /* Stop any I/O and reset the scsi module */
536 iha_reset_dma(sc);
537 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSMOD);
538
539 /* Program HBA's SCSI ID */
540 bus_space_write_1(iot, ioh, TUL_SID, sc->sc_id << 4);
541
542 /*
543 * Configure the channel as requested by the NVRAM settings read
544 * by iha_read_eeprom() above.
545 */
546
547 sc->sc_sconf1 = SCONFIG0DEFAULT;
548 if ((conf->config1 & CFG_EN_PAR) != 0)
549 sc->sc_sconf1 |= SPCHK;
550 bus_space_write_1(iot, ioh, TUL_SCONFIG0, sc->sc_sconf1);
551
552 /* set selection time out 250 ms */
553 bus_space_write_1(iot, ioh, TUL_STIMO, STIMO_250MS);
554
555 /* Enable desired SCSI termination configuration read from eeprom */
556 reg = 0;
557 if (conf->config1 & CFG_ACT_TERM1)
558 reg |= ENTMW;
559 if (conf->config1 & CFG_ACT_TERM2)
560 reg |= ENTM;
561 bus_space_write_1(iot, ioh, TUL_DCTRL0, reg);
562
563 reg = bus_space_read_1(iot, ioh, TUL_GCTRL1) & ~ATDEN;
564 if (conf->config1 & CFG_AUTO_TERM)
565 reg |= ATDEN;
566 bus_space_write_1(iot, ioh, TUL_GCTRL1, reg);
567
568 for (i = 0; i < IHA_MAX_TARGETS / 2; i++) {
569 sc->sc_tcs[i * 2 ].flags = EEP_LBYTE(conf->tflags[i]);
570 sc->sc_tcs[i * 2 + 1].flags = EEP_HBYTE(conf->tflags[i]);
571 iha_reset_tcs(&sc->sc_tcs[i * 2 ], sc->sc_sconf1);
572 iha_reset_tcs(&sc->sc_tcs[i * 2 + 1], sc->sc_sconf1);
573 }
574
575 iha_reset_chip(sc);
576 bus_space_write_1(iot, ioh, TUL_SIEN, ALL_INTERRUPTS);
577
578 /*
579 * fill in the adapter.
580 */
581 sc->sc_adapter.adapt_dev = sc->sc_dev;
582 sc->sc_adapter.adapt_nchannels = 1;
583 sc->sc_adapter.adapt_openings = IHA_MAX_SCB;
584 sc->sc_adapter.adapt_max_periph = IHA_MAX_SCB;
585 sc->sc_adapter.adapt_ioctl = NULL;
586 sc->sc_adapter.adapt_minphys = minphys;
587 sc->sc_adapter.adapt_request = iha_scsipi_request;
588
589 /*
590 * fill in the channel.
591 */
592 sc->sc_channel.chan_adapter = &sc->sc_adapter;
593 sc->sc_channel.chan_bustype = &scsi_bustype;
594 sc->sc_channel.chan_channel = 0;
595 sc->sc_channel.chan_ntargets = CFG_TARGET(conf->config2);
596 sc->sc_channel.chan_nluns = 8;
597 sc->sc_channel.chan_id = sc->sc_id;
598
599 /*
600 * Now try to attach all the sub devices.
601 */
602 config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
603 }
604
605 /*
606 * iha_alloc_sglist - allocate and map sglist for SCB's
607 */
608 static int
609 iha_alloc_sglist(struct iha_softc *sc)
610 {
611 bus_dma_segment_t seg;
612 int error, rseg;
613
614 /*
615 * Allocate DMA-safe memory for the SCB's sglist
616 */
617 if ((error = bus_dmamem_alloc(sc->sc_dmat,
618 IHA_SG_SIZE * IHA_MAX_SCB,
619 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
620 printf(": unable to allocate sglist, error = %d\n", error);
621 return (error);
622 }
623 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
624 IHA_SG_SIZE * IHA_MAX_SCB, (void **)&sc->sc_sglist,
625 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
626 printf(": unable to map sglist, error = %d\n", error);
627 return (error);
628 }
629
630 /*
631 * Create and load the DMA map used for the SCBs
632 */
633 if ((error = bus_dmamap_create(sc->sc_dmat,
634 IHA_SG_SIZE * IHA_MAX_SCB, 1, IHA_SG_SIZE * IHA_MAX_SCB,
635 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
636 printf(": unable to create control DMA map, error = %d\n",
637 error);
638 return (error);
639 }
640 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
641 sc->sc_sglist, IHA_SG_SIZE * IHA_MAX_SCB,
642 NULL, BUS_DMA_NOWAIT)) != 0) {
643 printf(": unable to load control DMA map, error = %d\n", error);
644 return (error);
645 }
646
647 memset(sc->sc_sglist, 0, IHA_SG_SIZE * IHA_MAX_SCB);
648
649 return (0);
650 }
651
652 void
653 iha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
654 void *arg)
655 {
656 struct scsipi_xfer *xs;
657 struct scsipi_periph *periph;
658 struct iha_scb *scb;
659 struct iha_softc *sc;
660 int error, s;
661
662 sc = device_private(chan->chan_adapter->adapt_dev);
663
664 switch (req) {
665 case ADAPTER_REQ_RUN_XFER:
666 xs = arg;
667 periph = xs->xs_periph;
668
669 /* XXX This size isn't actually a hardware restriction. */
670 if (xs->cmdlen > sizeof(scb->cmd) ||
671 periph->periph_target >= IHA_MAX_TARGETS) {
672 xs->error = XS_DRIVER_STUFFUP;
673 scsipi_done(xs);
674 return;
675 }
676
677 s = splbio();
678 scb = TAILQ_FIRST(&sc->sc_freescb);
679 if (scb != NULL) {
680 scb->status = STATUS_RENT;
681 TAILQ_REMOVE(&sc->sc_freescb, scb, chain);
682 }
683 else {
684 printf("unable to allocate scb\n");
685 #ifdef DIAGNOSTIC
686 scsipi_printaddr(periph);
687 panic("iha_scsipi_request");
688 #else
689 splx(s);
690 return;
691 #endif
692 }
693 splx(s);
694
695 scb->target = periph->periph_target;
696 scb->lun = periph->periph_lun;
697 scb->tcs = &sc->sc_tcs[scb->target];
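		/*
		 * Build the IDENTIFY message; disconnection is not allowed
		 * while a REQUEST SENSE is run (see iha_find_pend_scb()).
		 */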
698 scb->scb_id = MSG_IDENTIFY(periph->periph_lun,
699 (xs->xs_control & XS_CTL_REQSENSE) == 0);
700
701 scb->xs = xs;
702 scb->cmdlen = xs->cmdlen;
703 memcpy(&scb->cmd, xs->cmd, xs->cmdlen);
704 scb->buflen = xs->datalen;
705 scb->flags = 0;
706 if (xs->xs_control & XS_CTL_DATA_OUT)
707 scb->flags |= FLAG_DATAOUT;
708 if (xs->xs_control & XS_CTL_DATA_IN)
709 scb->flags |= FLAG_DATAIN;
710
711 if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) {
712 error = bus_dmamap_load(sc->sc_dmat, scb->dmap,
713 xs->data, scb->buflen, NULL,
714 ((xs->xs_control & XS_CTL_NOSLEEP) ?
715 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
716 BUS_DMA_STREAMING |
717 ((scb->flags & FLAG_DATAIN) ?
718 BUS_DMA_READ : BUS_DMA_WRITE));
719
720 if (error) {
721 printf("%s: error %d loading DMA map\n",
722 device_xname(sc->sc_dev), error);
723 iha_append_free_scb(sc, scb);
724 xs->error = XS_DRIVER_STUFFUP;
725 scsipi_done(xs);
726 return;
727 }
728 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
729 0, scb->dmap->dm_mapsize,
730 (scb->flags & FLAG_DATAIN) ?
731 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
732 }
733
734 iha_exec_scb(sc, scb);
735 return;
736
737 case ADAPTER_REQ_GROW_RESOURCES:
738 return; /* XXX */
739
740 case ADAPTER_REQ_SET_XFER_MODE:
741 {
742 struct tcs *tcs;
743 struct scsipi_xfer_mode *xm = arg;
744
745 tcs = &sc->sc_tcs[xm->xm_target];
746
747 if ((xm->xm_mode & PERIPH_CAP_WIDE16) != 0 &&
748 (tcs->flags & FLAG_NO_WIDE) == 0)
749 tcs->flags &= ~(FLAG_WIDE_DONE|FLAG_SYNC_DONE);
750
751 if ((xm->xm_mode & PERIPH_CAP_SYNC) != 0 &&
752 (tcs->flags & FLAG_NO_SYNC) == 0)
753 tcs->flags &= ~FLAG_SYNC_DONE;
754
755 /*
756 * If we're not going to negotiate, send the
757 * notification now, since it won't happen later.
758 */
759 if ((tcs->flags & (FLAG_WIDE_DONE|FLAG_SYNC_DONE)) ==
760 (FLAG_WIDE_DONE|FLAG_SYNC_DONE))
761 iha_update_xfer_mode(sc, xm->xm_target);
762
763 return;
764 }
765 }
766 }
767
768 void
769 iha_update_xfer_mode(struct iha_softc *sc, int target)
770 {
771 struct tcs *tcs = &sc->sc_tcs[target];
772 struct scsipi_xfer_mode xm;
773
774 xm.xm_target = target;
775 xm.xm_mode = 0;
776 xm.xm_period = 0;
777 xm.xm_offset = 0;
778
779 if (tcs->syncm & PERIOD_WIDE_SCSI)
780 xm.xm_mode |= PERIPH_CAP_WIDE16;
781
782 if (tcs->period) {
783 xm.xm_mode |= PERIPH_CAP_SYNC;
784 xm.xm_period = tcs->period;
785 xm.xm_offset = tcs->offset;
786 }
787
788 scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
789 }
790
791 static void
792 iha_reset_scsi_bus(struct iha_softc *sc)
793 {
794 struct iha_scb *scb;
795 struct tcs *tcs;
796 int i, s;
797
798 s = splbio();
799
800 iha_reset_dma(sc);
801
802 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
803 switch (scb->status) {
804 case STATUS_BUSY:
805 iha_append_done_scb(sc, scb, HOST_SCSI_RST);
806 break;
807
808 case STATUS_SELECT:
809 iha_push_pend_scb(sc, scb);
810 break;
811
812 default:
813 break;
814 }
815
816 for (i = 0, tcs = sc->sc_tcs; i < IHA_MAX_TARGETS; i++, tcs++)
817 iha_reset_tcs(tcs, sc->sc_sconf1);
818
819 splx(s);
820 }
821
822 void
823 iha_reset_chip(struct iha_softc *sc)
824 {
825 bus_space_tag_t iot = sc->sc_iot;
826 bus_space_handle_t ioh = sc->sc_ioh;
827
828 /* reset tulip chip */
829
830 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSCSI);
831
832 do {
833 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
834 } while ((sc->sc_sistat & SRSTD) == 0);
835
836 iha_set_ssig(sc, 0, 0);
837
838 /* Clear any active interrupt*/
839 (void)bus_space_read_1(iot, ioh, TUL_SISTAT);
840 }
841
842 /*
843 * iha_reset_dma - abort any active DMA xfer, reset tulip FIFO.
844 */
845 static void
846 iha_reset_dma(struct iha_softc *sc)
847 {
848 bus_space_tag_t iot = sc->sc_iot;
849 bus_space_handle_t ioh = sc->sc_ioh;
850
851 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
852 /* if DMA xfer is pending, abort DMA xfer */
853 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
854 /* wait Abort DMA xfer done */
855 while ((bus_space_read_1(iot, ioh, TUL_ISTUS0) & DABT) == 0)
856 ;
857 }
858
859 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
860 }
861
862 /*
863 * iha_reset_tcs - reset the target control structure pointed
864 * to by tcs to default values. tcs flags
865 * only has the negotiation done bits reset as
866 * the other bits are fixed at initialization.
867 */
868 static void
869 iha_reset_tcs(struct tcs *tcs, uint8_t config0)
870 {
871
872 tcs->flags &= ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
873 tcs->period = 0;
874 tcs->offset = 0;
875 tcs->tagcnt = 0;
876 tcs->ntagscb = NULL;
877 tcs->syncm = 0;
878 tcs->sconfig0 = config0;
879 }
880
881 /*
882 * iha_main - process the active SCB, taking one off pending and making it
883 * active if necessary, and any done SCB's created as
884 * a result until there are no interrupts pending and no pending
885 * SCB's that can be started.
886 */
887 static void
888 iha_main(struct iha_softc *sc)
889 {
890 bus_space_tag_t iot = sc->sc_iot;
891 	bus_space_handle_t ioh = sc->sc_ioh;
892 struct iha_scb *scb;
893
894 for (;;) {
895 iha_scsi(sc);
896
897 while ((scb = iha_pop_done_scb(sc)) != NULL)
898 iha_done_scb(sc, scb);
899
900 /*
901 		 * If there are no interrupts pending and we can't start
902 		 * a pending SCB, break out of the for(;;). Otherwise
903 * continue the good work with another call to
904 * iha_scsi().
905 */
906 if (((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
907 && (iha_find_pend_scb(sc) == NULL))
908 break;
909 }
910 }
911
912 /*
913 * iha_scsi - service any outstanding interrupts. If there are none, try to
914 * start another SCB currently in the pending queue.
915 */
916 static void
917 iha_scsi(struct iha_softc *sc)
918 {
919 bus_space_tag_t iot = sc->sc_iot;
920 bus_space_handle_t ioh = sc->sc_ioh;
921 struct iha_scb *scb;
922 struct tcs *tcs;
923 uint8_t stat;
924
925 /* service pending interrupts asap */
926
927 stat = bus_space_read_1(iot, ioh, TUL_STAT0);
928 if ((stat & INTPD) != 0) {
929 sc->sc_status0 = stat;
930 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
931 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
932
933 sc->sc_phase = sc->sc_status0 & PH_MASK;
934
935 if ((sc->sc_sistat & SRSTD) != 0) {
936 iha_reset_scsi_bus(sc);
937 return;
938 }
939
940 if ((sc->sc_sistat & RSELED) != 0) {
941 iha_resel(sc);
942 return;
943 }
944
945 if ((sc->sc_sistat & (STIMEO | DISCD)) != 0) {
946 iha_busfree(sc);
947 return;
948 }
949
950 if ((sc->sc_sistat & (SCMDN | SBSRV)) != 0) {
951 iha_next_state(sc);
952 return;
953 }
954
955 if ((sc->sc_sistat & SELED) != 0)
956 iha_set_ssig(sc, 0, 0);
957 }
958
959 /*
960 * There were no interrupts pending which required action elsewhere, so
961 * see if it is possible to start the selection phase on a pending SCB
962 */
963 if ((scb = iha_find_pend_scb(sc)) == NULL)
964 return;
965
966 tcs = scb->tcs;
967
968 /* program HBA's SCSI ID & target SCSI ID */
969 bus_space_write_1(iot, ioh, TUL_SID, (sc->sc_id << 4) | scb->target);
970
971 if ((scb->xs->xs_control & XS_CTL_RESET) == 0) {
972 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
973
974 if ((tcs->flags & FLAG_NO_NEG_SYNC) == 0 ||
975 (tcs->flags & FLAG_NO_NEG_WIDE) == 0)
976 iha_select(sc, scb, SELATNSTOP);
977
978 else if (scb->scb_tagmsg != 0)
979 iha_select(sc, scb, SEL_ATN3);
980
981 else
982 iha_select(sc, scb, SEL_ATN);
983
984 } else {
985 iha_select(sc, scb, SELATNSTOP);
986 scb->nextstat = 8;
987 }
988
989 if ((scb->xs->xs_control & XS_CTL_POLL) != 0) {
990 int timeout;
991 for (timeout = scb->xs->timeout; timeout > 0; timeout--) {
992 if (iha_wait(sc, NO_OP) == -1)
993 break;
994 if (iha_next_state(sc) == -1)
995 break;
996 delay(1000); /* Only happens in boot, so it's ok */
997 }
998
999 /*
1000 		 * Since done queue processing is not done until AFTER this
1001 		 * function returns, the scb is on the done queue, not
1002 		 * the free queue, at this point and still has valid data.
1003 *
1004 * Conversely, xs->error has not been set yet
1005 */
1006 if (timeout == 0)
1007 iha_timeout(scb);
1008 }
1009 }
1010
1011 static void
1012 iha_select(struct iha_softc *sc, struct iha_scb *scb, uint8_t select_type)
1013 {
1014 bus_space_tag_t iot = sc->sc_iot;
1015 bus_space_handle_t ioh = sc->sc_ioh;
1016
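	/*
	 * SEL_ATN/SEL_ATN3 preload the identify (and tag) message plus the
	 * CDB into the FIFO so the chip can carry out the whole selection
	 * itself; SELATNSTOP stops after selection so state 1 can negotiate
	 * wide/sync (or send a bus device reset) first.
	 */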
1017 switch (select_type) {
1018 case SEL_ATN:
1019 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
1020 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
1021 scb->cmd, scb->cmdlen);
1022
1023 scb->nextstat = 2;
1024 break;
1025
1026 case SELATNSTOP:
1027 scb->nextstat = 1;
1028 break;
1029
1030 case SEL_ATN3:
1031 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
1032 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagmsg);
1033 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagid);
1034
1035 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, scb->cmd,
1036 scb->cmdlen);
1037
1038 scb->nextstat = 2;
1039 break;
1040
1041 default:
1042 printf("[debug] iha_select() - unknown select type = 0x%02x\n",
1043 select_type);
1044 return;
1045 }
1046
1047 iha_del_pend_scb(sc, scb);
1048 scb->status = STATUS_SELECT;
1049
1050 sc->sc_actscb = scb;
1051
1052 bus_space_write_1(iot, ioh, TUL_SCMD, select_type);
1053 }
1054
1055 /*
1056 * iha_wait - wait for an interrupt to service or a SCSI bus phase change
1057 * after writing the supplied command to the tulip chip. If
1058 * the command is NO_OP, skip the command writing.
1059 */
1060 static int
1061 iha_wait(struct iha_softc *sc, uint8_t cmd)
1062 {
1063 bus_space_tag_t iot = sc->sc_iot;
1064 bus_space_handle_t ioh = sc->sc_ioh;
1065
1066 if (cmd != NO_OP)
1067 bus_space_write_1(iot, ioh, TUL_SCMD, cmd);
1068
1069 /*
1070 * Have to do this here, in addition to in iha_isr, because
1071 * interrupts might be turned off when we get here.
1072 */
1073 do {
1074 sc->sc_status0 = bus_space_read_1(iot, ioh, TUL_STAT0);
1075 } while ((sc->sc_status0 & INTPD) == 0);
1076
1077 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
1078 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
1079
1080 sc->sc_phase = sc->sc_status0 & PH_MASK;
1081
1082 if ((sc->sc_sistat & SRSTD) != 0) {
1083 /* SCSI bus reset interrupt */
1084 iha_reset_scsi_bus(sc);
1085 return (-1);
1086 }
1087
1088 if ((sc->sc_sistat & RSELED) != 0)
1089 /* Reselection interrupt */
1090 return (iha_resel(sc));
1091
1092 if ((sc->sc_sistat & STIMEO) != 0) {
1093 /* selected/reselected timeout interrupt */
1094 iha_busfree(sc);
1095 return (-1);
1096 }
1097
1098 if ((sc->sc_sistat & DISCD) != 0) {
1099 /* BUS disconnection interrupt */
1100 if ((sc->sc_flags & FLAG_EXPECT_DONE_DISC) != 0) {
1101 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1102 bus_space_write_1(iot, ioh, TUL_SCONFIG0,
1103 SCONFIG0DEFAULT);
1104 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
1105 iha_append_done_scb(sc, sc->sc_actscb, HOST_OK);
1106 sc->sc_flags &= ~FLAG_EXPECT_DONE_DISC;
1107
1108 } else if ((sc->sc_flags & FLAG_EXPECT_DISC) != 0) {
1109 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1110 bus_space_write_1(iot, ioh, TUL_SCONFIG0,
1111 SCONFIG0DEFAULT);
1112 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
1113 sc->sc_actscb = NULL;
1114 sc->sc_flags &= ~FLAG_EXPECT_DISC;
1115
1116 } else
1117 iha_busfree(sc);
1118
1119 return (-1);
1120 }
1121
1122 return (sc->sc_phase);
1123 }
1124
1125 static void
1126 iha_exec_scb(struct iha_softc *sc, struct iha_scb *scb)
1127 {
1128 bus_space_tag_t iot;
1129 bus_space_handle_t ioh;
1130 bus_dmamap_t dm;
1131 struct scsipi_xfer *xs = scb->xs;
1132 int nseg, s;
1133
1134 dm = scb->dmap;
1135 nseg = dm->dm_nsegs;
1136
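	/*
	 * With more than one DMA segment, build the hardware scatter/gather
	 * list (little-endian length/address pairs) and point the chip at
	 * it; a single segment is DMAed to directly.
	 */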
1137 if (nseg > 1) {
1138 struct iha_sg_element *sg = scb->sglist;
1139 int i;
1140
1141 for (i = 0; i < nseg; i++) {
1142 sg[i].sg_len = htole32(dm->dm_segs[i].ds_len);
1143 sg[i].sg_addr = htole32(dm->dm_segs[i].ds_addr);
1144 }
1145 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1146 scb->sgoffset, IHA_SG_SIZE,
1147 BUS_DMASYNC_PREWRITE);
1148
1149 scb->flags |= FLAG_SG;
1150 scb->sg_size = scb->sg_max = nseg;
1151 scb->sg_index = 0;
1152
1153 scb->bufaddr = scb->sg_addr;
1154 } else
1155 scb->bufaddr = dm->dm_segs[0].ds_addr;
1156
1157 if ((xs->xs_control & XS_CTL_POLL) == 0) {
1158 int timeout = mstohz(xs->timeout);
1159 if (timeout == 0)
1160 timeout = 1;
1161 callout_reset(&xs->xs_callout, timeout, iha_timeout, scb);
1162 }
1163
1164 s = splbio();
1165
1166 	if (((scb->xs->xs_control & XS_CTL_RESET) != 0) ||
1167 (scb->cmd[0] == SCSI_REQUEST_SENSE))
1168 iha_push_pend_scb(sc, scb); /* Insert SCB at head of Pend */
1169 else
1170 iha_append_pend_scb(sc, scb); /* Append SCB to tail of Pend */
1171
1172 /*
1173 * Run through iha_main() to ensure something is active, if
1174 * only this new SCB.
1175 */
1176 if (sc->sc_semaph != SEMAPH_IN_MAIN) {
1177 iot = sc->sc_iot;
1178 ioh = sc->sc_ioh;
1179
1180 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
1181 sc->sc_semaph = SEMAPH_IN_MAIN;
1182
1183 splx(s);
1184 iha_main(sc);
1185 s = splbio();
1186
1187 sc->sc_semaph = ~SEMAPH_IN_MAIN;
1188 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
1189 }
1190
1191 splx(s);
1192 }
1193
1194 /*
1195 * iha_done_scb - We have a scb which has been processed by the
1196 * adaptor, now we look to see how the operation went.
1197 */
1198 static void
1199 iha_done_scb(struct iha_softc *sc, struct iha_scb *scb)
1200 {
1201 struct scsipi_xfer *xs = scb->xs;
1202
1203 if (xs != NULL) {
1204 /* Cancel the timeout. */
1205 callout_stop(&xs->xs_callout);
1206
1207 if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) {
1208 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
1209 0, scb->dmap->dm_mapsize,
1210 (scb->flags & FLAG_DATAIN) ?
1211 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1212 bus_dmamap_unload(sc->sc_dmat, scb->dmap);
1213 }
1214
1215 xs->status = scb->ta_stat;
1216
1217 switch (scb->ha_stat) {
1218 case HOST_OK:
1219 switch (scb->ta_stat) {
1220 case SCSI_OK:
1221 case SCSI_CONDITION_MET:
1222 case SCSI_INTERM:
1223 case SCSI_INTERM_COND_MET:
1224 xs->resid = scb->buflen;
1225 xs->error = XS_NOERROR;
1226 if ((scb->flags & FLAG_RSENS) != 0)
1227 xs->error = XS_SENSE;
1228 break;
1229
1230 case SCSI_RESV_CONFLICT:
1231 case SCSI_BUSY:
1232 case SCSI_QUEUE_FULL:
1233 xs->error = XS_BUSY;
1234 break;
1235
1236 case SCSI_TERMINATED:
1237 case SCSI_ACA_ACTIVE:
1238 case SCSI_CHECK:
1239 scb->tcs->flags &=
1240 ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
1241
1242 if ((scb->flags & FLAG_RSENS) != 0 ||
1243 iha_push_sense_request(sc, scb) != 0) {
1244 scb->flags &= ~FLAG_RSENS;
1245 printf("%s: request sense failed\n",
1246 device_xname(sc->sc_dev));
1247 xs->error = XS_DRIVER_STUFFUP;
1248 break;
1249 }
1250
1251 xs->error = XS_SENSE;
1252 return;
1253
1254 default:
1255 xs->error = XS_DRIVER_STUFFUP;
1256 break;
1257 }
1258 break;
1259
1260 case HOST_SEL_TOUT:
1261 xs->error = XS_SELTIMEOUT;
1262 break;
1263
1264 case HOST_SCSI_RST:
1265 case HOST_DEV_RST:
1266 xs->error = XS_RESET;
1267 break;
1268
1269 case HOST_SPERR:
1270 printf("%s: SCSI Parity error detected\n",
1271 device_xname(sc->sc_dev));
1272 xs->error = XS_DRIVER_STUFFUP;
1273 break;
1274
1275 case HOST_TIMED_OUT:
1276 xs->error = XS_TIMEOUT;
1277 break;
1278
1279 case HOST_DO_DU:
1280 case HOST_BAD_PHAS:
1281 default:
1282 xs->error = XS_DRIVER_STUFFUP;
1283 break;
1284 }
1285
1286 scsipi_done(xs);
1287 }
1288
1289 iha_append_free_scb(sc, scb);
1290 }
1291
1292 /*
1293 * iha_push_sense_request - obtain auto sense data by pushing the
1294 * SCB needing it back onto the pending
1295 * queue with a REQUEST_SENSE CDB.
1296 */
1297 static int
1298 iha_push_sense_request(struct iha_softc *sc, struct iha_scb *scb)
1299 {
1300 struct scsipi_xfer *xs = scb->xs;
1301 struct scsipi_periph *periph = xs->xs_periph;
1302 struct scsi_request_sense *ss = (struct scsi_request_sense *)scb->cmd;
1303 int lun = periph->periph_lun;
1304 int err;
1305
1306 memset(ss, 0, sizeof(*ss));
1307 ss->opcode = SCSI_REQUEST_SENSE;
1308 ss->byte2 = lun << SCSI_CMD_LUN_SHIFT;
1309 ss->length = sizeof(struct scsi_sense_data);
1310
1311 scb->flags = FLAG_RSENS | FLAG_DATAIN;
1312
1313 scb->scb_id &= ~MSG_IDENTIFY_DISCFLAG;
1314
1315 scb->scb_tagmsg = 0;
1316 scb->ta_stat = SCSI_OK;
1317
1318 scb->cmdlen = sizeof(struct scsi_request_sense);
1319 scb->buflen = ss->length;
1320
1321 err = bus_dmamap_load(sc->sc_dmat, scb->dmap,
1322 &xs->sense.scsi_sense, scb->buflen, NULL,
1323 BUS_DMA_READ|BUS_DMA_NOWAIT);
1324 if (err != 0) {
1325 printf("iha_push_sense_request: cannot bus_dmamap_load()\n");
1326 xs->error = XS_DRIVER_STUFFUP;
1327 return 1;
1328 }
1329 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
1330 0, scb->buflen, BUS_DMASYNC_PREREAD);
1331
1332 /* XXX What about queued command? */
1333 iha_exec_scb(sc, scb);
1334
1335 return 0;
1336 }
1337
1338 static void
1339 iha_timeout(void *arg)
1340 {
1341 struct iha_scb *scb = (struct iha_scb *)arg;
1342 struct scsipi_xfer *xs = scb->xs;
1343 struct scsipi_periph *periph;
1344 struct iha_softc *sc;
1345
1346 if (xs == NULL) {
1347 printf("[debug] iha_timeout called with xs == NULL\n");
1348 return;
1349 }
1350
1351 periph = xs->xs_periph;
1352
1353 sc = device_private(periph->periph_channel->chan_adapter->adapt_dev);
1354
1355 scsipi_printaddr(periph);
1356 printf("SCSI OpCode 0x%02x timed out\n", xs->cmd->opcode);
1357 iha_abort_xs(sc, xs, HOST_TIMED_OUT);
1358 }
1359
1360 /*
1361 * iha_abort_xs - find the SCB associated with the supplied xs and
1362 * stop all processing on it, moving it to the done
1363 * queue with the supplied host status value.
1364 */
1365 static void
1366 iha_abort_xs(struct iha_softc *sc, struct scsipi_xfer *xs, uint8_t hastat)
1367 {
1368 struct iha_scb *scb;
1369 int i, s;
1370
1371 s = splbio();
1372
1373 /* Check the pending queue for the SCB pointing to xs */
1374
1375 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain)
1376 if (scb->xs == xs) {
1377 iha_del_pend_scb(sc, scb);
1378 iha_append_done_scb(sc, scb, hastat);
1379 splx(s);
1380 return;
1381 }
1382
1383 /*
1384 * If that didn't work, check all BUSY/SELECTING SCB's for one
1385 * pointing to xs
1386 */
1387
1388 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
1389 switch (scb->status) {
1390 case STATUS_BUSY:
1391 case STATUS_SELECT:
1392 if (scb->xs == xs) {
1393 iha_append_done_scb(sc, scb, hastat);
1394 splx(s);
1395 return;
1396 }
1397 break;
1398 default:
1399 break;
1400 }
1401
1402 splx(s);
1403 }
1404
1405 /*
1406 * iha_data_over_run - return HOST_OK for all SCSI opcodes where BufLen
1407 * is an 'Allocation Length'. All other SCSI opcodes
1408 * get HOST_DO_DU as they SHOULD have xferred all the
1409 * data requested.
1410 *
1411 * The list of opcodes using 'Allocation Length' was
1412 * found by scanning all the SCSI-3 T10 drafts. See
1413 * www.t10.org for the curious with a .pdf reader.
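 *
 *			For example, an INQUIRY (0x12) that asks for more
 *			bytes than the target has to return is a normal,
 *			successful transfer rather than a data overrun.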
1414 */
1415 static uint8_t
1416 iha_data_over_run(struct iha_scb *scb)
1417 {
1418 switch (scb->cmd[0]) {
1419 case 0x03: /* Request Sense SPC-2 */
1420 case 0x12: /* Inquiry SPC-2 */
1421 case 0x1a: /* Mode Sense (6 byte version) SPC-2 */
1422 case 0x1c: /* Receive Diagnostic Results SPC-2 */
1423 case 0x23: /* Read Format Capacities MMC-2 */
1424 case 0x29: /* Read Generation SBC */
1425 case 0x34: /* Read Position SSC-2 */
1426 case 0x37: /* Read Defect Data SBC */
1427 case 0x3c: /* Read Buffer SPC-2 */
1428 case 0x42: /* Read Sub Channel MMC-2 */
1429 case 0x43: /* Read TOC/PMA/ATIP MMC */
1430
1431 /* XXX - 2 with same opcode of 0x44? */
1432 case 0x44: /* Read Header/Read Density Suprt MMC/SSC*/
1433
1434 case 0x46: /* Get Configuration MMC-2 */
1435 case 0x4a: /* Get Event/Status Notification MMC-2 */
1436 case 0x4d: /* Log Sense SPC-2 */
1437 case 0x51: /* Read Disc Information MMC */
1438 case 0x52: /* Read Track Information MMC */
1439 case 0x59: /* Read Master CUE MMC */
1440 case 0x5a: /* Mode Sense (10 byte version) SPC-2 */
1441 case 0x5c: /* Read Buffer Capacity MMC */
1442 case 0x5e: /* Persistent Reserve In SPC-2 */
1443 case 0x84: /* Receive Copy Results SPC-2 */
1444 case 0xa0: /* Report LUNs SPC-2 */
1445 case 0xa3: /* Various Report requests SBC-2/SCC-2*/
1446 case 0xa4: /* Report Key MMC-2 */
1447 case 0xad: /* Read DVD Structure MMC-2 */
1448 case 0xb4: /* Read Element Status (Attached) SMC */
1449 case 0xb5: /* Request Volume Element Address SMC */
1450 case 0xb7: /* Read Defect Data (12 byte ver.) SBC */
1451 case 0xb8: /* Read Element Status (Independ.) SMC */
1452 case 0xba: /* Report Redundancy SCC-2 */
1453 case 0xbd: /* Mechanism Status MMC */
1454 case 0xbe: /* Report Basic Redundancy SCC-2 */
1455
1456 return (HOST_OK);
1457
1458 default:
1459 return (HOST_DO_DU);
1460 }
1461 }
1462
1463 /*
1464 * iha_next_state - process the current SCB as requested in its
1465 * nextstat member.
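 *
 *		   Roughly: 1/2 = selection complete (SELATNSTOP vs SEL_ATN*),
 *		   3 = send the CDB, 4 = data transfer, 5 = data transfer
 *		   (partially) complete, 6 = finish the SCB, 8 = bus device
 *		   reset.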
1466 */
1467 static int
1468 iha_next_state(struct iha_softc *sc)
1469 {
1470
1471 if (sc->sc_actscb == NULL)
1472 return (-1);
1473
1474 switch (sc->sc_actscb->nextstat) {
1475 case 1:
1476 if (iha_state_1(sc) == 3)
1477 goto state_3;
1478 break;
1479
1480 case 2:
1481 switch (iha_state_2(sc)) {
1482 case 3:
1483 goto state_3;
1484 case 4:
1485 goto state_4;
1486 default:
1487 break;
1488 }
1489 break;
1490
1491 case 3:
1492 state_3:
1493 if (iha_state_3(sc) == 4)
1494 goto state_4;
1495 break;
1496
1497 case 4:
1498 state_4:
1499 switch (iha_state_4(sc)) {
1500 case 0:
1501 return (0);
1502 case 6:
1503 goto state_6;
1504 default:
1505 break;
1506 }
1507 break;
1508
1509 case 5:
1510 switch (iha_state_5(sc)) {
1511 case 4:
1512 goto state_4;
1513 case 6:
1514 goto state_6;
1515 default:
1516 break;
1517 }
1518 break;
1519
1520 case 6:
1521 state_6:
1522 iha_state_6(sc);
1523 break;
1524
1525 case 8:
1526 iha_state_8(sc);
1527 break;
1528
1529 default:
1530 #ifdef IHA_DEBUG_STATE
1531 printf("[debug] -unknown state: %i-\n",
1532 sc->sc_actscb->nextstat);
1533 #endif
1534 iha_bad_seq(sc);
1535 break;
1536 }
1537
1538 return (-1);
1539 }
1540
1541 /*
1542 * iha_state_1 - selection is complete after a SELATNSTOP. If the target
1543 * has put the bus into MSG_OUT phase start wide/sync
1544 * negotiation. Otherwise clear the FIFO and go to state 3,
1545 * which will send the SCSI CDB to the target.
1546 */
1547 static int
1548 iha_state_1(struct iha_softc *sc)
1549 {
1550 bus_space_tag_t iot = sc->sc_iot;
1551 bus_space_handle_t ioh = sc->sc_ioh;
1552 struct iha_scb *scb = sc->sc_actscb;
1553 struct tcs *tcs;
1554 int flags;
1555
1556 iha_mark_busy_scb(scb);
1557
1558 tcs = scb->tcs;
1559
1560 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
1561
1562 /*
1563 * If we are in PHASE_MSG_OUT, send
1564 * a) IDENT message (with tags if appropriate)
1565 * b) WDTR if the target is configured to negotiate wide xfers
1566 * ** OR **
1567 * c) SDTR if the target is configured to negotiate sync xfers
1568 * but not wide ones
1569 *
1570 * If we are NOT, then the target is not asking for anything but
1571 * the data/command, so go straight to state 3.
1572 */
1573 if (sc->sc_phase == PHASE_MSG_OUT) {
1574 bus_space_write_1(iot, ioh, TUL_SCTRL1, (ESBUSIN | EHRSL));
1575 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
1576
1577 if (scb->scb_tagmsg != 0) {
1578 bus_space_write_1(iot, ioh, TUL_SFIFO,
1579 scb->scb_tagmsg);
1580 bus_space_write_1(iot, ioh, TUL_SFIFO,
1581 scb->scb_tagid);
1582 }
1583
1584 flags = tcs->flags;
1585 if ((flags & FLAG_NO_NEG_WIDE) == 0) {
1586 if (iha_msgout_wdtr(sc) == -1)
1587 return (-1);
1588 } else if ((flags & FLAG_NO_NEG_SYNC) == 0) {
1589 if (iha_msgout_sdtr(sc) == -1)
1590 return (-1);
1591 }
1592
1593 } else {
1594 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1595 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1596 }
1597
1598 return (3);
1599 }
1600
1601 /*
1602 * iha_state_2 - selection is complete after a SEL_ATN or SEL_ATN3. If the SCSI
1603  *		 CDB has already been sent, go to state 4 to start the data
1604 * xfer. Otherwise reset the FIFO and go to state 3, sending
1605 * the SCSI CDB.
1606 */
1607 static int
1608 iha_state_2(struct iha_softc *sc)
1609 {
1610 bus_space_tag_t iot = sc->sc_iot;
1611 bus_space_handle_t ioh = sc->sc_ioh;
1612 struct iha_scb *scb = sc->sc_actscb;
1613
1614 iha_mark_busy_scb(scb);
1615
1616 bus_space_write_1(iot, ioh, TUL_SCONFIG0, scb->tcs->sconfig0);
1617
1618 if ((sc->sc_status1 & CPDNE) != 0)
1619 return (4);
1620
1621 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1622
1623 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1624
1625 return (3);
1626 }
1627
1628 /*
1629 * iha_state_3 - send the SCSI CDB to the target, processing any status
1630 * or other messages received until that is done or
1631 * abandoned.
1632 */
1633 static int
1634 iha_state_3(struct iha_softc *sc)
1635 {
1636 bus_space_tag_t iot = sc->sc_iot;
1637 bus_space_handle_t ioh = sc->sc_ioh;
1638 struct iha_scb *scb = sc->sc_actscb;
1639 int flags;
1640
1641 for (;;) {
1642 switch (sc->sc_phase) {
1643 case PHASE_CMD_OUT:
1644 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
1645 scb->cmd, scb->cmdlen);
1646 if (iha_wait(sc, XF_FIFO_OUT) == -1)
1647 return (-1);
1648 else if (sc->sc_phase == PHASE_CMD_OUT) {
1649 iha_bad_seq(sc);
1650 return (-1);
1651 } else
1652 return (4);
1653
1654 case PHASE_MSG_IN:
1655 scb->nextstat = 3;
1656 if (iha_msgin(sc) == -1)
1657 return (-1);
1658 break;
1659
1660 case PHASE_STATUS_IN:
1661 if (iha_status_msg(sc) == -1)
1662 return (-1);
1663 break;
1664
1665 case PHASE_MSG_OUT:
1666 flags = scb->tcs->flags;
1667 if ((flags & FLAG_NO_NEG_SYNC) != 0) {
1668 if (iha_msgout(sc, MSG_NOOP) == -1)
1669 return (-1);
1670 } else if (iha_msgout_sdtr(sc) == -1)
1671 return (-1);
1672 break;
1673
1674 default:
1675 printf("[debug] -s3- bad phase = %d\n", sc->sc_phase);
1676 iha_bad_seq(sc);
1677 return (-1);
1678 }
1679 }
1680 }
1681
1682 /*
1683 * iha_state_4 - start a data xfer. Handle any bus state
1684 * transitions until PHASE_DATA_IN/_OUT
1685 * or the attempt is abandoned. If there is
1686 * no data to xfer, go to state 6 and finish
1687 * processing the current SCB.
1688 */
1689 static int
1690 iha_state_4(struct iha_softc *sc)
1691 {
1692 struct iha_scb *scb = sc->sc_actscb;
1693
1694 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) ==
1695 (FLAG_DATAIN | FLAG_DATAOUT))
1696 return (6); /* Both dir flags set => NO xfer was requested */
1697
1698 for (;;) {
1699 if (scb->buflen == 0)
1700 return (6);
1701
1702 switch (sc->sc_phase) {
1703 case PHASE_STATUS_IN:
1704 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
1705 scb->ha_stat = iha_data_over_run(scb);
1706 if ((iha_status_msg(sc)) == -1)
1707 return (-1);
1708 break;
1709
1710 case PHASE_MSG_IN:
1711 scb->nextstat = 4;
1712 if (iha_msgin(sc) == -1)
1713 return (-1);
1714 break;
1715
1716 case PHASE_MSG_OUT:
1717 if ((sc->sc_status0 & SPERR) != 0) {
1718 scb->buflen = 0;
1719 scb->ha_stat = HOST_SPERR;
1720 if (iha_msgout(sc, MSG_INITIATOR_DET_ERR) == -1)
1721 return (-1);
1722 else
1723 return (6);
1724 } else {
1725 if (iha_msgout(sc, MSG_NOOP) == -1)
1726 return (-1);
1727 }
1728 break;
1729
1730 case PHASE_DATA_IN:
1731 return (iha_xfer_data(sc, scb, FLAG_DATAIN));
1732
1733 case PHASE_DATA_OUT:
1734 return (iha_xfer_data(sc, scb, FLAG_DATAOUT));
1735
1736 default:
1737 iha_bad_seq(sc);
1738 return (-1);
1739 }
1740 }
1741 }
1742
1743 /*
1744 * iha_state_5 - handle the partial or final completion of the current
1745 * data xfer. If DMA is still active stop it. If there is
1746 * more data to xfer, go to state 4 and start the xfer.
1747 * If not go to state 6 and finish the SCB.
1748 */
1749 static int
1750 iha_state_5(struct iha_softc *sc)
1751 {
1752 bus_space_tag_t iot = sc->sc_iot;
1753 bus_space_handle_t ioh = sc->sc_ioh;
1754 struct iha_scb *scb = sc->sc_actscb;
1755 struct iha_sg_element *sg;
1756 uint32_t cnt;
1757 uint8_t period, stat;
1758 long xcnt; /* cannot use unsigned!! see code: if (xcnt < 0) */
1759 int i;
1760
1761 cnt = bus_space_read_4(iot, ioh, TUL_STCNT0) & TCNT;
1762
1763 /*
1764 * Stop any pending DMA activity and check for parity error.
1765 */
1766
1767 if ((bus_space_read_1(iot, ioh, TUL_DCMD) & XDIR) != 0) {
1768 /* Input Operation */
1769 if ((sc->sc_status0 & SPERR) != 0)
1770 scb->ha_stat = HOST_SPERR;
1771
1772 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
1773 bus_space_write_1(iot, ioh, TUL_DCTRL0,
1774 bus_space_read_1(iot, ioh, TUL_DCTRL0) | SXSTP);
1775 while (bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND)
1776 ;
1777 }
1778
1779 } else {
1780 /* Output Operation */
1781 if ((sc->sc_status1 & SXCMP) == 0) {
1782 period = scb->tcs->syncm;
1783 if ((period & PERIOD_WIDE_SCSI) != 0)
1784 cnt += (bus_space_read_1(iot, ioh,
1785 TUL_SFIFOCNT) & FIFOC) * 2;
1786 else
1787 cnt += bus_space_read_1(iot, ioh,
1788 TUL_SFIFOCNT) & FIFOC;
1789 }
1790
1791 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
1792 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
1793 do
1794 stat = bus_space_read_1(iot, ioh, TUL_ISTUS0);
1795 while ((stat & DABT) == 0);
1796 }
1797
1798 if ((cnt == 1) && (sc->sc_phase == PHASE_DATA_OUT)) {
1799 if (iha_wait(sc, XF_FIFO_OUT) == -1)
1800 return (-1);
1801 cnt = 0;
1802
1803 } else if ((sc->sc_status1 & SXCMP) == 0)
1804 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1805 }
1806
1807 if (cnt == 0) {
1808 scb->buflen = 0;
1809 return (6);
1810 }
1811
1812 /* Update active data pointer and restart the I/O at the new point */
1813
1814 xcnt = scb->buflen - cnt; /* xcnt == bytes xferred */
1815 scb->buflen = cnt; /* cnt == bytes left */
1816
1817 if ((scb->flags & FLAG_SG) != 0) {
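		/*
		 * Walk the scatter/gather list, consuming the bytes already
		 * transferred; the entry where the running count goes
		 * negative is where the transfer stopped, so trim it and
		 * restart the I/O from there.
		 */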
1818 sg = &scb->sglist[scb->sg_index];
1819 for (i = scb->sg_index; i < scb->sg_max; sg++, i++) {
1820 xcnt -= le32toh(sg->sg_len);
1821 if (xcnt < 0) {
1822 xcnt += le32toh(sg->sg_len);
1823
1824 sg->sg_addr =
1825 htole32(le32toh(sg->sg_addr) + xcnt);
1826 sg->sg_len =
1827 htole32(le32toh(sg->sg_len) - xcnt);
1828 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1829 scb->sgoffset, IHA_SG_SIZE,
1830 BUS_DMASYNC_PREWRITE);
1831
1832 scb->bufaddr += (i - scb->sg_index) *
1833 sizeof(struct iha_sg_element);
1834 scb->sg_size = scb->sg_max - i;
1835 scb->sg_index = i;
1836
1837 return (4);
1838 }
1839 }
1840 return (6);
1841
1842 } else
1843 scb->bufaddr += xcnt;
1844
1845 return (4);
1846 }
1847
1848 /*
1849 * iha_state_6 - finish off the active scb (may require several
1850 * iterations if PHASE_MSG_IN) and return -1 to indicate
1851 * the bus is free.
1852 */
1853 static int
1854 iha_state_6(struct iha_softc *sc)
1855 {
1856
1857 for (;;) {
1858 switch (sc->sc_phase) {
1859 case PHASE_STATUS_IN:
1860 if (iha_status_msg(sc) == -1)
1861 return (-1);
1862 break;
1863
1864 case PHASE_MSG_IN:
1865 sc->sc_actscb->nextstat = 6;
1866 if ((iha_msgin(sc)) == -1)
1867 return (-1);
1868 break;
1869
1870 case PHASE_MSG_OUT:
1871 if ((iha_msgout(sc, MSG_NOOP)) == -1)
1872 return (-1);
1873 break;
1874
1875 case PHASE_DATA_IN:
1876 if (iha_xpad_in(sc) == -1)
1877 return (-1);
1878 break;
1879
1880 case PHASE_DATA_OUT:
1881 if (iha_xpad_out(sc) == -1)
1882 return (-1);
1883 break;
1884
1885 default:
1886 iha_bad_seq(sc);
1887 return (-1);
1888 }
1889 }
1890 }
1891
1892 /*
1893 * iha_state_8 - reset the active device and all busy SCBs using it
1894 */
1895 static int
1896 iha_state_8(struct iha_softc *sc)
1897 {
1898 bus_space_tag_t iot = sc->sc_iot;
1899 bus_space_handle_t ioh = sc->sc_ioh;
1900 struct iha_scb *scb;
1901 int i;
1902 uint8_t tar;
1903
1904 if (sc->sc_phase == PHASE_MSG_OUT) {
1905 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_BUS_DEV_RESET);
1906
1907 scb = sc->sc_actscb;
1908
1909 /* This SCB finished correctly -- resetting the device */
1910 iha_append_done_scb(sc, scb, HOST_OK);
1911
1912 iha_reset_tcs(scb->tcs, sc->sc_sconf1);
1913
1914 tar = scb->target;
1915 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
1916 if (scb->target == tar)
1917 switch (scb->status) {
1918 case STATUS_BUSY:
1919 iha_append_done_scb(sc,
1920 scb, HOST_DEV_RST);
1921 break;
1922
1923 case STATUS_SELECT:
1924 iha_push_pend_scb(sc, scb);
1925 break;
1926
1927 default:
1928 break;
1929 }
1930
1931 sc->sc_flags |= FLAG_EXPECT_DISC;
1932
1933 if (iha_wait(sc, XF_FIFO_OUT) == -1)
1934 return (-1);
1935 }
1936
1937 iha_bad_seq(sc);
1938 return (-1);
1939 }
1940
1941 /*
1942 * iha_xfer_data - initiate the DMA xfer of the data
1943 */
1944 static int
1945 iha_xfer_data(struct iha_softc *sc, struct iha_scb *scb, int direction)
1946 {
1947 bus_space_tag_t iot = sc->sc_iot;
1948 bus_space_handle_t ioh = sc->sc_ioh;
1949 uint32_t xferlen;
1950 uint8_t xfercmd;
1951
1952 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != direction)
1953 return (6); /* wrong direction, abandon I/O */
1954
1955 bus_space_write_4(iot, ioh, TUL_STCNT0, scb->buflen);
1956
1957 xfercmd = STRXFR;
1958 if (direction == FLAG_DATAIN)
1959 xfercmd |= XDIR;
1960
1961 if (scb->flags & FLAG_SG) {
1962 xferlen = scb->sg_size * sizeof(struct iha_sg_element);
1963 xfercmd |= SGXFR;
1964 } else
1965 xferlen = scb->buflen;
1966
1967 bus_space_write_4(iot, ioh, TUL_DXC, xferlen);
1968 bus_space_write_4(iot, ioh, TUL_DXPA, scb->bufaddr);
1969 bus_space_write_1(iot, ioh, TUL_DCMD, xfercmd);
1970
1971 bus_space_write_1(iot, ioh, TUL_SCMD,
1972 (direction == FLAG_DATAIN) ? XF_DMA_IN : XF_DMA_OUT);
1973
1974 scb->nextstat = 5;
1975
1976 return (0);
1977 }
1978
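/*
 * iha_xpad_in - absorb unexpected DATA IN bytes by reading and discarding
 *               them (one transfer at a time) until the bus changes phase,
 *               noting a data over/under run on the SCB.
 */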
1979 static int
1980 iha_xpad_in(struct iha_softc *sc)
1981 {
1982 bus_space_tag_t iot = sc->sc_iot;
1983 bus_space_handle_t ioh = sc->sc_ioh;
1984 struct iha_scb *scb = sc->sc_actscb;
1985
1986 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
1987 scb->ha_stat = HOST_DO_DU;
1988
1989 for (;;) {
1990 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
1991 bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
1992 else
1993 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1994
1995 switch (iha_wait(sc, XF_FIFO_IN)) {
1996 case -1:
1997 return (-1);
1998
1999 case PHASE_DATA_IN:
2000 (void)bus_space_read_1(iot, ioh, TUL_SFIFO);
2001 break;
2002
2003 default:
2004 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2005 return (6);
2006 }
2007 }
2008 }
2009
2010 static int
2011 iha_xpad_out(struct iha_softc *sc)
2012 {
2013 bus_space_tag_t iot = sc->sc_iot;
2014 bus_space_handle_t ioh = sc->sc_ioh;
2015 struct iha_scb *scb = sc->sc_actscb;
2016
2017 if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
2018 scb->ha_stat = HOST_DO_DU;
2019
2020 for (;;) {
2021 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
2022 bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
2023 else
2024 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
2025
2026 bus_space_write_1(iot, ioh, TUL_SFIFO, 0);
2027
2028 switch (iha_wait(sc, XF_FIFO_OUT)) {
2029 case -1:
2030 return (-1);
2031
2032 case PHASE_DATA_OUT:
2033 break;
2034
2035 default:
2036 /* Disable wide CPU to allow read 16 bits */
2037 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
2038 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2039 return (6);
2040 }
2041 }
2042 }
2043
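/*
 * iha_status_msg - use a CMD_COMP sequence to collect the status byte,
 *                  then handle the following message phase (normally a
 *                  CMDCOMPLETE, which disconnects and finishes the SCB).
 */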
2044 static int
2045 iha_status_msg(struct iha_softc *sc)
2046 {
2047 bus_space_tag_t iot = sc->sc_iot;
2048 bus_space_handle_t ioh = sc->sc_ioh;
2049 struct iha_scb *scb;
2050 uint8_t msg;
2051 int phase;
2052
2053 if ((phase = iha_wait(sc, CMD_COMP)) == -1)
2054 return (-1);
2055
2056 scb = sc->sc_actscb;
2057
2058 scb->ta_stat = bus_space_read_1(iot, ioh, TUL_SFIFO);
2059
2060 if (phase == PHASE_MSG_OUT) {
2061 if ((sc->sc_status0 & SPERR) == 0)
2062 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_NOOP);
2063 else
2064 bus_space_write_1(iot, ioh, TUL_SFIFO,
2065 MSG_PARITY_ERROR);
2066
2067 return (iha_wait(sc, XF_FIFO_OUT));
2068
2069 } else if (phase == PHASE_MSG_IN) {
2070 msg = bus_space_read_1(iot, ioh, TUL_SFIFO);
2071
2072 if ((sc->sc_status0 & SPERR) != 0)
2073 switch (iha_wait(sc, MSG_ACCEPT)) {
2074 case -1:
2075 return (-1);
2076 case PHASE_MSG_OUT:
2077 bus_space_write_1(iot, ioh, TUL_SFIFO,
2078 MSG_PARITY_ERROR);
2079 return (iha_wait(sc, XF_FIFO_OUT));
2080 default:
2081 iha_bad_seq(sc);
2082 return (-1);
2083 }
2084
2085 if (msg == MSG_CMDCOMPLETE) {
2086 if ((scb->ta_stat &
2087 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) {
2088 iha_bad_seq(sc);
2089 return (-1);
2090 }
2091 sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
2092 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2093 return (iha_wait(sc, MSG_ACCEPT));
2094 }
2095
2096 if ((msg == MSG_LINK_CMD_COMPLETE)
2097 || (msg == MSG_LINK_CMD_COMPLETEF)) {
2098 if ((scb->ta_stat &
2099 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM)
2100 return (iha_wait(sc, MSG_ACCEPT));
2101 }
2102 }
2103
2104 iha_bad_seq(sc);
2105 return (-1);
2106 }
2107
2108 /*
2109 * iha_busfree - SCSI bus free detected as a result of a TIMEOUT or
2110 * DISCONNECT interrupt. Reset the tulip FIFO and
2111 * SCONFIG0 and enable hardware reselect. Move any active
2112  *		SCB to the sc_donescb list, recording an appropriate host status
2113 * if an I/O was active.
2114 */
2115 static void
2116 iha_busfree(struct iha_softc *sc)
2117 {
2118 bus_space_tag_t iot = sc->sc_iot;
2119 bus_space_handle_t ioh = sc->sc_ioh;
2120 struct iha_scb *scb;
2121
2122 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2123 bus_space_write_1(iot, ioh, TUL_SCONFIG0, SCONFIG0DEFAULT);
2124 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
2125
2126 scb = sc->sc_actscb;
2127
2128 if (scb != NULL) {
2129 if (scb->status == STATUS_SELECT)
2130 /* selection timeout */
2131 iha_append_done_scb(sc, scb, HOST_SEL_TOUT);
2132 else
2133 /* Unexpected bus free */
2134 iha_append_done_scb(sc, scb, HOST_BAD_PHAS);
2135 }
2136 }
2137
2138 /*
2139 * iha_resel - handle a detected SCSI bus reselection request.
2140 */
2141 static int
2142 iha_resel(struct iha_softc *sc)
2143 {
2144 bus_space_tag_t iot = sc->sc_iot;
2145 bus_space_handle_t ioh = sc->sc_ioh;
2146 struct iha_scb *scb;
2147 struct tcs *tcs;
2148 uint8_t tag, target, lun, msg, abortmsg;
2149
2150 if (sc->sc_actscb != NULL) {
2151 		if (sc->sc_actscb->status == STATUS_SELECT)
2152 iha_push_pend_scb(sc, sc->sc_actscb);
2153 sc->sc_actscb = NULL;
2154 }
2155
2156 target = bus_space_read_1(iot, ioh, TUL_SBID);
2157 lun = bus_space_read_1(iot, ioh, TUL_SALVC) & IHA_MSG_IDENTIFY_LUNMASK;
2158
2159 tcs = &sc->sc_tcs[target];
2160
2161 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2162 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2163
2164 abortmsg = MSG_ABORT; /* until a valid tag has been obtained */
2165
2166 if (tcs->ntagscb != NULL)
2167 /* There is a non-tagged I/O active on the target */
2168 scb = tcs->ntagscb;
2169
2170 else {
2171 /*
2172 		 * Since there is no active non-tagged operation,
2173 * read the tag type, the tag itself, and find
2174 * the appropriate scb by indexing sc_scb with
2175 * the tag.
2176 */
2177
2178 switch (iha_wait(sc, MSG_ACCEPT)) {
2179 case -1:
2180 return (-1);
2181 case PHASE_MSG_IN:
2182 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
2183 if ((iha_wait(sc, XF_FIFO_IN)) == -1)
2184 return (-1);
2185 break;
2186 default:
2187 goto abort;
2188 }
2189
2190 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag Msg */
2191
2192 if ((msg < MSG_SIMPLE_Q_TAG) || (msg > MSG_ORDERED_Q_TAG))
2193 goto abort;
2194
2195 switch (iha_wait(sc, MSG_ACCEPT)) {
2196 case -1:
2197 return (-1);
2198 case PHASE_MSG_IN:
2199 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
2200 if ((iha_wait(sc, XF_FIFO_IN)) == -1)
2201 return (-1);
2202 break;
2203 default:
2204 goto abort;
2205 }
2206
2207 tag = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag ID */
2208 scb = &sc->sc_scb[tag];
2209
2210 		abortmsg = MSG_ABORT_TAG; /* Now that we have a valid tag! */
2211 }
2212
2213 if ((scb->target != target)
2214 || (scb->lun != lun)
2215 || (scb->status != STATUS_BUSY)) {
2216 abort:
2217 iha_msgout_abort(sc, abortmsg);
2218 return (-1);
2219 }
2220
2221 sc->sc_actscb = scb;
2222
2223 if (iha_wait(sc, MSG_ACCEPT) == -1)
2224 return (-1);
2225
2226 return (iha_next_state(sc));
2227 }
2228
2229 static int
2230 iha_msgin(struct iha_softc *sc)
2231 {
2232 bus_space_tag_t iot = sc->sc_iot;
2233 bus_space_handle_t ioh = sc->sc_ioh;
2234 int flags;
2235 int phase;
2236 uint8_t msg;
2237
2238 for (;;) {
2239 if ((bus_space_read_1(iot, ioh, TUL_SFIFOCNT) & FIFOC) > 0)
2240 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2241
2242 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
2243
2244 phase = iha_wait(sc, XF_FIFO_IN);
2245 msg = bus_space_read_1(iot, ioh, TUL_SFIFO);
2246
2247 switch (msg) {
2248 case MSG_DISCONNECT:
2249 sc->sc_flags |= FLAG_EXPECT_DISC;
2250 if (iha_wait(sc, MSG_ACCEPT) != -1)
2251 iha_bad_seq(sc);
2252 phase = -1;
2253 break;
2254 case MSG_SAVEDATAPOINTER:
2255 case MSG_RESTOREPOINTERS:
2256 case MSG_NOOP:
2257 phase = iha_wait(sc, MSG_ACCEPT);
2258 break;
2259 case MSG_MESSAGE_REJECT:
2260 			/* XXX - need to clear FIFO like other 'Clear ATN'? */
2261 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
2262 flags = sc->sc_actscb->tcs->flags;
2263 if ((flags & FLAG_NO_NEG_SYNC) == 0)
2264 iha_set_ssig(sc, REQ | BSY | SEL, ATN);
2265 phase = iha_wait(sc, MSG_ACCEPT);
2266 break;
2267 case MSG_EXTENDED:
2268 phase = iha_msgin_extended(sc);
2269 break;
2270 case MSG_IGN_WIDE_RESIDUE:
2271 phase = iha_msgin_ignore_wid_resid(sc);
2272 break;
2273 case MSG_CMDCOMPLETE:
2274 sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
2275 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2276 phase = iha_wait(sc, MSG_ACCEPT);
2277 if (phase != -1) {
2278 iha_bad_seq(sc);
2279 return (-1);
2280 }
2281 break;
2282 default:
2283 printf("[debug] iha_msgin: bad msg type: %d\n", msg);
2284 phase = iha_msgout_reject(sc);
2285 break;
2286 }
2287
2288 if (phase != PHASE_MSG_IN)
2289 return (phase);
2290 }
2291 /* NOTREACHED */
2292 }
2293
2294 static int
2295 iha_msgin_extended(struct iha_softc *sc)
2296 {
2297 bus_space_tag_t iot = sc->sc_iot;
2298 bus_space_handle_t ioh = sc->sc_ioh;
2299 int flags, i, phase, msglen, msgcode;
2300
2301 /*
2302 * XXX - can we just stop reading and reject, or do we have to
2303 	 *	 read all input, discarding the excess, and then reject?
2304 */
2305 for (i = 0; i < IHA_MAX_EXTENDED_MSG; i++) {
2306 phase = iha_wait(sc, MSG_ACCEPT);
2307
2308 if (phase != PHASE_MSG_IN)
2309 return (phase);
2310
2311 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
2312
2313 if (iha_wait(sc, XF_FIFO_IN) == -1)
2314 return (-1);
2315
2316 sc->sc_msg[i] = bus_space_read_1(iot, ioh, TUL_SFIFO);
2317
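		/*
		 * sc_msg[0] is the extended message length; bytes
		 * 1..sc_msg[0] hold the message code and its arguments,
		 * so stop once byte sc_msg[0] has been read.
		 */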
2318 if (sc->sc_msg[0] == i)
2319 break;
2320 }
2321
2322 msglen = sc->sc_msg[0];
2323 msgcode = sc->sc_msg[1];
2324
2325 if ((msglen == MSG_EXT_SDTR_LEN) && (msgcode == MSG_EXT_SDTR)) {
2326 if (iha_msgin_sdtr(sc) == 0) {
2327 iha_sync_done(sc);
2328 return (iha_wait(sc, MSG_ACCEPT));
2329 }
2330
2331 iha_set_ssig(sc, REQ | BSY | SEL, ATN);
2332
2333 phase = iha_wait(sc, MSG_ACCEPT);
2334 if (phase != PHASE_MSG_OUT)
2335 return (phase);
2336
2337 /* Clear FIFO for important message - final SYNC offer */
2338 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2339
2340 iha_sync_done(sc); /* This is our final offer */
2341
2342 } else if ((msglen == MSG_EXT_WDTR_LEN) && (msgcode == MSG_EXT_WDTR)) {
2343
2344 flags = sc->sc_actscb->tcs->flags;
2345
2346 if ((flags & FLAG_NO_WIDE) != 0)
2347 			/* Offer 8-bit xfers only */
2348 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_8_BIT;
2349
2350 else if (sc->sc_msg[2] > MSG_EXT_WDTR_BUS_32_BIT)
2351 /* BAD MSG */
2352 return (iha_msgout_reject(sc));
2353
2354 else if (sc->sc_msg[2] == MSG_EXT_WDTR_BUS_32_BIT)
2355 			/* Offer 16-bit instead */
2356 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;
2357
2358 else {
2359 iha_wide_done(sc);
2360 if ((flags & FLAG_NO_NEG_SYNC) == 0)
2361 iha_set_ssig(sc, REQ | BSY | SEL, ATN);
2362 return (iha_wait(sc, MSG_ACCEPT));
2363 }
2364
2365 iha_set_ssig(sc, REQ | BSY | SEL, ATN);
2366
2367 phase = iha_wait(sc, MSG_ACCEPT);
2368 if (phase != PHASE_MSG_OUT)
2369 return (phase);
2370 } else
2371 return (iha_msgout_reject(sc));
2372
2373 return (iha_msgout_extended(sc));
2374 }
2375
2376 /*
2377  * iha_msgin_sdtr - check the SDTR msg in sc_msg. If the offer is
2378  *		     acceptable, leave sc_msg as is and return 0.
2379  *		     If the negotiation must continue, modify sc_msg
2380  *		     as needed and return 1.
2381 */
2382 static int
2383 iha_msgin_sdtr(struct iha_softc *sc)
2384 {
2385 int flags;
2386 int newoffer;
2387 uint8_t default_period;
2388
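	/*
	 * In the extended SDTR message as stored here, sc_msg[2] is the
	 * transfer period factor (in units of 4 ns, as held in
	 * iha_rate_tbl) and sc_msg[3] is the REQ/ACK offset.
	 */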
2389 flags = sc->sc_actscb->tcs->flags;
2390
2391 default_period = iha_rate_tbl[flags & FLAG_SCSI_RATE];
2392
2393 if (sc->sc_msg[3] == 0)
2394 /* target offered async only. Accept it. */
2395 return (0);
2396
2397 newoffer = 0;
2398
2399 if ((flags & FLAG_NO_SYNC) != 0) {
2400 sc->sc_msg[3] = 0;
2401 newoffer = 1;
2402 }
2403
2404 if (sc->sc_msg[3] > IHA_MAX_OFFSET) {
2405 sc->sc_msg[3] = IHA_MAX_OFFSET;
2406 newoffer = 1;
2407 }
2408
2409 if (sc->sc_msg[2] < default_period) {
2410 sc->sc_msg[2] = default_period;
2411 newoffer = 1;
2412 }
2413
2414 if (sc->sc_msg[2] > IHA_MAX_PERIOD) {
2415 /* Use async */
2416 sc->sc_msg[3] = 0;
2417 newoffer = 1;
2418 }
2419
2420 return (newoffer);
2421 }
2422
2423 static int
2424 iha_msgin_ignore_wid_resid(struct iha_softc *sc)
2425 {
2426 bus_space_tag_t iot = sc->sc_iot;
2427 bus_space_handle_t ioh = sc->sc_ioh;
2428 int phase;
2429
2430 phase = iha_wait(sc, MSG_ACCEPT);
2431
2432 if (phase == PHASE_MSG_IN) {
2433 phase = iha_wait(sc, XF_FIFO_IN);
2434
2435 if (phase != -1) {
2436 bus_space_write_1(iot, ioh, TUL_SFIFO, 0);
2437 (void)bus_space_read_1(iot, ioh, TUL_SFIFO);
2438 (void)bus_space_read_1(iot, ioh, TUL_SFIFO);
2439
2440 phase = iha_wait(sc, MSG_ACCEPT);
2441 }
2442 }
2443
2444 return (phase);
2445 }
2446
2447 static int
2448 iha_msgout(struct iha_softc *sc, uint8_t msg)
2449 {
2450
2451 bus_space_write_1(sc->sc_iot, sc->sc_ioh, TUL_SFIFO, msg);
2452
2453 return (iha_wait(sc, XF_FIFO_OUT));
2454 }
2455
2456 static void
2457 iha_msgout_abort(struct iha_softc *sc, uint8_t aborttype)
2458 {
2459
2460 iha_set_ssig(sc, REQ | BSY | SEL, ATN);
2461
2462 switch (iha_wait(sc, MSG_ACCEPT)) {
2463 case -1:
2464 break;
2465
2466 case PHASE_MSG_OUT:
2467 sc->sc_flags |= FLAG_EXPECT_DISC;
2468 if (iha_msgout(sc, aborttype) != -1)
2469 iha_bad_seq(sc);
2470 break;
2471
2472 default:
2473 iha_bad_seq(sc);
2474 break;
2475 }
2476 }
2477
2478 static int
2479 iha_msgout_reject(struct iha_softc *sc)
2480 {
2481
2482 iha_set_ssig(sc, REQ | BSY | SEL, ATN);
2483
2484 if (iha_wait(sc, MSG_ACCEPT) == PHASE_MSG_OUT)
2485 return (iha_msgout(sc, MSG_MESSAGE_REJECT));
2486
2487 return (-1);
2488 }
2489
2490 static int
2491 iha_msgout_extended(struct iha_softc *sc)
2492 {
2493 bus_space_tag_t iot = sc->sc_iot;
2494 bus_space_handle_t ioh = sc->sc_ioh;
2495 int phase;
2496
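	/*
	 * Send MSG_EXTENDED followed by sc_msg[0] + 1 bytes from sc_msg:
	 * the length byte itself, the message code and its arguments.
	 */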
2497 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);
2498
2499 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
2500 sc->sc_msg, sc->sc_msg[0] + 1);
2501
2502 phase = iha_wait(sc, XF_FIFO_OUT);
2503
2504 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2505 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
2506
2507 return (phase);
2508 }
2509
2510 static int
2511 iha_msgout_wdtr(struct iha_softc *sc)
2512 {
2513
2514 sc->sc_actscb->tcs->flags |= FLAG_WIDE_DONE;
2515
2516 sc->sc_msg[0] = MSG_EXT_WDTR_LEN;
2517 sc->sc_msg[1] = MSG_EXT_WDTR;
2518 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;
2519
2520 return (iha_msgout_extended(sc));
2521 }
2522
2523 static int
2524 iha_msgout_sdtr(struct iha_softc *sc)
2525 {
2526 struct tcs *tcs = sc->sc_actscb->tcs;
2527
2528 tcs->flags |= FLAG_SYNC_DONE;
2529
2530 sc->sc_msg[0] = MSG_EXT_SDTR_LEN;
2531 sc->sc_msg[1] = MSG_EXT_SDTR;
2532 sc->sc_msg[2] = iha_rate_tbl[tcs->flags & FLAG_SCSI_RATE];
2533 sc->sc_msg[3] = IHA_MAX_OFFSET; /* REQ/ACK */
2534
2535 return (iha_msgout_extended(sc));
2536 }
2537
2538 static void
2539 iha_wide_done(struct iha_softc *sc)
2540 {
2541 bus_space_tag_t iot = sc->sc_iot;
2542 bus_space_handle_t ioh = sc->sc_ioh;
2543 struct tcs *tcs = sc->sc_actscb->tcs;
2544
2545 tcs->syncm = 0;
2546 tcs->period = 0;
2547 tcs->offset = 0;
2548
2549 if (sc->sc_msg[2] != 0)
2550 tcs->syncm |= PERIOD_WIDE_SCSI;
2551
2552 tcs->sconfig0 &= ~ALTPD;
2553 tcs->flags &= ~FLAG_SYNC_DONE;
2554 tcs->flags |= FLAG_WIDE_DONE;
2555
2556 iha_update_xfer_mode(sc, sc->sc_actscb->target);
2557
2558 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2559 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2560 }
2561
2562 static void
2563 iha_sync_done(struct iha_softc *sc)
2564 {
2565 bus_space_tag_t iot = sc->sc_iot;
2566 bus_space_handle_t ioh = sc->sc_ioh;
2567 struct tcs *tcs = sc->sc_actscb->tcs;
2568 int i;
2569
2570 tcs->period = sc->sc_msg[2];
2571 tcs->offset = sc->sc_msg[3];
2572 if (tcs->offset != 0) {
2573 tcs->syncm |= tcs->offset;
2574
2575 		/* pick the fastest rate that still meets the agreed period */
2576 for (i = 0; i < sizeof(iha_rate_tbl); i++)
2577 if (iha_rate_tbl[i] >= tcs->period)
2578 break;
2579
2580 tcs->syncm |= (i << 4);
2581 tcs->sconfig0 |= ALTPD;
2582 }
2583
2584 tcs->flags |= FLAG_SYNC_DONE;
2585
2586 iha_update_xfer_mode(sc, sc->sc_actscb->target);
2587
2588 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2589 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2590 }
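
#if 0
/*
 * Illustrative sketch only, not part of the driver: iha_sync_done()
 * above stores the agreed period factor (in 4 ns units, as held in
 * iha_rate_tbl) and packs the rate index and REQ/ACK offset into
 * tcs->syncm.  The helper below shows how such a period factor maps to
 * an approximate synchronous rate in kHz; the function name is an
 * assumption made for this example.
 */
static unsigned int
iha_period_factor_to_khz(unsigned int factor)
{

	if (factor == 0)
		return (0);	/* async - no synchronous rate */

	/* period = factor * 4 ns, so rate in kHz = 1000000 / (factor * 4) */
	return (1000000 / (factor * 4));
}
#endif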
2591
2592 /*
2593 * iha_bad_seq - a SCSI bus phase was encountered out of the
2594 * correct/expected sequence. Reset the SCSI bus.
2595 */
2596 static void
2597 iha_bad_seq(struct iha_softc *sc)
2598 {
2599 struct iha_scb *scb = sc->sc_actscb;
2600
2601 if (scb != NULL)
2602 iha_append_done_scb(sc, scb, HOST_BAD_PHAS);
2603
2604 iha_reset_scsi_bus(sc);
2605 iha_reset_chip(sc);
2606 }
2607
2608 /*
2609  * iha_read_eeprom - read the Serial EEPROM contents into *eeprom,
2610  *		      panicking if the read fails. XXX - Writing does NOT work!
2611 */
2612 static void
2613 iha_read_eeprom(struct iha_softc *sc, struct iha_eeprom *eeprom)
2614 {
2615 bus_space_tag_t iot = sc->sc_iot;
2616 bus_space_handle_t ioh = sc->sc_ioh;
2617 uint16_t *tbuf = (uint16_t *)eeprom;
2618 uint8_t gctrl;
2619
2620 /* Enable EEProm programming */
2621 gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) | EEPRG;
2622 bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
2623
2624 /* Read EEProm */
2625 if (iha_se2_rd_all(sc, tbuf) == 0)
2626 panic("%s: cannot read EEPROM", device_xname(sc->sc_dev));
2627
2628 /* Disable EEProm programming */
2629 gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) & ~EEPRG;
2630 bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
2631 }
2632
2633 #ifdef notused
2634 /*
2635  * iha_se2_update_all - write the eeprom_default SCSI H/A configuration
2636  *			 pattern, followed by its checksum, into the
2637  *			 serial EEPROM.
2639 */
2640 static void
2641 iha_se2_update_all(struct iha_softc *sc)
2642 {
2643 bus_space_tag_t iot = sc->sc_iot;
2644 bus_space_handle_t ioh = sc->sc_ioh;
2645 uint16_t *np;
2646 uint32_t chksum;
2647 int i;
2648
2649 /* Enable erase/write state of EEPROM */
2650 iha_se2_instr(sc, ENABLE_ERASE);
2651 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2652 EEP_WAIT();
2653
2654 np = (uint16_t *)&eeprom_default;
2655
2656 for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
2657 iha_se2_wr(sc, i, *np);
2658 chksum += *np++;
2659 }
2660
2661 chksum &= 0x0000ffff;
2662 iha_se2_wr(sc, 31, chksum);
2663
2664 /* Disable erase/write state of EEPROM */
2665 iha_se2_instr(sc, 0);
2666 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2667 EEP_WAIT();
2668 }
2669
2670 /*
2671 * iha_se2_wr - write the given 16 bit value into the Serial EEPROM
2672 * at the specified offset
2673 */
2674 static void
2675 iha_se2_wr(struct iha_softc *sc, int addr, uint16_t writeword)
2676 {
2677 bus_space_tag_t iot = sc->sc_iot;
2678 bus_space_handle_t ioh = sc->sc_ioh;
2679 int i, bit;
2680
2681 /* send 'WRITE' Instruction == address | WRITE bit */
2682 iha_se2_instr(sc, addr | WRITE);
2683
2684 for (i = 16; i > 0; i--) {
2685 if (writeword & (1 << (i - 1)))
2686 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRDO);
2687 else
2688 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2689 EEP_WAIT();
2690 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2691 EEP_WAIT();
2692 }
2693
2694 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2695 EEP_WAIT();
2696 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2697 EEP_WAIT();
2698 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2699 EEP_WAIT();
2700
2701 for (;;) {
2702 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2703 EEP_WAIT();
2704 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2705 EEP_WAIT();
2706 bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI;
2707 EEP_WAIT();
2708 if (bit != 0)
2709 break; /* write complete */
2710 }
2711
2712 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2713 }
2714 #endif
2715
2716 /*
2717 * iha_se2_rd - read & return the 16 bit value at the specified
2718 * offset in the Serial E2PROM
2719 *
2720 */
2721 static uint16_t
2722 iha_se2_rd(struct iha_softc *sc, int addr)
2723 {
2724 bus_space_tag_t iot = sc->sc_iot;
2725 bus_space_handle_t ioh = sc->sc_ioh;
2726 int i, bit;
2727 uint16_t readword;
2728
2729 /* Send 'READ' instruction == address | READ bit */
2730 iha_se2_instr(sc, addr | READ);
2731
2732 readword = 0;
2733 for (i = 16; i > 0; i--) {
2734 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2735 EEP_WAIT();
2736 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2737 EEP_WAIT();
2738 		/* sample data after the falling edge of the clock */
2739 		bit = (bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI) ? 1 : 0;
2740 EEP_WAIT();
2741
2742 readword |= bit << (i - 1);
2743 }
2744
2745 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2746
2747 return (readword);
2748 }
2749
2750 /*
2751 * iha_se2_rd_all - Read SCSI H/A config parameters from serial EEPROM
2752 */
2753 static int
2754 iha_se2_rd_all(struct iha_softc *sc, uint16_t *tbuf)
2755 {
2756 struct iha_eeprom *eeprom = (struct iha_eeprom *)tbuf;
2757 uint32_t chksum;
2758 int i;
2759
2760 for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
2761 *tbuf = iha_se2_rd(sc, i);
2762 chksum += *tbuf++;
2763 }
2764 *tbuf = iha_se2_rd(sc, 31); /* read checksum from EEPROM */
2765
2766 chksum &= 0x0000ffff; /* lower 16 bits */
2767
2768 	return ((eeprom->signature == EEP_SIGNATURE) &&
2769 	    (eeprom->checksum == chksum));
2770 }
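
#if 0
/*
 * Illustrative sketch only, not part of the driver: verify an EEPROM
 * image the same way iha_se2_rd_all() does - sum the first
 * EEPROM_SIZE - 1 words, keep the low 16 bits and compare the result
 * with the checksum stored in the last word.  The function name and
 * the assumption that the image is a plain array of EEPROM_SIZE
 * uint16_t words (with the checksum last, as the read of address 31
 * above suggests) are made for this example only.
 */
static int
iha_eeprom_image_ok(const uint16_t *img)
{
	uint32_t chksum;
	int i;

	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++)
		chksum += img[i];

	return (img[EEPROM_SIZE - 1] == (chksum & 0x0000ffff));
}
#endif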
2771
2772 /*
2773 * iha_se2_instr - write an octet to serial E2PROM one bit at a time
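 *		   (The sequence used - chip select, a start bit, then the
 *		   opcode/address shifted out MSB first - matches a
 *		   93C46-style Microwire serial EEPROM; the exact part is
 *		   an assumption.)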
2774 */
2775 static void
2776 iha_se2_instr(struct iha_softc *sc, int instr)
2777 {
2778 bus_space_tag_t iot = sc->sc_iot;
2779 bus_space_handle_t ioh = sc->sc_ioh;
2780 int b, i;
2781
2782 b = NVRCS | NVRDO; /* Write the start bit (== 1) */
2783
2784 bus_space_write_1(iot, ioh, TUL_NVRAM, b);
2785 EEP_WAIT();
2786 bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
2787 EEP_WAIT();
2788
2789 for (i = 8; i > 0; i--) {
2790 if (instr & (1 << (i - 1)))
2791 b = NVRCS | NVRDO; /* Write a 1 bit */
2792 else
2793 b = NVRCS; /* Write a 0 bit */
2794
2795 bus_space_write_1(iot, ioh, TUL_NVRAM, b);
2796 EEP_WAIT();
2797 bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
2798 EEP_WAIT();
2799 }
2800
2801 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2802 }
2803