1 /*	$NetBSD: iha.c,v 1.6 2001/07/27 15:10:56 tsutsui Exp $ */
2 /*
3 * Initio INI-9xxxU/UW SCSI Device Driver
4 *
5 * Copyright (c) 2000 Ken Westerback
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 *-------------------------------------------------------------------------
30 *
31 * Ported from i91u.c, provided by Initio Corporation, which credits:
32 *
33 * Device driver for the INI-9XXXU/UW or INIC-940/950 PCI SCSI Controller.
34 *
35 * FreeBSD
36 *
37 * Written for 386bsd and FreeBSD by
38 * Winston Hung <winstonh (at) initio.com>
39 *
40 * Copyright (c) 1997-99 Initio Corp. All rights reserved.
41 *
42 *-------------------------------------------------------------------------
43 */
44
45 /*
46 * Ported to NetBSD by Izumi Tsutsui <tsutsui (at) ceres.dti.ne.jp> from OpenBSD:
47 * $OpenBSD: iha.c,v 1.3 2001/02/20 00:47:33 krw Exp $
48 */
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/buf.h>
54 #include <sys/device.h>
55 #include <sys/malloc.h>
56
57 #include <uvm/uvm_extern.h>
58
59 #include <machine/bus.h>
60 #include <machine/intr.h>
61
62 #include <dev/scsipi/scsi_all.h>
63 #include <dev/scsipi/scsipi_all.h>
64 #include <dev/scsipi/scsiconf.h>
65 #include <dev/scsipi/scsi_message.h>
66
67 #include <dev/ic/ihareg.h>
68 #include <dev/ic/ihavar.h>
69
70 /*
71 * SCSI Rate Table, indexed by FLAG_SCSI_RATE field of
72 * tcs flags.
73 */
74 static u_int8_t tul_rate_tbl[8] = {
75 /* fast 20 */
76 	/* period in nanoseconds, divided by 4 */
77 12, /* 50ns, 20M */
78 18, /* 75ns, 13.3M */
79 25, /* 100ns, 10M */
80 31, /* 125ns, 8M */
81 37, /* 150ns, 6.6M */
82 43, /* 175ns, 5.7M */
83 50, /* 200ns, 5M */
84 62 /* 250ns, 4M */
85 };
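/*
 * Example: a target whose FLAG_SCSI_RATE field is 0 is offered an SDTR
 * period byte of tul_rate_tbl[0] == 12, i.e. 12 * 4 = 48ns (~50ns,
 * Fast-20).  tul_msgout_sdtr() and tul_sync_done() below use this table
 * to build and evaluate synchronous transfer offers.
 */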
86
87 static u_int16_t eeprom_default[EEPROM_SIZE] = {
88 /* -- Header ------------------------------------ */
89 /* signature */
90 EEP_SIGNATURE,
91 /* size, revision */
92 EEP_WORD(EEPROM_SIZE * 2, 0x01),
93 /* -- Host Adapter Structure -------------------- */
94 /* model */
95 0x0095,
96 /* model info, number of channel */
97 EEP_WORD(0x00, 1),
98 /* BIOS config */
99 EEP_BIOSCFG_DEFAULT,
100 /* host adapter config */
101 0,
102
103 /* -- eeprom_adapter[0] ------------------------------- */
104 /* ID, adapter config 1 */
105 EEP_WORD(7, CFG_DEFAULT),
106 /* adapter config 2, number of targets */
107 EEP_WORD(0x00, 8),
108 /* target flags */
109 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
110 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
111 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
112 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
113 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
114 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
115 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
116 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
117
118 /* -- eeprom_adapter[1] ------------------------------- */
119 /* ID, adapter config 1 */
120 EEP_WORD(7, CFG_DEFAULT),
121 /* adapter config 2, number of targets */
122 EEP_WORD(0x00, 8),
123 /* target flags */
124 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
125 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
126 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
127 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
128 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
129 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
130 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
131 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
132 /* reserved[5] */
133 0, 0, 0, 0, 0,
134 /* checksum */
135 0
136 };
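/*
 * Each EEP_WORD() entry above packs the two byte-wide fields named in the
 * preceding comment into a single 16-bit EEPROM word (the exact byte order
 * is whatever the EEP_WORD()/EEP_LBYTE()/EEP_HBYTE() macros in the iha
 * headers define); e.g. EEP_WORD(7, CFG_DEFAULT) holds the default SCSI ID
 * of 7 together with the default adapter configuration bits.
 */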
137
138 static u_int8_t tul_data_over_run(struct iha_scsi_req_q *);
139
140 static int tul_push_sense_request(struct iha_softc *, struct iha_scsi_req_q *);
141 static void tul_timeout(void *);
142 static int tul_alloc_sglist(struct iha_softc *);
143
144 static void tul_read_eeprom(struct iha_softc *, struct iha_eeprom *);
145 static void tul_se2_update_all(struct iha_softc *);
146 static int tul_se2_rd_all(struct iha_softc *, u_int16_t *);
147 static void tul_se2_wr(struct iha_softc *, int, u_int16_t);
148 static void tul_se2_instr(struct iha_softc *, int);
149 static u_int16_t tul_se2_rd(struct iha_softc *, int);
150
151 static void tul_reset_scsi_bus(struct iha_softc *);
152 static void tul_reset_chip(struct iha_softc *);
153 static void tul_reset_dma(struct iha_softc *);
154
155 static void tul_reset_tcs(struct tcs *, u_int8_t);
156
157 static void tul_done_scb(struct iha_softc *, struct iha_scsi_req_q *);
158 static void tul_exec_scb(struct iha_softc *, struct iha_scsi_req_q *);
159
160 static void tul_main(struct iha_softc *);
161 static void tul_scsi(struct iha_softc *);
162
163 static int tul_wait(struct iha_softc *, u_int8_t);
164
165 static __inline void tul_mark_busy_scb(struct iha_scsi_req_q *);
166
167 static void tul_append_free_scb(struct iha_softc *, struct iha_scsi_req_q *);
168 static void tul_append_done_scb(struct iha_softc *, struct iha_scsi_req_q *,
169 u_int8_t);
170 static __inline struct iha_scsi_req_q *tul_pop_done_scb(struct iha_softc *);
171
172 static __inline void tul_append_pend_scb(struct iha_softc *,
173 struct iha_scsi_req_q *);
174 static __inline void tul_push_pend_scb(struct iha_softc *,
175 struct iha_scsi_req_q *);
176 static __inline void tul_del_pend_scb(struct iha_softc *,
177 struct iha_scsi_req_q *);
178 static struct iha_scsi_req_q *tul_find_pend_scb(struct iha_softc *);
179
180 static void tul_sync_done(struct iha_softc *);
181 static void tul_wide_done(struct iha_softc *);
182 static void tul_bad_seq(struct iha_softc *);
183
184 static int tul_next_state(struct iha_softc *);
185 static int tul_state_1(struct iha_softc *);
186 static int tul_state_2(struct iha_softc *);
187 static int tul_state_3(struct iha_softc *);
188 static int tul_state_4(struct iha_softc *);
189 static int tul_state_5(struct iha_softc *);
190 static int tul_state_6(struct iha_softc *);
191 static int tul_state_8(struct iha_softc *);
192
193 static void tul_set_ssig(struct iha_softc *, u_int8_t, u_int8_t);
194
195 static int tul_xpad_in(struct iha_softc *);
196 static int tul_xpad_out(struct iha_softc *);
197
198 static int tul_xfer_data(struct iha_softc *, struct iha_scsi_req_q *,
199 int direction);
200
201 static int tul_status_msg(struct iha_softc *);
202
203 static int tul_msgin(struct iha_softc *);
204 static int tul_msgin_sdtr(struct iha_softc *);
205 static int tul_msgin_extended(struct iha_softc *);
206 static int tul_msgin_ignore_wid_resid(struct iha_softc *);
207
208 static int tul_msgout(struct iha_softc *, u_int8_t);
209 static int tul_msgout_extended(struct iha_softc *);
210 static void tul_msgout_abort(struct iha_softc *, u_int8_t);
211 static int tul_msgout_reject(struct iha_softc *);
212 static int tul_msgout_sdtr(struct iha_softc *);
213 static int tul_msgout_wdtr(struct iha_softc *);
214
215 static void tul_select(struct iha_softc *, struct iha_scsi_req_q *, u_int8_t);
216
217 static void tul_busfree(struct iha_softc *);
218 static int tul_resel(struct iha_softc *);
219
220 static void tul_abort_xs(struct iha_softc *, struct scsipi_xfer *, u_int8_t);
221
222 static void iha_minphys(struct buf *);
223 void iha_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
224 void *arg);
225
226 /*
227 * iha_intr - the interrupt service routine for the iha driver
228 */
229 int
230 iha_intr(arg)
231 void *arg;
232 {
233 bus_space_tag_t iot;
234 bus_space_handle_t ioh;
235 struct iha_softc *sc;
236 int s;
237
238 sc = (struct iha_softc *)arg;
239 iot = sc->sc_iot;
240 ioh = sc->sc_ioh;
241
242 if ((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
243 return (0);
244
245 s = splbio(); /* XXX - Or are interrupts off when ISR's are called? */
246
247 if (sc->sc_semaph != SEMAPH_IN_MAIN) {
248 /* XXX - need these inside a splbio()/splx()? */
249 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
250 sc->sc_semaph = SEMAPH_IN_MAIN;
251
252 tul_main(sc);
253
254 sc->sc_semaph = ~SEMAPH_IN_MAIN;
255 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
256 }
257
258 splx(s);
259
260 return (1);
261 }
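/*
 * Note: this file only provides the interrupt handler; the bus front end
 * is expected to hook it up when it maps the device.  A hypothetical PCI
 * attachment (names illustrative only) would do roughly:
 *
 *	ih = pci_intr_establish(pc, intrhandle, IPL_BIO, iha_intr, sc);
 *	if (ih != NULL)
 *		iha_attach(sc);
 *
 * The real glue lives in the bus-specific attach code.
 */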
262
263 void
264 iha_scsipi_request(chan, req, arg)
265 struct scsipi_channel *chan;
266 scsipi_adapter_req_t req;
267 void *arg;
268 {
269 struct scsipi_xfer *xs;
270 struct scsipi_periph *periph;
271 struct iha_scsi_req_q *scb;
272 struct iha_softc *sc;
273 int error, flags, s;
274
275 sc = (struct iha_softc *)chan->chan_adapter->adapt_dev;
276
277 switch (req) {
278 case ADAPTER_REQ_RUN_XFER:
279 xs = arg;
280 periph = xs->xs_periph;
281 flags = xs->xs_control;
282
283 if (xs->cmdlen > sizeof(struct scsi_generic) ||
284 periph->periph_target >= IHA_MAX_TARGETS) {
285 xs->error = XS_DRIVER_STUFFUP;
286 return;
287 }
288
289 s = splbio();
290 scb = TAILQ_FIRST(&sc->sc_freescb);
291 if (scb != NULL) {
292 scb->status = STATUS_RENT;
293 TAILQ_REMOVE(&sc->sc_freescb, scb, chain);
294 }
295 #ifdef DIAGNOSTIC
296 else {
297 scsipi_printaddr(periph);
298 printf("unable to allocate scb\n");
299 panic("iha_scsipi_request");
300 }
301 #endif
302 splx(s);
303
304 scb->target = periph->periph_target;
305 scb->lun = periph->periph_lun;
306 scb->tcs = &sc->sc_tcs[scb->target];
307 scb->flags = xs->xs_control; /* XXX */
308 scb->scb_id = MSG_IDENTIFY(periph->periph_lun,
309 (xs->xs_control & XS_CTL_REQSENSE) == 0);
310
311 scb->xs = xs;
312 scb->timeout = xs->timeout;
313 scb->cmdlen = xs->cmdlen;
314 memcpy(&scb->cmd, xs->cmd, xs->cmdlen);
315
316 scb->buflen = xs->datalen;
317
318 if (scb->buflen > 0) {
319 error = bus_dmamap_load(sc->sc_dmat, scb->dmap,
320 xs->data, scb->buflen, NULL,
321 ((xs->xs_control & XS_CTL_NOSLEEP) ?
322 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
323 BUS_DMA_STREAMING |
324 ((xs->xs_control & XS_CTL_DATA_IN) ?
325 BUS_DMA_READ : BUS_DMA_WRITE));
326
327 if (error) {
328 printf("%s: error %d loading dma map\n",
329 sc->sc_dev.dv_xname, error);
330 tul_append_free_scb(sc, scb);
331 xs->error = XS_DRIVER_STUFFUP;
332 scsipi_done(xs);
333 return;
334 }
335 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
336 0, scb->dmap->dm_mapsize,
337 (xs->xs_control & XS_CTL_DATA_IN) ?
338 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
339 }
340
341 tul_exec_scb(sc, scb);
342 return;
343
344 case ADAPTER_REQ_GROW_RESOURCES:
345 return; /* XXX */
346
347 case ADAPTER_REQ_SET_XFER_MODE:
348 return; /* XXX */
349 }
350 }
351
352 void
353 iha_attach(sc)
354 struct iha_softc *sc;
355 {
356 bus_space_tag_t iot = sc->sc_iot;
357 bus_space_handle_t ioh = sc->sc_ioh;
358 struct iha_scsi_req_q *scb;
359 struct iha_eeprom eeprom;
360 struct eeprom_adapter *conf;
361 int i, error, reg;
362
363 tul_read_eeprom(sc, &eeprom);
364
365 conf = &eeprom.adapter[0];
366
367 /*
368 * fill in the rest of the IHA_SOFTC fields
369 */
370 sc->sc_id = CFG_ID(conf->config1);
371 sc->sc_semaph = ~SEMAPH_IN_MAIN;
372 sc->sc_status0 = 0;
373 sc->sc_actscb = NULL;
374
375 TAILQ_INIT(&sc->sc_freescb);
376 TAILQ_INIT(&sc->sc_pendscb);
377 TAILQ_INIT(&sc->sc_donescb);
378 error = tul_alloc_sglist(sc);
379 if (error != 0) {
380 printf(": cannot allocate sglist\n");
381 return;
382 }
383
384 sc->sc_scb = malloc(sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB,
385 M_DEVBUF, M_NOWAIT);
386 if (sc->sc_scb == NULL) {
387 printf(": cannot allocate SCB\n");
388 return;
389 }
390 memset(sc->sc_scb, 0, sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB);
391
392 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) {
393 scb->scb_tagid = i;
394 scb->sgoffset = IHA_SG_SIZE * i;
395 scb->sglist = &sc->sc_sglist[i].sg_element[0];
396 scb->sg_addr =
397 sc->sc_dmamap->dm_segs[0].ds_addr + scb->sgoffset;
398
399 error = bus_dmamap_create(sc->sc_dmat,
400 (IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE, IHA_MAX_SG_ENTRIES,
401 (IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE, 0,
402 BUS_DMA_NOWAIT, &scb->dmap);
403
404 if (error != 0) {
405 printf(": couldn't create SCB DMA map, error = %d\n",
406 error);
407 return;
408 }
409 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
410 }
411
412 /* Mask all the interrupts */
413 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
414
415 /* Stop any I/O and reset the scsi module */
416 tul_reset_dma(sc);
417 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSMOD);
418
419 /* Program HBA's SCSI ID */
420 bus_space_write_1(iot, ioh, TUL_SID, sc->sc_id << 4);
421
422 /*
423 * Configure the channel as requested by the NVRAM settings read
424 * by tul_read_eeprom() above.
425 */
426
427 sc->sc_sconf1 = SCONFIG0DEFAULT;
428 if ((conf->config1 & CFG_EN_PAR) != 0)
429 sc->sc_sconf1 |= SPCHK;
430 bus_space_write_1(iot, ioh, TUL_SCONFIG0, sc->sc_sconf1);
431
432 /* set selection time out 250 ms */
433 bus_space_write_1(iot, ioh, TUL_STIMO, STIMO_250MS);
434
435 /* Enable desired SCSI termination configuration read from eeprom */
436 reg = 0;
437 if (conf->config1 & CFG_ACT_TERM1)
438 reg |= ENTMW;
439 if (conf->config1 & CFG_ACT_TERM2)
440 reg |= ENTM;
441 bus_space_write_1(iot, ioh, TUL_DCTRL0, reg);
442
443 reg = bus_space_read_1(iot, ioh, TUL_GCTRL1) & ~ATDEN;
444 if (conf->config1 & CFG_AUTO_TERM)
445 reg |= ATDEN;
446 bus_space_write_1(iot, ioh, TUL_GCTRL1, reg);
447
448 for (i = 0; i < IHA_MAX_TARGETS / 2; i++) {
449 sc->sc_tcs[i * 2 ].flags = EEP_LBYTE(conf->tflags[i]);
450 sc->sc_tcs[i * 2 + 1].flags = EEP_HBYTE(conf->tflags[i]);
451 tul_reset_tcs(&sc->sc_tcs[i * 2 ], sc->sc_sconf1);
452 tul_reset_tcs(&sc->sc_tcs[i * 2 + 1], sc->sc_sconf1);
453 }
454
455 tul_reset_chip(sc);
456 bus_space_write_1(iot, ioh, TUL_SIEN, ALL_INTERRUPTS);
457
458 /*
459 * fill in the adapter.
460 */
461 sc->sc_adapter.adapt_dev = &sc->sc_dev;
462 sc->sc_adapter.adapt_nchannels = 1;
463 sc->sc_adapter.adapt_openings = IHA_MAX_SCB;
464 sc->sc_adapter.adapt_max_periph = IHA_MAX_SCB;
465 sc->sc_adapter.adapt_ioctl = NULL;
466 sc->sc_adapter.adapt_minphys = iha_minphys;
467 sc->sc_adapter.adapt_request = iha_scsipi_request;
468
469 /*
470 * fill in the channel.
471 */
472 sc->sc_channel.chan_adapter = &sc->sc_adapter;
473 sc->sc_channel.chan_bustype = &scsi_bustype;
474 sc->sc_channel.chan_channel = 0;
475 sc->sc_channel.chan_ntargets = CFG_TARGET(conf->config2);
476 sc->sc_channel.chan_nluns = 8;
477 sc->sc_channel.chan_id = sc->sc_id;
478
479 /*
480 * Now try to attach all the sub devices.
481 */
482 config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
483 }
484
485 /*
486 * iha_minphys - reduce bp->b_bcount to something less than
487 * or equal to the largest I/O possible through
488 * the adapter. Called from higher layers
489  *		 via sc->sc_adapter.adapt_minphys.
490 */
491 static void
492 iha_minphys(bp)
493 struct buf *bp;
494 {
495 if (bp->b_bcount > ((IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE))
496 bp->b_bcount = ((IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE);
497
498 minphys(bp);
499 }
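/*
 * The (IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE limit matches the maximum DMA
 * map size requested by bus_dmamap_create() in iha_attach() above, so any
 * request that passes iha_minphys() can be loaded into one SCB's
 * scatter/gather list.
 */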
500
501 /*
502 * tul_reset_dma - abort any active DMA xfer, reset tulip FIFO.
503 */
504 static void
505 tul_reset_dma(sc)
506 struct iha_softc *sc;
507 {
508 bus_space_tag_t iot = sc->sc_iot;
509 bus_space_handle_t ioh = sc->sc_ioh;
510
511 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
512 /* if DMA xfer is pending, abort DMA xfer */
513 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
514 /* wait Abort DMA xfer done */
515 while ((bus_space_read_1(iot, ioh, TUL_ISTUS0) & DABT) == 0)
516 ;
517 }
518
519 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
520 }
521
522 /*
523 * tul_append_free_scb - append the supplied SCB to the tail of the
524 * sc_freescb queue after clearing and resetting
525 * everything possible.
526 */
527 static void
528 tul_append_free_scb(sc, scb)
529 struct iha_softc *sc;
530 struct iha_scsi_req_q *scb;
531 {
532 int s;
533
534 s = splbio();
535
536 if (scb == sc->sc_actscb)
537 sc->sc_actscb = NULL;
538
539 scb->status = STATUS_QUEUED;
540 scb->ha_stat = HOST_OK;
541 scb->ta_stat = SCSI_OK;
542
543 scb->nextstat = 0;
544 scb->sg_index = 0;
545 scb->sg_max = 0;
546 scb->flags = 0;
547 scb->target = 0;
548 scb->lun = 0;
549 scb->buflen = 0;
550 scb->sg_size = 0;
551 scb->cmdlen = 0;
552 scb->scb_id = 0;
553 scb->scb_tagmsg = 0;
554 scb->timeout = 0;
555 scb->bufaddr = 0;
556
557 scb->xs = NULL;
558 scb->tcs = NULL;
559
560 memset(scb->cmd, 0, sizeof(scb->cmd));
561 memset(scb->sglist, 0, sizeof(scb->sglist));
562
563 /*
564 * scb_tagid, sg_addr, sglist
565 * SCB_SensePtr are set at initialization
566 * and never change
567 */
568
569 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
570
571 splx(s);
572 }
573
574 static __inline void
575 tul_append_pend_scb(sc, scb)
576 struct iha_softc *sc;
577 struct iha_scsi_req_q *scb;
578 {
579 /* ASSUMPTION: only called within a splbio()/splx() pair */
580
581 if (scb == sc->sc_actscb)
582 sc->sc_actscb = NULL;
583
584 scb->status = STATUS_QUEUED;
585
586 TAILQ_INSERT_TAIL(&sc->sc_pendscb, scb, chain);
587 }
588
589 static __inline void
590 tul_push_pend_scb(sc, scb)
591 struct iha_softc *sc;
592 struct iha_scsi_req_q *scb;
593 {
594 int s;
595
596 s = splbio();
597
598 if (scb == sc->sc_actscb)
599 sc->sc_actscb = NULL;
600
601 scb->status = STATUS_QUEUED;
602
603 TAILQ_INSERT_HEAD(&sc->sc_pendscb, scb, chain);
604
605 splx(s);
606 }
607
608 /*
609 * tul_find_pend_scb - scan the pending queue for a SCB that can be
610  *		      processed immediately. Return a pointer to that SCB,
611  *		      or NULL if none can be started or there is already
612  *		      an active SCB.
613 */
614 static struct iha_scsi_req_q *
615 tul_find_pend_scb(sc)
616 struct iha_softc *sc;
617 {
618 struct iha_scsi_req_q *scb;
619 struct tcs *tcs;
620 int s;
621
622 s = splbio();
623
624 if (sc->sc_actscb != NULL)
625 scb = NULL;
626
627 else
628 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) {
629 if ((scb->flags & XS_CTL_RESET) != 0)
630 /* ALWAYS willing to reset a device */
631 break;
632
633 tcs = scb->tcs;
634
635 if ((scb->scb_tagmsg) != 0) {
636 /*
637 * A Tagged I/O. OK to start If no
638 * non-tagged I/O is active on the same
639 * target
640 */
641 if (tcs->ntagscb == NULL)
642 break;
643
644 } else if (scb->cmd[0] == REQUEST_SENSE) {
645 /*
646 * OK to do a non-tagged request sense
647 * even if a non-tagged I/O has been
648 				 * started, because we don't allow any
649 * disconnect during a request sense op
650 */
651 break;
652
653 } else if (tcs->tagcnt == 0) {
654 /*
655 * No tagged I/O active on this target,
656 * ok to start a non-tagged one if one
657 * is not already active
658 */
659 if (tcs->ntagscb == NULL)
660 break;
661 }
662 }
663
664 splx(s);
665
666 return (scb);
667 }
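/*
 * In short: a reset request always wins; a tagged request may start as
 * long as no untagged command is active on its target; a REQUEST SENSE may
 * always start (no disconnects are allowed during it); and an untagged
 * request may start only when the target has neither tagged nor untagged
 * commands outstanding.
 */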
668
669 /*
670 * tul_del_pend_scb - remove scb from sc_pendscb
671 */
672 static __inline void
673 tul_del_pend_scb(sc, scb)
674 struct iha_softc *sc;
675 struct iha_scsi_req_q *scb;
676 {
677 int s;
678
679 s = splbio();
680
681 TAILQ_REMOVE(&sc->sc_pendscb, scb, chain);
682
683 splx(s);
684 }
685
686 static __inline void
687 tul_mark_busy_scb(scb)
688 struct iha_scsi_req_q *scb;
689 {
690 int s;
691
692 s = splbio();
693
694 scb->status = STATUS_BUSY;
695
696 if (scb->scb_tagmsg == 0)
697 scb->tcs->ntagscb = scb;
698 else
699 scb->tcs->tagcnt++;
700
701 splx(s);
702 }
703
704 static void
705 tul_append_done_scb(sc, scb, hastat)
706 struct iha_softc *sc;
707 struct iha_scsi_req_q *scb;
708 u_int8_t hastat;
709 {
710 struct tcs *tcs;
711 int s;
712
713 s = splbio();
714
715 if (scb->xs != NULL)
716 callout_stop(&scb->xs->xs_callout);
717
718 if (scb == sc->sc_actscb)
719 sc->sc_actscb = NULL;
720
721 tcs = scb->tcs;
722
723 if (scb->scb_tagmsg != 0) {
724 if (tcs->tagcnt)
725 tcs->tagcnt--;
726 } else if (tcs->ntagscb == scb)
727 tcs->ntagscb = NULL;
728
729 scb->status = STATUS_QUEUED;
730 scb->ha_stat = hastat;
731
732 TAILQ_INSERT_TAIL(&sc->sc_donescb, scb, chain);
733
734 splx(s);
735 }
736
737 static __inline struct iha_scsi_req_q *
738 tul_pop_done_scb(sc)
739 struct iha_softc *sc;
740 {
741 struct iha_scsi_req_q *scb;
742 int s;
743
744 s = splbio();
745
746 scb = TAILQ_FIRST(&sc->sc_donescb);
747
748 if (scb != NULL) {
749 scb->status = STATUS_RENT;
750 TAILQ_REMOVE(&sc->sc_donescb, scb, chain);
751 }
752
753 splx(s);
754
755 return (scb);
756 }
757
758 /*
759 * tul_abort_xs - find the SCB associated with the supplied xs and
760 * stop all processing on it, moving it to the done
761 * queue with the supplied host status value.
762 */
763 static void
764 tul_abort_xs(sc, xs, hastat)
765 struct iha_softc *sc;
766 struct scsipi_xfer *xs;
767 u_int8_t hastat;
768 {
769 struct iha_scsi_req_q *scb;
770 int i, s;
771
772 s = splbio();
773
774 /* Check the pending queue for the SCB pointing to xs */
775
776 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain)
777 if (scb->xs == xs) {
778 tul_del_pend_scb(sc, scb);
779 tul_append_done_scb(sc, scb, hastat);
780 splx(s);
781 return;
782 }
783
784 /*
785 * If that didn't work, check all BUSY/SELECTING SCB's for one
786 * pointing to xs
787 */
788
789 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
790 switch (scb->status) {
791 case STATUS_BUSY:
792 case STATUS_SELECT:
793 if (scb->xs == xs) {
794 tul_append_done_scb(sc, scb, hastat);
795 splx(s);
796 return;
797 }
798 break;
799 default:
800 break;
801 }
802
803 splx(s);
804 }
805
806 /*
807 * tul_bad_seq - a SCSI bus phase was encountered out of the
808 * correct/expected sequence. Reset the SCSI bus.
809 */
810 static void
811 tul_bad_seq(sc)
812 struct iha_softc *sc;
813 {
814 struct iha_scsi_req_q *scb = sc->sc_actscb;
815
816 if (scb != NULL)
817 tul_append_done_scb(sc, scb, HOST_BAD_PHAS);
818
819 tul_reset_scsi_bus(sc);
820 tul_reset_chip(sc);
821 }
822
823 /*
824 * tul_push_sense_request - obtain auto sense data by pushing the
825 * SCB needing it back onto the pending
826 * queue with a REQUEST_SENSE CDB.
827 */
828 static int
829 tul_push_sense_request(sc, scb)
830 struct iha_softc *sc;
831 struct iha_scsi_req_q *scb;
832 {
833 struct scsipi_xfer *xs = scb->xs;
834 struct scsipi_periph *periph = xs->xs_periph;
835 struct scsipi_sense *ss = (struct scsipi_sense *)scb->cmd;
836 int lun = periph->periph_lun;
837 int err;
838
839 ss->opcode = REQUEST_SENSE;
840 ss->byte2 = lun << SCSI_CMD_LUN_SHIFT;
841 ss->unused[0] = ss->unused[1] = 0;
842 ss->length = sizeof(struct scsipi_sense_data);
843 ss->control = 0;
844
845 scb->flags &= ~(FLAG_SG | XS_CTL_DATA_OUT);
846 scb->flags |= FLAG_RSENS | XS_CTL_DATA_IN;
847
848 scb->scb_id &= ~MSG_IDENTIFY_DISCFLAG;
849
850 scb->scb_tagmsg = 0;
851 scb->ta_stat = SCSI_OK;
852
853 scb->cmdlen = sizeof(struct scsipi_sense);
854 scb->buflen = ss->length;
855
856 err = bus_dmamap_load(sc->sc_dmat, scb->dmap,
857 &xs->sense.scsi_sense, scb->buflen, NULL,
858 BUS_DMA_READ|BUS_DMA_NOWAIT);
859 if (err != 0) {
860 printf("iha_push_sense_request: cannot bus_dmamap_load()\n");
861 xs->error = XS_DRIVER_STUFFUP;
862 return 1;
863 }
864 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
865 0, scb->buflen, BUS_DMASYNC_PREREAD);
866
867 /* XXX What about queued command? */
868 tul_exec_scb(sc, scb);
869
870 return 0;
871 }
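/*
 * Note that the SCB is recycled in place for the REQUEST SENSE: its DMA
 * map is loaded to point at xs->sense, and the identify message loses its
 * disconnect privilege, so the sense data is fetched without any
 * intervening disconnect.
 */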
872
873 /*
874  * tul_main - process the active SCB (taking one off the pending queue and
875  *	      making it active if necessary) and any done SCBs created as
876  *	      a result, until there are no interrupts pending and no pending
877  *	      SCBs that can be started.
878 */
879 static void
880 tul_main(sc)
881 struct iha_softc *sc;
882 {
883 bus_space_tag_t iot = sc->sc_iot;
884 	bus_space_handle_t ioh = sc->sc_ioh;
885 struct iha_scsi_req_q *scb;
886
887 for (;;) {
888 tul_scsi(sc);
889
890 while ((scb = tul_pop_done_scb(sc)) != NULL)
891 tul_done_scb(sc, scb);
892
893 /*
894 * If there are no interrupts pending, or we can't start
895 		 * a pending SCB, break out of the for(;;). Otherwise
896 * continue the good work with another call to
897 * tul_scsi().
898 */
899 if (((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
900 && (tul_find_pend_scb(sc) == NULL))
901 break;
902 }
903 }
904
905 /*
906 * tul_scsi - service any outstanding interrupts. If there are none, try to
907 * start another SCB currently in the pending queue.
908 */
909 static void
910 tul_scsi(sc)
911 struct iha_softc *sc;
912 {
913 bus_space_tag_t iot = sc->sc_iot;
914 bus_space_handle_t ioh = sc->sc_ioh;
915 struct iha_scsi_req_q *scb;
916 struct tcs *tcs;
917 u_int8_t stat;
918
919 /* service pending interrupts asap */
920
921 stat = bus_space_read_1(iot, ioh, TUL_STAT0);
922 if ((stat & INTPD) != 0) {
923 sc->sc_status0 = stat;
924 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
925 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
926
927 sc->sc_phase = sc->sc_status0 & PH_MASK;
928
929 if ((sc->sc_sistat & SRSTD) != 0) {
930 tul_reset_scsi_bus(sc);
931 return;
932 }
933
934 if ((sc->sc_sistat & RSELED) != 0) {
935 tul_resel(sc);
936 return;
937 }
938
939 if ((sc->sc_sistat & (STIMEO | DISCD)) != 0) {
940 tul_busfree(sc);
941 return;
942 }
943
944 if ((sc->sc_sistat & (SCMDN | SBSRV)) != 0) {
945 tul_next_state(sc);
946 return;
947 }
948
949 if ((sc->sc_sistat & SELED) != 0)
950 tul_set_ssig(sc, 0, 0);
951 }
952
953 /*
954 * There were no interrupts pending which required action elsewhere, so
955 * see if it is possible to start the selection phase on a pending SCB
956 */
957 if ((scb = tul_find_pend_scb(sc)) == NULL)
958 return;
959
960 tcs = scb->tcs;
961
962 /* program HBA's SCSI ID & target SCSI ID */
963 bus_space_write_1(iot, ioh, TUL_SID, (sc->sc_id << 4) | scb->target);
964
965 if ((scb->flags & XS_CTL_RESET) == 0) {
966 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
967
968 if ((tcs->flags & FLAG_NO_NEG_SYNC) == 0 ||
969 (tcs->flags & FLAG_NO_NEG_WIDE) == 0)
970 tul_select(sc, scb, SELATNSTOP);
971
972 else if (scb->scb_tagmsg != 0)
973 tul_select(sc, scb, SEL_ATN3);
974
975 else
976 tul_select(sc, scb, SEL_ATN);
977
978 } else {
979 tul_select(sc, scb, SELATNSTOP);
980 scb->nextstat = 8;
981 }
982
983 if ((scb->flags & XS_CTL_POLL) != 0) {
984 for (; scb->timeout > 0; scb->timeout--) {
985 if (tul_wait(sc, NO_OP) == -1)
986 break;
987 if (tul_next_state(sc) == -1)
988 break;
989 delay(1000); /* Only happens in boot, so it's ok */
990 }
991
992 /*
993 		 * Since done queue processing is not done until AFTER this
994 * function returns, scb is on the done queue, not
995 * the free queue at this point and still has valid data
996 *
997 * Conversely, xs->error has not been set yet
998 */
999 if (scb->timeout == 0)
1000 tul_timeout(scb);
1001 }
1002 }
1003
1004 /*
1005 * tul_data_over_run - return HOST_OK for all SCSI opcodes where BufLen
1006 * is an 'Allocation Length'. All other SCSI opcodes
1007 * get HOST_DO_DU as they SHOULD have xferred all the
1008 * data requested.
1009 *
1010 * The list of opcodes using 'Allocation Length' was
1011 * found by scanning all the SCSI-3 T10 drafts. See
1012 * www.t10.org for the curious with a .pdf reader.
1013 */
1014 static u_int8_t
1015 tul_data_over_run(scb)
1016 struct iha_scsi_req_q *scb;
1017 {
1018 switch (scb->cmd[0]) {
1019 case 0x03: /* Request Sense SPC-2 */
1020 case 0x12: /* Inquiry SPC-2 */
1021 case 0x1a: /* Mode Sense (6 byte version) SPC-2 */
1022 case 0x1c: /* Receive Diagnostic Results SPC-2 */
1023 case 0x23: /* Read Format Capacities MMC-2 */
1024 case 0x29: /* Read Generation SBC */
1025 case 0x34: /* Read Position SSC-2 */
1026 case 0x37: /* Read Defect Data SBC */
1027 case 0x3c: /* Read Buffer SPC-2 */
1028 case 0x42: /* Read Sub Channel MMC-2 */
1029 case 0x43: /* Read TOC/PMA/ATIP MMC */
1030
1031 /* XXX - 2 with same opcode of 0x44? */
1032 case 0x44: /* Read Header/Read Density Suprt MMC/SSC*/
1033
1034 case 0x46: /* Get Configuration MMC-2 */
1035 case 0x4a: /* Get Event/Status Notification MMC-2 */
1036 case 0x4d: /* Log Sense SPC-2 */
1037 case 0x51: /* Read Disc Information MMC */
1038 case 0x52: /* Read Track Information MMC */
1039 case 0x59: /* Read Master CUE MMC */
1040 case 0x5a: /* Mode Sense (10 byte version) SPC-2 */
1041 case 0x5c: /* Read Buffer Capacity MMC */
1042 	case 0x5e: /* Persistent Reserve In              SPC-2   */
1043 case 0x84: /* Receive Copy Results SPC-2 */
1044 case 0xa0: /* Report LUNs SPC-2 */
1045 case 0xa3: /* Various Report requests SBC-2/SCC-2*/
1046 case 0xa4: /* Report Key MMC-2 */
1047 case 0xad: /* Read DVD Structure MMC-2 */
1048 case 0xb4: /* Read Element Status (Attached) SMC */
1049 case 0xb5: /* Request Volume Element Address SMC */
1050 case 0xb7: /* Read Defect Data (12 byte ver.) SBC */
1051 case 0xb8: /* Read Element Status (Independ.) SMC */
1052 case 0xba: /* Report Redundancy SCC-2 */
1053 case 0xbd: /* Mechanism Status MMC */
1054 case 0xbe: /* Report Basic Redundancy SCC-2 */
1055
1056 return (HOST_OK);
1057 break;
1058
1059 default:
1060 return (HOST_DO_DU);
1061 break;
1062 }
1063 }
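/*
 * For example, an INQUIRY (0x12) that asks for more data than the target
 * returns is a normal short transfer and reports HOST_OK, whereas a
 * READ-type command that moves fewer bytes than requested is a genuine
 * data underrun and reports HOST_DO_DU.
 */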
1064
1065 /*
1066  * tul_next_state - process the current SCB as requested in its
1067  *		    nextstat member.
1068 */
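/*
 * nextstat values used below:
 *	1 - selection completed after SELATNSTOP
 *	2 - selection completed after SEL_ATN/SEL_ATN3
 *	3 - send the SCSI CDB
 *	4 - start the data transfer
 *	5 - data transfer (partially) complete, update pointers
 *	6 - finish off the SCB
 *	8 - bus device reset
 */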
1069 static int
1070 tul_next_state(sc)
1071 struct iha_softc *sc;
1072 {
1073
1074 if (sc->sc_actscb == NULL)
1075 return (-1);
1076
1077 switch (sc->sc_actscb->nextstat) {
1078 case 1:
1079 if (tul_state_1(sc) == 3)
1080 goto state_3;
1081 break;
1082
1083 case 2:
1084 switch (tul_state_2(sc)) {
1085 case 3:
1086 goto state_3;
1087 case 4:
1088 goto state_4;
1089 default:
1090 break;
1091 }
1092 break;
1093
1094 case 3:
1095 state_3:
1096 if (tul_state_3(sc) == 4)
1097 goto state_4;
1098 break;
1099
1100 case 4:
1101 state_4:
1102 switch (tul_state_4(sc)) {
1103 case 0:
1104 return (0);
1105 case 6:
1106 goto state_6;
1107 default:
1108 break;
1109 }
1110 break;
1111
1112 case 5:
1113 switch (tul_state_5(sc)) {
1114 case 4:
1115 goto state_4;
1116 case 6:
1117 goto state_6;
1118 default:
1119 break;
1120 }
1121 break;
1122
1123 case 6:
1124 state_6:
1125 tul_state_6(sc);
1126 break;
1127
1128 case 8:
1129 tul_state_8(sc);
1130 break;
1131
1132 default:
1133 #ifdef IHA_DEBUG_STATE
1134 printf("[debug] -unknown state: %i-\n",
1135 sc->sc_actscb->nextstat);
1136 #endif
1137 tul_bad_seq(sc);
1138 break;
1139 }
1140
1141 return (-1);
1142 }
1143
1144 /*
1145 * tul_state_1 - selection is complete after a SELATNSTOP. If the target
1146 * has put the bus into MSG_OUT phase start wide/sync
1147 * negotiation. Otherwise clear the FIFO and go to state 3,
1148 * which will send the SCSI CDB to the target.
1149 */
1150 static int
1151 tul_state_1(sc)
1152 struct iha_softc *sc;
1153 {
1154 bus_space_tag_t iot = sc->sc_iot;
1155 bus_space_handle_t ioh = sc->sc_ioh;
1156 struct iha_scsi_req_q *scb = sc->sc_actscb;
1157 struct tcs *tcs;
1158 int flags;
1159
1160 tul_mark_busy_scb(scb);
1161
1162 tcs = scb->tcs;
1163
1164 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
1165
1166 /*
1167 * If we are in PHASE_MSG_OUT, send
1168 * a) IDENT message (with tags if appropriate)
1169 * b) WDTR if the target is configured to negotiate wide xfers
1170 * ** OR **
1171 * c) SDTR if the target is configured to negotiate sync xfers
1172 * but not wide ones
1173 *
1174 * If we are NOT, then the target is not asking for anything but
1175 * the data/command, so go straight to state 3.
1176 */
1177 if (sc->sc_phase == PHASE_MSG_OUT) {
1178 bus_space_write_1(iot, ioh, TUL_SCTRL1, (ESBUSIN | EHRSL));
1179 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
1180
1181 if (scb->scb_tagmsg != 0) {
1182 bus_space_write_1(iot, ioh, TUL_SFIFO,
1183 scb->scb_tagmsg);
1184 bus_space_write_1(iot, ioh, TUL_SFIFO,
1185 scb->scb_tagid);
1186 }
1187
1188 flags = tcs->flags;
1189 if ((flags & FLAG_NO_NEG_WIDE) == 0) {
1190 if (tul_msgout_wdtr(sc) == -1)
1191 return (-1);
1192 } else if ((flags & FLAG_NO_NEG_SYNC) == 0) {
1193 if (tul_msgout_sdtr(sc) == -1)
1194 return (-1);
1195 }
1196
1197 } else {
1198 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1199 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1200 }
1201
1202 return (3);
1203 }
1204
1205 /*
1206 * tul_state_2 - selection is complete after a SEL_ATN or SEL_ATN3. If the SCSI
1207  *		CDB has already been sent, go to state 4 to start the data
1208 * xfer. Otherwise reset the FIFO and go to state 3, sending
1209 * the SCSI CDB.
1210 */
1211 static int
1212 tul_state_2(sc)
1213 struct iha_softc *sc;
1214 {
1215 bus_space_tag_t iot = sc->sc_iot;
1216 bus_space_handle_t ioh = sc->sc_ioh;
1217 struct iha_scsi_req_q *scb = sc->sc_actscb;
1218
1219 tul_mark_busy_scb(scb);
1220
1221 bus_space_write_1(iot, ioh, TUL_SCONFIG0, scb->tcs->sconfig0);
1222
1223 if ((sc->sc_status1 & CPDNE) != 0)
1224 return (4);
1225
1226 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1227
1228 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1229
1230 return (3);
1231 }
1232
1233 /*
1234 * tul_state_3 - send the SCSI CDB to the target, processing any status
1235 * or other messages received until that is done or
1236 * abandoned.
1237 */
1238 static int
1239 tul_state_3(sc)
1240 struct iha_softc *sc;
1241 {
1242 bus_space_tag_t iot = sc->sc_iot;
1243 bus_space_handle_t ioh = sc->sc_ioh;
1244 struct iha_scsi_req_q *scb = sc->sc_actscb;
1245 int flags;
1246
1247 for (;;) {
1248 switch (sc->sc_phase) {
1249 case PHASE_CMD_OUT:
1250 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
1251 scb->cmd, scb->cmdlen);
1252 if (tul_wait(sc, XF_FIFO_OUT) == -1)
1253 return (-1);
1254 else if (sc->sc_phase == PHASE_CMD_OUT) {
1255 tul_bad_seq(sc);
1256 return (-1);
1257 } else
1258 return (4);
1259
1260 case PHASE_MSG_IN:
1261 scb->nextstat = 3;
1262 if (tul_msgin(sc) == -1)
1263 return (-1);
1264 break;
1265
1266 case PHASE_STATUS_IN:
1267 if (tul_status_msg(sc) == -1)
1268 return (-1);
1269 break;
1270
1271 case PHASE_MSG_OUT:
1272 flags = scb->tcs->flags;
1273 if ((flags & FLAG_NO_NEG_SYNC) != 0) {
1274 if (tul_msgout(sc, MSG_NOOP) == -1)
1275 return (-1);
1276 } else if (tul_msgout_sdtr(sc) == -1)
1277 return (-1);
1278 break;
1279
1280 default:
1281 printf("[debug] -s3- bad phase = %d\n", sc->sc_phase);
1282 tul_bad_seq(sc);
1283 return (-1);
1284 }
1285 }
1286 }
1287
1288 /*
1289 * tul_state_4 - start a data xfer. Handle any bus state
1290 * transitions until PHASE_DATA_IN/_OUT
1291 * or the attempt is abandoned. If there is
1292 * no data to xfer, go to state 6 and finish
1293 * processing the current SCB.
1294 */
1295 static int
1296 tul_state_4(sc)
1297 struct iha_softc *sc;
1298 {
1299 struct iha_scsi_req_q *scb = sc->sc_actscb;
1300
1301 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
1302 (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1303 return (6); /* Both dir flags set => NO xfer was requested */
1304
1305 for (;;) {
1306 if (scb->buflen == 0)
1307 return (6);
1308
1309 switch (sc->sc_phase) {
1310 case PHASE_STATUS_IN:
1311 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1312 != 0)
1313 scb->ha_stat = tul_data_over_run(scb);
1314 if ((tul_status_msg(sc)) == -1)
1315 return (-1);
1316 break;
1317
1318 case PHASE_MSG_IN:
1319 scb->nextstat = 4;
1320 if (tul_msgin(sc) == -1)
1321 return (-1);
1322 break;
1323
1324 case PHASE_MSG_OUT:
1325 if ((sc->sc_status0 & SPERR) != 0) {
1326 scb->buflen = 0;
1327 scb->ha_stat = HOST_SPERR;
1328 if (tul_msgout(sc, MSG_INITIATOR_DET_ERR) == -1)
1329 return (-1);
1330 else
1331 return (6);
1332 } else {
1333 if (tul_msgout(sc, MSG_NOOP) == -1)
1334 return (-1);
1335 }
1336 break;
1337
1338 case PHASE_DATA_IN:
1339 return (tul_xfer_data(sc, scb, XS_CTL_DATA_IN));
1340
1341 case PHASE_DATA_OUT:
1342 return (tul_xfer_data(sc, scb, XS_CTL_DATA_OUT));
1343
1344 default:
1345 tul_bad_seq(sc);
1346 return (-1);
1347 }
1348 }
1349 }
1350
1351 /*
1352 * tul_state_5 - handle the partial or final completion of the current
1353 * data xfer. If DMA is still active stop it. If there is
1354 * more data to xfer, go to state 4 and start the xfer.
1355 * If not go to state 6 and finish the SCB.
1356 */
1357 static int
1358 tul_state_5(sc)
1359 struct iha_softc *sc;
1360 {
1361 bus_space_tag_t iot = sc->sc_iot;
1362 bus_space_handle_t ioh = sc->sc_ioh;
1363 struct iha_scsi_req_q *scb = sc->sc_actscb;
1364 struct iha_sg_element *sg;
1365 u_int32_t cnt;
1366 u_int8_t period, stat;
1367 long xcnt; /* cannot use unsigned!! see code: if (xcnt < 0) */
1368 int i;
1369
1370 cnt = bus_space_read_4(iot, ioh, TUL_STCNT0) & TCNT;
1371
1372 /*
1373 * Stop any pending DMA activity and check for parity error.
1374 */
1375
1376 if ((bus_space_read_1(iot, ioh, TUL_DCMD) & XDIR) != 0) {
1377 /* Input Operation */
1378 if ((sc->sc_status0 & SPERR) != 0)
1379 scb->ha_stat = HOST_SPERR;
1380
1381 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
1382 bus_space_write_1(iot, ioh, TUL_DCTRL0,
1383 bus_space_read_1(iot, ioh, TUL_DCTRL0) | SXSTP);
1384 while (bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND)
1385 ;
1386 }
1387
1388 } else {
1389 /* Output Operation */
1390 if ((sc->sc_status1 & SXCMP) == 0) {
1391 period = scb->tcs->syncm;
1392 if ((period & PERIOD_WIDE_SCSI) != 0)
1393 cnt += (bus_space_read_1(iot, ioh,
1394 TUL_SFIFOCNT) & FIFOC) * 2;
1395 else
1396 cnt += bus_space_read_1(iot, ioh,
1397 TUL_SFIFOCNT) & FIFOC;
1398 }
1399
1400 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
1401 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
1402 do
1403 stat = bus_space_read_1(iot, ioh, TUL_ISTUS0);
1404 while ((stat & DABT) == 0);
1405 }
1406
1407 if ((cnt == 1) && (sc->sc_phase == PHASE_DATA_OUT)) {
1408 if (tul_wait(sc, XF_FIFO_OUT) == -1)
1409 return (-1);
1410 cnt = 0;
1411
1412 } else if ((sc->sc_status1 & SXCMP) == 0)
1413 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1414 }
1415
1416 if (cnt == 0) {
1417 scb->buflen = 0;
1418 return (6);
1419 }
1420
1421 /* Update active data pointer and restart the I/O at the new point */
1422
1423 xcnt = scb->buflen - cnt; /* xcnt == bytes xferred */
1424 scb->buflen = cnt; /* cnt == bytes left */
1425
1426 if ((scb->flags & FLAG_SG) != 0) {
1427 sg = &scb->sglist[scb->sg_index];
1428 for (i = scb->sg_index; i < scb->sg_max; sg++, i++) {
1429 xcnt -= le32toh(sg->sg_len);
1430 if (xcnt < 0) {
1431 xcnt += le32toh(sg->sg_len);
1432
1433 sg->sg_addr =
1434 htole32(le32toh(sg->sg_addr) + xcnt);
1435 sg->sg_len =
1436 htole32(le32toh(sg->sg_len) - xcnt);
1437 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1438 scb->sgoffset, IHA_SG_SIZE,
1439 BUS_DMASYNC_PREWRITE);
1440
1441 scb->bufaddr += (i - scb->sg_index) *
1442 sizeof(struct iha_sg_element);
1443 scb->sg_size = scb->sg_max - i;
1444 scb->sg_index = i;
1445
1446 return (4);
1447 }
1448 }
1449 return (6);
1450
1451 } else
1452 scb->bufaddr += xcnt;
1453
1454 return (4);
1455 }
1456
1457 /*
1458 * tul_state_6 - finish off the active scb (may require several
1459 * iterations if PHASE_MSG_IN) and return -1 to indicate
1460 * the bus is free.
1461 */
1462 static int
1463 tul_state_6(sc)
1464 struct iha_softc *sc;
1465 {
1466
1467 for (;;) {
1468 switch (sc->sc_phase) {
1469 case PHASE_STATUS_IN:
1470 if (tul_status_msg(sc) == -1)
1471 return (-1);
1472 break;
1473
1474 case PHASE_MSG_IN:
1475 sc->sc_actscb->nextstat = 6;
1476 if ((tul_msgin(sc)) == -1)
1477 return (-1);
1478 break;
1479
1480 case PHASE_MSG_OUT:
1481 if ((tul_msgout(sc, MSG_NOOP)) == -1)
1482 return (-1);
1483 break;
1484
1485 case PHASE_DATA_IN:
1486 if (tul_xpad_in(sc) == -1)
1487 return (-1);
1488 break;
1489
1490 case PHASE_DATA_OUT:
1491 if (tul_xpad_out(sc) == -1)
1492 return (-1);
1493 break;
1494
1495 default:
1496 tul_bad_seq(sc);
1497 return (-1);
1498 }
1499 }
1500 }
1501
1502 /*
1503 * tul_state_8 - reset the active device and all busy SCBs using it
1504 */
1505 static int
1506 tul_state_8(sc)
1507 struct iha_softc *sc;
1508 {
1509 bus_space_tag_t iot = sc->sc_iot;
1510 bus_space_handle_t ioh = sc->sc_ioh;
1511 struct iha_scsi_req_q *scb;
1512 int i;
1513 u_int8_t tar;
1514
1515 if (sc->sc_phase == PHASE_MSG_OUT) {
1516 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_BUS_DEV_RESET);
1517
1518 scb = sc->sc_actscb;
1519
1520 /* This SCB finished correctly -- resetting the device */
1521 tul_append_done_scb(sc, scb, HOST_OK);
1522
1523 tul_reset_tcs(scb->tcs, sc->sc_sconf1);
1524
1525 tar = scb->target;
1526 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
1527 if (scb->target == tar)
1528 switch (scb->status) {
1529 case STATUS_BUSY:
1530 tul_append_done_scb(sc,
1531 scb, HOST_DEV_RST);
1532 break;
1533
1534 case STATUS_SELECT:
1535 tul_push_pend_scb(sc, scb);
1536 break;
1537
1538 default:
1539 break;
1540 }
1541
1542 sc->sc_flags |= FLAG_EXPECT_DISC;
1543
1544 if (tul_wait(sc, XF_FIFO_OUT) == -1)
1545 return (-1);
1546 }
1547
1548 tul_bad_seq(sc);
1549 return (-1);
1550 }
1551
1552 /*
1553 * tul_xfer_data - initiate the DMA xfer of the data
1554 */
1555 static int
1556 tul_xfer_data(sc, scb, direction)
1557 struct iha_softc *sc;
1558 struct iha_scsi_req_q *scb;
1559 int direction;
1560 {
1561 bus_space_tag_t iot = sc->sc_iot;
1562 bus_space_handle_t ioh = sc->sc_ioh;
1563 u_int32_t xferlen;
1564 u_int8_t xfertype;
1565
1566 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) != direction)
1567 return (6); /* wrong direction, abandon I/O */
1568
1569 bus_space_write_4(iot, ioh, TUL_STCNT0, scb->buflen);
1570
1571 if ((scb->flags & FLAG_SG) == 0) {
1572 xferlen = scb->buflen;
1573 xfertype = (direction == XS_CTL_DATA_IN) ? ST_X_IN : ST_X_OUT;
1574
1575 } else {
1576 xferlen = scb->sg_size * sizeof(struct iha_sg_element);
1577 xfertype = (direction == XS_CTL_DATA_IN) ? ST_SG_IN : ST_SG_OUT;
1578 }
1579
1580 bus_space_write_4(iot, ioh, TUL_DXC, xferlen);
1581 bus_space_write_4(iot, ioh, TUL_DXPA, scb->bufaddr);
1582 bus_space_write_1(iot, ioh, TUL_DCMD, xfertype);
1583
1584 bus_space_write_1(iot, ioh, TUL_SCMD,
1585 (direction == XS_CTL_DATA_IN) ? XF_DMA_IN : XF_DMA_OUT);
1586
1587 scb->nextstat = 5;
1588
1589 return (0);
1590 }
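/*
 * TUL_STCNT0 is always loaded with the SCSI byte count (scb->buflen),
 * while TUL_DXC gets the DMA byte count: the data length itself for a
 * flat buffer, or the size of the scatter/gather descriptor list when
 * FLAG_SG is set and the chip is told (ST_SG_IN/ST_SG_OUT) to walk the
 * descriptor list itself.
 */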
1591
1592 static int
1593 tul_xpad_in(sc)
1594 struct iha_softc *sc;
1595 {
1596 bus_space_tag_t iot = sc->sc_iot;
1597 bus_space_handle_t ioh = sc->sc_ioh;
1598 struct iha_scsi_req_q *scb = sc->sc_actscb;
1599
1600 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) != 0)
1601 scb->ha_stat = HOST_DO_DU;
1602
1603 for (;;) {
1604 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
1605 bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
1606 else
1607 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1608
1609 switch (tul_wait(sc, XF_FIFO_IN)) {
1610 case -1:
1611 return (-1);
1612
1613 case PHASE_DATA_IN:
1614 bus_space_read_1(iot, ioh, TUL_SFIFO);
1615 break;
1616
1617 default:
1618 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1619 return (6);
1620 }
1621 }
1622 }
1623
1624 static int
1625 tul_xpad_out(sc)
1626 struct iha_softc *sc;
1627 {
1628 bus_space_tag_t iot = sc->sc_iot;
1629 bus_space_handle_t ioh = sc->sc_ioh;
1630 struct iha_scsi_req_q *scb = sc->sc_actscb;
1631
1632 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) != 0)
1633 scb->ha_stat = HOST_DO_DU;
1634
1635 for (;;) {
1636 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
1637 bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
1638 else
1639 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1640
1641 bus_space_write_1(iot, ioh, TUL_SFIFO, 0);
1642
1643 switch (tul_wait(sc, XF_FIFO_OUT)) {
1644 case -1:
1645 return (-1);
1646
1647 case PHASE_DATA_OUT:
1648 break;
1649
1650 default:
1651 /* Disable wide CPU to allow read 16 bits */
1652 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
1653 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1654 return (6);
1655 }
1656 }
1657 }
1658
1659 static int
1660 tul_status_msg(sc)
1661 struct iha_softc *sc;
1662 {
1663 bus_space_tag_t iot = sc->sc_iot;
1664 bus_space_handle_t ioh = sc->sc_ioh;
1665 struct iha_scsi_req_q *scb;
1666 u_int8_t msg;
1667 int phase;
1668
1669 if ((phase = tul_wait(sc, CMD_COMP)) == -1)
1670 return (-1);
1671
1672 scb = sc->sc_actscb;
1673
1674 scb->ta_stat = bus_space_read_1(iot, ioh, TUL_SFIFO);
1675
1676 if (phase == PHASE_MSG_OUT) {
1677 if ((sc->sc_status0 & SPERR) == 0)
1678 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_NOOP);
1679 else
1680 bus_space_write_1(iot, ioh, TUL_SFIFO,
1681 MSG_PARITY_ERROR);
1682
1683 return (tul_wait(sc, XF_FIFO_OUT));
1684
1685 } else if (phase == PHASE_MSG_IN) {
1686 msg = bus_space_read_1(iot, ioh, TUL_SFIFO);
1687
1688 if ((sc->sc_status0 & SPERR) != 0)
1689 switch (tul_wait(sc, MSG_ACCEPT)) {
1690 case -1:
1691 return (-1);
1692 case PHASE_MSG_OUT:
1693 bus_space_write_1(iot, ioh, TUL_SFIFO,
1694 MSG_PARITY_ERROR);
1695 return (tul_wait(sc, XF_FIFO_OUT));
1696 default:
1697 tul_bad_seq(sc);
1698 return (-1);
1699 }
1700
1701 if (msg == MSG_CMDCOMPLETE) {
1702 if ((scb->ta_stat &
1703 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) {
1704 tul_bad_seq(sc);
1705 return (-1);
1706 }
1707 sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
1708 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1709 return (tul_wait(sc, MSG_ACCEPT));
1710 }
1711
1712 if ((msg == MSG_LINK_CMD_COMPLETE)
1713 || (msg == MSG_LINK_CMD_COMPLETEF)) {
1714 if ((scb->ta_stat &
1715 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM)
1716 return (tul_wait(sc, MSG_ACCEPT));
1717 }
1718 }
1719
1720 tul_bad_seq(sc);
1721 return (-1);
1722 }
1723
1724 /*
1725 * tul_busfree - SCSI bus free detected as a result of a TIMEOUT or
1726 * DISCONNECT interrupt. Reset the tulip FIFO and
1727 * SCONFIG0 and enable hardware reselect. Move any active
1728 * SCB to sc_donescb list. Return an appropriate host status
1729 * if an I/O was active.
1730 */
1731 static void
1732 tul_busfree(sc)
1733 struct iha_softc *sc;
1734 {
1735 bus_space_tag_t iot = sc->sc_iot;
1736 bus_space_handle_t ioh = sc->sc_ioh;
1737 struct iha_scsi_req_q *scb;
1738
1739 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1740 bus_space_write_1(iot, ioh, TUL_SCONFIG0, SCONFIG0DEFAULT);
1741 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
1742
1743 scb = sc->sc_actscb;
1744
1745 if (scb != NULL) {
1746 if (scb->status == STATUS_SELECT)
1747 /* selection timeout */
1748 tul_append_done_scb(sc, scb, HOST_SEL_TOUT);
1749 else
1750 /* Unexpected bus free */
1751 tul_append_done_scb(sc, scb, HOST_BAD_PHAS);
1752 }
1753 }
1754
1755 static void
1756 tul_reset_scsi_bus(sc)
1757 struct iha_softc *sc;
1758 {
1759 struct iha_scsi_req_q *scb;
1760 struct tcs *tcs;
1761 int i, s;
1762
1763 s = splbio();
1764
1765 tul_reset_dma(sc);
1766
1767 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
1768 switch (scb->status) {
1769 case STATUS_BUSY:
1770 tul_append_done_scb(sc, scb, HOST_SCSI_RST);
1771 break;
1772
1773 case STATUS_SELECT:
1774 tul_push_pend_scb(sc, scb);
1775 break;
1776
1777 default:
1778 break;
1779 }
1780
1781 for (i = 0, tcs = sc->sc_tcs; i < IHA_MAX_TARGETS; i++, tcs++)
1782 tul_reset_tcs(tcs, sc->sc_sconf1);
1783
1784 splx(s);
1785 }
1786
1787 /*
1788 * tul_resel - handle a detected SCSI bus reselection request.
1789 */
1790 static int
1791 tul_resel(sc)
1792 struct iha_softc *sc;
1793 {
1794 bus_space_tag_t iot = sc->sc_iot;
1795 bus_space_handle_t ioh = sc->sc_ioh;
1796 struct iha_scsi_req_q *scb;
1797 struct tcs *tcs;
1798 u_int8_t tag, target, lun, msg, abortmsg;
1799
1800 if (sc->sc_actscb != NULL) {
1801 if ((sc->sc_actscb->status == STATUS_SELECT))
1802 tul_push_pend_scb(sc, sc->sc_actscb);
1803 sc->sc_actscb = NULL;
1804 }
1805
1806 target = bus_space_read_1(iot, ioh, TUL_SBID);
1807 lun = bus_space_read_1(iot, ioh, TUL_SALVC) & MSG_IDENTIFY_LUNMASK;
1808
1809 tcs = &sc->sc_tcs[target];
1810
1811 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
1812 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
1813
1814 abortmsg = MSG_ABORT; /* until a valid tag has been obtained */
1815
1816 if (tcs->ntagscb != NULL)
1817 /* There is a non-tagged I/O active on the target */
1818 scb = tcs->ntagscb;
1819
1820 else {
1821 /*
1822 * Since there is no active non-tagged operation
1823 * read the tag type, the tag itself, and find
1824 * the appropriate scb by indexing sc_scb with
1825 * the tag.
1826 */
1827
1828 switch (tul_wait(sc, MSG_ACCEPT)) {
1829 case -1:
1830 return (-1);
1831 case PHASE_MSG_IN:
1832 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1833 if ((tul_wait(sc, XF_FIFO_IN)) == -1)
1834 return (-1);
1835 break;
1836 default:
1837 goto abort;
1838 }
1839
1840 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag Msg */
1841
1842 if ((msg < MSG_SIMPLE_Q_TAG) || (msg > MSG_ORDERED_Q_TAG))
1843 goto abort;
1844
1845 switch (tul_wait(sc, MSG_ACCEPT)) {
1846 case -1:
1847 return (-1);
1848 case PHASE_MSG_IN:
1849 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1850 if ((tul_wait(sc, XF_FIFO_IN)) == -1)
1851 return (-1);
1852 break;
1853 default:
1854 goto abort;
1855 }
1856
1857 tag = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag ID */
1858 scb = &sc->sc_scb[tag];
1859
1860 		abortmsg = MSG_ABORT_TAG; /* Now that we have a valid tag! */
1861 }
1862
1863 if ((scb->target != target)
1864 || (scb->lun != lun)
1865 || (scb->status != STATUS_BUSY)) {
1866 abort:
1867 tul_msgout_abort(sc, abortmsg);
1868 return (-1);
1869 }
1870
1871 sc->sc_actscb = scb;
1872
1873 if (tul_wait(sc, MSG_ACCEPT) == -1)
1874 return (-1);
1875
1876 return (tul_next_state(sc));
1877 }
1878
1879 static int
1880 tul_msgin(sc)
1881 struct iha_softc *sc;
1882 {
1883 bus_space_tag_t iot = sc->sc_iot;
1884 bus_space_handle_t ioh = sc->sc_ioh;
1885 int flags;
1886 int phase;
1887 u_int8_t msg;
1888
1889 for (;;) {
1890 if ((bus_space_read_1(iot, ioh, TUL_SFIFOCNT) & FIFOC) > 0)
1891 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1892
1893 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1894
1895 phase = tul_wait(sc, XF_FIFO_IN);
1896 msg = bus_space_read_1(iot, ioh, TUL_SFIFO);
1897
1898 switch (msg) {
1899 case MSG_DISCONNECT:
1900 sc->sc_flags |= FLAG_EXPECT_DISC;
1901 if (tul_wait(sc, MSG_ACCEPT) != -1)
1902 tul_bad_seq(sc);
1903 phase = -1;
1904 break;
1905 case MSG_SAVEDATAPOINTER:
1906 case MSG_RESTOREPOINTERS:
1907 case MSG_NOOP:
1908 phase = tul_wait(sc, MSG_ACCEPT);
1909 break;
1910 case MSG_MESSAGE_REJECT:
1911 /* XXX - need to clear FIFO like other 'Clear ATN'?*/
1912 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1913 flags = sc->sc_actscb->tcs->flags;
1914 if ((flags & FLAG_NO_NEG_SYNC) == 0)
1915 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
1916 phase = tul_wait(sc, MSG_ACCEPT);
1917 break;
1918 case MSG_EXTENDED:
1919 phase = tul_msgin_extended(sc);
1920 break;
1921 case MSG_IGN_WIDE_RESIDUE:
1922 phase = tul_msgin_ignore_wid_resid(sc);
1923 break;
1924 case MSG_CMDCOMPLETE:
1925 sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
1926 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1927 phase = tul_wait(sc, MSG_ACCEPT);
1928 if (phase != -1) {
1929 tul_bad_seq(sc);
1930 return (-1);
1931 }
1932 break;
1933 default:
1934 printf("[debug] tul_msgin: bad msg type: %d\n", msg);
1935 phase = tul_msgout_reject(sc);
1936 break;
1937 }
1938
1939 if (phase != PHASE_MSG_IN)
1940 return (phase);
1941 }
1942 /* NOTREACHED */
1943 }
1944
1945 static int
1946 tul_msgin_ignore_wid_resid(sc)
1947 struct iha_softc *sc;
1948 {
1949 bus_space_tag_t iot = sc->sc_iot;
1950 bus_space_handle_t ioh = sc->sc_ioh;
1951 int phase;
1952
1953 phase = tul_wait(sc, MSG_ACCEPT);
1954
1955 if (phase == PHASE_MSG_IN) {
1956 phase = tul_wait(sc, XF_FIFO_IN);
1957
1958 if (phase != -1) {
1959 bus_space_write_1(iot, ioh, TUL_SFIFO, 0);
1960 bus_space_read_1(iot, ioh, TUL_SFIFO);
1961 bus_space_read_1(iot, ioh, TUL_SFIFO);
1962
1963 phase = tul_wait(sc, MSG_ACCEPT);
1964 }
1965 }
1966
1967 return (phase);
1968 }
1969
1970 static int
1971 tul_msgin_extended(sc)
1972 struct iha_softc *sc;
1973 {
1974 bus_space_tag_t iot = sc->sc_iot;
1975 bus_space_handle_t ioh = sc->sc_ioh;
1976 int flags, i, phase, msglen, msgcode;
1977
1978 /*
1979 * XXX - can we just stop reading and reject, or do we have to
1980 * read all input, discarding the excess, and then reject
1981 */
1982 for (i = 0; i < IHA_MAX_EXTENDED_MSG; i++) {
1983 phase = tul_wait(sc, MSG_ACCEPT);
1984
1985 if (phase != PHASE_MSG_IN)
1986 return (phase);
1987
1988 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1989
1990 if (tul_wait(sc, XF_FIFO_IN) == -1)
1991 return (-1);
1992
1993 sc->sc_msg[i] = bus_space_read_1(iot, ioh, TUL_SFIFO);
1994
1995 if (sc->sc_msg[0] == i)
1996 break;
1997 }
1998
1999 msglen = sc->sc_msg[0];
2000 msgcode = sc->sc_msg[1];
2001
2002 if ((msglen == MSG_EXT_SDTR_LEN) && (msgcode == MSG_EXT_SDTR)) {
2003 if (tul_msgin_sdtr(sc) == 0) {
2004 tul_sync_done(sc);
2005 return (tul_wait(sc, MSG_ACCEPT));
2006 }
2007
2008 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2009
2010 phase = tul_wait(sc, MSG_ACCEPT);
2011 if (phase != PHASE_MSG_OUT)
2012 return (phase);
2013
2014 /* Clear FIFO for important message - final SYNC offer */
2015 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2016
2017 tul_sync_done(sc); /* This is our final offer */
2018
2019 } else if ((msglen == MSG_EXT_WDTR_LEN) && (msgcode == MSG_EXT_WDTR)) {
2020
2021 flags = sc->sc_actscb->tcs->flags;
2022
2023 if ((flags & FLAG_NO_WIDE) != 0)
2024 sc->sc_msg[2] = 0; /* Offer async xfers only */
2025
2026 else if (sc->sc_msg[2] > 2) /* BAD MSG: 2 is max value */
2027 return (tul_msgout_reject(sc));
2028
2029 else if (sc->sc_msg[2] == 2) /* a request for 32 bit xfers*/
2030 sc->sc_msg[2] = 1; /* Offer 16 instead */
2031
2032 else {
2033 tul_wide_done(sc);
2034 if ((flags & FLAG_NO_NEG_SYNC) == 0)
2035 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2036 return (tul_wait(sc, MSG_ACCEPT));
2037 }
2038
2039 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2040
2041 phase = tul_wait(sc, MSG_ACCEPT);
2042 if (phase != PHASE_MSG_OUT)
2043 return (phase);
2044 } else
2045 return (tul_msgout_reject(sc));
2046
2047 return (tul_msgout_extended(sc));
2048 }
2049
2050 /*
2051 * tul_msgin_sdtr - check SDTR msg in sc_msg. If the offer is
2052 * acceptable leave sc_msg as is and return 0.
2053 * If the negotiation must continue, modify sc_msg
2054  *		   as needed and return 1.
2055 */
2056 static int
2057 tul_msgin_sdtr(sc)
2058 struct iha_softc *sc;
2059 {
2060 int flags;
2061 int newoffer;
2062 u_int8_t default_period;
2063
2064 flags = sc->sc_actscb->tcs->flags;
2065
2066 default_period = tul_rate_tbl[flags & FLAG_SCSI_RATE];
2067
2068 if (sc->sc_msg[3] == 0) /* target offered async only. Accept it. */
2069 return (0);
2070
2071 newoffer = 0;
2072
2073 if ((flags & FLAG_NO_SYNC) != 0) {
2074 sc->sc_msg[3] = 0;
2075 newoffer = 1;
2076 }
2077
2078 if (sc->sc_msg[3] > IHA_MAX_OFFSET) {
2079 sc->sc_msg[3] = IHA_MAX_OFFSET;
2080 newoffer = 1;
2081 }
2082
2083 if (sc->sc_msg[2] < default_period) {
2084 sc->sc_msg[2] = default_period;
2085 newoffer = 1;
2086 }
2087
2088 if (sc->sc_msg[2] >= 59) { /* XXX magic */
2089 sc->sc_msg[3] = 0;
2090 newoffer = 1;
2091 }
2092
2093 return (newoffer);
2094 }
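/*
 * Example: if the target offers a period of 10 (40ns) but the configured
 * minimum period from tul_rate_tbl[] is 12 (~50ns), sc_msg[2] is raised to
 * 12 and the function returns 1 to force a counter-offer; an offered
 * period of 59 (236ns) or slower zeroes the offset, falling back to async.
 */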
2095
2096 static int
2097 tul_msgout(sc, msg)
2098 struct iha_softc *sc;
2099 u_int8_t msg;
2100 {
2101
2102 bus_space_write_1(sc->sc_iot, sc->sc_ioh, TUL_SFIFO, msg);
2103
2104 return (tul_wait(sc, XF_FIFO_OUT));
2105 }
2106
2107 static void
2108 tul_msgout_abort(sc, aborttype)
2109 struct iha_softc *sc;
2110 u_int8_t aborttype;
2111 {
2112
2113 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2114
2115 switch (tul_wait(sc, MSG_ACCEPT)) {
2116 case -1:
2117 break;
2118
2119 case PHASE_MSG_OUT:
2120 sc->sc_flags |= FLAG_EXPECT_DISC;
2121 if (tul_msgout(sc, aborttype) != -1)
2122 tul_bad_seq(sc);
2123 break;
2124
2125 default:
2126 tul_bad_seq(sc);
2127 break;
2128 }
2129 }
2130
2131 static int
2132 tul_msgout_reject(sc)
2133 struct iha_softc *sc;
2134 {
2135
2136 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2137
2138 if (tul_wait(sc, MSG_ACCEPT) == PHASE_MSG_OUT)
2139 return (tul_msgout(sc, MSG_MESSAGE_REJECT));
2140
2141 return (-1);
2142 }
2143
2144 static int
2145 tul_msgout_extended(sc)
2146 struct iha_softc *sc;
2147 {
2148 bus_space_tag_t iot = sc->sc_iot;
2149 bus_space_handle_t ioh = sc->sc_ioh;
2150 int phase;
2151
2152 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);
2153
2154 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
2155 sc->sc_msg, sc->sc_msg[0] + 1);
2156
2157 phase = tul_wait(sc, XF_FIFO_OUT);
2158
2159 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2160 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
2161
2162 return (phase);
2163 }
2164
2165 static int
2166 tul_msgout_wdtr(sc)
2167 struct iha_softc *sc;
2168 {
2169
2170 sc->sc_actscb->tcs->flags |= FLAG_WIDE_DONE;
2171
2172 sc->sc_msg[0] = MSG_EXT_WDTR_LEN;
2173 sc->sc_msg[1] = MSG_EXT_WDTR;
2174 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;
2175
2176 return (tul_msgout_extended(sc));
2177 }
2178
2179 static int
2180 tul_msgout_sdtr(sc)
2181 struct iha_softc *sc;
2182 {
2183 int rateindex;
2184
2185 rateindex = sc->sc_actscb->tcs->flags & FLAG_SCSI_RATE;
2186
2187 sc->sc_msg[0] = MSG_EXT_SDTR_LEN;
2188 sc->sc_msg[1] = MSG_EXT_SDTR;
2189 sc->sc_msg[2] = tul_rate_tbl[rateindex];
2190 sc->sc_msg[3] = IHA_MAX_OFFSET; /* REQ/ACK */
2191
2192 return (tul_msgout_extended(sc));
2193 }
2194
2195 static void
2196 tul_wide_done(sc)
2197 struct iha_softc *sc;
2198 {
2199 bus_space_tag_t iot = sc->sc_iot;
2200 bus_space_handle_t ioh = sc->sc_ioh;
2201 struct tcs *tcs = sc->sc_actscb->tcs;
2202
2203 tcs->syncm = 0;
2204 tcs->period = 0;
2205 tcs->offset = 0;
2206
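	/* sc_msg[2] holds the agreed width exponent; non-zero means 16 bit */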
2207 if (sc->sc_msg[2] != 0)
2208 tcs->syncm |= PERIOD_WIDE_SCSI;
2209
2210 tcs->sconfig0 &= ~ALTPD;
2211 tcs->flags &= ~FLAG_SYNC_DONE;
2212 tcs->flags |= FLAG_WIDE_DONE;
2213
2214 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2215 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2216 }
2217
2218 static void
2219 tul_sync_done(sc)
2220 struct iha_softc *sc;
2221 {
2222 bus_space_tag_t iot = sc->sc_iot;
2223 bus_space_handle_t ioh = sc->sc_ioh;
2224 struct tcs *tcs = sc->sc_actscb->tcs;
2225 int i;
2226
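	/*
	 * Record the agreed period (sc_msg[2]) and offset (sc_msg[3]),
	 * but only if this negotiation has not already been completed.
	 */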
2227 if ((tcs->flags & FLAG_SYNC_DONE) == 0) {
2228 tcs->period = sc->sc_msg[2];
2229 tcs->offset = sc->sc_msg[3];
2230 if (tcs->offset != 0) {
2231 tcs->syncm |= tcs->offset;
2232
2233 /* pick the highest possible rate */
2234 for (i = 0; i < 8; i++)
2235 if (tul_rate_tbl[i] >= tcs->period)
2236 break;
2237
2238 tcs->syncm |= (i << 4);
2239 tcs->sconfig0 |= ALTPD;
2240 }
2241
2242 tcs->flags |= FLAG_SYNC_DONE;
2243
2244 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2245 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2246 }
2247 }
2248
2249 void
2250 tul_reset_chip(sc)
2251 struct iha_softc *sc;
2252 {
2253 bus_space_tag_t iot = sc->sc_iot;
2254 bus_space_handle_t ioh = sc->sc_ioh;
2255
2256 /* reset tulip chip */
2257
2258 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSCSI);
2259
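	/* wait for the chip to flag completion of the SCSI bus reset (SRSTD) */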
2260 do {
2261 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
2262 } while ((sc->sc_sistat & SRSTD) == 0);
2263
2264 tul_set_ssig(sc, 0, 0);
2265
2266 bus_space_read_1(iot, ioh, TUL_SISTAT); /* Clear any active interrupt*/
2267 }
2268
2269 static void
2270 tul_select(sc, scb, select_type)
2271 struct iha_softc *sc;
2272 struct iha_scsi_req_q *scb;
2273 u_int8_t select_type;
2274 {
2275 bus_space_tag_t iot = sc->sc_iot;
2276 bus_space_handle_t ioh = sc->sc_ioh;
2277
2278 switch (select_type) {
2279 case SEL_ATN:
2280 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
2281 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
2282 scb->cmd, scb->cmdlen);
2283
2284 scb->nextstat = 2;
2285 break;
2286
2287 case SELATNSTOP:
2288 scb->nextstat = 1;
2289 break;
2290
2291 case SEL_ATN3:
2292 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
2293 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagmsg);
2294 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagid);
2295
2296 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, scb->cmd,
2297 scb->cmdlen);
2298
2299 scb->nextstat = 2;
2300 break;
2301
2302 default:
2303 printf("[debug] tul_select() - unknown select type = 0x%02x\n",
2304 select_type);
2305 return;
2306 }
2307
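	/* remove the scb from the pending queue, mark it active and start selection */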
2308 tul_del_pend_scb(sc, scb);
2309 scb->status = STATUS_SELECT;
2310
2311 sc->sc_actscb = scb;
2312
2313 bus_space_write_1(iot, ioh, TUL_SCMD, select_type);
2314 }
2315
2316 /*
2317 * tul_wait - wait for an interrupt to service or a SCSI bus phase change
2318 * after writing the supplied command to the tulip chip. If
2319 * the command is NO_OP, skip the command writing.
2320 */
2321 static int
2322 tul_wait(sc, cmd)
2323 struct iha_softc *sc;
2324 u_int8_t cmd;
2325 {
2326 bus_space_tag_t iot = sc->sc_iot;
2327 bus_space_handle_t ioh = sc->sc_ioh;
2328
2329 if (cmd != NO_OP)
2330 bus_space_write_1(iot, ioh, TUL_SCMD, cmd);
2331
2332 /*
2333 * Have to do this here, in addition to in iha_isr, because
2334 * interrupts might be turned off when we get here.
2335 */
2336 do {
2337 sc->sc_status0 = bus_space_read_1(iot, ioh, TUL_STAT0);
2338 } while ((sc->sc_status0 & INTPD) == 0);
2339
2340 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
2341 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
2342
2343 sc->sc_phase = sc->sc_status0 & PH_MASK;
2344
2345 if ((sc->sc_sistat & SRSTD) != 0) {
2346 /* SCSI bus reset interrupt */
2347 tul_reset_scsi_bus(sc);
2348 return (-1);
2349 }
2350
2351 if ((sc->sc_sistat & RSELED) != 0)
2352 /* Reselection interrupt */
2353 return (tul_resel(sc));
2354
2355 if ((sc->sc_sistat & STIMEO) != 0) {
2356 /* selected/reselected timeout interrupt */
2357 tul_busfree(sc);
2358 return (-1);
2359 }
2360
2361 if ((sc->sc_sistat & DISCD) != 0) {
2362 /* BUS disconnection interrupt */
2363 if ((sc->sc_flags & FLAG_EXPECT_DONE_DISC) != 0) {
2364 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2365 bus_space_write_1(iot, ioh, TUL_SCONFIG0,
2366 SCONFIG0DEFAULT);
2367 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
2368 tul_append_done_scb(sc, sc->sc_actscb, HOST_OK);
2369 sc->sc_flags &= ~FLAG_EXPECT_DONE_DISC;
2370
2371 } else if ((sc->sc_flags & FLAG_EXPECT_DISC) != 0) {
2372 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2373 bus_space_write_1(iot, ioh, TUL_SCONFIG0,
2374 SCONFIG0DEFAULT);
2375 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
2376 sc->sc_actscb = NULL;
2377 sc->sc_flags &= ~FLAG_EXPECT_DISC;
2378
2379 } else
2380 tul_busfree(sc);
2381
2382 return (-1);
2383 }
2384
2385 return (sc->sc_phase);
2386 }
2387
/*
 * tul_done_scb - the adapter has finished processing this scb;
 *		  check how the operation went and complete the
 *		  scsipi transfer accordingly.
 */
2392 static void
2393 tul_done_scb(sc, scb)
2394 struct iha_softc *sc;
2395 struct iha_scsi_req_q *scb;
2396 {
2397 struct scsipi_xfer *xs = scb->xs;
2398
2399 if (xs != NULL) {
2400 /* Cancel the timeout. */
2401 callout_stop(&xs->xs_callout);
2402
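		/* sync and unload the data DMA map now that the transfer is over */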
2403 if (xs->datalen > 0) {
2404 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
2405 0, scb->dmap->dm_mapsize,
2406 (xs->xs_control & XS_CTL_DATA_IN) ?
2407 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2408 bus_dmamap_unload(sc->sc_dmat, scb->dmap);
2409 }
2410
2411 xs->status = scb->ta_stat;
2412
2413 switch (scb->ha_stat) {
2414 case HOST_OK:
2415 switch (scb->ta_stat) {
2416 case SCSI_OK:
2417 case SCSI_CONDITION_MET:
2418 case SCSI_INTERM:
2419 case SCSI_INTERM_COND_MET:
2420 xs->resid = scb->buflen;
2421 xs->error = XS_NOERROR;
2422 if ((scb->flags & FLAG_RSENS) != 0)
2423 xs->error = XS_SENSE;
2424 break;
2425
2426 case SCSI_RESV_CONFLICT:
2427 case SCSI_BUSY:
2428 case SCSI_QUEUE_FULL:
2429 xs->error = XS_BUSY;
2430 break;
2431
2432 case SCSI_TERMINATED:
2433 case SCSI_ACA_ACTIVE:
2434 case SCSI_CHECK:
2435 scb->tcs->flags &=
2436 ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
2437
2438 if ((scb->flags & FLAG_RSENS) != 0 ||
2439 tul_push_sense_request(sc, scb) != 0) {
				scb->flags &= ~FLAG_RSENS;
2441 printf("%s: request sense failed\n",
2442 sc->sc_dev.dv_xname);
2443 xs->error = XS_DRIVER_STUFFUP;
2444 break;
2445 }
2446
2447 xs->error = XS_SENSE;
2448 return;
2449
2450 default:
2451 xs->error = XS_DRIVER_STUFFUP;
2452 break;
2453 }
2454 break;
2455
2456 case HOST_SEL_TOUT:
2457 xs->error = XS_SELTIMEOUT;
2458 break;
2459
2460 case HOST_SCSI_RST:
2461 case HOST_DEV_RST:
2462 xs->error = XS_RESET;
2463 break;
2464
2465 case HOST_SPERR:
2466 printf("%s: SCSI Parity error detected\n",
2467 sc->sc_dev.dv_xname);
2468 xs->error = XS_DRIVER_STUFFUP;
2469 break;
2470
2471 case HOST_TIMED_OUT:
2472 xs->error = XS_TIMEOUT;
2473 break;
2474
2475 case HOST_DO_DU:
2476 case HOST_BAD_PHAS:
2477 default:
2478 xs->error = XS_DRIVER_STUFFUP;
2479 break;
2480 }
2481
2482 scsipi_done(xs);
2483 }
2484
2485 tul_append_free_scb(sc, scb);
2486 }
2487
2488 static void
2489 tul_timeout(arg)
2490 void *arg;
2491 {
	struct iha_scsi_req_q *scb = (struct iha_scsi_req_q *)arg;
	struct scsipi_xfer *xs = scb->xs;
	struct scsipi_periph *periph;
	struct iha_softc *sc;

	if (xs == NULL)
		printf("[debug] tul_timeout called with xs == NULL\n");

	else {
		/* only dereference xs once we know it is valid */
		periph = xs->xs_periph;
		sc = (void *)periph->periph_channel->chan_adapter->adapt_dev;

		scsipi_printaddr(periph);
		printf("SCSI OpCode 0x%02x timed out\n", xs->cmd->opcode);

		tul_abort_xs(sc, xs, HOST_TIMED_OUT);
	}
2508 }
2509
2510 static void
2511 tul_exec_scb(sc, scb)
2512 struct iha_softc *sc;
2513 struct iha_scsi_req_q *scb;
2514 {
2515 bus_space_tag_t iot;
2516 bus_space_handle_t ioh;
2517 bus_dmamap_t dm;
2518 struct scsipi_xfer *xs = scb->xs;
2519 int nseg, s;
2520
2521 dm = scb->dmap;
2522 nseg = dm->dm_nsegs;
2523
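	/*
	 * With more than one DMA segment, build the hardware
	 * scatter/gather list; otherwise point directly at the
	 * single segment.
	 */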
2524 if (nseg > 1) {
2525 struct iha_sg_element *sg = scb->sglist;
2526 int i;
2527
2528 for (i = 0; i < nseg; i++) {
2529 sg[i].sg_len = htole32(dm->dm_segs[i].ds_len);
2530 sg[i].sg_addr = htole32(dm->dm_segs[i].ds_addr);
2531 }
2532 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2533 scb->sgoffset, IHA_SG_SIZE,
2534 BUS_DMASYNC_PREWRITE);
2535
2536 scb->flags |= FLAG_SG; /* XXX */
2537 scb->sg_size = scb->sg_max = nseg;
2538
2539 scb->bufaddr = scb->sg_addr;
2540 } else
2541 scb->bufaddr = dm->dm_segs[0].ds_addr;
2542
2543 if ((xs->xs_control & XS_CTL_POLL) == 0) {
2544 int timeout = xs->timeout;
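		/*
		 * Convert the timeout from milliseconds to ticks,
		 * ordering the arithmetic to avoid 32 bit overflow for
		 * large values (e.g. 10000 ms with hz == 100 gives
		 * 1000 ticks either way).
		 */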
2545 timeout = (timeout > 100000) ?
2546 timeout / 1000 * hz : timeout * hz / 1000;
2547 if (timeout == 0)
2548 timeout = 1;
2549 callout_reset(&xs->xs_callout, timeout, tul_timeout, scb);
2550 }
2551
2552 s = splbio();
2553
2554 if (((scb->flags & XS_RESET) != 0) || (scb->cmd[0] == REQUEST_SENSE))
2555 tul_push_pend_scb(sc, scb); /* Insert SCB at head of Pend */
2556 else
2557 tul_append_pend_scb(sc, scb); /* Append SCB to tail of Pend */
2558
2559 /*
2560 * Run through tul_main() to ensure something is active, if
2561 * only this new SCB.
2562 */
2563 if (sc->sc_semaph != SEMAPH_IN_MAIN) {
2564 iot = sc->sc_iot;
2565 ioh = sc->sc_ioh;
2566
2567 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
		sc->sc_semaph = SEMAPH_IN_MAIN;
2569
2570 splx(s);
2571 tul_main(sc);
2572 s = splbio();
2573
		sc->sc_semaph = ~SEMAPH_IN_MAIN;
2575 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
2576 }
2577
2578 splx(s);
2579 }
2580
2582 /*
2583 * tul_set_ssig - read the current scsi signal mask, then write a new
2584 * one which turns off/on the specified signals.
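 *		 e.g. tul_set_ssig(sc, REQ | BSY | SEL, ATN) turns the
 *		 REQ, BSY and SEL bits off and the ATN bit on in the
 *		 signal output register.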
2585 */
2586 static void
2587 tul_set_ssig(sc, offsigs, onsigs)
2588 struct iha_softc *sc;
2589 u_int8_t offsigs, onsigs;
2590 {
2591 bus_space_tag_t iot = sc->sc_iot;
2592 bus_space_handle_t ioh = sc->sc_ioh;
2593 u_int8_t currsigs;
2594
2595 currsigs = bus_space_read_1(iot, ioh, TUL_SSIGI);
2596 bus_space_write_1(iot, ioh, TUL_SSIGO, (currsigs & ~offsigs) | onsigs);
2597 }
2598
2599 /*
2600 * tul_alloc_sglist - allocate and map sglist for SCB's
2601 */
2602 static int
2603 tul_alloc_sglist(sc)
2604 struct iha_softc *sc;
2605 {
2606 bus_dma_segment_t seg;
2607 int error, rseg;
2608
2609 /*
2610 * Allocate dma-safe memory for the SCB's sglist
2611 */
2612 if ((error = bus_dmamem_alloc(sc->sc_dmat,
2613 IHA_SG_SIZE * IHA_MAX_SCB,
2614 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
2615 printf(": unable to allocate sglist, error = %d\n", error);
2616 return (error);
2617 }
2618 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
2619 IHA_SG_SIZE * IHA_MAX_SCB, (caddr_t *)&sc->sc_sglist,
2620 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
2621 printf(": unable to map sglist, error = %d\n", error);
2622 return (error);
2623 }
2624
2625 /*
2626 * Create and load the DMA map used for the SCBs
2627 */
2628 if ((error = bus_dmamap_create(sc->sc_dmat,
2629 IHA_SG_SIZE * IHA_MAX_SCB, 1, IHA_SG_SIZE * IHA_MAX_SCB,
2630 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
2631 printf(": unable to create control DMA map, error = %d\n",
2632 error);
2633 return (error);
2634 }
2635 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
2636 sc->sc_sglist, IHA_SG_SIZE * IHA_MAX_SCB,
2637 NULL, BUS_DMA_NOWAIT)) != 0) {
2638 printf(": unable to load control DMA map, error = %d\n", error);
2639 return (error);
2640 }
2641
2642 memset(sc->sc_sglist, 0, IHA_SG_SIZE * IHA_MAX_SCB);
2643
2644 return (0);
2645 }
2646
2647 /*
2648 * tul_read_eeprom - read Serial EEPROM value & set to defaults
2649 * if required. XXX - Writing does NOT work!
2650 */
2651 void
2652 tul_read_eeprom(sc, eeprom)
2653 struct iha_softc *sc;
2654 struct iha_eeprom *eeprom;
2655 {
2656 bus_space_tag_t iot = sc->sc_iot;
2657 bus_space_handle_t ioh = sc->sc_ioh;
2658 u_int16_t *buf = (u_int16_t *)eeprom;
2659 u_int8_t gctrl;
2660
2661 /*------Enable EEProm programming ---*/
2662 gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) | EEPRG;
2663 bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
2664
	/*------ Validate contents, reprogram defaults if invalid ---*/
	if (tul_se2_rd_all(sc, buf) == 0) {
		tul_se2_update_all(sc);
		if (tul_se2_rd_all(sc, buf) == 0)
			panic("could not program iha Tulip EEPROM");
	}
2671
2672 /*------ Disable EEProm programming ---*/
2673 gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) & ~EEPRG;
2674 bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
2675 }
2676
/*
 * tul_se2_update_all - write the default SCSI H/A configuration
 *			pattern (eeprom_default) and its checksum
 *			into the serial EEPROM.
 */
2683 void
2684 tul_se2_update_all(sc)
2685 struct iha_softc *sc;
2686 {
2687 bus_space_tag_t iot = sc->sc_iot;
2688 bus_space_handle_t ioh = sc->sc_ioh;
2689 u_int16_t *np;
2690 u_int32_t chksum;
2691 int i;
2692
2693 /* Enable erase/write state of EEPROM */
2694 tul_se2_instr(sc, ENABLE_ERASE);
2695 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2696 EEP_WAIT();
2697
2698 np = (u_int16_t *)&eeprom_default;
2699
2700 for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
2701 tul_se2_wr(sc, i, *np);
2702 chksum += *np++;
2703 }
2704
2705 chksum &= 0x0000ffff;
2706 tul_se2_wr(sc, 31, chksum);
2707
2708 /* Disable erase/write state of EEPROM */
2709 tul_se2_instr(sc, 0);
2710 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2711 EEP_WAIT();
2712 }
2713
2714 /*
2715 * tul_se2_wr - write the given 16 bit value into the Serial EEPROM
2716 * at the specified offset
2717 */
2718 void
2719 tul_se2_wr(sc, addr, writeword)
2720 struct iha_softc *sc;
2721 int addr;
2722 u_int16_t writeword;
2723 {
2724 bus_space_tag_t iot = sc->sc_iot;
2725 bus_space_handle_t ioh = sc->sc_ioh;
2726 int i, bit;
2727
2728 /* send 'WRITE' Instruction == address | WRITE bit */
2729 tul_se2_instr(sc, addr | WRITE);
2730
2731 for (i = 16; i > 0; i--) {
2732 if (writeword & (1 << (i - 1)))
2733 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRDO);
2734 else
2735 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2736 EEP_WAIT();
2737 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2738 EEP_WAIT();
2739 }
2740
2741 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2742 EEP_WAIT();
2743 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2744 EEP_WAIT();
2745 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2746 EEP_WAIT();
2747
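	/* toggle the clock until the EEPROM raises DI to signal completion */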
2748 for (;;) {
2749 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2750 EEP_WAIT();
2751 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2752 EEP_WAIT();
2753 bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI;
2754 EEP_WAIT();
2755 if (bit != 0)
2756 break; /* write complete */
2757 }
2758
2759 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2760 }
2761
/*
 * tul_se2_rd - read & return the 16 bit value at the specified
 *		offset in the Serial E2PROM
 */
2767 u_int16_t
2768 tul_se2_rd(sc, addr)
2769 struct iha_softc *sc;
2770 int addr;
2771 {
2772 bus_space_tag_t iot = sc->sc_iot;
2773 bus_space_handle_t ioh = sc->sc_ioh;
2774 int i, bit;
2775 u_int16_t readword;
2776
2777 /* Send 'READ' instruction == address | READ bit */
2778 tul_se2_instr(sc, addr | READ);
2779
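	/* clock in the 16 data bits, most significant bit first */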
2780 readword = 0;
2781 for (i = 16; i > 0; i--) {
2782 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2783 EEP_WAIT();
2784 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2785 EEP_WAIT();
		/* sample the data bit after the falling edge of the clock */
2787 bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI ? 1 : 0;
2788 EEP_WAIT();
2789
2790 readword |= bit << (i - 1);
2791 }
2792
2793 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2794
2795 return (readword);
2796 }
2797
2798 /*
2799 * tul_se2_rd_all - Read SCSI H/A config parameters from serial EEPROM
2800 */
2801 int
2802 tul_se2_rd_all(sc, buf)
2803 struct iha_softc *sc;
2804 u_int16_t *buf;
2805 {
2806 struct iha_eeprom *eeprom = (struct iha_eeprom *)buf;
2807 u_int32_t chksum;
2808 int i;
2809
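	/* read the configuration words, accumulating the checksum as we go */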
2810 for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
2811 *buf = tul_se2_rd(sc, i);
2812 chksum += *buf++;
2813 }
2814 *buf = tul_se2_rd(sc, 31); /* read checksum from EEPROM */
2815
2816 chksum &= 0x0000ffff; /* lower 16 bits */
2817
2818 return (eeprom->signature == EEP_SIGNATURE) &&
2819 (eeprom->checksum == chksum);
2820 }
2821
/*
 * tul_se2_instr - shift the start bit and the 8 bit instruction/address
 *		   out to the serial E2PROM one bit at a time
 */
2825 void
2826 tul_se2_instr(sc, instr)
2827 struct iha_softc *sc;
2828 int instr;
2829 {
2830 bus_space_tag_t iot = sc->sc_iot;
2831 bus_space_handle_t ioh = sc->sc_ioh;
2832 int b, i;
2833
2834 b = NVRCS | NVRDO; /* Write the start bit (== 1) */
2835
2836 bus_space_write_1(iot, ioh, TUL_NVRAM, b);
2837 EEP_WAIT();
2838 bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
2839 EEP_WAIT();
2840
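	/* shift out the 8 instruction/address bits, most significant bit first */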
2841 for (i = 8; i > 0; i--) {
2842 if (instr & (1 << (i - 1)))
2843 b = NVRCS | NVRDO; /* Write a 1 bit */
2844 else
2845 b = NVRCS; /* Write a 0 bit */
2846
2847 bus_space_write_1(iot, ioh, TUL_NVRAM, b);
2848 EEP_WAIT();
2849 bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
2850 EEP_WAIT();
2851 }
2852
2853 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2854 }
2855
2856 /*
2857 * tul_reset_tcs - reset the target control structure pointed
2858 * to by tcs to default values. tcs flags
2859 * only has the negotiation done bits reset as
2860 * the other bits are fixed at initialization.
2861 */
2862 void
2863 tul_reset_tcs(tcs, config0)
2864 struct tcs *tcs;
2865 u_int8_t config0;
2866 {
2867
2868 tcs->flags &= ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
2869 tcs->period = 0;
2870 tcs->offset = 0;
2871 tcs->tagcnt = 0;
2872 tcs->ntagscb = NULL;
2873 tcs->syncm = 0;
2874 tcs->sconfig0 = config0;
2875 }
2876