1 /*	$NetBSD: iha.c,v 1.5 2001/07/19 16:25:25 thorpej Exp $ */
2 /*
3 * Initio INI-9xxxU/UW SCSI Device Driver
4 *
5 * Copyright (c) 2000 Ken Westerback
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 *-------------------------------------------------------------------------
30 *
31 * Ported from i91u.c, provided by Initio Corporation, which credits:
32 *
33 * Device driver for the INI-9XXXU/UW or INIC-940/950 PCI SCSI Controller.
34 *
35 * FreeBSD
36 *
37 * Written for 386bsd and FreeBSD by
38 * Winston Hung <winstonh@initio.com>
39 *
40 * Copyright (c) 1997-99 Initio Corp. All rights reserved.
41 *
42 *-------------------------------------------------------------------------
43 */
44
45 /*
46 * Ported to NetBSD by Izumi Tsutsui <tsutsui@ceres.dti.ne.jp> from OpenBSD:
47 * $OpenBSD: iha.c,v 1.3 2001/02/20 00:47:33 krw Exp $
48 */
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/buf.h>
54 #include <sys/device.h>
55 #include <sys/malloc.h>
56
57 #include <uvm/uvm_extern.h>
58
59 #include <machine/bus.h>
60 #include <machine/intr.h>
61
62 #include <dev/scsipi/scsi_all.h>
63 #include <dev/scsipi/scsipi_all.h>
64 #include <dev/scsipi/scsiconf.h>
65 #include <dev/scsipi/scsi_message.h>
66
67 #include <dev/ic/ihareg.h>
68 #include <dev/ic/ihavar.h>
69
70 /*
71 * SCSI Rate Table, indexed by FLAG_SCSI_RATE field of
72 * tcs flags.
73 */
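/*
 * Each entry is the SDTR transfer period factor (the period in nanoseconds
 * divided by 4) that tul_msgout_sync() offers and that tul_msgin_sync()
 * uses as the floor when checking a target's counter-offer.
 */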
74 static u_int8_t tul_rate_tbl[8] = {
75 /* fast 20 */
76 /* nanosecond divide by 4 */
77 12, /* 50ns, 20M */
78 18, /* 75ns, 13.3M */
79 25, /* 100ns, 10M */
80 31, /* 125ns, 8M */
81 37, /* 150ns, 6.6M */
82 43, /* 175ns, 5.7M */
83 50, /* 200ns, 5M */
84 62 /* 250ns, 4M */
85 };
86
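/*
 * Built-in EEPROM image, presumably used by tul_read_eeprom() as a fallback
 * when no valid settings can be read from the card (the fallback logic is
 * in tul_read_eeprom(), which is not shown in this section).  Each
 * EEP_WORD() packs two byte-wide fields into one 16-bit word, matching the
 * EEP_LBYTE()/EEP_HBYTE() extraction done in iha_attach().
 */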
87 static u_int16_t eeprom_default[EEPROM_SIZE] = {
88 /* -- Header ------------------------------------ */
89 /* signature */
90 EEP_SIGNATURE,
91 /* size, revision */
92 EEP_WORD(EEPROM_SIZE * 2, 0x01),
93 /* -- Host Adapter Structure -------------------- */
94 /* model */
95 0x0095,
96 /* model info, number of channel */
97 EEP_WORD(0x00, 1),
98 /* BIOS config */
99 EEP_BIOSCFG_DEFAULT,
100 /* host adapter config */
101 0,
102
103 /* -- eeprom_adapter[0] ------------------------------- */
104 /* ID, adapter config 1 */
105 EEP_WORD(7, CFG_DEFAULT),
106 /* adapter config 2, number of targets */
107 EEP_WORD(0x00, 8),
108 /* target flags */
109 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
110 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
111 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
112 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
113 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
114 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
115 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
116 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
117
118 /* -- eeprom_adapter[1] ------------------------------- */
119 /* ID, adapter config 1 */
120 EEP_WORD(7, CFG_DEFAULT),
121 /* adapter config 2, number of targets */
122 EEP_WORD(0x00, 8),
123 /* target flags */
124 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
125 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
126 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
127 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
128 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
129 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
130 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
131 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT),
132 /* reserved[5] */
133 0, 0, 0, 0, 0,
134 /* checksum */
135 0
136 };
137
138 static u_int8_t tul_data_over_run(struct iha_scsi_req_q *);
139
140 static int tul_push_sense_request(struct iha_softc *, struct iha_scsi_req_q *);
141 static void tul_timeout(void *);
142 static int tul_alloc_sglist(struct iha_softc *);
143
144 static void tul_read_eeprom(struct iha_softc *, struct iha_eeprom *);
145 static void tul_se2_update_all(struct iha_softc *);
146 static int tul_se2_rd_all(struct iha_softc *, u_int16_t *);
147 static void tul_se2_wr(struct iha_softc *, int, u_int16_t);
148 static void tul_se2_instr(struct iha_softc *, int);
149 static u_int16_t tul_se2_rd(struct iha_softc *, int);
150
151 static void tul_reset_scsi_bus(struct iha_softc *);
152 static void tul_reset_chip(struct iha_softc *);
153 static void tul_reset_dma(struct iha_softc *);
154
155 static void tul_reset_tcs(struct tcs *, u_int8_t);
156
157 static void tul_done_scb(struct iha_softc *, struct iha_scsi_req_q *);
158 static void tul_exec_scb(struct iha_softc *, struct iha_scsi_req_q *);
159
160 static void tul_main(struct iha_softc *);
161 static void tul_scsi(struct iha_softc *);
162
163 static int tul_wait(struct iha_softc *, u_int8_t);
164
165 static __inline void tul_mark_busy_scb(struct iha_scsi_req_q *);
166
167 static void tul_append_free_scb(struct iha_softc *, struct iha_scsi_req_q *);
168 static void tul_append_done_scb(struct iha_softc *, struct iha_scsi_req_q *,
169 u_int8_t);
170 static __inline struct iha_scsi_req_q *tul_pop_done_scb(struct iha_softc *);
171
172 static __inline void tul_append_pend_scb(struct iha_softc *,
173 struct iha_scsi_req_q *);
174 static __inline void tul_push_pend_scb(struct iha_softc *,
175 struct iha_scsi_req_q *);
176 static __inline void tul_del_pend_scb(struct iha_softc *,
177 struct iha_scsi_req_q *);
178 static struct iha_scsi_req_q *tul_find_pend_scb(struct iha_softc *);
179
180 static void tul_sync_done(struct iha_softc *);
181 static void tul_wdtr_done(struct iha_softc *);
182 static void tul_bad_seq(struct iha_softc *);
183
184 static int tul_next_state(struct iha_softc *);
185 static int tul_state_1(struct iha_softc *);
186 static int tul_state_2(struct iha_softc *);
187 static int tul_state_3(struct iha_softc *);
188 static int tul_state_4(struct iha_softc *);
189 static int tul_state_5(struct iha_softc *);
190 static int tul_state_6(struct iha_softc *);
191 static int tul_state_8(struct iha_softc *);
192
193 static void tul_set_ssig(struct iha_softc *, u_int8_t, u_int8_t);
194
195 static int tul_xpad_in(struct iha_softc *);
196 static int tul_xpad_out(struct iha_softc *);
197
198 static int tul_xfer_data(struct iha_softc *, struct iha_scsi_req_q *,
199 int direction);
200
201 static int tul_status_msg(struct iha_softc *);
202
203 static int tul_msgin(struct iha_softc *);
204 static int tul_msgin_sync(struct iha_softc *);
205 static int tul_msgin_extend(struct iha_softc *);
206 static int tul_msgin_ignore_wid_resid(struct iha_softc *);
207
208 static int tul_msgout(struct iha_softc *, u_int8_t);
209 static void tul_msgout_abort(struct iha_softc *, u_int8_t);
210 static int tul_msgout_reject(struct iha_softc *);
211 static int tul_msgout_sync(struct iha_softc *);
212 static int tul_msgout_wide(struct iha_softc *);
213
214 static void tul_select(struct iha_softc *, struct iha_scsi_req_q *, u_int8_t);
215
216 static void tul_busfree(struct iha_softc *);
217 static int tul_resel(struct iha_softc *);
218
219 static void tul_abort_xs(struct iha_softc *, struct scsipi_xfer *, u_int8_t);
220
221 static void iha_minphys(struct buf *);
222 void iha_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
223 void *arg);
224
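/*
 * iha_attach() and iha_intr() are the entry points used by the bus
 * front-end (e.g. the PCI glue, assumed to be dev/pci/iha_pci.c): it is
 * expected to map the registers, initialize sc_iot/sc_ioh/sc_dmat, hook
 * iha_intr() up as the interrupt handler and then call iha_attach().
 */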
225 /*
226 * iha_intr - the interrupt service routine for the iha driver
227 */
228 int
229 iha_intr(arg)
230 void *arg;
231 {
232 bus_space_tag_t iot;
233 bus_space_handle_t ioh;
234 struct iha_softc *sc;
235 int s;
236
237 sc = (struct iha_softc *)arg;
238 iot = sc->sc_iot;
239 ioh = sc->sc_ioh;
240
241 if ((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
242 return (0);
243
244 s = splbio(); /* XXX - Or are interrupts off when ISR's are called? */
245
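/*
 * sc_semaph serializes entry into tul_main(): if an earlier invocation is
 * still inside the main loop, just return and let it pick up the new work,
 * since tul_main() keeps looping while INTPD is asserted.
 */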
246 if (sc->sc_semaph != SEMAPH_IN_MAIN) {
247 /* XXX - need these inside a splbio()/splx()? */
248 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
249 sc->sc_semaph = SEMAPH_IN_MAIN;
250
251 tul_main(sc);
252
253 sc->sc_semaph = ~SEMAPH_IN_MAIN;
254 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
255 }
256
257 splx(s);
258
259 return (1);
260 }
261
262 void
263 iha_scsipi_request(chan, req, arg)
264 struct scsipi_channel *chan;
265 scsipi_adapter_req_t req;
266 void *arg;
267 {
268 struct scsipi_xfer *xs;
269 struct scsipi_periph *periph;
270 struct iha_scsi_req_q *scb;
271 struct iha_softc *sc;
272 int error, flags, s;
273
274 sc = (struct iha_softc *)chan->chan_adapter->adapt_dev;
275
276 switch (req) {
277 case ADAPTER_REQ_RUN_XFER:
278 xs = arg;
279 periph = xs->xs_periph;
280 flags = xs->xs_control;
281
282 if (xs->cmdlen > sizeof(struct scsi_generic) ||
283 periph->periph_target >= IHA_MAX_TARGETS) {
284 xs->error = XS_DRIVER_STUFFUP;
 scsipi_done(xs);
285 return;
286 }
287
288 s = splbio();
289 scb = TAILQ_FIRST(&sc->sc_freescb);
290 if (scb != NULL) {
291 scb->status = STATUS_RENT;
292 TAILQ_REMOVE(&sc->sc_freescb, scb, chain);
293 }
294 #ifdef DIAGNOSTIC
295 else {
296 scsipi_printaddr(periph);
297 printf("unable to allocate scb\n");
298 panic("iha_scsipi_request");
299 }
300 #endif
301 splx(s);
302
303 scb->target = periph->periph_target;
304 scb->lun = periph->periph_lun;
305 scb->tcs = &sc->sc_tcs[scb->target];
306 scb->flags = xs->xs_control; /* XXX */
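/*
 * The second argument of MSG_IDENTIFY() is the "may disconnect" flag;
 * disconnection is not offered when the transfer is itself a REQUEST SENSE
 * issued by the midlayer (XS_CTL_REQSENSE).
 */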
307 scb->scb_id = MSG_IDENTIFY(periph->periph_lun,
308 (xs->xs_control & XS_CTL_REQSENSE) == 0);
309
310 scb->xs = xs;
311 scb->timeout = xs->timeout;
312 scb->cmdlen = xs->cmdlen;
313 memcpy(&scb->cmd, xs->cmd, xs->cmdlen);
314
315 scb->buflen = xs->datalen;
316
317 if (scb->buflen > 0) {
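/*
 * Map the data buffer for DMA: the sleep policy follows XS_CTL_NOSLEEP,
 * the transfer direction follows XS_CTL_DATA_IN/OUT, and
 * BUS_DMA_STREAMING marks it as a streaming data mapping.
 */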
318 error = bus_dmamap_load(sc->sc_dmat, scb->dmap,
319 xs->data, scb->buflen, NULL,
320 ((xs->xs_control & XS_CTL_NOSLEEP) ?
321 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
322 BUS_DMA_STREAMING |
323 ((xs->xs_control & XS_CTL_DATA_IN) ?
324 BUS_DMA_READ : BUS_DMA_WRITE));
325
326 if (error) {
327 printf("%s: error %d loading dma map\n",
328 sc->sc_dev.dv_xname, error);
329 tul_append_free_scb(sc, scb);
330 xs->error = XS_DRIVER_STUFFUP;
331 scsipi_done(xs);
332 return;
333 }
334 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
335 0, scb->dmap->dm_mapsize,
336 (xs->xs_control & XS_CTL_DATA_IN) ?
337 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
338 }
339
340 tul_exec_scb(sc, scb);
341 return;
342
343 case ADAPTER_REQ_GROW_RESOURCES:
344 return; /* XXX */
345
346 case ADAPTER_REQ_SET_XFER_MODE:
347 return; /* XXX */
348 }
349 }
350
351 void
352 iha_attach(sc)
353 struct iha_softc *sc;
354 {
355 bus_space_tag_t iot = sc->sc_iot;
356 bus_space_handle_t ioh = sc->sc_ioh;
357 struct iha_scsi_req_q *scb;
358 struct iha_eeprom eeprom;
359 struct eeprom_adapter *conf;
360 int i, error, reg;
361
362 tul_read_eeprom(sc, &eeprom);
363
364 conf = &eeprom.adapter[0];
365
366 /*
367 * fill in the rest of the IHA_SOFTC fields
368 */
369 sc->sc_id = CFG_ID(conf->config1);
370 sc->sc_semaph = ~SEMAPH_IN_MAIN;
371 sc->sc_status0 = 0;
372 sc->sc_actscb = NULL;
373
374 TAILQ_INIT(&sc->sc_freescb);
375 TAILQ_INIT(&sc->sc_pendscb);
376 TAILQ_INIT(&sc->sc_donescb);
377 error = tul_alloc_sglist(sc);
378 if (error != 0) {
379 printf(": cannot allocate sglist\n");
380 return;
381 }
382
383 sc->sc_scb = malloc(sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB,
384 M_DEVBUF, M_NOWAIT);
385 if (sc->sc_scb == NULL) {
386 printf(": cannot allocate SCB\n");
387 return;
388 }
389 memset(sc->sc_scb, 0, sizeof(struct iha_scsi_req_q) * IHA_MAX_SCB);
390
391 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) {
392 scb->scb_tagid = i;
393 scb->sgoffset = IHA_SG_SIZE * i;
394 scb->sglist = &sc->sc_sglist[i].sg_element[0];
395 scb->sg_addr =
396 sc->sc_dmamap->dm_segs[0].ds_addr + scb->sgoffset;
397
398 error = bus_dmamap_create(sc->sc_dmat,
399 (IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE, IHA_MAX_SG_ENTRIES,
400 (IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE, 0,
401 BUS_DMA_NOWAIT, &scb->dmap);
402
403 if (error != 0) {
404 printf(": couldn't create SCB DMA map, error = %d\n",
405 error);
406 return;
407 }
408 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
409 }
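/*
 * Each DMA map above covers at most (IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE
 * bytes, the same limit iha_minphys() places on bp->b_bcount, so a mapped
 * request never needs more than IHA_MAX_SG_ENTRIES segments.
 */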
410
411 /* Mask all the interrupts */
412 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
413
414 /* Stop any I/O and reset the scsi module */
415 tul_reset_dma(sc);
416 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSMOD);
417
418 /* Program HBA's SCSI ID */
419 bus_space_write_1(iot, ioh, TUL_SID, sc->sc_id << 4);
420
421 /*
422 * Configure the channel as requested by the NVRAM settings read
423 * by tul_read_eeprom() above.
424 */
425
426 sc->sc_sconf1 = SCONFIG0DEFAULT;
427 if ((conf->config1 & CFG_EN_PAR) != 0)
428 sc->sc_sconf1 |= SPCHK;
429 bus_space_write_1(iot, ioh, TUL_SCONFIG0, sc->sc_sconf1);
430
431 /* set selection timeout to 250 ms */
432 bus_space_write_1(iot, ioh, TUL_STIMO, STIMO_250MS);
433
434 /* Enable desired SCSI termination configuration read from eeprom */
435 reg = 0;
436 if (conf->config1 & CFG_ACT_TERM1)
437 reg |= ENTMW;
438 if (conf->config1 & CFG_ACT_TERM2)
439 reg |= ENTM;
440 bus_space_write_1(iot, ioh, TUL_DCTRL0, reg);
441
442 reg = bus_space_read_1(iot, ioh, TUL_GCTRL1) & ~ATDEN;
443 if (conf->config1 & CFG_AUTO_TERM)
444 reg |= ATDEN;
445 bus_space_write_1(iot, ioh, TUL_GCTRL1, reg);
446
447 for (i = 0; i < IHA_MAX_TARGETS / 2; i++) {
448 sc->sc_tcs[i * 2 ].flags = EEP_LBYTE(conf->tflags[i]);
449 sc->sc_tcs[i * 2 + 1].flags = EEP_HBYTE(conf->tflags[i]);
450 tul_reset_tcs(&sc->sc_tcs[i * 2 ], sc->sc_sconf1);
451 tul_reset_tcs(&sc->sc_tcs[i * 2 + 1], sc->sc_sconf1);
452 }
453
454 tul_reset_chip(sc);
455 bus_space_write_1(iot, ioh, TUL_SIEN, ALL_INTERRUPTS);
456
457 /*
458 * fill in the adapter.
459 */
460 sc->sc_adapter.adapt_dev = &sc->sc_dev;
461 sc->sc_adapter.adapt_nchannels = 1;
462 sc->sc_adapter.adapt_openings = IHA_MAX_SCB;
463 sc->sc_adapter.adapt_max_periph = IHA_MAX_SCB;
464 sc->sc_adapter.adapt_ioctl = NULL;
465 sc->sc_adapter.adapt_minphys = iha_minphys;
466 sc->sc_adapter.adapt_request = iha_scsipi_request;
467
468 /*
469 * fill in the channel.
470 */
471 sc->sc_channel.chan_adapter = &sc->sc_adapter;
472 sc->sc_channel.chan_bustype = &scsi_bustype;
473 sc->sc_channel.chan_channel = 0;
474 sc->sc_channel.chan_ntargets = CFG_TARGET(conf->config2);
475 sc->sc_channel.chan_nluns = 8;
476 sc->sc_channel.chan_id = sc->sc_id;
477
478 /*
479 * Now try to attach all the sub devices.
480 */
481 config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
482 }
483
484 /*
485 * iha_minphys - reduce bp->b_bcount to something less than
486 * or equal to the largest I/O possible through
487 * the adapter. Called from higher layers
488 * via sc->sc_adapter.adapt_minphys.
489 */
490 static void
491 iha_minphys(bp)
492 struct buf *bp;
493 {
494 if (bp->b_bcount > ((IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE))
495 bp->b_bcount = ((IHA_MAX_SG_ENTRIES - 1) * PAGE_SIZE);
496
497 minphys(bp);
498 }
499
500 /*
501 * tul_reset_dma - abort any active DMA xfer, reset tulip FIFO.
502 */
503 static void
504 tul_reset_dma(sc)
505 struct iha_softc *sc;
506 {
507 bus_space_tag_t iot = sc->sc_iot;
508 bus_space_handle_t ioh = sc->sc_ioh;
509
510 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
511 /* if DMA xfer is pending, abort DMA xfer */
512 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
513 /* wait Abort DMA xfer done */
514 while ((bus_space_read_1(iot, ioh, TUL_ISTUS0) & DABT) == 0)
515 ;
516 }
517
518 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
519 }
520
521 /*
522 * tul_append_free_scb - append the supplied SCB to the tail of the
523 * sc_freescb queue after clearing and resetting
524 * everything possible.
525 */
526 static void
527 tul_append_free_scb(sc, scb)
528 struct iha_softc *sc;
529 struct iha_scsi_req_q *scb;
530 {
531 int s;
532
533 s = splbio();
534
535 if (scb == sc->sc_actscb)
536 sc->sc_actscb = NULL;
537
538 scb->status = STATUS_QUEUED;
539 scb->ha_stat = HOST_OK;
540 scb->ta_stat = SCSI_OK;
541
542 scb->nextstat = 0;
543 scb->sg_index = 0;
544 scb->sg_max = 0;
545 scb->flags = 0;
546 scb->target = 0;
547 scb->lun = 0;
548 scb->buflen = 0;
549 scb->sg_size = 0;
550 scb->cmdlen = 0;
551 scb->scb_id = 0;
552 scb->scb_tagmsg = 0;
553 scb->timeout = 0;
554 scb->bufaddr = 0;
555
556 scb->xs = NULL;
557 scb->tcs = NULL;
558
559 memset(scb->cmd, 0, sizeof(scb->cmd));
560 memset(scb->sglist, 0, sizeof(scb->sglist));
561
562 /*
563 * scb_tagid, sgoffset, sg_addr, sglist and dmap
564 * are set at initialization
565 * and never change
566 */
567
568 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain);
569
570 splx(s);
571 }
572
573 static __inline void
574 tul_append_pend_scb(sc, scb)
575 struct iha_softc *sc;
576 struct iha_scsi_req_q *scb;
577 {
578 /* ASSUMPTION: only called within a splbio()/splx() pair */
579
580 if (scb == sc->sc_actscb)
581 sc->sc_actscb = NULL;
582
583 scb->status = STATUS_QUEUED;
584
585 TAILQ_INSERT_TAIL(&sc->sc_pendscb, scb, chain);
586 }
587
588 static __inline void
589 tul_push_pend_scb(sc, scb)
590 struct iha_softc *sc;
591 struct iha_scsi_req_q *scb;
592 {
593 int s;
594
595 s = splbio();
596
597 if (scb == sc->sc_actscb)
598 sc->sc_actscb = NULL;
599
600 scb->status = STATUS_QUEUED;
601
602 TAILQ_INSERT_HEAD(&sc->sc_pendscb, scb, chain);
603
604 splx(s);
605 }
606
607 /*
608 * tul_find_pend_scb - scan the pending queue for a SCB that can be
609 * processed immediately. Return NULL if none found
610 * and a pointer to the SCB if one is found. If there
611 * is an active SCB, return NULL!
612 */
613 static struct iha_scsi_req_q *
614 tul_find_pend_scb(sc)
615 struct iha_softc *sc;
616 {
617 struct iha_scsi_req_q *scb;
618 struct tcs *tcs;
619 int s;
620
621 s = splbio();
622
623 if (sc->sc_actscb != NULL)
624 scb = NULL;
625
626 else
627 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) {
628 if ((scb->flags & XS_CTL_RESET) != 0)
629 /* ALWAYS willing to reset a device */
630 break;
631
632 tcs = scb->tcs;
633
634 if ((scb->scb_tagmsg) != 0) {
635 /*
636 * A Tagged I/O. OK to start if no
637 * non-tagged I/O is active on the same
638 * target
639 */
640 if (tcs->ntagscb == NULL)
641 break;
642
643 } else if (scb->cmd[0] == REQUEST_SENSE) {
644 /*
645 * OK to do a non-tagged request sense
646 * even if a non-tagged I/O has been
647 * started, 'cuz we don't allow any
648 * disconnect during a request sense op
649 */
650 break;
651
652 } else if (tcs->tagcnt == 0) {
653 /*
654 * No tagged I/O active on this target,
655 * ok to start a non-tagged one if one
656 * is not already active
657 */
658 if (tcs->ntagscb == NULL)
659 break;
660 }
661 }
662
663 splx(s);
664
665 return (scb);
666 }
667
668 /*
669 * tul_del_pend_scb - remove scb from sc_pendscb
670 */
671 static __inline void
672 tul_del_pend_scb(sc, scb)
673 struct iha_softc *sc;
674 struct iha_scsi_req_q *scb;
675 {
676 int s;
677
678 s = splbio();
679
680 TAILQ_REMOVE(&sc->sc_pendscb, scb, chain);
681
682 splx(s);
683 }
684
685 static __inline void
686 tul_mark_busy_scb(scb)
687 struct iha_scsi_req_q *scb;
688 {
689 int s;
690
691 s = splbio();
692
693 scb->status = STATUS_BUSY;
694
695 if (scb->scb_tagmsg == 0)
696 scb->tcs->ntagscb = scb;
697 else
698 scb->tcs->tagcnt++;
699
700 splx(s);
701 }
702
703 static void
704 tul_append_done_scb(sc, scb, hastat)
705 struct iha_softc *sc;
706 struct iha_scsi_req_q *scb;
707 u_int8_t hastat;
708 {
709 struct tcs *tcs;
710 int s;
711
712 s = splbio();
713
714 if (scb->xs != NULL)
715 callout_stop(&scb->xs->xs_callout);
716
717 if (scb == sc->sc_actscb)
718 sc->sc_actscb = NULL;
719
720 tcs = scb->tcs;
721
722 if (scb->scb_tagmsg != 0) {
723 if (tcs->tagcnt)
724 tcs->tagcnt--;
725 } else if (tcs->ntagscb == scb)
726 tcs->ntagscb = NULL;
727
728 scb->status = STATUS_QUEUED;
729 scb->ha_stat = hastat;
730
731 TAILQ_INSERT_TAIL(&sc->sc_donescb, scb, chain);
732
733 splx(s);
734 }
735
736 static __inline struct iha_scsi_req_q *
737 tul_pop_done_scb(sc)
738 struct iha_softc *sc;
739 {
740 struct iha_scsi_req_q *scb;
741 int s;
742
743 s = splbio();
744
745 scb = TAILQ_FIRST(&sc->sc_donescb);
746
747 if (scb != NULL) {
748 scb->status = STATUS_RENT;
749 TAILQ_REMOVE(&sc->sc_donescb, scb, chain);
750 }
751
752 splx(s);
753
754 return (scb);
755 }
756
757 /*
758 * tul_abort_xs - find the SCB associated with the supplied xs and
759 * stop all processing on it, moving it to the done
760 * queue with the supplied host status value.
761 */
762 static void
763 tul_abort_xs(sc, xs, hastat)
764 struct iha_softc *sc;
765 struct scsipi_xfer *xs;
766 u_int8_t hastat;
767 {
768 struct iha_scsi_req_q *scb;
769 int i, s;
770
771 s = splbio();
772
773 /* Check the pending queue for the SCB pointing to xs */
774
775 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain)
776 if (scb->xs == xs) {
777 tul_del_pend_scb(sc, scb);
778 tul_append_done_scb(sc, scb, hastat);
779 splx(s);
780 return;
781 }
782
783 /*
784 * If that didn't work, check all BUSY/SELECTING SCB's for one
785 * pointing to xs
786 */
787
788 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
789 switch (scb->status) {
790 case STATUS_BUSY:
791 case STATUS_SELECT:
792 if (scb->xs == xs) {
793 tul_append_done_scb(sc, scb, hastat);
794 splx(s);
795 return;
796 }
797 break;
798 default:
799 break;
800 }
801
802 splx(s);
803 }
804
805 /*
806 * tul_bad_seq - a SCSI bus phase was encountered out of the
807 * correct/expected sequence. Reset the SCSI bus.
808 */
809 static void
810 tul_bad_seq(sc)
811 struct iha_softc *sc;
812 {
813 struct iha_scsi_req_q *scb = sc->sc_actscb;
814
815 if (scb != NULL)
816 tul_append_done_scb(sc, scb, HOST_BAD_PHAS);
817
818 tul_reset_scsi_bus(sc);
819 tul_reset_chip(sc);
820 }
821
822 /*
823 * tul_push_sense_request - obtain auto sense data by pushing the
824 * SCB needing it back onto the pending
825 * queue with a REQUEST_SENSE CDB.
826 */
827 static int
828 tul_push_sense_request(sc, scb)
829 struct iha_softc *sc;
830 struct iha_scsi_req_q *scb;
831 {
832 struct scsipi_xfer *xs = scb->xs;
833 struct scsipi_periph *periph = xs->xs_periph;
834 struct scsipi_sense *ss = (struct scsipi_sense *)scb->cmd;
835 int lun = periph->periph_lun;
836 int err;
837
838 ss->opcode = REQUEST_SENSE;
839 ss->byte2 = lun << SCSI_CMD_LUN_SHIFT;
840 ss->unused[0] = ss->unused[1] = 0;
841 ss->length = sizeof(struct scsipi_sense_data);
842 ss->control = 0;
843
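/*
 * Turn the SCB into a plain, single-segment data-in transfer of the sense
 * bytes: no scatter/gather, no disconnect privilege, no tag, and
 * FLAG_RSENS so the completion path can tell that this was an automatic
 * REQUEST SENSE.
 */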
844 scb->flags &= ~(FLAG_SG | XS_CTL_DATA_OUT);
845 scb->flags |= FLAG_RSENS | XS_CTL_DATA_IN;
846
847 scb->scb_id &= ~MSG_IDENTIFY_DISCFLAG;
848
849 scb->scb_tagmsg = 0;
850 scb->ta_stat = SCSI_OK;
851
852 scb->cmdlen = sizeof(struct scsipi_sense);
853 scb->buflen = ss->length;
854
855 err = bus_dmamap_load(sc->sc_dmat, scb->dmap,
856 &xs->sense.scsi_sense, scb->buflen, NULL,
857 BUS_DMA_READ|BUS_DMA_NOWAIT);
858 if (err != 0) {
859 printf("iha_push_sense_request: cannot bus_dmamap_load()\n");
860 xs->error = XS_DRIVER_STUFFUP;
861 return 1;
862 }
863 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
864 0, scb->buflen, BUS_DMASYNC_PREREAD);
865
866 /* XXX What about queued command? */
867 tul_exec_scb(sc, scb);
868
869 return 0;
870 }
871
872 /*
873 * tul_main - service the SCSI state machine, process any done SCBs that
874 * result (taking one off the pending queue and making it active
875 * as necessary), and repeat until there are no interrupts pending
876 * and no pending SCB that can be started.
877 */
878 static void
879 tul_main(sc)
880 struct iha_softc *sc;
881 {
882 bus_space_tag_t iot = sc->sc_iot;
883 bus_space_handle_t ioh = sc->sc_ioh;
884 struct iha_scsi_req_q *scb;
885
886 for (;;) {
887 tul_scsi(sc);
888
889 while ((scb = tul_pop_done_scb(sc)) != NULL)
890 tul_done_scb(sc, scb);
891
892 /*
893 * If there are no interrupts pending and we can't start
894 * a pending SCB, break out of the for(;;). Otherwise
895 * continue the good work with another call to
896 * tul_scsi().
897 */
898 if (((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0)
899 && (tul_find_pend_scb(sc) == NULL))
900 break;
901 }
902 }
903
904 /*
905 * tul_scsi - service any outstanding interrupts. If there are none, try to
906 * start another SCB currently in the pending queue.
907 */
908 static void
909 tul_scsi(sc)
910 struct iha_softc *sc;
911 {
912 bus_space_tag_t iot = sc->sc_iot;
913 bus_space_handle_t ioh = sc->sc_ioh;
914 struct iha_scsi_req_q *scb;
915 struct tcs *tcs;
916 u_int8_t stat;
917
918 /* service pending interrupts asap */
919
920 stat = bus_space_read_1(iot, ioh, TUL_STAT0);
921 if ((stat & INTPD) != 0) {
922 sc->sc_status0 = stat;
923 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
924 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
925
926 sc->sc_phase = sc->sc_status0 & PH_MASK;
927
928 if ((sc->sc_sistat & SRSTD) != 0) {
929 tul_reset_scsi_bus(sc);
930 return;
931 }
932
933 if ((sc->sc_sistat & RSELED) != 0) {
934 tul_resel(sc);
935 return;
936 }
937
938 if ((sc->sc_sistat & (STIMEO | DISCD)) != 0) {
939 tul_busfree(sc);
940 return;
941 }
942
943 if ((sc->sc_sistat & (SCMDN | SBSRV)) != 0) {
944 tul_next_state(sc);
945 return;
946 }
947
948 if ((sc->sc_sistat & SELED) != 0)
949 tul_set_ssig(sc, 0, 0);
950 }
951
952 /*
953 * There were no interrupts pending which required action elsewhere, so
954 * see if it is possible to start the selection phase on a pending SCB
955 */
956 if ((scb = tul_find_pend_scb(sc)) == NULL)
957 return;
958
959 tcs = scb->tcs;
960
961 /* program HBA's SCSI ID & target SCSI ID */
962 bus_space_write_1(iot, ioh, TUL_SID, (sc->sc_id << 4) | scb->target);
963
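/*
 * Choose how to select the target: SELATNSTOP when wide/sync negotiation
 * is still outstanding or a bus device reset was requested (the messages
 * are then sent by hand in state 1 or 8), SEL_ATN3 for tagged commands,
 * and plain SEL_ATN otherwise.  tul_select() (defined later in the file)
 * presumably preloads the identify/tag bytes into the FIFO for the
 * SEL_ATN variants.
 */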
964 if ((scb->flags & XS_CTL_RESET) == 0) {
965 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
966
967 if ((tcs->flags & FLAG_NO_NEG_SYNC) == 0 ||
968 (tcs->flags & FLAG_NO_NEG_WIDE) == 0)
969 tul_select(sc, scb, SELATNSTOP);
970
971 else if (scb->scb_tagmsg != 0)
972 tul_select(sc, scb, SEL_ATN3);
973
974 else
975 tul_select(sc, scb, SEL_ATN);
976
977 } else {
978 tul_select(sc, scb, SELATNSTOP);
979 scb->nextstat = 8;
980 }
981
982 if ((scb->flags & XS_CTL_POLL) != 0) {
983 for (; scb->timeout > 0; scb->timeout--) {
984 if (tul_wait(sc, NO_OP) == -1)
985 break;
986 if (tul_next_state(sc) == -1)
987 break;
988 delay(1000); /* Only happens in boot, so it's ok */
989 }
990
991 /*
992 * Since done queue processing is not done until AFTER this
993 * function returns, scb is on the done queue, not
994 * the free queue at this point and still has valid data.
995 *
996 * Likewise, xs->error has not been set yet
997 */
998 if (scb->timeout == 0)
999 tul_timeout(scb);
1000 }
1001 }
1002
1003 /*
1004 * tul_data_over_run - return HOST_OK for all SCSI opcodes where BufLen
1005 * is an 'Allocation Length'. All other SCSI opcodes
1006 * get HOST_DO_DU as they SHOULD have xferred all the
1007 * data requested.
1008 *
1009 * The list of opcodes using 'Allocation Length' was
1010 * found by scanning all the SCSI-3 T10 drafts. See
1011 * www.t10.org for the curious with a .pdf reader.
1012 */
1013 static u_int8_t
1014 tul_data_over_run(scb)
1015 struct iha_scsi_req_q *scb;
1016 {
1017 switch (scb->cmd[0]) {
1018 case 0x03: /* Request Sense SPC-2 */
1019 case 0x12: /* Inquiry SPC-2 */
1020 case 0x1a: /* Mode Sense (6 byte version) SPC-2 */
1021 case 0x1c: /* Receive Diagnostic Results SPC-2 */
1022 case 0x23: /* Read Format Capacities MMC-2 */
1023 case 0x29: /* Read Generation SBC */
1024 case 0x34: /* Read Position SSC-2 */
1025 case 0x37: /* Read Defect Data SBC */
1026 case 0x3c: /* Read Buffer SPC-2 */
1027 case 0x42: /* Read Sub Channel MMC-2 */
1028 case 0x43: /* Read TOC/PMA/ATIP MMC */
1029
1030 /* XXX - 2 with same opcode of 0x44? */
1031 case 0x44: /* Read Header/Read Density Suprt MMC/SSC*/
1032
1033 case 0x46: /* Get Configuration MMC-2 */
1034 case 0x4a: /* Get Event/Status Notification MMC-2 */
1035 case 0x4d: /* Log Sense SPC-2 */
1036 case 0x51: /* Read Disc Information MMC */
1037 case 0x52: /* Read Track Information MMC */
1038 case 0x59: /* Read Master CUE MMC */
1039 case 0x5a: /* Mode Sense (10 byte version) SPC-2 */
1040 case 0x5c: /* Read Buffer Capacity MMC */
1041 case 0x5e: /* Persistent Reserve In SPC-2 */
1042 case 0x84: /* Receive Copy Results SPC-2 */
1043 case 0xa0: /* Report LUNs SPC-2 */
1044 case 0xa3: /* Various Report requests SBC-2/SCC-2*/
1045 case 0xa4: /* Report Key MMC-2 */
1046 case 0xad: /* Read DVD Structure MMC-2 */
1047 case 0xb4: /* Read Element Status (Attached) SMC */
1048 case 0xb5: /* Request Volume Element Address SMC */
1049 case 0xb7: /* Read Defect Data (12 byte ver.) SBC */
1050 case 0xb8: /* Read Element Status (Independ.) SMC */
1051 case 0xba: /* Report Redundancy SCC-2 */
1052 case 0xbd: /* Mechanism Status MMC */
1053 case 0xbe: /* Report Basic Redundancy SCC-2 */
1054
1055 return (HOST_OK);
1056 break;
1057
1058 default:
1059 return (HOST_DO_DU);
1060 break;
1061 }
1062 }
1063
1064 /*
1065 * tul_next_state - process the current SCB as requested in its
1066 * nextstat member.
1067 */
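/*
 * nextstat values used below (see the individual tul_state_N() comments):
 *  1 - selection completed after SELATNSTOP
 *  2 - selection completed after SEL_ATN/SEL_ATN3
 *  3 - send the SCSI CDB
 *  4 - start the data transfer
 *  5 - data transfer completion/bookkeeping
 *  6 - finish off the SCB
 *  8 - reset the selected device (MSG_BUS_DEV_RESET)
 */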
1068 static int
1069 tul_next_state(sc)
1070 struct iha_softc *sc;
1071 {
1072
1073 if (sc->sc_actscb == NULL)
1074 return (-1);
1075
1076 switch (sc->sc_actscb->nextstat) {
1077 case 1:
1078 if (tul_state_1(sc) == 3)
1079 goto state_3;
1080 break;
1081
1082 case 2:
1083 switch (tul_state_2(sc)) {
1084 case 3:
1085 goto state_3;
1086 case 4:
1087 goto state_4;
1088 default:
1089 break;
1090 }
1091 break;
1092
1093 case 3:
1094 state_3:
1095 if (tul_state_3(sc) == 4)
1096 goto state_4;
1097 break;
1098
1099 case 4:
1100 state_4:
1101 switch (tul_state_4(sc)) {
1102 case 0:
1103 return (0);
1104 case 6:
1105 goto state_6;
1106 default:
1107 break;
1108 }
1109 break;
1110
1111 case 5:
1112 switch (tul_state_5(sc)) {
1113 case 4:
1114 goto state_4;
1115 case 6:
1116 goto state_6;
1117 default:
1118 break;
1119 }
1120 break;
1121
1122 case 6:
1123 state_6:
1124 tul_state_6(sc);
1125 break;
1126
1127 case 8:
1128 tul_state_8(sc);
1129 break;
1130
1131 default:
1132 #ifdef IHA_DEBUG_STATE
1133 printf("[debug] -unknown state: %i-\n",
1134 sc->sc_actscb->nextstat);
1135 #endif
1136 tul_bad_seq(sc);
1137 break;
1138 }
1139
1140 return (-1);
1141 }
1142
1143 /*
1144 * tul_state_1 - selection is complete after a SELATNSTOP. If the target
1145 * has put the bus into MSG_OUT phase start wide/sync
1146 * negotiation. Otherwise clear the FIFO and go to state 3,
1147 * which will send the SCSI CDB to the target.
1148 */
1149 static int
1150 tul_state_1(sc)
1151 struct iha_softc *sc;
1152 {
1153 bus_space_tag_t iot = sc->sc_iot;
1154 bus_space_handle_t ioh = sc->sc_ioh;
1155 struct iha_scsi_req_q *scb = sc->sc_actscb;
1156 struct tcs *tcs;
1157 int flags;
1158
1159 tul_mark_busy_scb(scb);
1160
1161 tcs = scb->tcs;
1162
1163 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
1164
1165 /*
1166 * If we are in PHASE_MSG_OUT, send
1167 * a) IDENT message (with tags if appropriate)
1168 * b) WDTR if the target is configured to negotiate wide xfers
1169 * ** OR **
1170 * c) SDTR if the target is configured to negotiate sync xfers
1171 * but not wide ones
1172 *
1173 * If we are NOT, then the target is not asking for anything but
1174 * the data/command, so go straight to state 3.
1175 */
1176 if (sc->sc_phase == PHASE_MSG_OUT) {
1177 bus_space_write_1(iot, ioh, TUL_SCTRL1, (ESBUSIN | EHRSL));
1178 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
1179
1180 if (scb->scb_tagmsg != 0) {
1181 bus_space_write_1(iot, ioh, TUL_SFIFO,
1182 scb->scb_tagmsg);
1183 bus_space_write_1(iot, ioh, TUL_SFIFO,
1184 scb->scb_tagid);
1185 }
1186
1187 flags = tcs->flags;
1188 if ((flags & FLAG_NO_NEG_WIDE) == 0) {
1189 if (tul_msgout_wide(sc) == -1)
1190 return (-1);
1191 } else if ((flags & FLAG_NO_NEG_SYNC) == 0) {
1192 if (tul_msgout_sync(sc) == -1)
1193 return (-1);
1194 }
1195
1196 } else {
1197 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1198 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1199 }
1200
1201 return (3);
1202 }
1203
1204 /*
1205 * tul_state_2 - selection is complete after a SEL_ATN or SEL_ATN3. If the SCSI
1206 * CDB has already been sent, go to state 4 to start the data
1207 * xfer. Otherwise reset the FIFO and go to state 3, sending
1208 * the SCSI CDB.
1209 */
1210 static int
1211 tul_state_2(sc)
1212 struct iha_softc *sc;
1213 {
1214 bus_space_tag_t iot = sc->sc_iot;
1215 bus_space_handle_t ioh = sc->sc_ioh;
1216 struct iha_scsi_req_q *scb = sc->sc_actscb;
1217
1218 tul_mark_busy_scb(scb);
1219
1220 bus_space_write_1(iot, ioh, TUL_SCONFIG0, scb->tcs->sconfig0);
1221
1222 if ((sc->sc_status1 & CPDNE) != 0)
1223 return (4);
1224
1225 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1226
1227 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1228
1229 return (3);
1230 }
1231
1232 /*
1233 * tul_state_3 - send the SCSI CDB to the target, processing any status
1234 * or other messages received until that is done or
1235 * abandoned.
1236 */
1237 static int
1238 tul_state_3(sc)
1239 struct iha_softc *sc;
1240 {
1241 bus_space_tag_t iot = sc->sc_iot;
1242 bus_space_handle_t ioh = sc->sc_ioh;
1243 struct iha_scsi_req_q *scb = sc->sc_actscb;
1244 int flags;
1245
1246 for (;;) {
1247 switch (sc->sc_phase) {
1248 case PHASE_CMD_OUT:
1249 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
1250 scb->cmd, scb->cmdlen);
1251 if (tul_wait(sc, XF_FIFO_OUT) == -1)
1252 return (-1);
1253 else if (sc->sc_phase == PHASE_CMD_OUT) {
1254 tul_bad_seq(sc);
1255 return (-1);
1256 } else
1257 return (4);
1258
1259 case PHASE_MSG_IN:
1260 scb->nextstat = 3;
1261 if (tul_msgin(sc) == -1)
1262 return (-1);
1263 break;
1264
1265 case PHASE_STATUS_IN:
1266 if (tul_status_msg(sc) == -1)
1267 return (-1);
1268 break;
1269
1270 case PHASE_MSG_OUT:
1271 flags = scb->tcs->flags;
1272 if ((flags & FLAG_NO_NEG_SYNC) != 0) {
1273 if (tul_msgout(sc, MSG_NOOP) == -1)
1274 return (-1);
1275 } else if (tul_msgout_sync(sc) == -1)
1276 return (-1);
1277 break;
1278
1279 default:
1280 printf("[debug] -s3- bad phase = %d\n", sc->sc_phase);
1281 tul_bad_seq(sc);
1282 return (-1);
1283 }
1284 }
1285 }
1286
1287 /*
1288 * tul_state_4 - start a data xfer. Handle any bus state
1289 * transitions until PHASE_DATA_IN/_OUT
1290 * or the attempt is abandoned. If there is
1291 * no data to xfer, go to state 6 and finish
1292 * processing the current SCB.
1293 */
1294 static int
1295 tul_state_4(sc)
1296 struct iha_softc *sc;
1297 {
1298 struct iha_scsi_req_q *scb = sc->sc_actscb;
1299
1300 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
1301 (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1302 return (6); /* Both dir flags set => NO xfer was requested */
1303
1304 for (;;) {
1305 if (scb->buflen == 0)
1306 return (6);
1307
1308 switch (sc->sc_phase) {
1309 case PHASE_STATUS_IN:
1310 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
1311 != 0)
1312 scb->ha_stat = tul_data_over_run(scb);
1313 if ((tul_status_msg(sc)) == -1)
1314 return (-1);
1315 break;
1316
1317 case PHASE_MSG_IN:
1318 scb->nextstat = 4;
1319 if (tul_msgin(sc) == -1)
1320 return (-1);
1321 break;
1322
1323 case PHASE_MSG_OUT:
1324 if ((sc->sc_status0 & SPERR) != 0) {
1325 scb->buflen = 0;
1326 scb->ha_stat = HOST_SPERR;
1327 if (tul_msgout(sc, MSG_INITIATOR_DET_ERR) == -1)
1328 return (-1);
1329 else
1330 return (6);
1331 } else {
1332 if (tul_msgout(sc, MSG_NOOP) == -1)
1333 return (-1);
1334 }
1335 break;
1336
1337 case PHASE_DATA_IN:
1338 return (tul_xfer_data(sc, scb, XS_CTL_DATA_IN));
1339
1340 case PHASE_DATA_OUT:
1341 return (tul_xfer_data(sc, scb, XS_CTL_DATA_OUT));
1342
1343 default:
1344 tul_bad_seq(sc);
1345 return (-1);
1346 }
1347 }
1348 }
1349
1350 /*
1351 * tul_state_5 - handle the partial or final completion of the current
1352 * data xfer. If DMA is still active stop it. If there is
1353 * more data to xfer, go to state 4 and start the xfer.
1354 * If not go to state 6 and finish the SCB.
1355 */
1356 static int
1357 tul_state_5(sc)
1358 struct iha_softc *sc;
1359 {
1360 bus_space_tag_t iot = sc->sc_iot;
1361 bus_space_handle_t ioh = sc->sc_ioh;
1362 struct iha_scsi_req_q *scb = sc->sc_actscb;
1363 struct iha_sg_element *sg;
1364 u_int32_t cnt;
1365 u_int8_t period, stat;
1366 long xcnt; /* cannot use unsigned!! see code: if (xcnt < 0) */
1367 int i;
1368
1369 cnt = bus_space_read_4(iot, ioh, TUL_STCNT0) & TCNT;
1370
1371 /*
1372 * Stop any pending DMA activity and check for parity error.
1373 */
1374
1375 if ((bus_space_read_1(iot, ioh, TUL_DCMD) & XDIR) != 0) {
1376 /* Input Operation */
1377 if ((sc->sc_status0 & SPERR) != 0)
1378 scb->ha_stat = HOST_SPERR;
1379
1380 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
1381 bus_space_write_1(iot, ioh, TUL_DCTRL0,
1382 bus_space_read_1(iot, ioh, TUL_DCTRL0) | SXSTP);
1383 while (bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND)
1384 ;
1385 }
1386
1387 } else {
1388 /* Output Operation */
1389 if ((sc->sc_status1 & SXCMP) == 0) {
1390 period = scb->tcs->syncm;
1391 if ((period & PERIOD_WIDE_SCSI) != 0)
1392 cnt += (bus_space_read_1(iot, ioh,
1393 TUL_SFIFOCNT) & FIFOC) * 2;
1394 else
1395 cnt += bus_space_read_1(iot, ioh,
1396 TUL_SFIFOCNT) & FIFOC;
1397 }
1398
1399 if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
1400 bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
1401 do
1402 stat = bus_space_read_1(iot, ioh, TUL_ISTUS0);
1403 while ((stat & DABT) == 0);
1404 }
1405
1406 if ((cnt == 1) && (sc->sc_phase == PHASE_DATA_OUT)) {
1407 if (tul_wait(sc, XF_FIFO_OUT) == -1)
1408 return (-1);
1409 cnt = 0;
1410
1411 } else if ((sc->sc_status1 & SXCMP) == 0)
1412 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1413 }
1414
1415 if (cnt == 0) {
1416 scb->buflen = 0;
1417 return (6);
1418 }
1419
1420 /* Update active data pointer and restart the I/O at the new point */
1421
1422 xcnt = scb->buflen - cnt; /* xcnt == bytes xferred */
1423 scb->buflen = cnt; /* cnt == bytes left */
1424
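/*
 * Walk the SG list from the current segment, subtracting each segment
 * length from the bytes actually transferred; the segment where the count
 * goes negative is where the transfer stopped.  Advance that segment's
 * start address past the bytes already transferred, shrink its length to
 * the residue, point bufaddr at its descriptor within the DMA'd SG table,
 * and let state 4 restart the transfer from there.
 */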
1425 if ((scb->flags & FLAG_SG) != 0) {
1426 sg = &scb->sglist[scb->sg_index];
1427 for (i = scb->sg_index; i < scb->sg_max; sg++, i++) {
1428 xcnt -= le32toh(sg->sg_len);
1429 if (xcnt < 0) {
1430 xcnt += le32toh(sg->sg_len);
1431
1432 sg->sg_addr =
1433 htole32(le32toh(sg->sg_addr) + xcnt);
1434 sg->sg_len =
1435 htole32(le32toh(sg->sg_len) - xcnt);
1436 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1437 scb->sgoffset, IHA_SG_SIZE,
1438 BUS_DMASYNC_PREWRITE);
1439
1440 scb->bufaddr += (i - scb->sg_index) *
1441 sizeof(struct iha_sg_element);
1442 scb->sg_size = scb->sg_max - i;
1443 scb->sg_index = i;
1444
1445 return (4);
1446 }
1447 }
1448 return (6);
1449
1450 } else
1451 scb->bufaddr += xcnt;
1452
1453 return (4);
1454 }
1455
1456 /*
1457 * tul_state_6 - finish off the active scb (may require several
1458 * iterations if PHASE_MSG_IN) and return -1 to indicate
1459 * the bus is free.
1460 */
1461 static int
1462 tul_state_6(sc)
1463 struct iha_softc *sc;
1464 {
1465
1466 for (;;) {
1467 switch (sc->sc_phase) {
1468 case PHASE_STATUS_IN:
1469 if (tul_status_msg(sc) == -1)
1470 return (-1);
1471 break;
1472
1473 case PHASE_MSG_IN:
1474 sc->sc_actscb->nextstat = 6;
1475 if ((tul_msgin(sc)) == -1)
1476 return (-1);
1477 break;
1478
1479 case PHASE_MSG_OUT:
1480 if ((tul_msgout(sc, MSG_NOOP)) == -1)
1481 return (-1);
1482 break;
1483
1484 case PHASE_DATA_IN:
1485 if (tul_xpad_in(sc) == -1)
1486 return (-1);
1487 break;
1488
1489 case PHASE_DATA_OUT:
1490 if (tul_xpad_out(sc) == -1)
1491 return (-1);
1492 break;
1493
1494 default:
1495 tul_bad_seq(sc);
1496 return (-1);
1497 }
1498 }
1499 }
1500
1501 /*
1502 * tul_state_8 - reset the active device and all busy SCBs using it
1503 */
1504 static int
1505 tul_state_8(sc)
1506 struct iha_softc *sc;
1507 {
1508 bus_space_tag_t iot = sc->sc_iot;
1509 bus_space_handle_t ioh = sc->sc_ioh;
1510 struct iha_scsi_req_q *scb;
1511 int i;
1512 u_int8_t tar;
1513
1514 if (sc->sc_phase == PHASE_MSG_OUT) {
1515 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_BUS_DEV_RESET);
1516
1517 scb = sc->sc_actscb;
1518
1519 /* This SCB finished correctly -- resetting the device */
1520 tul_append_done_scb(sc, scb, HOST_OK);
1521
1522 tul_reset_tcs(scb->tcs, sc->sc_sconf1);
1523
1524 tar = scb->target;
1525 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
1526 if (scb->target == tar)
1527 switch (scb->status) {
1528 case STATUS_BUSY:
1529 tul_append_done_scb(sc,
1530 scb, HOST_DEV_RST);
1531 break;
1532
1533 case STATUS_SELECT:
1534 tul_push_pend_scb(sc, scb);
1535 break;
1536
1537 default:
1538 break;
1539 }
1540
1541 sc->sc_flags |= FLAG_EXPECT_DISC;
1542
1543 if (tul_wait(sc, XF_FIFO_OUT) == -1)
1544 return (-1);
1545 }
1546
1547 tul_bad_seq(sc);
1548 return (-1);
1549 }
1550
1551 /*
1552 * tul_xfer_data - initiate the DMA xfer of the data
1553 */
1554 static int
1555 tul_xfer_data(sc, scb, direction)
1556 struct iha_softc *sc;
1557 struct iha_scsi_req_q *scb;
1558 int direction;
1559 {
1560 bus_space_tag_t iot = sc->sc_iot;
1561 bus_space_handle_t ioh = sc->sc_ioh;
1562 u_int32_t xferlen;
1563 u_int8_t xfertype;
1564
1565 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) != direction)
1566 return (6); /* wrong direction, abandon I/O */
1567
1568 bus_space_write_4(iot, ioh, TUL_STCNT0, scb->buflen);
1569
1570 if ((scb->flags & FLAG_SG) == 0) {
1571 xferlen = scb->buflen;
1572 xfertype = (direction == XS_CTL_DATA_IN) ? ST_X_IN : ST_X_OUT;
1573
1574 } else {
1575 xferlen = scb->sg_size * sizeof(struct iha_sg_element);
1576 xfertype = (direction == XS_CTL_DATA_IN) ? ST_SG_IN : ST_SG_OUT;
1577 }
1578
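/*
 * In scatter/gather mode the chip is given the address and length of the
 * SG descriptor table itself (ST_SG_IN/ST_SG_OUT) and bufaddr points into
 * that table; otherwise TUL_DXPA/TUL_DXC describe the data buffer directly.
 */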
1579 bus_space_write_4(iot, ioh, TUL_DXC, xferlen);
1580 bus_space_write_4(iot, ioh, TUL_DXPA, scb->bufaddr);
1581 bus_space_write_1(iot, ioh, TUL_DCMD, xfertype);
1582
1583 bus_space_write_1(iot, ioh, TUL_SCMD,
1584 (direction == XS_CTL_DATA_IN) ? XF_DMA_IN : XF_DMA_OUT);
1585
1586 scb->nextstat = 5;
1587
1588 return (0);
1589 }
1590
1591 static int
1592 tul_xpad_in(sc)
1593 struct iha_softc *sc;
1594 {
1595 bus_space_tag_t iot = sc->sc_iot;
1596 bus_space_handle_t ioh = sc->sc_ioh;
1597 struct iha_scsi_req_q *scb = sc->sc_actscb;
1598
1599 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) != 0)
1600 scb->ha_stat = HOST_DO_DU;
1601
1602 for (;;) {
1603 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
1604 bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
1605 else
1606 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1607
1608 switch (tul_wait(sc, XF_FIFO_IN)) {
1609 case -1:
1610 return (-1);
1611
1612 case PHASE_DATA_IN:
1613 bus_space_read_1(iot, ioh, TUL_SFIFO);
1614 break;
1615
1616 default:
1617 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1618 return (6);
1619 }
1620 }
1621 }
1622
1623 static int
1624 tul_xpad_out(sc)
1625 struct iha_softc *sc;
1626 {
1627 bus_space_tag_t iot = sc->sc_iot;
1628 bus_space_handle_t ioh = sc->sc_ioh;
1629 struct iha_scsi_req_q *scb = sc->sc_actscb;
1630
1631 if ((scb->flags & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) != 0)
1632 scb->ha_stat = HOST_DO_DU;
1633
1634 for (;;) {
1635 if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
1636 bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
1637 else
1638 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1639
1640 bus_space_write_1(iot, ioh, TUL_SFIFO, 0);
1641
1642 switch (tul_wait(sc, XF_FIFO_OUT)) {
1643 case -1:
1644 return (-1);
1645
1646 case PHASE_DATA_OUT:
1647 break;
1648
1649 default:
1650 /* Disable wide CPU to allow read 16 bits */
1651 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
1652 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1653 return (6);
1654 }
1655 }
1656 }
1657
1658 static int
1659 tul_status_msg(sc)
1660 struct iha_softc *sc;
1661 {
1662 bus_space_tag_t iot = sc->sc_iot;
1663 bus_space_handle_t ioh = sc->sc_ioh;
1664 struct iha_scsi_req_q *scb;
1665 u_int8_t msg;
1666 int phase;
1667
1668 if ((phase = tul_wait(sc, CMD_COMP)) == -1)
1669 return (-1);
1670
1671 scb = sc->sc_actscb;
1672
1673 scb->ta_stat = bus_space_read_1(iot, ioh, TUL_SFIFO);
1674
1675 if (phase == PHASE_MSG_OUT) {
1676 if ((sc->sc_status0 & SPERR) == 0)
1677 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_NOOP);
1678 else
1679 bus_space_write_1(iot, ioh, TUL_SFIFO,
1680 MSG_PARITY_ERROR);
1681
1682 return (tul_wait(sc, XF_FIFO_OUT));
1683
1684 } else if (phase == PHASE_MSG_IN) {
1685 msg = bus_space_read_1(iot, ioh, TUL_SFIFO);
1686
1687 if ((sc->sc_status0 & SPERR) != 0)
1688 switch (tul_wait(sc, MSG_ACCEPT)) {
1689 case -1:
1690 return (-1);
1691 case PHASE_MSG_OUT:
1692 bus_space_write_1(iot, ioh, TUL_SFIFO,
1693 MSG_PARITY_ERROR);
1694 return (tul_wait(sc, XF_FIFO_OUT));
1695 default:
1696 tul_bad_seq(sc);
1697 return (-1);
1698 }
1699
1700 if (msg == MSG_CMDCOMPLETE) {
1701 if ((scb->ta_stat &
1702 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) {
1703 tul_bad_seq(sc);
1704 return (-1);
1705 }
1706 sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
1707 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1708 return (tul_wait(sc, MSG_ACCEPT));
1709 }
1710
1711 if ((msg == MSG_LINK_CMD_COMPLETE)
1712 || (msg == MSG_LINK_CMD_COMPLETEF)) {
1713 if ((scb->ta_stat &
1714 (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM)
1715 return (tul_wait(sc, MSG_ACCEPT));
1716 }
1717 }
1718
1719 tul_bad_seq(sc);
1720 return (-1);
1721 }
1722
1723 /*
1724 * tul_busfree - SCSI bus free detected as a result of a TIMEOUT or
1725 * DISCONNECT interrupt. Reset the tulip FIFO and
1726 * SCONFIG0 and enable hardware reselect. Move any active
1727 * SCB to sc_donescb list. Return an appropriate host status
1728 * if an I/O was active.
1729 */
1730 static void
1731 tul_busfree(sc)
1732 struct iha_softc *sc;
1733 {
1734 bus_space_tag_t iot = sc->sc_iot;
1735 bus_space_handle_t ioh = sc->sc_ioh;
1736 struct iha_scsi_req_q *scb;
1737
1738 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1739 bus_space_write_1(iot, ioh, TUL_SCONFIG0, SCONFIG0DEFAULT);
1740 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
1741
1742 scb = sc->sc_actscb;
1743
1744 if (scb != NULL) {
1745 if (scb->status == STATUS_SELECT)
1746 /* selection timeout */
1747 tul_append_done_scb(sc, scb, HOST_SEL_TOUT);
1748 else
1749 /* Unexpected bus free */
1750 tul_append_done_scb(sc, scb, HOST_BAD_PHAS);
1751 }
1752 }
1753
1754 static void
1755 tul_reset_scsi_bus(sc)
1756 struct iha_softc *sc;
1757 {
1758 struct iha_scsi_req_q *scb;
1759 struct tcs *tcs;
1760 int i, s;
1761
1762 s = splbio();
1763
1764 tul_reset_dma(sc);
1765
1766 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
1767 switch (scb->status) {
1768 case STATUS_BUSY:
1769 tul_append_done_scb(sc, scb, HOST_SCSI_RST);
1770 break;
1771
1772 case STATUS_SELECT:
1773 tul_push_pend_scb(sc, scb);
1774 break;
1775
1776 default:
1777 break;
1778 }
1779
1780 for (i = 0, tcs = sc->sc_tcs; i < IHA_MAX_TARGETS; i++, tcs++)
1781 tul_reset_tcs(tcs, sc->sc_sconf1);
1782
1783 splx(s);
1784 }
1785
1786 /*
1787 * tul_resel - handle a detected SCSI bus reselection request.
1788 */
1789 static int
1790 tul_resel(sc)
1791 struct iha_softc *sc;
1792 {
1793 bus_space_tag_t iot = sc->sc_iot;
1794 bus_space_handle_t ioh = sc->sc_ioh;
1795 struct iha_scsi_req_q *scb;
1796 struct tcs *tcs;
1797 u_int8_t tag, target, lun, msg, abortmsg;
1798
1799 if (sc->sc_actscb != NULL) {
1800 if (sc->sc_actscb->status == STATUS_SELECT)
1801 /* sets ActScb to NULL */
1802 tul_push_pend_scb(sc, sc->sc_actscb);
1803 else
1804 sc->sc_actscb = NULL;
1805 }
1806
1807 target = bus_space_read_1(iot, ioh, TUL_SBID);
1808 lun = bus_space_read_1(iot, ioh, TUL_SALVC) & MSG_IDENTIFY_LUNMASK;
1809
1810 tcs = &sc->sc_tcs[target];
1811
1812 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
1813 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
1814
1815 abortmsg = MSG_ABORT; /* until a valid tag has been obtained */
1816
1817 if (tcs->ntagscb != NULL)
1818 /* There is a non-tagged I/O active on the target */
1819 scb = tcs->ntagscb;
1820
1821 else {
1822 /*
1823 * Since there is no active non-tagged operation
1824 * read the tag type, the tag itself, and find
1825 * the appropriate scb by indexing sc_scb with
1826 * the tag.
1827 */
1828
1829 switch (tul_wait(sc, MSG_ACCEPT)) {
1830 case -1:
1831 return (-1);
1832 case PHASE_MSG_IN:
1833 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1834 if ((tul_wait(sc, XF_FIFO_IN)) == -1)
1835 return (-1);
1836 break;
1837 default:
1838 goto abort;
1839 }
1840
1841 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag Msg */
1842
1843 if ((msg < MSG_SIMPLE_Q_TAG) || (msg > MSG_ORDERED_Q_TAG))
1844 goto abort;
1845
1846 switch (tul_wait(sc, MSG_ACCEPT)) {
1847 case -1:
1848 return (-1);
1849 case PHASE_MSG_IN:
1850 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1851 if ((tul_wait(sc, XF_FIFO_IN)) == -1)
1852 return (-1);
1853 break;
1854 default:
1855 goto abort;
1856 }
1857
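/*
 * The tag byte sent at command time is scb_tagid, which iha_attach() set
 * to the SCB's index in sc_scb, so it can be used directly as an array
 * index here.
 */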
1858 tag = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag ID */
1859 scb = &sc->sc_scb[tag];
1860
1861 abortmsg = MSG_ABORT_TAG; /* Now that we have a valid tag! */
1862 }
1863
1864 if ((scb->target != target)
1865 || (scb->lun != lun)
1866 || (scb->status != STATUS_BUSY)) {
1867 abort:
1868 tul_msgout_abort(sc, abortmsg);
1869 return (-1);
1870 }
1871
1872 sc->sc_actscb = scb;
1873
1874 if (tul_wait(sc, MSG_ACCEPT) == -1)
1875 return (-1);
1876
1877 return (tul_next_state(sc));
1878 }
1879
1880 static int
1881 tul_msgin(sc)
1882 struct iha_softc *sc;
1883 {
1884 bus_space_tag_t iot = sc->sc_iot;
1885 bus_space_handle_t ioh = sc->sc_ioh;
1886 int flags;
1887 int phase;
1888 u_int8_t msg;
1889
1890 for (;;) {
1891 if ((bus_space_read_1(iot, ioh, TUL_SFIFOCNT) & FIFOC) > 0)
1892 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1893
1894 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1895
1896 phase = tul_wait(sc, XF_FIFO_IN);
1897 msg = bus_space_read_1(iot, ioh, TUL_SFIFO);
1898
1899 switch (msg) {
1900 case MSG_DISCONNECT:
1901 sc->sc_flags |= FLAG_EXPECT_DISC;
1902 if (tul_wait(sc, MSG_ACCEPT) != -1)
1903 tul_bad_seq(sc);
1904 phase = -1;
1905 break;
1906 case MSG_SAVEDATAPOINTER:
1907 case MSG_RESTOREPOINTERS:
1908 case MSG_NOOP:
1909 phase = tul_wait(sc, MSG_ACCEPT);
1910 break;
1911 case MSG_MESSAGE_REJECT:
1912 /* XXX - need to clear FIFO like other 'Clear ATN'?*/
1913 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
1914 flags = sc->sc_actscb->tcs->flags;
1915 if ((flags & FLAG_NO_NEG_SYNC) == 0)
1916 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
1917 phase = tul_wait(sc, MSG_ACCEPT);
1918 break;
1919 case MSG_EXTENDED:
1920 phase = tul_msgin_extend(sc);
1921 break;
1922 case MSG_IGN_WIDE_RESIDUE:
1923 phase = tul_msgin_ignore_wid_resid(sc);
1924 break;
1925 case MSG_CMDCOMPLETE:
1926 sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
1927 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
1928 phase = tul_wait(sc, MSG_ACCEPT);
1929 if (phase != -1) {
1930 tul_bad_seq(sc);
1931 return (-1);
1932 }
1933 break;
1934 default:
1935 printf("[debug] tul_msgin: bad msg type: %d\n", msg);
1936 phase = tul_msgout_reject(sc);
1937 break;
1938 }
1939
1940 if (phase != PHASE_MSG_IN)
1941 return (phase);
1942 }
1943 /* NOTREACHED */
1944 }
1945
1946 static int
1947 tul_msgin_ignore_wid_resid(sc)
1948 struct iha_softc *sc;
1949 {
1950 bus_space_tag_t iot = sc->sc_iot;
1951 bus_space_handle_t ioh = sc->sc_ioh;
1952 int phase;
1953
1954 phase = tul_wait(sc, MSG_ACCEPT);
1955
1956 if (phase == PHASE_MSG_IN) {
1957 if (tul_wait(sc, XF_FIFO_IN) == -1)
1958 return (-1);
1959
1960 bus_space_write_1(iot, ioh, TUL_SFIFO, 0); /* put pad */
1961 bus_space_read_1(iot, ioh, TUL_SFIFO); /* get IGNORE */
1962 bus_space_read_1(iot, ioh, TUL_SFIFO); /* get pad */
1963
1964 return (tul_wait(sc, MSG_ACCEPT));
1965 }
1966 else
1967 return (phase);
1968 }
1969
1970 static int
1971 tul_msgin_extend(sc)
1972 struct iha_softc *sc;
1973 {
1974 bus_space_tag_t iot = sc->sc_iot;
1975 bus_space_handle_t ioh = sc->sc_ioh;
1976 int flags, i, phase, msglen, msgcode;
1977
1978 /*
1979 * XXX - can we just stop reading and reject, or do we have to
1980 * read all input, discarding the excess, and then reject
1981 */
1982 for (i = 0; i < IHA_MAX_EXTENDED_MSG; i++) {
1983 phase = tul_wait(sc, MSG_ACCEPT);
1984
1985 if (phase != PHASE_MSG_IN)
1986 return (phase);
1987
1988 bus_space_write_4(iot, ioh, TUL_STCNT0, 1);
1989
1990 if (tul_wait(sc, XF_FIFO_IN) == -1)
1991 return (-1);
1992
1993 sc->sc_msg[i] = bus_space_read_1(iot, ioh, TUL_SFIFO);
1994
1995 if (sc->sc_msg[0] == i)
1996 break;
1997 }
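/*
 * sc_msg[0] is the extended message length byte and sc_msg[1] the extended
 * message code; the loop above stops once that many further bytes have
 * been read after the length byte itself.
 */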
1998
1999 msglen = sc->sc_msg[0];
2000 msgcode = sc->sc_msg[1];
2001
2002 if ((msglen == MSG_EXT_SDTR_LEN) && (msgcode == MSG_EXT_SDTR)) {
2003 if (tul_msgin_sync(sc) == 0) {
2004 tul_sync_done(sc);
2005 return (tul_wait(sc, MSG_ACCEPT));
2006 }
2007
2008 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2009
2010 phase = tul_wait(sc, MSG_ACCEPT);
2011 if (phase != PHASE_MSG_OUT)
2012 return (phase);
2013
2014 /* Clear FIFO for important message - final SYNC offer */
2015 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2016
2017 tul_sync_done(sc); /* This is our final offer */
2018
2019 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);
2020 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_SDTR_LEN);
2021 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_SDTR);
2022 bus_space_write_1(iot, ioh, TUL_SFIFO, sc->sc_msg[2]);
2023 bus_space_write_1(iot, ioh, TUL_SFIFO, sc->sc_msg[3]);
2024
2025 } else if ((msglen == MSG_EXT_WDTR_LEN) && (msgcode == MSG_EXT_WDTR)) {
2026
2027 flags = sc->sc_actscb->tcs->flags;
2028
2029 if ((flags & FLAG_NO_WIDE) != 0)
2030 sc->sc_msg[2] = 0; /* Offer async xfers only */
2031
2032 else if (sc->sc_msg[2] > 2) /* BAD MSG: 2 is max value */
2033 return (tul_msgout_reject(sc));
2034
2035 else if (sc->sc_msg[2] == 2) /* a request for 32 bit xfers*/
2036 sc->sc_msg[2] = 1; /* Offer 16 instead */
2037
2038 else {
2039 tul_wdtr_done(sc);
2040 if ((flags & FLAG_NO_NEG_SYNC) == 0)
2041 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2042 return (tul_wait(sc, MSG_ACCEPT));
2043 }
2044
2045 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2046
2047 phase = tul_wait(sc, MSG_ACCEPT);
2048 if (phase != PHASE_MSG_OUT)
2049 return (phase);
2050
2051 /* WDTR msg out */
2052 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);
2053 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_WDTR_LEN);
2054 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_WDTR);
2055 bus_space_write_1(iot, ioh, TUL_SFIFO, sc->sc_msg[2]);
2056
2057 } else
2058 return (tul_msgout_reject(sc));
2059
2060 return (tul_wait(sc, XF_FIFO_OUT));
2061 }
2062
2063 /*
2064 * tul_msgin_sync - check SDTR msg in sc_msg. If the offer is
2065 * acceptable leave sc_msg as is and return 0.
2066 * If the negotiation must continue, modify sc_msg
2067 * as needed and return 1.
2068 */
2069 static int
2070 tul_msgin_sync(sc)
2071 struct iha_softc *sc;
2072 {
2073 int flags;
2074 int newoffer;
2075 u_int8_t default_period;
2076
2077 flags = sc->sc_actscb->tcs->flags;
2078
2079 default_period = tul_rate_tbl[flags & FLAG_SCSI_RATE];
2080
2081 if (sc->sc_msg[3] == 0) /* target offered async only. Accept it. */
2082 return (0);
2083
2084 newoffer = 0;
2085
2086 if ((flags & FLAG_NO_SYNC) != 0) {
2087 sc->sc_msg[3] = 0;
2088 newoffer = 1;
2089 }
2090
2091 if (sc->sc_msg[3] > IHA_MAX_OFFSET) {
2092 sc->sc_msg[3] = IHA_MAX_OFFSET;
2093 newoffer = 1;
2094 }
2095
2096 if (sc->sc_msg[2] < default_period) {
2097 sc->sc_msg[2] = default_period;
2098 newoffer = 1;
2099 }
2100
2101 if (sc->sc_msg[2] >= 59) { /* XXX magic */
2102 sc->sc_msg[3] = 0;
2103 newoffer = 1;
2104 }
2105
2106 return (newoffer);
2107 }
2108
2109 static int
2110 tul_msgout(sc, msg)
2111 struct iha_softc *sc;
2112 u_int8_t msg;
2113 {
2114
2115 bus_space_write_1(sc->sc_iot, sc->sc_ioh, TUL_SFIFO, msg);
2116
2117 return (tul_wait(sc, XF_FIFO_OUT));
2118 }
2119
2120 static void
2121 tul_msgout_abort(sc, aborttype)
2122 struct iha_softc *sc;
2123 u_int8_t aborttype;
2124 {
2125 bus_space_tag_t iot = sc->sc_iot;
2126 bus_space_handle_t ioh = sc->sc_ioh;
2127
2128 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2129
2130 switch (tul_wait(sc, MSG_ACCEPT)) {
2131 case -1:
2132 break;
2133
2134 case PHASE_MSG_OUT:
2135 bus_space_write_1(iot, ioh, TUL_SFIFO, aborttype);
2136
2137 sc->sc_flags |= FLAG_EXPECT_DISC;
2138
2139 if (tul_wait(sc, XF_FIFO_OUT) != -1)
2140 tul_bad_seq(sc);
2141 break;
2142
2143 default:
2144 tul_bad_seq(sc);
2145 break;
2146 }
2147 }
2148
2149 static int
2150 tul_msgout_reject(sc)
2151 struct iha_softc *sc;
2152 {
2153 bus_space_tag_t iot = sc->sc_iot;
2154 bus_space_handle_t ioh = sc->sc_ioh;
2155 int phase;
2156
2157 tul_set_ssig(sc, REQ | BSY | SEL, ATN);
2158
2159 if ((phase = tul_wait(sc, MSG_ACCEPT)) == -1)
2160 return (-1);
2161
2162 if (phase == PHASE_MSG_OUT) {
2163 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_MESSAGE_REJECT);
2164 return (tul_wait(sc, XF_FIFO_OUT));
2165 }
2166
2167 return (phase);
2168 }
2169
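/*
 * tul_msgout_wide - send a WDTR message offering 16 bit wide
 *                   transfers, then flush the FIFO and drop ATN.
 */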
2170 static int
2171 tul_msgout_wide(sc)
2172 struct iha_softc *sc;
2173 {
2174 bus_space_tag_t iot = sc->sc_iot;
2175 bus_space_handle_t ioh = sc->sc_ioh;
2176 int phase;
2177
2178 sc->sc_actscb->tcs->flags |= FLAG_WIDE_DONE;
2179
2180 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);
2181 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_WDTR_LEN);
2182 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_WDTR);
2183 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_WDTR_BUS_16_BIT);
2184
2185 phase = tul_wait(sc, XF_FIFO_OUT);
2186
2187 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2188 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
2189
2190 return (phase);
2191 }
2192
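/*
 * tul_msgout_sync - send an SDTR message offering the period given
 *                   by the configured rate and the maximum REQ/ACK
 *                   offset, then flush the FIFO and drop ATN.
 */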
2193 static int
2194 tul_msgout_sync(sc)
2195 struct iha_softc *sc;
2196 {
2197 bus_space_tag_t iot = sc->sc_iot;
2198 bus_space_handle_t ioh = sc->sc_ioh;
2199 int rateindex;
2200 int phase;
2201 u_int8_t sync_rate;
2202
2203 rateindex = sc->sc_actscb->tcs->flags & FLAG_SCSI_RATE;
2204
2205 sync_rate = tul_rate_tbl[rateindex];
2206
2207 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);
2208 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_SDTR_LEN);
2209 bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXT_SDTR);
2210 bus_space_write_1(iot, ioh, TUL_SFIFO, sync_rate);
2211 bus_space_write_1(iot, ioh, TUL_SFIFO, IHA_MAX_OFFSET);/* REQ/ACK*/
2212
2213 phase = tul_wait(sc, XF_FIFO_OUT);
2214
2215 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2216 tul_set_ssig(sc, REQ | BSY | SEL | ATN, 0);
2217
2218 return (phase);
2219 }
2220
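/*
 * tul_wdtr_done - record the negotiated transfer width in the target
 *                 control structure, invalidate any earlier sync
 *                 agreement and reprogram the chip accordingly.
 */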
2221 static void
2222 tul_wdtr_done(sc)
2223 struct iha_softc *sc;
2224 {
2225 bus_space_tag_t iot = sc->sc_iot;
2226 bus_space_handle_t ioh = sc->sc_ioh;
2227 struct tcs *tcs = sc->sc_actscb->tcs;
2228
2229 tcs->syncm = 0;
2230 tcs->period = 0;
2231 tcs->offset = 0;
2232
2233 if (sc->sc_msg[2] != 0)
2234 tcs->syncm |= PERIOD_WIDE_SCSI;
2235
2236 tcs->sconfig0 &= ~ALTPD;
2237 tcs->flags &= ~FLAG_SYNC_DONE;
2238 tcs->flags |= FLAG_WIDE_DONE;
2239
2240 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2241 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2242 }
2243
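/*
 * tul_sync_done - record the negotiated period/offset in the target
 *                 control structure and reprogram the chip, unless
 *                 sync negotiation has already been completed.
 */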
2244 static void
2245 tul_sync_done(sc)
2246 struct iha_softc *sc;
2247 {
2248 bus_space_tag_t iot = sc->sc_iot;
2249 bus_space_handle_t ioh = sc->sc_ioh;
2250 struct tcs *tcs = sc->sc_actscb->tcs;
2251 int i;
2252
2253 if ((tcs->flags & FLAG_SYNC_DONE) == 0) {
2254 tcs->period = sc->sc_msg[2];
2255 tcs->offset = sc->sc_msg[3];
2256 if (tcs->offset != 0) {
2257 tcs->syncm |= tcs->offset;
2258
2259 /* pick the highest possible rate */
2260 for (i = 0; i < 8; i++)
2261 if (tul_rate_tbl[i] >= tcs->period)
2262 break;
2263
2264 tcs->syncm |= (i << 4);
2265 tcs->sconfig0 |= ALTPD;
2266 }
2267
2268 tcs->flags |= FLAG_SYNC_DONE;
2269
2270 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
2271 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
2272 }
2273 }
2274
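/*
 * tul_reset_chip - reset the SCSI portion of the tulip chip, wait
 *                  for the reset to complete (SRSTD) and clear any
 *                  pending interrupt.
 */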
2275 void
2276 tul_reset_chip(sc)
2277 struct iha_softc *sc;
2278 {
2279 bus_space_tag_t iot = sc->sc_iot;
2280 bus_space_handle_t ioh = sc->sc_ioh;
2281
2282 /* reset tulip chip */
2283
2284 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSCSI);
2285
2286 do {
2287 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
2288 } while ((sc->sc_sistat & SRSTD) == 0);
2289
2290 tul_set_ssig(sc, 0, 0);
2291
2292 bus_space_read_1(iot, ioh, TUL_SISTAT); /* Clear any active interrupt*/
2293 }
2294
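/*
 * tul_select - load the FIFO as required by select_type, remove the
 *              scb from the pending queue, make it the active scb
 *              and start selection by issuing select_type to the
 *              chip.
 */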
2295 static void
2296 tul_select(sc, scb, select_type)
2297 struct iha_softc *sc;
2298 struct iha_scsi_req_q *scb;
2299 u_int8_t select_type;
2300 {
2301 bus_space_tag_t iot = sc->sc_iot;
2302 bus_space_handle_t ioh = sc->sc_ioh;
2303
2304 switch (select_type) {
2305 case SEL_ATN:
2306 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
2307 bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
2308 scb->cmd, scb->cmdlen);
2309
2310 scb->nextstat = 2;
2311 break;
2312
2313 case SELATNSTOP:
2314 scb->nextstat = 1;
2315 break;
2316
2317 case SEL_ATN3:
2318 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id);
2319 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagmsg);
2320 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagid);
2321
2322 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, scb->cmd,
2323 scb->cmdlen);
2324
2325 scb->nextstat = 2;
2326 break;
2327
2328 default:
2329 printf("[debug] tul_select() - unknown select type = 0x%02x\n",
2330 select_type);
2331 return;
2332 }
2333
2334 tul_del_pend_scb(sc, scb);
2335 scb->status = STATUS_SELECT;
2336
2337 sc->sc_actscb = scb;
2338
2339 bus_space_write_1(iot, ioh, TUL_SCMD, select_type);
2340 }
2341
2342 /*
2343 * tul_wait - wait for an interrupt to service or a SCSI bus phase change
2344 * after writing the supplied command to the tulip chip. If
2345 * the command is NO_OP, skip the command writing.
2346 */
2347 static int
2348 tul_wait(sc, cmd)
2349 struct iha_softc *sc;
2350 u_int8_t cmd;
2351 {
2352 bus_space_tag_t iot = sc->sc_iot;
2353 bus_space_handle_t ioh = sc->sc_ioh;
2354
2355 if (cmd != NO_OP)
2356 bus_space_write_1(iot, ioh, TUL_SCMD, cmd);
2357
2358 /*
2359 	 * Have to poll for the interrupt here, as well as in iha_isr,
2360 	 * because interrupts might be disabled when we get here.
2361 */
2362 do {
2363 sc->sc_status0 = bus_space_read_1(iot, ioh, TUL_STAT0);
2364 } while ((sc->sc_status0 & INTPD) == 0);
2365
2366 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1);
2367 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
2368
2369 sc->sc_phase = sc->sc_status0 & PH_MASK;
2370
2371 if ((sc->sc_sistat & SRSTD) != 0) {
2372 /* SCSI bus reset interrupt */
2373 tul_reset_scsi_bus(sc);
2374 return (-1);
2375 }
2376
2377 if ((sc->sc_sistat & RSELED) != 0)
2378 /* Reselection interrupt */
2379 return (tul_resel(sc));
2380
2381 if ((sc->sc_sistat & STIMEO) != 0) {
2382 /* selected/reselected timeout interrupt */
2383 tul_busfree(sc);
2384 return (-1);
2385 }
2386
2387 if ((sc->sc_sistat & DISCD) != 0) {
2388 /* BUS disconnection interrupt */
2389 if ((sc->sc_flags & FLAG_EXPECT_DONE_DISC) != 0) {
2390 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2391 bus_space_write_1(iot, ioh, TUL_SCONFIG0,
2392 SCONFIG0DEFAULT);
2393 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
2394 tul_append_done_scb(sc, sc->sc_actscb, HOST_OK);
2395 sc->sc_flags &= ~FLAG_EXPECT_DONE_DISC;
2396
2397 } else if ((sc->sc_flags & FLAG_EXPECT_DISC) != 0) {
2398 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
2399 bus_space_write_1(iot, ioh, TUL_SCONFIG0,
2400 SCONFIG0DEFAULT);
2401 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
2402 sc->sc_actscb = NULL;
2403 sc->sc_flags &= ~FLAG_EXPECT_DISC;
2404
2405 } else
2406 tul_busfree(sc);
2407
2408 return (-1);
2409 }
2410
2411 return (sc->sc_phase);
2412 }
2413
2414 /*
2415  * tul_done_scb - the adapter has finished processing this scb;
2416  *                examine how the operation went and report the result.
2417 */
2418 static void
2419 tul_done_scb(sc, scb)
2420 struct iha_softc *sc;
2421 struct iha_scsi_req_q *scb;
2422 {
2423 struct scsipi_xfer *xs = scb->xs;
2424
2425 if (xs != NULL) {
2426 /* Cancel the timeout. */
2427 callout_stop(&xs->xs_callout);
2428
2429 if (xs->datalen > 0) {
2430 bus_dmamap_sync(sc->sc_dmat, scb->dmap,
2431 0, scb->dmap->dm_mapsize,
2432 (xs->xs_control & XS_CTL_DATA_IN) ?
2433 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2434 bus_dmamap_unload(sc->sc_dmat, scb->dmap);
2435 }
2436
2437 xs->status = scb->ta_stat;
2438
2439 switch (scb->ha_stat) {
2440 case HOST_OK:
2441 switch (scb->ta_stat) {
2442 case SCSI_OK:
2443 case SCSI_CONDITION_MET:
2444 case SCSI_INTERM:
2445 case SCSI_INTERM_COND_MET:
2446 xs->resid = scb->buflen;
2447 xs->error = XS_NOERROR;
2448 if ((scb->flags & FLAG_RSENS) != 0)
2449 xs->error = XS_SENSE;
2450 break;
2451
2452 case SCSI_RESV_CONFLICT:
2453 case SCSI_BUSY:
2454 case SCSI_QUEUE_FULL:
2455 xs->error = XS_BUSY;
2456 break;
2457
2458 case SCSI_TERMINATED:
2459 case SCSI_ACA_ACTIVE:
2460 case SCSI_CHECK:
2461 scb->tcs->flags &=
2462 ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
2463
2464 if ((scb->flags & FLAG_RSENS) != 0 ||
2465 tul_push_sense_request(sc, scb) != 0) {
2466 				scb->flags &= ~FLAG_RSENS;
2467 printf("%s: request sense failed\n",
2468 sc->sc_dev.dv_xname);
2469 xs->error = XS_DRIVER_STUFFUP;
2470 break;
2471 }
2472
2473 xs->error = XS_SENSE;
2474 return;
2475
2476 default:
2477 xs->error = XS_DRIVER_STUFFUP;
2478 break;
2479 }
2480 break;
2481
2482 case HOST_SEL_TOUT:
2483 xs->error = XS_SELTIMEOUT;
2484 break;
2485
2486 case HOST_SCSI_RST:
2487 case HOST_DEV_RST:
2488 xs->error = XS_RESET;
2489 break;
2490
2491 case HOST_SPERR:
2492 printf("%s: SCSI Parity error detected\n",
2493 sc->sc_dev.dv_xname);
2494 xs->error = XS_DRIVER_STUFFUP;
2495 break;
2496
2497 case HOST_TIMED_OUT:
2498 xs->error = XS_TIMEOUT;
2499 break;
2500
2501 case HOST_DO_DU:
2502 case HOST_BAD_PHAS:
2503 default:
2504 xs->error = XS_DRIVER_STUFFUP;
2505 break;
2506 }
2507
2508 scsipi_done(xs);
2509 }
2510
2511 tul_append_free_scb(sc, scb);
2512 }
2513
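/*
 * tul_timeout - callout handler invoked when a command has not
 *               completed within its allotted time; abort the
 *               transfer with HOST_TIMED_OUT status.
 */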
2514 static void
2515 tul_timeout(arg)
2516 void *arg;
2517 {
2518 struct iha_scsi_req_q *scb = (struct iha_scsi_req_q *)arg;
2519 struct scsipi_xfer *xs = scb->xs;
2520 	struct scsipi_periph *periph;
2521 	struct iha_softc *sc;
2522 
2523 	if (xs == NULL) {
2524 		printf("[debug] tul_timeout called with xs == NULL\n");
2525 		return;
2526 	}
2527 
2528 	periph = xs->xs_periph;
2529 	sc = (void *)periph->periph_channel->chan_adapter->adapt_dev;
2530 
2531 	scsipi_printaddr(periph);
2532 	printf("SCSI OpCode 0x%02x timed out\n", xs->cmd->opcode);
2533 	tul_abort_xs(sc, xs, HOST_TIMED_OUT);
2534 }
2535
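/*
 * tul_exec_scb - finish setting up the scb (scatter/gather list,
 *                buffer address, timeout callout), queue it on the
 *                pending list and, if tul_main() is not already
 *                running, call it to get the command started.
 */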
2536 static void
2537 tul_exec_scb(sc, scb)
2538 struct iha_softc *sc;
2539 struct iha_scsi_req_q *scb;
2540 {
2541 bus_space_tag_t iot;
2542 bus_space_handle_t ioh;
2543 bus_dmamap_t dm;
2544 struct scsipi_xfer *xs = scb->xs;
2545 int nseg, s;
2546
2547 dm = scb->dmap;
2548 nseg = dm->dm_nsegs;
2549
2550 if (nseg > 1) {
2551 struct iha_sg_element *sg = scb->sglist;
2552 int i;
2553
2554 for (i = 0; i < nseg; i++) {
2555 sg[i].sg_len = htole32(dm->dm_segs[i].ds_len);
2556 sg[i].sg_addr = htole32(dm->dm_segs[i].ds_addr);
2557 }
2558 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2559 scb->sgoffset, IHA_SG_SIZE,
2560 BUS_DMASYNC_PREWRITE);
2561
2562 scb->flags |= FLAG_SG; /* XXX */
2563 scb->sg_size = scb->sg_max = nseg;
2564
2565 scb->bufaddr = scb->sg_addr;
2566 } else
2567 scb->bufaddr = dm->dm_segs[0].ds_addr;
2568
2569 if ((xs->xs_control & XS_CTL_POLL) == 0) {
2570 int timeout = xs->timeout;
2571 timeout = (timeout > 100000) ?
2572 timeout / 1000 * hz : timeout * hz / 1000;
2573 if (timeout == 0)
2574 timeout = 1;
2575 callout_reset(&xs->xs_callout, timeout, tul_timeout, scb);
2576 }
2577
2578 s = splbio();
2579
2580 	if ((xs->xs_control & XS_CTL_RESET) != 0 || scb->cmd[0] == REQUEST_SENSE)
2581 tul_push_pend_scb(sc, scb); /* Insert SCB at head of Pend */
2582 else
2583 tul_append_pend_scb(sc, scb); /* Append SCB to tail of Pend */
2584
2585 /*
2586 * Run through tul_main() to ensure something is active, if
2587 * only this new SCB.
2588 */
2589 if (sc->sc_semaph != SEMAPH_IN_MAIN) {
2590 iot = sc->sc_iot;
2591 ioh = sc->sc_ioh;
2592
2593 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL);
2594 		sc->sc_semaph = SEMAPH_IN_MAIN;
2595
2596 splx(s);
2597 tul_main(sc);
2598 s = splbio();
2599
2600 		sc->sc_semaph = ~SEMAPH_IN_MAIN;
2601 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP));
2602 }
2603
2604 splx(s);
2605 }
2606
2607
2608 /*
2609 * tul_set_ssig - read the current scsi signal mask, then write a new
2610 * one which turns off/on the specified signals.
2611 */
2612 static void
2613 tul_set_ssig(sc, offsigs, onsigs)
2614 struct iha_softc *sc;
2615 u_int8_t offsigs, onsigs;
2616 {
2617 bus_space_tag_t iot = sc->sc_iot;
2618 bus_space_handle_t ioh = sc->sc_ioh;
2619 u_int8_t currsigs;
2620
2621 currsigs = bus_space_read_1(iot, ioh, TUL_SSIGI);
2622 bus_space_write_1(iot, ioh, TUL_SSIGO, (currsigs & ~offsigs) | onsigs);
2623 }
2624
2625 /*
2626  * tul_alloc_sglist - allocate and map the sglist area for the SCBs
2627 */
2628 static int
2629 tul_alloc_sglist(sc)
2630 struct iha_softc *sc;
2631 {
2632 bus_dma_segment_t seg;
2633 int error, rseg;
2634
2635 /*
2636 * Allocate dma-safe memory for the SCB's sglist
2637 */
2638 if ((error = bus_dmamem_alloc(sc->sc_dmat,
2639 IHA_SG_SIZE * IHA_MAX_SCB,
2640 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
2641 printf(": unable to allocate sglist, error = %d\n", error);
2642 return (error);
2643 }
2644 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
2645 IHA_SG_SIZE * IHA_MAX_SCB, (caddr_t *)&sc->sc_sglist,
2646 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
2647 printf(": unable to map sglist, error = %d\n", error);
2648 return (error);
2649 }
2650
2651 /*
2652 	 * Create and load the DMA map used for the sglist area
2653 */
2654 if ((error = bus_dmamap_create(sc->sc_dmat,
2655 IHA_SG_SIZE * IHA_MAX_SCB, 1, IHA_SG_SIZE * IHA_MAX_SCB,
2656 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
2657 printf(": unable to create control DMA map, error = %d\n",
2658 error);
2659 return (error);
2660 }
2661 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
2662 sc->sc_sglist, IHA_SG_SIZE * IHA_MAX_SCB,
2663 NULL, BUS_DMA_NOWAIT)) != 0) {
2664 printf(": unable to load control DMA map, error = %d\n", error);
2665 return (error);
2666 }
2667
2668 memset(sc->sc_sglist, 0, IHA_SG_SIZE * IHA_MAX_SCB);
2669
2670 return (0);
2671 }
2672
2673 /*
2674 * tul_read_eeprom - read Serial EEPROM value & set to defaults
2675 * if required. XXX - Writing does NOT work!
2676 */
2677 void
2678 tul_read_eeprom(sc, eeprom)
2679 struct iha_softc *sc;
2680 struct iha_eeprom *eeprom;
2681 {
2682 bus_space_tag_t iot = sc->sc_iot;
2683 bus_space_handle_t ioh = sc->sc_ioh;
2684 u_int16_t *buf = (u_int16_t *)eeprom;
2685 u_int8_t gctrl;
2686
2687 	/*------ Enable EEProm programming ---*/
2688 gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) | EEPRG;
2689 bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
2690
2691 	/*------ Read EEPROM; program default pattern if invalid ----*/
2692 if (tul_se2_rd_all(sc, buf) == 0) {
2693 tul_se2_update_all(sc);
2694 if(tul_se2_rd_all(sc, buf) == 0)
2695 			panic("could not program iha Tulip EEPROM");
2696 }
2697
2698 /*------ Disable EEProm programming ---*/
2699 gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) & ~EEPRG;
2700 bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
2701 }
2702
2703 /*
2704  * tul_se2_update_all - program the serial EEPROM with the default
2705  *                      configuration pattern (eeprom_default) and
2706  *                      store the checksum of those values in the
2707  *                      last word.
2708 */
2709 void
2710 tul_se2_update_all(sc)
2711 struct iha_softc *sc;
2712 {
2713 bus_space_tag_t iot = sc->sc_iot;
2714 bus_space_handle_t ioh = sc->sc_ioh;
2715 u_int16_t *np;
2716 u_int32_t chksum;
2717 int i;
2718
2719 /* Enable erase/write state of EEPROM */
2720 tul_se2_instr(sc, ENABLE_ERASE);
2721 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2722 EEP_WAIT();
2723
2724 np = (u_int16_t *)&eeprom_default;
2725
2726 for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
2727 tul_se2_wr(sc, i, *np);
2728 chksum += *np++;
2729 }
2730
2731 chksum &= 0x0000ffff;
2732 tul_se2_wr(sc, 31, chksum);
2733
2734 /* Disable erase/write state of EEPROM */
2735 tul_se2_instr(sc, 0);
2736 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2737 EEP_WAIT();
2738 }
2739
2740 /*
2741 * tul_se2_wr - write the given 16 bit value into the Serial EEPROM
2742 * at the specified offset
2743 */
2744 void
2745 tul_se2_wr(sc, addr, writeword)
2746 struct iha_softc *sc;
2747 int addr;
2748 u_int16_t writeword;
2749 {
2750 bus_space_tag_t iot = sc->sc_iot;
2751 bus_space_handle_t ioh = sc->sc_ioh;
2752 int i, bit;
2753
2754 /* send 'WRITE' Instruction == address | WRITE bit */
2755 tul_se2_instr(sc, addr | WRITE);
2756
2757 for (i = 16; i > 0; i--) {
2758 if (writeword & (1 << (i - 1)))
2759 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRDO);
2760 else
2761 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2762 EEP_WAIT();
2763 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2764 EEP_WAIT();
2765 }
2766
2767 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2768 EEP_WAIT();
2769 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2770 EEP_WAIT();
2771 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2772 EEP_WAIT();
2773
2774 for (;;) {
2775 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2776 EEP_WAIT();
2777 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2778 EEP_WAIT();
2779 bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI;
2780 EEP_WAIT();
2781 if (bit != 0)
2782 break; /* write complete */
2783 }
2784
2785 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2786 }
2787
2788 /*
2789 * tul_se2_rd - read & return the 16 bit value at the specified
2790 * offset in the Serial E2PROM
2792 */
2793 u_int16_t
2794 tul_se2_rd(sc, addr)
2795 struct iha_softc *sc;
2796 int addr;
2797 {
2798 bus_space_tag_t iot = sc->sc_iot;
2799 bus_space_handle_t ioh = sc->sc_ioh;
2800 int i, bit;
2801 u_int16_t readword;
2802
2803 /* Send 'READ' instruction == address | READ bit */
2804 tul_se2_instr(sc, addr | READ);
2805
2806 readword = 0;
2807 for (i = 16; i > 0; i--) {
2808 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
2809 EEP_WAIT();
2810 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2811 EEP_WAIT();
2812 		/* sample the data bit after the falling edge of the clock */
2813 bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI ? 1 : 0;
2814 EEP_WAIT();
2815
2816 readword |= bit << (i - 1);
2817 }
2818
2819 bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
2820
2821 return (readword);
2822 }
2823
2824 /*
2825 * tul_se2_rd_all - Read SCSI H/A config parameters from serial EEPROM
2826 */
2827 int
2828 tul_se2_rd_all(sc, buf)
2829 struct iha_softc *sc;
2830 u_int16_t *buf;
2831 {
2832 struct iha_eeprom *eeprom = (struct iha_eeprom *)buf;
2833 u_int32_t chksum;
2834 int i;
2835
2836 for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
2837 *buf = tul_se2_rd(sc, i);
2838 chksum += *buf++;
2839 }
2840 *buf = tul_se2_rd(sc, 31); /* just read checksum */
2841
2842 chksum &= 0x0000ffff; /* checksum is lower 16 bits of sum */
2843
2844 return (eeprom->signature == EEP_SIGNATURE) &&
2845 (eeprom->checksum == chksum);
2846 }
2847
2848 /*
2849 * tul_se2_instr - write an octet to serial E2PROM one bit at a time
2850 */
2851 void
2852 tul_se2_instr(sc, instr)
2853 struct iha_softc *sc;
2854 int instr;
2855 {
2856 bus_space_tag_t iot = sc->sc_iot;
2857 bus_space_handle_t ioh = sc->sc_ioh;
2858 int b, i;
2859
2860 b = NVRCS | NVRDO; /* Write the start bit (== 1) */
2861
2862 bus_space_write_1(iot, ioh, TUL_NVRAM, b);
2863 EEP_WAIT();
2864 bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
2865 EEP_WAIT();
2866
2867 for (i = 8; i > 0; i--) {
2868 if (instr & (1 << (i - 1)))
2869 b = NVRCS | NVRDO; /* Write a 1 bit */
2870 else
2871 b = NVRCS; /* Write a 0 bit */
2872
2873 bus_space_write_1(iot, ioh, TUL_NVRAM, b);
2874 EEP_WAIT();
2875 bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
2876 EEP_WAIT();
2877 }
2878
2879 bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
2880 }
2881
2882 /*
2883 * tul_reset_tcs - reset the target control structure pointed
2884  *                 to by tcs to default values. Only the negotiation
2885  *                 done bits of tcs->flags are cleared, since the
2886  *                 other bits are fixed at initialization.
2887 */
2888 void
2889 tul_reset_tcs(tcs, config0)
2890 struct tcs *tcs;
2891 u_int8_t config0;
2892 {
2893
2894 tcs->flags &= ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
2895 tcs->period = 0;
2896 tcs->offset = 0;
2897 tcs->tagcnt = 0;
2898 tcs->ntagscb = NULL;
2899 tcs->syncm = 0;
2900 tcs->sconfig0 = config0;
2901 }
2902