/*	$NetBSD: adw.c,v 1.4 1998/11/19 21:52:59 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif				/* ! DDB */

/******************************************************************************/


static void adw_enqueue __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static struct scsipi_xfer *adw_dequeue __P((ADW_SOFTC *));

static int adw_alloc_ccbs __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static void adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_start_ccbs __P((ADW_SOFTC *));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *));
static void adwminphys __P((struct buf *));
static void adw_wide_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_watchdog __P((void *));


/******************************************************************************/

/* The below structure is so we have a default dev struct for our link struct. */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define	ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define	ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                         scsipi_xfer queue routines                         */
/******************************************************************************/

/*
 * Insert a scsipi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adw_enqueue(sc, xs, infront)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int infront;
{

	if (infront || sc->sc_queue.lh_first == NULL) {
		if (sc->sc_queue.lh_first == NULL)
			sc->sc_queuelast = xs;
		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
	sc->sc_queuelast = xs;
}


/*
 * Pull a scsipi_xfer off the front of the software queue.
 */
static struct scsipi_xfer *
adw_dequeue(sc)
	ADW_SOFTC *sc;
{
	struct scsipi_xfer *xs;

	xs = sc->sc_queue.lh_first;
	LIST_REMOVE(xs, free_list);

	if (sc->sc_queue.lh_first == NULL)
		sc->sc_queuelast = NULL;

	return (xs);
}


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/

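/*
 * Allocate and map the DMA-safe memory holding the driver's control
 * structure (the CCB pool), then create and load the DMA map that
 * describes it.  Returns 0 on success or a bus_dma error code.
 */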
static int
adw_alloc_ccbs(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADW_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}

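/*
 * Return a CCB to its pristine state (clear the flags).
 */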
static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}

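/*
 * One-time setup of a CCB: create the DMA map that will be used for
 * its data transfers, then reset the CCB to its free state.
 */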
static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb.
 *
 * If there are none, either sleep until one comes free or, if we may
 * not sleep, return NULL.
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adw_start_ccbs(sc);
}

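/*
 * Feed waiting CCBs to the controller.  If the chip reports busy, arm
 * the watchdog so the waiting queue is retried later; otherwise start
 * the per-command timeout for non-polled requests.
 */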
static void
adw_start_ccbs(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adw_watchdog, ccb);

		if (AdvExeScsiQueue(sc, &ccb->scsiq) == ADW_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adw_watchdog, ccb,
			    (ADW_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/

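/*
 * First-stage initialization: verify the chip signature, reset the chip,
 * load the configuration from the EEPROM (falling back to defaults on a
 * bad checksum) and optionally reset the SCSI bus.
 */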
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdvInitFromEEP(sc);
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);

		/*
		 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
		 * Resets should be performed.
		 */
		if (sc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
			AdvResetSCSIBus(sc);
	}

	sc->isr_callback = (ulong) adw_wide_isr_callback;

	return (0);
}

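/*
 * Second-stage initialization and autoconfiguration glue: bring up the
 * ASC3550 microcode, fill in the scsipi adapter and link structures,
 * set up the CCB pool and attach the SCSI bus.
 */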
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	/*
	 * Initialize the ASC3550.
	 */
	switch (AdvInitAsc3550Driver(sc)) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: Single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * Fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_ccbs(sc);
	if (error)
		return;		/* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;		/* (ENOMEM) */
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}

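/*
 * Clamp transfers to what one CCB's scatter/gather list can map.
 */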
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}

/*
 * Start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 0, dontqueue = 0;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adw_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adw_enqueue(sc, xs, 0);
			xs = adw_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * Get a ccb to use.  If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep.
	 */

	if ((ccb = adw_get_ccb(sc, xs->flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off it in the first place.
		 */
		adw_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb)) {
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);

		/*
		 * Usually return SUCCESSFULLY_QUEUED.
		 */
		if ((xs->flags & SCSI_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion.
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the CCB structure.
	 */
	scsiqp->ccb_ptr = (ulong) ccb;


	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = (ulong) &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif				/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = (ulong) xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		scsiqp->sg_list_ptr = &ccb->sg_block[0];
		bzero(scsiqp->sg_list_ptr,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
		scsiqp->sg_list_ptr = NULL;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
{
	struct scsipi_xfer *xs = ccb->xs;
	ADW_SOFTC *sc = xs->sc_link->adapter_softc;
	ADW_SG_BLOCK *sg_block = scsiqp->sg_list_ptr;
	ulong sg_block_next_addr;	/* block and its next */
	ulong sg_block_physical_addr;
	int sg_block_index, i;		/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (ulong) sg_block;	/* allow math operation */
	sg_block_physical_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	sg_block_index = 0;
	do {
		sg_block->first_entry_no = sg_block_index;
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				scsiqp->sg_entry_cnt = sg_block_index + i + 1;
				sg_block->last_entry_no = sg_block_index + i;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block_index += NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = (ADW_SG_BLOCK *) sg_block_physical_addr;
		sg_block->last_entry_no = sg_block_index - 1;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}

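/*
 * Hardware interrupt handler.  Let the Adv Library service the chip
 * (which calls back into adw_wide_isr_callback() for each completed
 * request), then kick the software queue in case a CCB came free.
 */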
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	AdvISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adw_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adw_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

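/*
 * A command failed to complete in time.  Try to abort it first; if the
 * abort itself times out, escalate to a SCSI bus reset.
 */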
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		AdvResetSCSIBus(sc);
		ccb->timeout = ADW_ABORT_TIMEOUT;
		adw_queue_ccb(sc, ccb);
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ADW_ABORT_CCB(sc, ccb);
		xs->error = XS_TIMEOUT;
		ccb->timeout = ADW_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adw_queue_ccb(sc, ccb);
	}

	splx(s);
}

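/*
 * The controller was busy when we last tried to start CCBs; clear the
 * watchdog flag and retry the waiting queue.
 */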
static void
adw_watchdog(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adw_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                  NARROW and WIDE boards Interrupt callbacks                */
/******************************************************************************/


/*
 * adw_wide_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
static void
adw_wide_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb = (ADW_CCB *) scsiq->ccb_ptr;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_sense_data *s1, *s2;
	/* int underrun = ASC_FALSE; */


	untimeout(adw_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 *	ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 *	    scsiqp->data_cnt);
	 *	underrun = ASC_TRUE;
	 * }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/*
		 * If there was an underrun without any other error,
		 * set DID_ERROR to indicate the underrun error.
		 *
		 * Note: There is no way yet to indicate the number
		 * of underrun bytes.
		 */
		/*
		 * if (xs->error == XS_NOERROR && underrun == ASC_TRUE) {
		 *	scp->result = HOST_BYTE(DID_UNDERRUN);
		 * }
		 */
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiq->scsi_status == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adw_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}