/*	$NetBSD: adw.c,v 1.5 1998/12/05 19:43:49 mjacob Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define Debugger()	panic("should call debugger here (adw.c)")
#endif /* ! DDB */

/******************************************************************************/


static void adw_enqueue __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static struct scsipi_xfer *adw_dequeue __P((ADW_SOFTC *));

static int adw_alloc_ccbs __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static void adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_start_ccbs __P((ADW_SOFTC *));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *));
static void adwminphys __P((struct buf *));
static void adw_wide_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_watchdog __P((void *));


/******************************************************************************/

/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                          scsipi_xfer queue routines                        */
/******************************************************************************/

/*
 * Insert a scsipi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adw_enqueue(sc, xs, infront)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int infront;
{

	if (infront || sc->sc_queue.lh_first == NULL) {
		if (sc->sc_queue.lh_first == NULL)
			sc->sc_queuelast = xs;
		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
	sc->sc_queuelast = xs;
}


/*
 * Pull a scsipi_xfer off the front of the software queue.
 */
static struct scsipi_xfer *
adw_dequeue(sc)
	ADW_SOFTC *sc;
{
	struct scsipi_xfer *xs;

	xs = sc->sc_queue.lh_first;
	LIST_REMOVE(xs, free_list);

	if (sc->sc_queue.lh_first == NULL)
		sc->sc_queuelast = NULL;

	return (xs);
}


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/

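/*
 * Allocate, map and load the single DMA-safe area that holds the driver's
 * control structure (struct adw_control), out of which all CCBs are carved.
 */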
static int
adw_alloc_ccbs(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}

/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADW_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}

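/*
 * Set up a newly created CCB: create the per-transfer DMA map that
 * adw_build_req() will later load with the caller's data buffer.
 */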
static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adw_reset_ccb(ccb);
	return (0);
}

/*
 * Get a free ccb
 *
 * If there are none, either sleep until one comes free or fail,
 * depending on whether the caller is allowed to sleep.
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If the caller can sleep, wait for a CCB to come free;
	 * otherwise give up immediately.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adw_start_ccbs(sc);
}

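/*
 * Hand as many waiting CCBs as possible to the microcode.  If the chip
 * reports ADW_BUSY the CCB stays queued and a watchdog timer retries it
 * later; otherwise a per-command timeout is armed for interrupt-driven
 * requests.
 */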
static void
adw_start_ccbs(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adw_watchdog, ccb);

		if (AdvExeScsiQueue(sc, &ccb->scsiq) == ADW_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adw_watchdog, ccb,
			    (ADW_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                        SCSI layer interfacing routines                     */
/******************************************************************************/

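/*
 * First-stage board initialization: verify the chip signature, reset the
 * chip, pull the configuration out of the EEPROM (falling back to defaults
 * on checksum or termination warnings) and, if the BIOS settings ask for
 * it, reset the SCSI bus.
 */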
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdvInitFromEEP(sc);
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
404 printf("%s: Bad bus termination setting."
405 "Using automatic termination.\n",
406 sc->sc_dev.dv_xname);

		/*
		 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
		 * Resets should be performed.
		 */
		if (sc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
			AdvResetSCSIBus(sc);
	}

	sc->isr_callback = (ulong) adw_wide_isr_callback;

	return (0);
}

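/*
 * Finish attachment: initialize the ASC3550 through the Adv library,
 * fill in the scsipi adapter and prototype link structures, allocate
 * and create the CCB pool and attach the SCSI bus via config_found().
 */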
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	/*
	 * Initialize the ASC3550.
	 */
	switch (AdvInitAsc3550Driver(sc)) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_ccbs(sc);
	if (error)
		return; /* (error) */ ;

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}

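/*
 * Clamp a buffer's transfer size to what a single CCB's scatter/gather
 * list can map, then defer to the generic minphys().
 */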
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}

/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 1, dontqueue = 0;

	s = splbio();		/* protect the queue */

	/*
	 * If we're being run from adw_intr() to drain the software queue,
	 * we've been called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adw_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adw_enqueue(sc, xs, 0);
			xs = adw_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	if ((ccb = adw_get_ccb(sc, xs->flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		adw_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb)) {
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);

		/*
		 * Usually return SUCCESSFULLY QUEUED
		 */
		if ((xs->flags & SCSI_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}

/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the CCB structure.
	 */
	scsiqp->ccb_ptr = (ulong) ccb;


	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = (ulong) &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = (ulong) xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		scsiqp->sg_list_ptr = &ccb->sg_block[0];
		bzero(scsiqp->sg_list_ptr,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
		scsiqp->sg_list_ptr = NULL;
	}

	return (1);
}

/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
{
	struct scsipi_xfer *xs = ccb->xs;
	ADW_SOFTC *sc = xs->sc_link->adapter_softc;
	ADW_SG_BLOCK *sg_block = scsiqp->sg_list_ptr;
	ulong sg_block_next_addr;	/* block and its next */
	ulong sg_block_physical_addr;
	int sg_block_index, i;		/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (ulong) sg_block;	/* allow math operation */
	sg_block_physical_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	sg_block_index = 0;
	do {
		sg_block->first_entry_no = sg_block_index;
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				scsiqp->sg_entry_cnt = sg_block_index + i + 1;
				sg_block->last_entry_no = sg_block_index + i;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block_index += NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = (ADW_SG_BLOCK *) sg_block_physical_addr;
		sg_block->last_entry_no = sg_block_index - 1;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}

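/*
 * Hardware interrupt entry point.  AdvISR() services the chip and calls
 * adw_wide_isr_callback() for every completed request; afterwards we try
 * to restart the first entry of the software queue, if any.
 */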
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	AdvISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adw_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adw_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

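/*
 * A command failed to complete in time.  The first expiry aborts the
 * offending CCB; if the abort itself times out, the SCSI bus is reset
 * and the CCB is requeued.
 */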
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		AdvResetSCSIBus(sc);
		ccb->timeout = ADW_ABORT_TIMEOUT;
		adw_queue_ccb(sc, ccb);
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ADW_ABORT_CCB(sc, ccb);
		xs->error = XS_TIMEOUT;
		ccb->timeout = ADW_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adw_queue_ccb(sc, ccb);
	}

	splx(s);
}

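/*
 * Watchdog timer: the controller was busy when we last tried to start
 * the waiting CCBs, so clear the flag and try again.
 */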
static void
adw_watchdog(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adw_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                 NARROW and WIDE boards Interrupt callbacks                 */
/******************************************************************************/


/*
 * adw_wide_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
static void
adw_wide_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb = (ADW_CCB *) scsiq->ccb_ptr;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_sense_data *s1, *s2;
	/* int underrun = ASC_FALSE; */


	untimeout(adw_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 *	ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 *	    scsiqp->data_cnt);
	 *	underrun = ASC_TRUE;
	 * }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/*
		 * If there was an underrun without any other error,
		 * set DID_ERROR to indicate the underrun error.
		 *
		 * Note: There is no way yet to indicate the number
		 * of underrun bytes.
		 */
		/*
		 * if (xs->error == XS_NOERROR && underrun == ASC_TRUE) {
		 *	scp->result = HOST_BYTE(DID_UNDERRUN);
		 * }
		 */
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiq->scsi_status == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adw_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}