/*	$NetBSD: adw.c,v 1.21 2000/05/14 18:25:49 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define Debugger()	panic("should call debugger here (adw.c)")
#endif /* ! DDB */

/******************************************************************************/


static int adw_alloc_controls __P((ADW_SOFTC *));
static int adw_alloc_carriers __P((ADW_SOFTC *));
static int adw_create_carriers __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *, int));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *, int));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
static void adwminphys __P((struct buf *));
static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));

static void adw_print_info __P((ADW_SOFTC *, int));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_reset_bus __P((ADW_SOFTC *, struct scsipi_xfer *));


/******************************************************************************/


/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
        NULL,                   /* Use default error handler */
        NULL,                   /* have a queue, served by this */
        NULL,                   /* have no async handler */
        NULL,                   /* Use default 'done' routine */
};


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/


static int
adw_alloc_controls(sc)
        ADW_SOFTC *sc;
{
        bus_dma_segment_t seg;
        int error, rseg;

        /*
         * Allocate the control structure.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
            NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate control structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(struct adw_control), (caddr_t *) & sc->sc_control,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map control structures, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * Create and load the DMA map used for the control blocks.
         */
        if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
            1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
            &sc->sc_dmamap_control)) != 0) {
                printf("%s: unable to create control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
            sc->sc_control, sizeof(struct adw_control), NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        return (0);
}


static int
adw_alloc_carriers(sc)
        ADW_SOFTC *sc;
{
        bus_dma_segment_t seg;
        int error, rseg;

        /*
         * Allocate the carrier structures.
         */
        sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
            M_DEVBUF, M_WAITOK);
        if (!sc->sc_control->carriers) {
                printf("%s: malloc() failed in allocating carrier structures\n",
                    sc->sc_dev.dv_xname);
                return (ENOMEM);
        }

        if ((error = bus_dmamem_alloc(sc->sc_dmat,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
            0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate carrier structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
            (caddr_t *) &sc->sc_control->carriers,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map carrier structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * Create and load the DMA map used for the carriers.
         */
        if ((error = bus_dmamap_create(sc->sc_dmat,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
            &sc->sc_dmamap_carrier)) != 0) {
                printf("%s: unable to create carriers DMA map,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamap_load(sc->sc_dmat,
            sc->sc_dmamap_carrier, sc->sc_control->carriers,
            sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load carriers DMA map,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }

        return (0);
}


/*
 * Create a set of Carriers and add them to the free list.  Called once
 * by adw_attach().  We return the number of Carriers successfully created.
 */
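/*
 * The carriers end up in a singly linked free list threaded through bus
 * addresses rather than kernel pointers: each carr->next_ba holds the bus
 * address of the carrier created before it, and sc->carr_freelist points at
 * the last one created.  In effect the list looks like
 *
 *	carr_freelist -> carr[N-1] -> carr[N-2] -> ... -> carr[0] -> 0
 *
 * where every link is the value returned by ADW_CARRIER_BADDR(); bus
 * addresses are used presumably because the carriers are also handed to the
 * adapter by AdvExeScsiQueue().
 */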
static int
adw_create_carriers(sc)
        ADW_SOFTC *sc;
{
        ADW_CARRIER *carr;
        u_int32_t carr_next = 0;
        int i;

        for (i = 0; i < ADW_MAX_CARRIER; i++) {
                carr = (ADW_CARRIER *)(((u_int8_t *)sc->sc_control->carriers) +
                    (sizeof(ADW_CARRIER) * i));
                carr->carr_ba = ADW_CARRIER_BADDR(sc, carr);
                carr->carr_id = i;
                carr->next_ba = carr_next;
                carr_next = carr->carr_ba;
        }
        sc->carr_freelist = carr;
        return (i);
}


/*
 * Given a physical address, find the Carrier that it corresponds to.
 */
inline ADW_CARRIER *
adw_carrier_phys_kv(sc, carr_phys)
        ADW_SOFTC *sc;
        u_int32_t carr_phys;
{
        return (ADW_CARRIER_VADDR(sc, carr_phys));
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
        ADW_SOFTC *sc;
        ADW_CCB *ccbstore;
        int count;
{
        ADW_CCB *ccb;
        int i, error;

        for (i = 0; i < count; i++) {
                ccb = &ccbstore[i];
                if ((error = adw_init_ccb(sc, ccb)) != 0) {
                        printf("%s: unable to initialize ccb, error = %d\n",
                            sc->sc_dev.dv_xname, error);
                        return (i);
                }
                TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
        }

        return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
        ADW_SOFTC *sc;
        ADW_CCB *ccb;
{
        int s;

        s = splbio();

        adw_reset_ccb(ccb);
        TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

        /*
         * If there were none, wake anybody waiting for one to come free,
         * starting with queued entries.
         */
        if (ccb->chain.tqe_next == 0)
                wakeup(&sc->sc_free_ccb);

        splx(s);
}


static void
adw_reset_ccb(ccb)
        ADW_CCB *ccb;
{

        ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
        ADW_SOFTC *sc;
        ADW_CCB *ccb;
{
        int hashnum, error;

        /*
         * Create the DMA map for this CCB.
         */
        error = bus_dmamap_create(sc->sc_dmat,
            (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
            ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
            0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
        if (error) {
                printf("%s: unable to create CCB DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * put in the phystokv hash table
         * Never gets taken out.
         */
        ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADW_CCB_OFF(ccb);
        hashnum = CCB_HASH(ccb->hashkey);
        ccb->nexthash = sc->sc_ccbhash[hashnum];
        sc->sc_ccbhash[hashnum] = ccb;
        adw_reset_ccb(ccb);
        return (0);
}


/*
 * Get a free ccb.
 *
 * If there are none, see if we can sleep waiting for one to come free.
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
        ADW_SOFTC *sc;
        int flags;
{
        ADW_CCB *ccb = 0;
        int s;

        s = splbio();

        /*
         * If none are free, sleep waiting for one to come free,
         * but only if the caller allows us to sleep.
         */
        for (;;) {
                ccb = sc->sc_free_ccb.tqh_first;
                if (ccb) {
                        TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
                        break;
                }
                if ((flags & XS_CTL_NOSLEEP) != 0)
                        goto out;

                tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
        }

        ccb->flags |= CCB_ALLOC;

out:
        splx(s);
        return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
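/*
 * The lookup walks the short collision chain built by adw_init_ccb():
 * CCB_HASH(ccb_phys) selects a bucket in sc_ccbhash[], and the CCBs in that
 * bucket are chained through ccb->nexthash.  Roughly:
 *
 *	for (ccb = sc->sc_ccbhash[CCB_HASH(phys)]; ccb != NULL;
 *	    ccb = ccb->nexthash)
 *		if (ccb->hashkey == phys)
 *			return (ccb);
 *
 * which is the same loop written below with a while.
 */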
ADW_CCB *
adw_ccb_phys_kv(sc, ccb_phys)
        ADW_SOFTC *sc;
        u_int32_t ccb_phys;
{
        int hashnum = CCB_HASH(ccb_phys);
        ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

        while (ccb) {
                if (ccb->hashkey == ccb_phys)
                        break;
                ccb = ccb->nexthash;
        }
        return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
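/*
 * CCBs flow through two lists here: sc_waiting_ccb holds requests not yet
 * accepted by the microcode, sc_pending_ccb holds those handed to
 * AdvExeScsiQueue() and still outstanding.  A new CCB is appended to the
 * waiting list (unless this is a retry, in which case it is already there)
 * and then the waiting list is drained until AdvExeScsiQueue() reports
 * ADW_BUSY or an error.  For non-polled transfers a per-xfer callout is
 * armed so adw_timeout() fires if the command hangs.
 */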
static int
adw_queue_ccb(sc, ccb, retry)
        ADW_SOFTC *sc;
        ADW_CCB *ccb;
        int retry;
{
        int errcode = ADW_SUCCESS;

        if (!retry) {
                TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
        }

        while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

                errcode = AdvExeScsiQueue(sc, &ccb->scsiq);
                switch (errcode) {
                case ADW_SUCCESS:
                        break;

                case ADW_BUSY:
                        printf("ADW_BUSY\n");
                        return (ADW_BUSY);

                case ADW_ERROR:
                        printf("ADW_ERROR\n");
                        TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
                        return (ADW_ERROR);
                }

                TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
                TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

                if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
                        callout_reset(&ccb->xs->xs_callout,
                            (ccb->timeout * hz) / 1000, adw_timeout, ccb);
        }

        return (errcode);
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


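/*
 * adw_init() checks the chip signature, resets the chip and loads the
 * configuration from the on-board EEPROM with the routine matching the chip
 * type (ASC3550 / ASC38C0800 / ASC38C1600).  Only EEPROM warnings (bad
 * checksum, bad termination setting) are reported here; the microcode itself
 * is downloaded later, by the AdvInit*Driver() call in adw_attach().
 */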
int
adw_init(sc)
        ADW_SOFTC *sc;
{
        u_int16_t warn_code;


        sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
            ADW_LIB_VERSION_MINOR;
        sc->cfg.chip_version =
            ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

        /*
         * Reset the chip to start and allow register writes.
         */
        if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
                panic("adw_init: adw_find_signature failed");
        } else {
                AdvResetChip(sc->sc_iot, sc->sc_ioh);

                switch (sc->chip_type) {
                case ADV_CHIP_ASC3550:
                        warn_code = AdvInitFrom3550EEP(sc);
                        break;

                case ADV_CHIP_ASC38C0800:
                        warn_code = AdvInitFrom38C0800EEP(sc);
                        break;

                case ADV_CHIP_ASC38C1600:
                        warn_code = AdvInitFrom38C1600EEP(sc);
                        break;

                default:
                        return -1;
                }

                if (warn_code & ASC_WARN_EEPROM_CHKSUM)
                        printf("%s: Bad checksum found. "
                            "Setting default values\n",
                            sc->sc_dev.dv_xname);
                if (warn_code & ASC_WARN_EEPROM_TERMINATION)
                        printf("%s: Bad bus termination setting. "
                            "Using automatic termination.\n",
                            sc->sc_dev.dv_xname);
        }

        sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
        sc->async_callback = (ADW_CALLBACK) adw_async_callback;

        return 0;
}


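/*
 * adw_attach() builds the DMA-able control structure (CCBs and carriers),
 * initializes the adapter through the chip-specific AdvInit*Driver() routine,
 * and finally registers the SCSI bus with the scsipi layer via config_found().
 */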
void
adw_attach(sc)
        ADW_SOFTC *sc;
{
        int i, error;


        TAILQ_INIT(&sc->sc_free_ccb);
        TAILQ_INIT(&sc->sc_waiting_ccb);
        TAILQ_INIT(&sc->sc_pending_ccb);
        TAILQ_INIT(&sc->sc_queue);


        /*
         * Allocate the Control Blocks.
         */
        error = adw_alloc_controls(sc);
        if (error)
                return; /* (error) */

        bzero(sc->sc_control, sizeof(struct adw_control));

        /*
         * Create and initialize the Control Blocks.
         */
        i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
        if (i == 0) {
                printf("%s: unable to create Control Blocks\n",
                    sc->sc_dev.dv_xname);
                return; /* (ENOMEM) */
        } else if (i != ADW_MAX_CCB) {
                printf("%s: WARNING: only %d of %d Control Blocks"
                    " created\n",
                    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
        }

        /*
         * Create and initialize the Carriers.
         */
        error = adw_alloc_carriers(sc);
        if (error)
                return; /* (error) */

        bzero(sc->sc_control->carriers, sizeof(ADW_CARRIER) * ADW_MAX_CARRIER);

        i = adw_create_carriers(sc);
        if (i == 0) {
                printf("%s: unable to create Carriers\n",
                    sc->sc_dev.dv_xname);
                return; /* (ENOMEM) */
        } else if (i != ADW_MAX_CARRIER) {
                printf("%s: WARNING: only %d of %d Carriers created\n",
                    sc->sc_dev.dv_xname, i, ADW_MAX_CARRIER);
        }

        /*
         * Zero the freeze_device status.
         */
        bzero(sc->sc_freeze_dev, sizeof(sc->sc_freeze_dev));

        /*
         * Initialize the adapter
         */
        switch (sc->chip_type) {
        case ADV_CHIP_ASC3550:
                error = AdvInitAsc3550Driver(sc);
                break;

        case ADV_CHIP_ASC38C0800:
                error = AdvInitAsc38C0800Driver(sc);
                break;

        case ADV_CHIP_ASC38C1600:
                error = AdvInitAsc38C1600Driver(sc);
                break;

        default:
                return;
        }

        switch (error) {
        case ASC_IERR_BIST_PRE_TEST:
                panic("%s: BIST pre-test error",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_BIST_RAM_TEST:
                panic("%s: BIST RAM test error",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_MCODE_CHKSUM:
                panic("%s: Microcode checksum error",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_ILLEGAL_CONNECTION:
                panic("%s: All three connectors are in use",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_REVERSED_CABLE:
                panic("%s: Cable is reversed",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_HVD_DEVICE:
                panic("%s: HVD attached to LVD connector",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_SINGLE_END_DEVICE:
                panic("%s: single-ended device is attached to"
                    " one of the connectors",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_IERR_NO_CARRIER:
                panic("%s: no carrier",
                    sc->sc_dev.dv_xname);
                break;

        case ASC_WARN_BUSRESET_ERROR:
                printf("%s: WARNING: Bus Reset Error\n",
                    sc->sc_dev.dv_xname);
                break;
        }

        /*
         * Fill in the adapter.
         */
        sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
        sc->sc_adapter.scsipi_minphys = adwminphys;

        /*
         * fill in the prototype scsipi_link.
         */
        sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
        sc->sc_link.adapter_softc = sc;
        sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
        sc->sc_link.adapter = &sc->sc_adapter;
        sc->sc_link.device = &adw_dev;
        sc->sc_link.openings = 4;
        sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
        sc->sc_link.scsipi_scsi.max_lun = 7;
        sc->sc_link.type = BUS_SCSI;


        config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


static void
adwminphys(bp)
        struct buf *bp;
{

        if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
                bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
        minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
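/*
 * Ordering matters here: if the software queue (sc_queue) is not empty, a new
 * request may not overtake the ones already queued.  A polled request
 * (XS_CTL_POLL) cannot be deferred, so in that case we have to give up with
 * TRY_AGAIN_LATER; otherwise the request is swapped with the head of the
 * queue and the oldest request is issued first.
 */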
static int
adw_scsi_cmd(xs)
        struct scsipi_xfer *xs;
{
        struct scsipi_link *sc_link = xs->sc_link;
        ADW_SOFTC *sc = sc_link->adapter_softc;
        ADW_CCB *ccb;
        int s, fromqueue = 1, dontqueue = 0, nowait = 0, retry = 0;
        int flags;

        s = splbio();           /* protect the queue */

        /*
         * If we're running the queue from adw_intr(), we've been
         * called with the first queue entry as our argument.
         */
        if (xs == TAILQ_FIRST(&sc->sc_queue)) {
                if (sc->sc_freeze_dev[xs->sc_link->scsipi_scsi.target]) {
                        splx(s);
                        return (TRY_AGAIN_LATER);
                }

                TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
                fromqueue = 1;
                nowait = 1;
        } else {
                if (sc->sc_freeze_dev[xs->sc_link->scsipi_scsi.target]) {
                        splx(s);
                        xs->error = XS_DRIVER_STUFFUP;
                        return (TRY_AGAIN_LATER);
                }

                /* Polled requests can't be queued for later. */
                dontqueue = xs->xs_control & XS_CTL_POLL;

                /*
                 * If there are jobs in the queue, run them first.
                 */
                if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
                        /*
                         * If we can't queue, we have to abort, since
                         * we have to preserve order.
                         */
                        if (dontqueue) {
                                splx(s);
                                xs->error = XS_DRIVER_STUFFUP;
                                return (TRY_AGAIN_LATER);
                        }
                        /*
                         * Swap with the first queue entry.
                         */
                        TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
                        xs = TAILQ_FIRST(&sc->sc_queue);
                        TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
                        fromqueue = 1;
                }
        }


        /*
         * get a ccb to use. If the transfer
         * is from a buf (possibly from interrupt time)
         * then we can't allow it to sleep
         */

        flags = xs->xs_control;
        if (nowait)
                flags |= XS_CTL_NOSLEEP;
        if ((ccb = adw_get_ccb(sc, flags)) == NULL) {
                /*
                 * If we can't queue, we lose.
                 */
                if (dontqueue) {
                        splx(s);
                        xs->error = XS_DRIVER_STUFFUP;
                        return (TRY_AGAIN_LATER);
                }
                /*
                 * Stuff ourselves into the queue, in front
                 * if we came off in the first place.
                 */
                if (fromqueue)
                        TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
                else
                        TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
                splx(s);
                return (SUCCESSFULLY_QUEUED);
        }
        splx(s);                /* done playing with the queue */

        ccb->xs = xs;
        ccb->timeout = xs->timeout;

        if (adw_build_req(xs, ccb, flags)) {
retryagain:
                s = splbio();
                retry = adw_queue_ccb(sc, ccb, retry);
                splx(s);

                switch (retry) {
                case ADW_BUSY:
                        goto retryagain;

                case ADW_ERROR:
                        xs->error = XS_DRIVER_STUFFUP;
                        return (COMPLETE);
                }

                /*
                 * Usually return SUCCESSFULLY_QUEUED
                 */
                if ((xs->xs_control & XS_CTL_POLL) == 0)
                        return (SUCCESSFULLY_QUEUED);

                /*
                 * If we can't use interrupts, poll on completion
                 */
                if (adw_poll(sc, xs, ccb->timeout)) {
                        adw_timeout(ccb);
                        if (adw_poll(sc, xs, ccb->timeout))
                                adw_timeout(ccb);
                }
        }
        return (COMPLETE);
}


/*
 * Build a request structure for the Wide Boards.
 */
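/*
 * The CDB is split over two fields of the ADW_SCSI_REQ_Q: the first 12 bytes
 * go into 'cdb', any remainder (up to the 16-byte maximum) into 'cdb16'.
 * The sense buffer lives inside the CCB itself, so its bus address can be
 * computed from the control-structure DMA map plus ADW_CCB_OFF(ccb) without
 * a separate mapping.
 */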
static int
adw_build_req(xs, ccb, flags)
        struct scsipi_xfer *xs;
        ADW_CCB *ccb;
        int flags;
{
        struct scsipi_link *sc_link = xs->sc_link;
        ADW_SOFTC *sc = sc_link->adapter_softc;
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADW_SCSI_REQ_Q *scsiqp;
        int error;

        scsiqp = &ccb->scsiq;
        bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

        /*
         * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
         * physical CCB structure.
         */
        scsiqp->ccb_ptr = ccb->hashkey;

        /*
         * Build the ADW_SCSI_REQ_Q request.
         */

        /*
         * Set CDB length and copy it to the request structure.
         * For wide boards a CDB length maximum of 16 bytes
         * is supported.
         */
        bcopy(xs->cmd, &scsiqp->cdb, ((scsiqp->cdb_len = xs->cmdlen) <= 12) ?
            xs->cmdlen : 12);
        if (xs->cmdlen > 12)
                bcopy(&(xs->cmd[12]), &scsiqp->cdb16, xs->cmdlen - 12);

        scsiqp->target_id = sc_link->scsipi_scsi.target;
        scsiqp->target_lun = sc_link->scsipi_scsi.lun;

        scsiqp->vsense_addr = &ccb->scsi_sense;
        scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
        scsiqp->sense_len = sizeof(struct scsipi_sense_data);

        /*
         * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
         */
        if (xs->datalen) {
                /*
                 * Map the DMA transfer.
                 */
#ifdef TFS
                if (xs->xs_control & SCSI_DATA_UIO) {
                        error = bus_dmamap_load_uio(dmat,
                            ccb->dmamap_xfer, (struct uio *) xs->data,
                            (flags & XS_CTL_NOSLEEP) ?
                            BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
                } else
#endif /* TFS */
                {
                        error = bus_dmamap_load(dmat,
                            ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
                            (flags & XS_CTL_NOSLEEP) ?
                            BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
                }

                if (error) {
                        if (error == EFBIG) {
                                printf("%s: adw_scsi_cmd, more than %d dma"
                                    " segments\n",
                                    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
                        } else {
                                printf("%s: adw_scsi_cmd, error %d loading"
                                    " dma map\n",
                                    sc->sc_dev.dv_xname, error);
                        }

                        xs->error = XS_DRIVER_STUFFUP;
                        adw_free_ccb(sc, ccb);
                        return (0);
                }
                bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
                    ccb->dmamap_xfer->dm_mapsize,
                    (xs->xs_control & XS_CTL_DATA_IN) ?
                    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

                /*
                 * Build scatter-gather list.
                 */
                scsiqp->data_cnt = xs->datalen;
                scsiqp->vdata_addr = xs->data;
                scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
                bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
                adw_build_sglist(ccb, scsiqp, ccb->sg_block);
        } else {
                /*
                 * No data xfer, use non S/G values.
                 */
                scsiqp->data_cnt = 0;
                scsiqp->vdata_addr = 0;
                scsiqp->data_addr = 0;
        }

        return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
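/*
 * The scatter-gather list is stored in the CCB as a chain of ADW_SG_BLOCKs,
 * each holding up to NO_OF_SG_PER_BLOCK entries.  Blocks are linked through
 * their bus addresses in sg_ptr; the last block has sg_ptr == NULL and
 * sg_cnt set to the number of entries actually used.  For example, if
 * NO_OF_SG_PER_BLOCK were 15, a 20-segment transfer would use two blocks:
 * a full one linked to a second block holding the remaining 5 entries.
 */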
static void
adw_build_sglist(ccb, scsiqp, sg_block)
        ADW_CCB *ccb;
        ADW_SCSI_REQ_Q *scsiqp;
        ADW_SG_BLOCK *sg_block;
{
        u_long sg_block_next_addr;      /* block and its next */
        u_int32_t sg_block_physical_addr;
        int i;                          /* how many SG entries */
        bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
        int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


        sg_block_next_addr = (u_long) sg_block; /* allow math operation */
        sg_block_physical_addr = ccb->hashkey +
            offsetof(struct adw_ccb, sg_block[0]);
        scsiqp->sg_real_addr = sg_block_physical_addr;

        /*
         * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
         * then split the request into multiple sg-list blocks.
         */

        do {
                for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
                        sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
                        sg_block->sg_list[i].sg_count = sg_list->ds_len;

                        if (--sg_elem_cnt == 0) {
                                /* last entry, get out */
                                sg_block->sg_cnt = i + 1;
                                sg_block->sg_ptr = NULL; /* next link = NULL */
                                return;
                        }
                        sg_list++;
                }
                sg_block_next_addr += sizeof(ADW_SG_BLOCK);
                sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

                sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
                sg_block->sg_ptr = sg_block_physical_addr;
                sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
        } while (1);
}


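/*
 * Hardware interrupt handler.  All the real work is done by AdvISR(), which
 * services the chip and calls back into adw_isr_callback() (and
 * adw_async_callback()) for every completed request; here we only kick the
 * software queue afterwards, since completing a request has freed at least
 * one CCB.
 */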
int
adw_intr(arg)
        void *arg;
{
        ADW_SOFTC *sc = arg;
        struct scsipi_xfer *xs;


        if (AdvISR(sc) != ADW_FALSE) {
                /*
                 * If there are queue entries in the software queue, try to
                 * run the first one.  We should be more or less guaranteed
                 * to succeed, since we just freed a CCB.
                 *
                 * NOTE: adw_scsi_cmd() relies on our calling it with
                 * the first entry in the queue.
                 */
                if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
                        (void) adw_scsi_cmd(xs);

                return (1);
        }

        return (0);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
        ADW_SOFTC *sc;
        struct scsipi_xfer *xs;
        int count;
{

        /* timeouts are in msec, so we loop in 1000 usec cycles */
        while (count) {
                adw_intr(sc);
                if (xs->xs_status & XS_STS_DONE)
                        return (0);
                delay(1000);    /* only happens in boot so ok */
                count--;
        }
        return (1);
}


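/*
 * Timeout handling escalates in three steps, driven by the CCB flags: the
 * first expiry marks the CCB CCB_ABORTING and re-arms the callout, the
 * second marks it CCB_ABORTED and re-arms again, and the third gives up and
 * resets the SCSI bus via adw_reset_bus().  The abort of a single CCB is
 * disabled below (see the #if 0 blocks).
 */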
static void
adw_timeout(arg)
        void *arg;
{
        ADW_CCB *ccb = arg;
        struct scsipi_xfer *xs = ccb->xs;
        struct scsipi_link *sc_link = xs->sc_link;
        ADW_SOFTC *sc = sc_link->adapter_softc;
        int s;

        scsi_print_addr(sc_link);
        printf("timed out");

        s = splbio();

        if (ccb->flags & CCB_ABORTED) {
                /*
                 * Abort Timed Out
                 *
                 * No more opportunities.  Let's try resetting the bus and
                 * reinitializing the host adapter.
                 */
                callout_stop(&xs->xs_callout);
                printf(" AGAIN. Resetting SCSI Bus\n");
                adw_reset_bus(sc, xs);
                splx(s);
                return;
        } else if (ccb->flags & CCB_ABORTING) {
                /*
                 * Abort the operation that has timed out.
                 *
                 * Second opportunity.
                 */
                printf("\n");
                xs->error = XS_TIMEOUT;
                ccb->flags |= CCB_ABORTED;
#if 0
                /*
                 * - XXX - 3.3a microcode is BROKEN!!!
                 *
                 * We cannot abort a CCB, so we can only hope the command
                 * gets completed before the next timeout, otherwise a
                 * Bus Reset will arrive inexorably.
                 */
                /*
                 * ADW_ABORT_CCB() makes the board generate an interrupt.
                 *
                 * - XXX - The above assertion MUST be verified (and this
                 *         code changed as well [callout_*()]) when
                 *         ADW_ABORT_CCB() works again.
                 */
                ADW_ABORT_CCB(sc, ccb);
#endif
                /*
                 * There is no multishot callout_reset(), so restart it by
                 * hand; if another timeout event occurs we will reset the
                 * bus.
                 */
                callout_reset(&xs->xs_callout,
                    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
        } else {
                /*
                 * Abort the operation that has timed out.
                 *
                 * First opportunity.
                 */
                printf("\n");
                xs->error = XS_TIMEOUT;
                ccb->flags |= CCB_ABORTING;
#if 0
                /*
                 * - XXX - 3.3a microcode is BROKEN!!!
                 *
                 * We cannot abort a CCB, so we can only hope the command
                 * gets completed before the next two timeouts, otherwise a
                 * Bus Reset will arrive inexorably.
                 */
                /*
                 * ADW_ABORT_CCB() makes the board generate an interrupt.
                 *
                 * - XXX - The above assertion MUST be verified (and this
                 *         code changed as well [callout_*()]) when
                 *         ADW_ABORT_CCB() works again.
                 */
                ADW_ABORT_CCB(sc, ccb);
#endif
                /*
                 * There is no multishot callout_reset(), so restart it by
                 * hand to give the command which timed out a second
                 * opportunity.
                 */
                callout_reset(&xs->xs_callout,
                    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
        }

        splx(s);
}


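/*
 * Reset the SCSI bus and requeue everything that was outstanding: every CCB
 * on the pending list has its callout stopped and is moved back to the head
 * of the waiting list, then adw_queue_ccb() is called with retry set so the
 * waiting list is resubmitted from the start.
 */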
static void
adw_reset_bus(sc, xs)
        ADW_SOFTC *sc;
        struct scsipi_xfer *xs;
{
        ADW_CCB *ccb;
        int s;

        s = splbio();
        AdvResetSCSIBus(sc);
        while ((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
            adw_pending_ccb)) != NULL) {
                callout_stop(&ccb->xs->xs_callout);
                TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
                TAILQ_INSERT_HEAD(&sc->sc_waiting_ccb, ccb, chain);
        }
        adw_queue_ccb(sc, TAILQ_FIRST(&sc->sc_waiting_ccb), 1);
        splx(s);
}


/******************************************************************************/
/*              Host Adapter and Peripherals Information Routines             */
/******************************************************************************/


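/*
 * Print the negotiated transfer parameters for a target, read back from the
 * microcode's LRAM tables: whether wide transfers are enabled and active,
 * and the synchronous rate (or asynchronous mode).  If the "done" bits are
 * not yet set, a renegotiation is still pending and is reported as such.
 */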
static void
adw_print_info(sc, tid)
        ADW_SOFTC *sc;
        int tid;
{
        bus_space_tag_t iot = sc->sc_iot;
        bus_space_handle_t ioh = sc->sc_ioh;
        u_int16_t wdtr_able, wdtr_done, wdtr;
        u_int16_t sdtr_able, sdtr_done, sdtr, period;
        static int wdtr_reneg = 0, sdtr_reneg = 0;

        if (tid == 0) {
                wdtr_reneg = sdtr_reneg = 0;
        }

        printf("%s: target %d ", sc->sc_dev.dv_xname, tid);

        ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_WDTR_ABLE, wdtr_able);
        if (wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
                ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_WDTR_DONE, wdtr_done);
                ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_DEVICE_HSHK_CFG_TABLE +
                    (2 * tid), wdtr);
                printf("using %d-bits wide, ", (wdtr & 0x8000) ? 16 : 8);
                if ((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
                        wdtr_reneg = 1;
        } else {
                printf("wide transfers disabled, ");
        }

        ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_SDTR_ABLE, sdtr_able);
        if (sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
                ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_SDTR_DONE, sdtr_done);
                ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_DEVICE_HSHK_CFG_TABLE +
                    (2 * tid), sdtr);
                sdtr &= ~0x8000;
                if ((sdtr & 0x1F) != 0) {
                        if ((sdtr & 0x1F00) == 0x1100) {
                                printf("80.0 MHz");
                        } else if ((sdtr & 0x1F00) == 0x1000) {
                                printf("40.0 MHz");
                        } else {
                                /* <= 20.0 MHz */
                                period = (((sdtr >> 8) * 25) + 50) / 4;
                                if (period == 0) {
                                        /* Should never happen. */
                                        printf("? MHz");
                                } else {
                                        printf("%d.%d MHz", 250 / period,
                                            ADW_TENTHS(250, period));
                                }
                        }
                        printf(" synchronous transfers\n");
                } else {
                        printf("asynchronous transfers\n");
                }
                if ((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
                        sdtr_reneg = 1;
        } else {
                printf("synchronous transfers disabled\n");
        }

        if (wdtr_reneg || sdtr_reneg) {
                printf("%s: target %d %s", sc->sc_dev.dv_xname, tid,
                    (wdtr_reneg) ? ((sdtr_reneg) ? "wide/sync" : "wide") :
                    ((sdtr_reneg) ? "sync" : ""));
                printf(" renegotiation pending before next command.\n");
        }
}


/******************************************************************************/
/*                      WIDE boards Interrupt callbacks                       */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 *	Interrupts are disabled by the caller (AdvISR()) and re-enabled
 *	before the caller returns.
 */
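/*
 * The completed ADW_SCSI_REQ_Q is translated back to its CCB through the
 * ccb_ptr/hashkey set up in adw_build_req(), the data map is synced and
 * unloaded, and done_status/host_status/scsi_status are mapped onto scsipi
 * error codes before scsipi_done() is called.
 */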
static void
adw_isr_callback(sc, scsiq)
        ADW_SOFTC *sc;
        ADW_SCSI_REQ_Q *scsiq;
{
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADW_CCB *ccb;
        struct scsipi_xfer *xs;
        struct scsipi_sense_data *s1, *s2;


        ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

        callout_stop(&ccb->xs->xs_callout);

        xs = ccb->xs;

        /*
         * If we were a data transfer, unload the map that described
         * the data buffer.
         */
        if (xs->datalen) {
                bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
                    ccb->dmamap_xfer->dm_mapsize,
                    (xs->xs_control & XS_CTL_DATA_IN) ?
                    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(dmat, ccb->dmamap_xfer);
        }

        if ((ccb->flags & CCB_ALLOC) == 0) {
                printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
                Debugger();
                return;
        }

        /*
         * 'done_status' contains the command's ending status.
         * 'host_status' contains the host adapter status.
         * 'scsi_status' contains the scsi peripheral status.
         */
        if ((scsiq->host_status == QHSTA_NO_ERROR) &&
            ((scsiq->done_status == QD_NO_ERROR) ||
            (scsiq->done_status == QD_WITH_ERROR))) {
                switch (scsiq->scsi_status) {
                case SCSI_STATUS_GOOD:
                        if ((scsiq->cdb[0] == INQUIRY) &&
                            (scsiq->target_lun == 0)) {
                                adw_print_info(sc, scsiq->target_id);
                        }
                        xs->error = XS_NOERROR;
                        xs->resid = scsiq->data_cnt;
                        sc->sc_freeze_dev[scsiq->target_id] = 0;
                        break;

                case SCSI_STATUS_CHECK_CONDITION:
                case SCSI_STATUS_CMD_TERMINATED:
                        s1 = &ccb->scsi_sense;
                        s2 = &xs->sense.scsi_sense;
                        *s2 = *s1;
                        xs->error = XS_SENSE;
                        sc->sc_freeze_dev[scsiq->target_id] = 1;
                        break;

                default:
                        xs->error = XS_BUSY;
                        sc->sc_freeze_dev[scsiq->target_id] = 1;
                        break;
                }
        } else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
                xs->error = XS_DRIVER_STUFFUP;
        } else {
                switch (scsiq->host_status) {
                case QHSTA_M_SEL_TIMEOUT:
                        xs->error = XS_SELTIMEOUT;
                        break;

                case QHSTA_M_SXFR_OFF_UFLW:
                case QHSTA_M_SXFR_OFF_OFLW:
                case QHSTA_M_DATA_OVER_RUN:
                        printf("%s: Overrun/Overflow/Underflow condition\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SXFR_DESELECTED:
                case QHSTA_M_UNEXPECTED_BUS_FREE:
                        printf("%s: Unexpected BUS free\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SCSI_BUS_RESET:
                case QHSTA_M_SCSI_BUS_RESET_UNSOL:
                        printf("%s: BUS Reset\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_BUS_DEVICE_RESET:
                        printf("%s: Device Reset\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_QUEUE_ABORTED:
                        printf("%s: Queue Aborted\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SXFR_SDMA_ERR:
                case QHSTA_M_SXFR_SXFR_PERR:
                case QHSTA_M_RDMA_PERR:
                        /*
                         * DMA Error.  This should *NEVER* happen!
                         *
                         * Let's try resetting the bus and reinitializing
                         * the host adapter.
                         */
                        printf("%s: DMA Error. Resetting bus\n",
                            sc->sc_dev.dv_xname);
                        adw_reset_bus(sc, xs);
                        xs->error = XS_BUSY;
                        break;

                case QHSTA_M_WTM_TIMEOUT:
                case QHSTA_M_SXFR_WD_TMO:
                        /* The SCSI bus hung in a phase */
                        printf("%s: Watch Dog timer expired. Resetting bus\n",
                            sc->sc_dev.dv_xname);
                        adw_reset_bus(sc, xs);
                        xs->error = XS_BUSY;
                        break;

                case QHSTA_M_SXFR_XFR_PH_ERR:
                        printf("%s: Transfer Error\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_BAD_CMPL_STATUS_IN:
                        /* No command complete after a status message */
                        printf("%s: Bad Completion Status\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_AUTO_REQ_SENSE_FAIL:
                        printf("%s: Auto Sense Failed\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_INVALID_DEVICE:
                        printf("%s: Invalid Device\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_NO_AUTO_REQ_SENSE:
                        /*
                         * User didn't request sense, but we got a
                         * check condition.
                         */
                        printf("%s: Unexpected Check Condition\n",
                            sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                case QHSTA_M_SXFR_UNKNOWN_ERROR:
                        printf("%s: Unknown Error\n", sc->sc_dev.dv_xname);
                        xs->error = XS_DRIVER_STUFFUP;
                        break;

                default:
                        panic("%s: Unhandled Host Status Error %x",
                            sc->sc_dev.dv_xname, scsiq->host_status);
                }
        }

        TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
        adw_free_ccb(sc, ccb);
        xs->xs_status |= XS_STS_DONE;
        scsipi_done(xs);
}


/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(sc, code)
        ADW_SOFTC *sc;
        u_int8_t code;
{
        switch (code) {
        case ADV_ASYNC_SCSI_BUS_RESET_DET:
                /* The firmware detected a SCSI Bus reset. */
                printf("%s: SCSI Bus reset detected\n", sc->sc_dev.dv_xname);
                break;

        case ADV_ASYNC_RDMA_FAILURE:
                /*
                 * Handle RDMA failure by resetting the SCSI Bus and
                 * possibly the chip if it is unresponsive.
                 */
                printf("%s: RDMA failure. Resetting the SCSI Bus and"
                    " the adapter\n", sc->sc_dev.dv_xname);
                AdvResetSCSIBus(sc);
                break;

        case ADV_HOST_SCSI_BUS_RESET:
                /* Host generated SCSI bus reset occurred. */
                printf("%s: Host generated SCSI bus reset occurred\n",
                    sc->sc_dev.dv_xname);
                break;

        case ADV_ASYNC_CARRIER_READY_FAILURE:
                /* Carrier Ready failure. */
                printf("%s: Carrier Ready failure!\n", sc->sc_dev.dv_xname);
                break;

        default:
                break;
        }
}
