/*	$NetBSD: adw.c,v 1.19 2000/05/08 17:21:33 dante Exp $	*/
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/kernel.h>
45 #include <sys/errno.h>
46 #include <sys/ioctl.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <sys/buf.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52
53 #include <machine/bus.h>
54 #include <machine/intr.h>
55
56 #include <vm/vm.h>
57 #include <vm/vm_param.h>
58 #include <vm/pmap.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsiconf.h>
63
64 #include <dev/ic/adwlib.h>
65 #include <dev/ic/adw.h>
66
67 #ifndef DDB
68 #define Debugger() panic("should call debugger here (adw.c)")
69 #endif /* ! DDB */
70
71 /******************************************************************************/
72
73
74 static int adw_alloc_controls __P((ADW_SOFTC *));
75 static int adw_alloc_carriers __P((ADW_SOFTC *));
76 static int adw_create_carriers __P((ADW_SOFTC *));
77 static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
78 static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
79 static void adw_reset_ccb __P((ADW_CCB *));
80 static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
81 static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
82 static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *, int));
83
84 static int adw_scsi_cmd __P((struct scsipi_xfer *));
85 static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *, int));
86 static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
87 static void adwminphys __P((struct buf *));
88 static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
89 static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));
90
91 static void adw_print_info __P((ADW_SOFTC *, int));
92
93 static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
94 static void adw_timeout __P((void *));
95
96
97 /******************************************************************************/
98
99
/*
 * Default scsipi_device for devices attached to this adapter.  All four
 * hooks are NULL, so the generic scsipi midlayer handling is used for
 * error recovery, start, async events and command completion.
 */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
108
109
110 /******************************************************************************/
111 /* Control Blocks routines */
112 /******************************************************************************/
113
114
115 static int
116 adw_alloc_controls(sc)
117 ADW_SOFTC *sc;
118 {
119 bus_dma_segment_t seg;
120 int error, rseg;
121
122 /*
123 * Allocate the control structure.
124 */
125 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
126 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
127 printf("%s: unable to allocate control structures,"
128 " error = %d\n", sc->sc_dev.dv_xname, error);
129 return (error);
130 }
131 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
132 sizeof(struct adw_control), (caddr_t *) & sc->sc_control,
133 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
134 printf("%s: unable to map control structures, error = %d\n",
135 sc->sc_dev.dv_xname, error);
136 return (error);
137 }
138
139 /*
140 * Create and load the DMA map used for the control blocks.
141 */
142 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
143 1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
144 &sc->sc_dmamap_control)) != 0) {
145 printf("%s: unable to create control DMA map, error = %d\n",
146 sc->sc_dev.dv_xname, error);
147 return (error);
148 }
149 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
150 sc->sc_control, sizeof(struct adw_control), NULL,
151 BUS_DMA_NOWAIT)) != 0) {
152 printf("%s: unable to load control DMA map, error = %d\n",
153 sc->sc_dev.dv_xname, error);
154 return (error);
155 }
156
157 return (0);
158 }
159
160
161 static int
162 adw_alloc_carriers(sc)
163 ADW_SOFTC *sc;
164 {
165 bus_dma_segment_t seg;
166 int error, rseg;
167
168 /*
169 * Allocate the control structure.
170 */
171 sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
172 M_DEVBUF, M_WAITOK);
173 if(!sc->sc_control->carriers) {
174 printf("%s: malloc() failed in allocating carrier structures\n",
175 sc->sc_dev.dv_xname);
176 return (ENOMEM);
177 }
178
179 if ((error = bus_dmamem_alloc(sc->sc_dmat,
180 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
181 0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
182 printf("%s: unable to allocate carrier structures,"
183 " error = %d\n", sc->sc_dev.dv_xname, error);
184 return (error);
185 }
186 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
187 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
188 (caddr_t *) &sc->sc_control->carriers,
189 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
190 printf("%s: unable to map carrier structures,"
191 " error = %d\n", sc->sc_dev.dv_xname, error);
192 return (error);
193 }
194
195 /*
196 * Create and load the DMA map used for the control blocks.
197 */
198 if ((error = bus_dmamap_create(sc->sc_dmat,
199 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
200 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0,BUS_DMA_NOWAIT,
201 &sc->sc_dmamap_carrier)) != 0) {
202 printf("%s: unable to create carriers DMA map,"
203 " error = %d\n", sc->sc_dev.dv_xname, error);
204 return (error);
205 }
206 if ((error = bus_dmamap_load(sc->sc_dmat,
207 sc->sc_dmamap_carrier, sc->sc_control->carriers,
208 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
209 BUS_DMA_NOWAIT)) != 0) {
210 printf("%s: unable to load carriers DMA map,"
211 " error = %d\n", sc->sc_dev.dv_xname, error);
212 return (error);
213 }
214
215 return (0);
216 }
217
218
219 /*
220 * Create a set of Carriers and add them to the free list. Called once
221 * by adw_init(). We return the number of Carriers successfully created.
222 */
223 static int
224 adw_create_carriers(sc)
225 ADW_SOFTC *sc;
226 {
227 ADW_CARRIER *carr;
228 u_int32_t carr_next = NULL;
229 int i;
230
231 for(i=0; i < ADW_MAX_CARRIER; i++) {
232 carr = (ADW_CARRIER *)(((u_int8_t *)sc->sc_control->carriers) +
233 (sizeof(ADW_CARRIER) * i));
234 carr->carr_pa = ADW_CARRIER_BADDR(sc, carr);
235 carr->carr_id = i;
236 carr->next_vpa = carr_next;
237 carr_next = carr->carr_pa;
238 }
239 sc->carr_freelist = carr;
240 return (i);
241 }
242
243
/*
 * Given a carrier's physical (bus) address, find the kernel-virtual
 * Carrier that it corresponds to.  Simply delegates to the
 * ADW_CARRIER_VADDR() translation macro.
 */
inline ADW_CARRIER *
adw_carrier_phys_kv(sc, carr_phys)
	ADW_SOFTC *sc;
	u_int32_t carr_phys;
{
	return (ADW_CARRIER_VADDR(sc, carr_phys));
}
254
255
256 /*
257 * Create a set of ccbs and add them to the free list. Called once
258 * by adw_init(). We return the number of CCBs successfully created.
259 */
260 static int
261 adw_create_ccbs(sc, ccbstore, count)
262 ADW_SOFTC *sc;
263 ADW_CCB *ccbstore;
264 int count;
265 {
266 ADW_CCB *ccb;
267 int i, error;
268
269 for (i = 0; i < count; i++) {
270 ccb = &ccbstore[i];
271 if ((error = adw_init_ccb(sc, ccb)) != 0) {
272 printf("%s: unable to initialize ccb, error = %d\n",
273 sc->sc_dev.dv_xname, error);
274 return (i);
275 }
276 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
277 }
278
279 return (i);
280 }
281
282
283 /*
284 * A ccb is put onto the free list.
285 */
286 static void
287 adw_free_ccb(sc, ccb)
288 ADW_SOFTC *sc;
289 ADW_CCB *ccb;
290 {
291 int s;
292
293 s = splbio();
294
295 adw_reset_ccb(ccb);
296 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
297
298 /*
299 * If there were none, wake anybody waiting for one to come free,
300 * starting with queued entries.
301 */
302 if (ccb->chain.tqe_next == 0)
303 wakeup(&sc->sc_free_ccb);
304
305 splx(s);
306 }
307
308
/*
 * Return a CCB to its pristine state: clear all driver-private flags
 * (CCB_ALLOC, CCB_ABORTING, CCB_ABORTED).
 */
static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}
316
317
318 static int
319 adw_init_ccb(sc, ccb)
320 ADW_SOFTC *sc;
321 ADW_CCB *ccb;
322 {
323 int hashnum, error;
324
325 /*
326 * Create the DMA map for this CCB.
327 */
328 error = bus_dmamap_create(sc->sc_dmat,
329 (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
330 ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
331 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
332 if (error) {
333 printf("%s: unable to create CCB DMA map, error = %d\n",
334 sc->sc_dev.dv_xname, error);
335 return (error);
336 }
337
338 /*
339 * put in the phystokv hash table
340 * Never gets taken out.
341 */
342 ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
343 ADW_CCB_OFF(ccb);
344 hashnum = CCB_HASH(ccb->hashkey);
345 ccb->nexthash = sc->sc_ccbhash[hashnum];
346 sc->sc_ccbhash[hashnum] = ccb;
347 adw_reset_ccb(ccb);
348 return (0);
349 }
350
351
352 /*
353 * Get a free ccb
354 *
355 * If there are none, see if we can allocate a new one
356 */
357 static ADW_CCB *
358 adw_get_ccb(sc, flags)
359 ADW_SOFTC *sc;
360 int flags;
361 {
362 ADW_CCB *ccb = 0;
363 int s;
364
365 s = splbio();
366
367 /*
368 * If we can and have to, sleep waiting for one to come free
369 * but only if we can't allocate a new one.
370 */
371 for (;;) {
372 ccb = sc->sc_free_ccb.tqh_first;
373 if (ccb) {
374 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
375 break;
376 }
377 if ((flags & XS_CTL_NOSLEEP) != 0)
378 goto out;
379
380 tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
381 }
382
383 ccb->flags |= CCB_ALLOC;
384
385 out:
386 splx(s);
387 return (ccb);
388 }
389
390
391 /*
392 * Given a physical address, find the ccb that it corresponds to.
393 */
394 ADW_CCB *
395 adw_ccb_phys_kv(sc, ccb_phys)
396 ADW_SOFTC *sc;
397 u_int32_t ccb_phys;
398 {
399 int hashnum = CCB_HASH(ccb_phys);
400 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
401
402 while (ccb) {
403 if (ccb->hashkey == ccb_phys)
404 break;
405 ccb = ccb->nexthash;
406 }
407 return (ccb);
408 }
409
410
/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 *
 * Unless 'retry' is set, the CCB is first appended to the software
 * waiting queue; the whole waiting queue is then drained in order into
 * the microcode via AdvExeScsiQueue().  Returns ADW_SUCCESS when the
 * queue drained completely, or ADW_BUSY/ADW_ERROR for the first
 * request the microcode refused.
 */
static int
adw_queue_ccb(sc, ccb, retry)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
	int retry;
{
	int errcode = ADW_SUCCESS;

	if(!retry) {
		TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	}

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		errcode = AdvExeScsiQueue(sc, &ccb->scsiq);
		switch(errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			/*
			 * Microcode has no free carriers; the CCB stays
			 * on the waiting queue so the caller can retry.
			 */
			printf("ADW_BUSY\n");
			return(ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
			return(ADW_ERROR);
		}

		/* Accepted: move from the waiting to the pending queue. */
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		/* Arm the per-request watchdog unless the caller polls. */
		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	return(errcode);
}
453
454
455 /******************************************************************************/
456 /* SCSI layer interfacing routines */
457 /******************************************************************************/
458
459
/*
 * First-stage board initialization: verify the chip signature, reset
 * the chip, load configuration from the EEPROM according to the chip
 * type, and install the interrupt callbacks used by AdvISR().
 * Returns 0 on success, -1 on an unknown chip type; panics if the
 * chip signature cannot be found.
 */
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		/* EEPROM layout differs per chip generation. */
		switch(sc->chip_type) {
		case ADV_CHIP_ASC3550:
			warn_code = AdvInitFrom3550EEP(sc);
			break;

		case ADV_CHIP_ASC38C0800:
			warn_code = AdvInitFrom38C0800EEP(sc);
			break;

		case ADV_CHIP_ASC38C1600:
			warn_code = AdvInitFrom38C1600EEP(sc);
			break;

		default:
			return -1;
		}

		/* EEPROM warnings are non-fatal; report and continue. */
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting."
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);
	}

	/* Second-level handlers invoked from AdvISR(). */
	sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
	sc->async_callback = (ADW_CALLBACK) adw_async_callback;

	return 0;
}
512
513
/*
 * Second-stage attach: allocate and initialize the shared control
 * structure, CCBs and carriers, run the chip-specific driver
 * initialization, report fatal/warning conditions, and finally hook
 * the adapter into the scsipi midlayer via config_found().
 *
 * NOTE(review): allocation failures here only print a message and
 * return; the device is left half-attached rather than failing the
 * attach explicitly (see the "return; / * (error) * /" remnants).
 */
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create Control Blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d Control Blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control->carriers, sizeof(ADW_CARRIER) * ADW_MAX_CARRIER);

	i = adw_create_carriers(sc);
	if (i == 0) {
		printf("%s: unable to create Carriers\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CARRIER) {
		printf("%s: WARNING: only %d of %d Carriers created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CARRIER);
	}


	/*
	 * Initialize the adapter (loads the microcode for this chip type).
	 */
	switch(sc->chip_type) {
	case ADV_CHIP_ASC3550:
		error = AdvInitAsc3550Driver(sc);
		break;

	case ADV_CHIP_ASC38C0800:
		error = AdvInitAsc38C0800Driver(sc);
		break;

	case ADV_CHIP_ASC38C1600:
		error = AdvInitAsc38C1600Driver(sc);
		break;

	default:
		return;
	}

	/*
	 * Decode the initialization result.  Hardware/firmware faults
	 * are fatal (panic); only a bus-reset problem is a mere warning.
	 */
	switch (error) {
	case ASC_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		      " one of the connectors",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_NO_CARRIER:
		panic("%s: no carrier",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		      sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	/* Attach the SCSI bus and probe for devices. */
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}
660
661
662 static void
663 adwminphys(bp)
664 struct buf *bp;
665 {
666
667 if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
668 bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
669 minphys(bp);
670 }
671
672
/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 *
 * Entry point called by the scsipi midlayer (and re-entered from
 * adw_intr() with the head of sc_queue as argument).  Ordering is
 * preserved by funnelling requests through sc_queue whenever older
 * requests are still waiting.  Returns SUCCESSFULLY_QUEUED, COMPLETE,
 * or TRY_AGAIN_LATER.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	/*
	 * NOTE(review): fromqueue is initialized to 1 even though this
	 * request may not come from the queue.  It is only consulted when
	 * adw_get_ccb() fails; with an empty queue INSERT_HEAD and
	 * INSERT_TAIL coincide, so this appears harmless -- confirm
	 * before changing.
	 */
	int s, fromqueue = 1, dontqueue = 0, nowait = 0, retry = 0;
	int flags;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;	/* may be at interrupt time: never sleep */
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->xs_control & XS_CTL_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->xs_control;
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = adw_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/* adw_build_req() returns 0 only after failing the DMA load
	 * (it has already set xs->error and freed the ccb). */
	if (adw_build_req(xs, ccb, flags)) {
retryagain:
		s = splbio();
		retry = adw_queue_ccb(sc, ccb, retry);
		splx(s);

		switch(retry) {
		case ADW_BUSY:
			/* Microcode out of carriers: spin until accepted. */
			goto retryagain;

		case ADW_ERROR:
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/*
		 * Usually return SUCCESSFULLY QUEUED
		 */
		if ((xs->xs_control & XS_CTL_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}
792
793
/*
 * Build a request structure for the Wide Boards.
 *
 * Fills in ccb->scsiq (CDB, target/lun, sense buffer addresses) and,
 * for data transfers, loads the DMA map and builds the hardware S/G
 * list.  Returns 1 on success; returns 0 after a DMA-map load failure,
 * in which case xs->error has been set and the ccb already freed.
 */
static int
adw_build_req(xs, ccb, flags)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
	int flags;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported: the first 12 bytes go in 'cdb', any
	 * remainder in 'cdb16'.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		bcopy(&(xs->cmd[12]), &scsiqp->cdb16, xs->cmdlen - 12);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	/* Sense buffer: kernel-virtual and bus addresses of the
	 * scsi_sense area embedded in this CCB. */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
/*	scsiqp->sense_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, scsi_sense);
*/	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		/* Flush/prepare the buffer before the device touches it. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
903
904
905 /*
906 * Build scatter-gather list for Wide Boards.
907 */
908 static void
909 adw_build_sglist(ccb, scsiqp, sg_block)
910 ADW_CCB *ccb;
911 ADW_SCSI_REQ_Q *scsiqp;
912 ADW_SG_BLOCK *sg_block;
913 {
914 u_long sg_block_next_addr; /* block and its next */
915 u_int32_t sg_block_physical_addr;
916 int i; /* how many SG entries */
917 bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
918 int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;
919
920
921 sg_block_next_addr = (u_long) sg_block; /* allow math operation */
922 sg_block_physical_addr = ccb->hashkey +
923 offsetof(struct adw_ccb, sg_block[0]);
924 scsiqp->sg_real_addr = sg_block_physical_addr;
925
926 /*
927 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
928 * then split the request into multiple sg-list blocks.
929 */
930
931 do {
932 for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
933 sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
934 sg_block->sg_list[i].sg_count = sg_list->ds_len;
935
936 if (--sg_elem_cnt == 0) {
937 /* last entry, get out */
938 sg_block->sg_cnt = i + i;
939 sg_block->sg_ptr = NULL; /* next link = NULL */
940 return;
941 }
942 sg_list++;
943 }
944 sg_block_next_addr += sizeof(ADW_SG_BLOCK);
945 sg_block_physical_addr += sizeof(ADW_SG_BLOCK);
946
947 sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
948 sg_block->sg_ptr = sg_block_physical_addr;
949 sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
950 } while (1);
951 }
952
953
954 int
955 adw_intr(arg)
956 void *arg;
957 {
958 ADW_SOFTC *sc = arg;
959 struct scsipi_xfer *xs;
960
961
962 if(AdvISR(sc) != ADW_FALSE) {
963 /*
964 * If there are queue entries in the software queue, try to
965 * run the first one. We should be more or less guaranteed
966 * to succeed, since we just freed a CCB.
967 *
968 * NOTE: adw_scsi_cmd() relies on our calling it with
969 * the first entry in the queue.
970 */
971 if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
972 (void) adw_scsi_cmd(xs);
973
974 return (1);
975 }
976
977 return (0);
978 }
979
980
981 /*
982 * Poll a particular unit, looking for a particular xs
983 */
984 static int
985 adw_poll(sc, xs, count)
986 ADW_SOFTC *sc;
987 struct scsipi_xfer *xs;
988 int count;
989 {
990
991 /* timeouts are in msec, so we loop in 1000 usec cycles */
992 while (count) {
993 adw_intr(sc);
994 if (xs->xs_status & XS_STS_DONE)
995 return (0);
996 delay(1000); /* only happens in boot so ok */
997 count--;
998 }
999 return (1);
1000 }
1001
1002
/*
 * Per-request watchdog, fired from the callout armed in
 * adw_queue_ccb().  Escalates in three stages across successive
 * timeouts of the same CCB:
 *   1st timeout -> mark CCB_ABORTING and re-arm the callout;
 *   2nd timeout -> mark CCB_ABORTED and re-arm the callout;
 *   3rd timeout -> reset the SCSI bus and requeue every pending CCB.
 * The actual per-CCB abort (ADW_ABORT_CCB) is disabled because the
 * 3.3a microcode cannot abort a CCB -- see the #if 0 blocks.
 */
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then previous aborts failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORTED) {
	/*
	 * Abort Timed Out
	 *
	 * No more opportunities. Lets try resetting the bus!
	 */
		callout_stop(&xs->xs_callout);

		printf(" AGAIN. Resetting SCSI Bus\n");
		AdvResetSCSIBus(sc);

		/*
		 * Move every pending CCB back to the waiting queue
		 * (newest first) and resubmit the whole queue.
		 */
		while((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
				adw_pending_ccb)) != NULL) {
			callout_stop(&ccb->xs->xs_callout);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			TAILQ_INSERT_HEAD(&sc->sc_waiting_ccb, ccb, chain);
		}
		adw_queue_ccb(sc, TAILQ_FIRST(&sc->sc_waiting_ccb), 1);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
	/*
	 * Abort the operation that has timed out
	 *
	 * Second opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so the next time a timeout event will occour
		 * we will reset the bus.
		 */
		callout_stop(&xs->xs_callout);
		callout_reset(&xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	} else {
	/*
	 * Abort the operation that has timed out
	 *
	 * First opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next 2 timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so the next time a timeout event will occour
		 * we will reset the bus.
		 */
		callout_stop(&xs->xs_callout);
		callout_reset(&xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	splx(s);
}
1114
1115
1116 /******************************************************************************/
1117 /* Host Adapter and Peripherals Information Routines */
1118 /******************************************************************************/
1119
1120
1121 static void
1122 adw_print_info(sc, tid)
1123 ADW_SOFTC *sc;
1124 int tid;
1125 {
1126 bus_space_tag_t iot = sc->sc_iot;
1127 bus_space_handle_t ioh = sc->sc_ioh;
1128 u_int16_t wdtr_able, wdtr_done, wdtr;
1129 u_int16_t sdtr_able, sdtr_done, sdtr, period;
1130 int wdtr_reneg = 0, sdtr_reneg = 0;
1131
1132 printf("%s: target %d ", sc->sc_dev.dv_xname, tid);
1133
1134 ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_SDTR_ABLE, wdtr_able);
1135 if(wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
1136 ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_SDTR_DONE, wdtr_done);
1137 ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_DEVICE_HSHK_CFG_TABLE +
1138 (2 * tid), wdtr);
1139 printf("using %d-bits wide, ", (wdtr & 0x8000)? 16 : 8);
1140 if((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
1141 wdtr_reneg = 1;
1142 } else {
1143 printf("wide transfers disabled, ");
1144 }
1145
1146 ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_SDTR_ABLE, sdtr_able);
1147 if(sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
1148 ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_SDTR_DONE, sdtr_done);
1149 ADW_READ_WORD_LRAM(iot, ioh, ASC_MC_DEVICE_HSHK_CFG_TABLE +
1150 (2 * tid), sdtr);
1151 sdtr &= ~0x8000;
1152 if((sdtr & 0x1F) != 0) {
1153 if((sdtr & 0x1F00) == 0x1100){
1154 printf("80.0 MHz");
1155 } else if((sdtr & 0x1F00) == 0x1000){
1156 printf("40.0 MHz");
1157 } else {
1158 /* <= 20.0 MHz */
1159 period = (((sdtr >> 8) * 25) + 50)/4;
1160 if(period == 0) {
1161 /* Should never happen. */
1162 printf("? MHz");
1163 } else {
1164 printf("%d.%d MHz", 250/period,
1165 ADW_TENTHS(250, period));
1166 }
1167 }
1168 printf(" synchronous transfers\n");
1169 } else {
1170 printf("asynchronous transfers\n");
1171 }
1172 if((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
1173 sdtr_reneg = 1;
1174 } else {
1175 printf("synchronous transfers disabled\n");
1176 }
1177
1178 if(wdtr_reneg || sdtr_reneg) {
1179 printf("%s: target %d %s", sc->sc_dev.dv_xname, tid,
1180 (wdtr_reneg)? ((sdtr_reneg)? "wide/sync" : "wide") :
1181 ((sdtr_reneg)? "sync" : "") );
1182 printf(" renegotiation pending before next command.\n");
1183 }
1184 }
1185
1186
1187 /******************************************************************************/
1188 /* WIDE boards Interrupt callbacks */
1189 /******************************************************************************/
1190
1191
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 *	Interrupts are disabled by the caller (the AdvISR() function), and
 *	are re-enabled when the caller returns.
 */
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	/* Map the microcode's physical CCB pointer back to our CCB. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	/* The request has completed; cancel its watchdog callout. */
	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	/*
	 * Sanity check: a completion for a CCB that is not marked
	 * allocated indicates driver/firmware state corruption.
	 */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 * ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 * scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 * Translate the firmware's status triple (done/host/scsi)
	 * into a scsipi xs->error code.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			/*
			 * After a successful INQUIRY of LUN 0, report the
			 * negotiated transfer parameters for this target.
			 */
			if (scsiq->cdb[0] == INQUIRY &&
			    scsiq->target_lun == 0) {
				adw_print_info(sc, scsiq->target_id);
			}
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			switch(scsiq->scsi_status) {
			case SS_CHK_CONDITION:
			case SS_CMD_TERMINATED:
				/*
				 * Copy the sense data gathered into the
				 * CCB out to the scsipi transfer so upper
				 * layers can interpret it.
				 */
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SS_TARGET_BUSY:
			case SS_RSERV_CONFLICT:
			case SS_QUEUE_FULL:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_CONDITION_MET:
			case SS_INTERMID:
			case SS_INTERMID_COND_MET:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_GOOD:
				break;
			}
			break;

		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* Retire the CCB and hand the finished transfer back to scsipi. */
	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
	adw_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}
1314
1315
/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
1319 static void
1320 adw_async_callback(sc, code)
1321 ADW_SOFTC *sc;
1322 u_int8_t code;
1323 {
1324 switch (code) {
1325 case ADV_ASYNC_SCSI_BUS_RESET_DET:
1326 /*
1327 * The firmware detected a SCSI Bus reset.
1328 */
1329 printf("%s: SCSI Bus reset detected\n", sc->sc_dev.dv_xname);
1330 break;
1331
1332 case ADV_ASYNC_RDMA_FAILURE:
1333 /*
1334 * Handle RDMA failure by resetting the SCSI Bus and
1335 * possibly the chip if it is unresponsive.
1336 */
1337 AdvResetSCSIBus(sc);
1338 break;
1339
1340 case ADV_HOST_SCSI_BUS_RESET:
1341 /*
1342 * Host generated SCSI bus reset occurred.
1343 */
1344 printf("%s: Host generated SCSI bus reset occurred\n",
1345 sc->sc_dev.dv_xname);
1346 break;
1347
1348 case ADV_ASYNC_CARRIER_READY_FAILURE:
1349 /*
1350 * Carrier Ready failure.
1351 */
1352 printf("%s: Carrier Ready failure!\n", sc->sc_dev.dv_xname);
1353 break;
1354
1355 default:
1356 break;
1357 }
1358 }
1359