adw.c revision 1.14 1 /* $NetBSD: adw.c,v 1.14 2000/02/12 19:19:42 thorpej Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/errno.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/malloc.h>
48 #include <sys/buf.h>
49 #include <sys/proc.h>
50 #include <sys/user.h>
51
52 #include <machine/bus.h>
53 #include <machine/intr.h>
54
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62
63 #include <dev/ic/adwlib.h>
64 #include <dev/ic/adw.h>
65
66 #ifndef DDB
67 #define Debugger() panic("should call debugger here (adw.c)")
68 #endif /* ! DDB */
69
70 /******************************************************************************/
71
72
73 static int adw_alloc_controls __P((ADW_SOFTC *));
74 static int adw_alloc_carriers __P((ADW_SOFTC *));
75 static int adw_create_carriers __P((ADW_SOFTC *));
76 static int adw_init_carrier __P((ADW_SOFTC *, ADW_CARRIER *));
77 static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
78 static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
79 static void adw_reset_ccb __P((ADW_CCB *));
80 static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
81 static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
82 static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *, int));
83
84 static int adw_scsi_cmd __P((struct scsipi_xfer *));
85 static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *, int));
86 static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
87 static void adwminphys __P((struct buf *));
88 static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
89 static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));
90
91 static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
92 static void adw_timeout __P((void *));
93
94
95 /******************************************************************************/
96
97
/*
 * Default scsipi_device for our scsipi_link: all four hooks are NULL,
 * so the midlayer uses its default error handler and 'done' routine,
 * and no start/async callbacks are installed.
 */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
106
107
108 #define ADW_ABORT_TIMEOUT 10000 /* time to wait for abort (mSec) */
109 #define ADW_WATCH_TIMEOUT 10000 /* time to wait for watchdog (mSec) */
110
111
112 /******************************************************************************/
113 /* Control Blocks routines */
114 /******************************************************************************/
115
116
117 static int
118 adw_alloc_controls(sc)
119 ADW_SOFTC *sc;
120 {
121 bus_dma_segment_t seg;
122 int error, rseg;
123
124 /*
125 * Allocate the control structure.
126 */
127 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
128 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
129 printf("%s: unable to allocate control structures,"
130 " error = %d\n", sc->sc_dev.dv_xname, error);
131 return (error);
132 }
133 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
134 sizeof(struct adw_control), (caddr_t *) & sc->sc_control,
135 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
136 printf("%s: unable to map control structures, error = %d\n",
137 sc->sc_dev.dv_xname, error);
138 return (error);
139 }
140
141 /*
142 * Create and load the DMA map used for the control blocks.
143 */
144 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
145 1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
146 &sc->sc_dmamap_control)) != 0) {
147 printf("%s: unable to create control DMA map, error = %d\n",
148 sc->sc_dev.dv_xname, error);
149 return (error);
150 }
151 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
152 sc->sc_control, sizeof(struct adw_control), NULL,
153 BUS_DMA_NOWAIT)) != 0) {
154 printf("%s: unable to load control DMA map, error = %d\n",
155 sc->sc_dev.dv_xname, error);
156 return (error);
157 }
158
159 return (0);
160 }
161
162
163 static int
164 adw_alloc_carriers(sc)
165 ADW_SOFTC *sc;
166 {
167 bus_dma_segment_t seg;
168 int error, rseg;
169
170 /*
171 * Allocate the control structure.
172 */
173 sc->sc_control->carriers = malloc(ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
174 M_DEVBUF, M_WAITOK);
175 if(!sc->sc_control->carriers) {
176 printf("%s: malloc() failed in allocating carrier structures,"
177 " error = %d\n", sc->sc_dev.dv_xname, error);
178 return (error);
179 }
180
181 if ((error = bus_dmamem_alloc(sc->sc_dmat,
182 ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
183 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
184 printf("%s: unable to allocate carrier structures,"
185 " error = %d\n", sc->sc_dev.dv_xname, error);
186 return (error);
187 }
188 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
189 ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
190 (caddr_t *) &sc->sc_control->carriers,
191 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
192 printf("%s: unable to map carrier structures,"
193 " error = %d\n", sc->sc_dev.dv_xname, error);
194 return (error);
195 }
196
197 /*
198 * Create and load the DMA map used for the control blocks.
199 */
200 if ((error = bus_dmamap_create(sc->sc_dmat,
201 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 1,
202 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
203 &sc->sc_dmamap_carrier)) != 0) {
204 printf("%s: unable to create carriers DMA map,"
205 " error = %d\n", sc->sc_dev.dv_xname, error);
206 return (error);
207 }
208 if ((error = bus_dmamap_load(sc->sc_dmat,
209 sc->sc_dmamap_carrier, sc->sc_control->carriers,
210 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, NULL,
211 BUS_DMA_NOWAIT)) != 0) {
212 printf("%s: unable to load carriers DMA map,"
213 " error = %d\n", sc->sc_dev.dv_xname, error);
214 return (error);
215 }
216
217 error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE* ADW_MAX_CARRIER,
218 1, ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
219 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
220 &sc->sc_control->dmamap_xfer);
221 if (error) {
222 printf("%s: unable to create Carrier DMA map, error = %d\n",
223 sc->sc_dev.dv_xname, error);
224 return (error);
225 }
226
227 return (0);
228 }
229
230
231 /*
232 * Create a set of Carriers and add them to the free list. Called once
233 * by adw_init(). We return the number of Carriers successfully created.
234 */
235 static int
236 adw_create_carriers(sc)
237 ADW_SOFTC *sc;
238 {
239 ADW_CARRIER *carr;
240 u_int32_t carr_next = NULL;
241 int i, error;
242
243 for(i=0; i < ADW_MAX_CARRIER; i++) {
244 carr = (ADW_CARRIER *)(((u_int8_t *)sc->sc_control->carriers) +
245 (ADW_CARRIER_SIZE * i));
246 if ((error = adw_init_carrier(sc, carr)) != 0) {
247 printf("%s: unable to initialize carrier, error = %d\n",
248 sc->sc_dev.dv_xname, error);
249 return (i);
250 }
251 carr->next_vpa = carr_next;
252 carr_next = carr->carr_pa;
253 carr->id = i;
254 }
255 sc->carr_freelist = carr;
256 return (i);
257 }
258
259
260 static int
261 adw_init_carrier(sc, carr)
262 ADW_SOFTC *sc;
263 ADW_CARRIER *carr;
264 {
265 u_int32_t carr_pa;
266 int /*error, */hashnum;
267
268 /*
269 * Create the DMA map for all of the Carriers.
270 */
271 /* error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE,
272 1, ADW_CARRIER_SIZE,
273 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
274 &carr->dmamap_xfer);
275 if (error) {
276 printf("%s: unable to create Carrier DMA map, error = %d\n",
277 sc->sc_dev.dv_xname, error);
278 return (error);
279 }
280 */
281 /*
282 * put in the phystokv hash table
283 * Never gets taken out.
284 */
285 carr_pa = ADW_CARRIER_ADDR(sc, carr);
286 carr->carr_pa = carr_pa;
287 hashnum = CARRIER_HASH(carr_pa);
288 carr->nexthash = sc->sc_carrhash[hashnum];
289 sc->sc_carrhash[hashnum] = carr;
290
291 return(0);
292 }
293
294
295 /*
296 * Given a physical address, find the Carrier that it corresponds to.
297 */
298 ADW_CARRIER *
299 adw_carrier_phys_kv(sc, carr_phys)
300 ADW_SOFTC *sc;
301 u_int32_t carr_phys;
302 {
303 int hashnum = CARRIER_HASH(carr_phys);
304 ADW_CARRIER *carr = sc->sc_carrhash[hashnum];
305
306 while (carr) {
307 if (carr->carr_pa == carr_phys)
308 break;
309 carr = carr->nexthash;
310 }
311 return (carr);
312 }
313
314
315 /*
316 * Create a set of ccbs and add them to the free list. Called once
317 * by adw_init(). We return the number of CCBs successfully created.
318 */
319 static int
320 adw_create_ccbs(sc, ccbstore, count)
321 ADW_SOFTC *sc;
322 ADW_CCB *ccbstore;
323 int count;
324 {
325 ADW_CCB *ccb;
326 int i, error;
327
328 for (i = 0; i < count; i++) {
329 ccb = &ccbstore[i];
330 if ((error = adw_init_ccb(sc, ccb)) != 0) {
331 printf("%s: unable to initialize ccb, error = %d\n",
332 sc->sc_dev.dv_xname, error);
333 return (i);
334 }
335 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
336 }
337
338 return (i);
339 }
340
341
342 /*
343 * A ccb is put onto the free list.
344 */
345 static void
346 adw_free_ccb(sc, ccb)
347 ADW_SOFTC *sc;
348 ADW_CCB *ccb;
349 {
350 int s;
351
352 s = splbio();
353
354 adw_reset_ccb(ccb);
355 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
356
357 /*
358 * If there were none, wake anybody waiting for one to come free,
359 * starting with queued entries.
360 */
361 if (ccb->chain.tqe_next == 0)
362 wakeup(&sc->sc_free_ccb);
363
364 splx(s);
365 }
366
367
/*
 * Clear the per-command state flags of a CCB before it is reused
 * (drops CCB_ALLOC and any abort-related flags).
 */
static void
adw_reset_ccb(ccb)
	ADW_CCB        *ccb;
{

	ccb->flags = 0;
}
375
376
377 static int
378 adw_init_ccb(sc, ccb)
379 ADW_SOFTC *sc;
380 ADW_CCB *ccb;
381 {
382 int hashnum, error;
383
384 /*
385 * Create the DMA map for this CCB.
386 */
387 error = bus_dmamap_create(sc->sc_dmat,
388 (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
389 ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
390 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
391 if (error) {
392 printf("%s: unable to create CCB DMA map, error = %d\n",
393 sc->sc_dev.dv_xname, error);
394 return (error);
395 }
396
397 /*
398 * put in the phystokv hash table
399 * Never gets taken out.
400 */
401 ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
402 ADW_CCB_OFF(ccb);
403 hashnum = CCB_HASH(ccb->hashkey);
404 ccb->nexthash = sc->sc_ccbhash[hashnum];
405 sc->sc_ccbhash[hashnum] = ccb;
406 adw_reset_ccb(ccb);
407 return (0);
408 }
409
410
411 /*
412 * Get a free ccb
413 *
414 * If there are none, see if we can allocate a new one
415 */
416 static ADW_CCB *
417 adw_get_ccb(sc, flags)
418 ADW_SOFTC *sc;
419 int flags;
420 {
421 ADW_CCB *ccb = 0;
422 int s;
423
424 s = splbio();
425
426 /*
427 * If we can and have to, sleep waiting for one to come free
428 * but only if we can't allocate a new one.
429 */
430 for (;;) {
431 ccb = sc->sc_free_ccb.tqh_first;
432 if (ccb) {
433 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
434 break;
435 }
436 if ((flags & XS_CTL_NOSLEEP) != 0)
437 goto out;
438
439 tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
440 }
441
442 ccb->flags |= CCB_ALLOC;
443
444 out:
445 splx(s);
446 return (ccb);
447 }
448
449
450 /*
451 * Given a physical address, find the ccb that it corresponds to.
452 */
453 ADW_CCB *
454 adw_ccb_phys_kv(sc, ccb_phys)
455 ADW_SOFTC *sc;
456 u_int32_t ccb_phys;
457 {
458 int hashnum = CCB_HASH(ccb_phys);
459 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
460
461 while (ccb) {
462 if (ccb->hashkey == ccb_phys)
463 break;
464 ccb = ccb->nexthash;
465 }
466 return (ccb);
467 }
468
469
470 /*
471 * Queue a CCB to be sent to the controller, and send it if possible.
472 */
473 static int
474 adw_queue_ccb(sc, ccb, retry)
475 ADW_SOFTC *sc;
476 ADW_CCB *ccb;
477 int retry;
478 {
479 int errcode;
480
481 if(!retry)
482 TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
483
484 while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
485
486 errcode = AdvExeScsiQueue(sc, &ccb->scsiq);
487 switch(errcode) {
488 case ADW_SUCCESS:
489 break;
490
491 case ADW_BUSY:
492 printf("ADW_BUSY\n");
493 return(ADW_BUSY);
494
495 case ADW_ERROR:
496 printf("ADW_ERROR\n");
497 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
498 return(ADW_ERROR);
499 }
500
501 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
502
503 if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
504 timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
505 }
506
507 return(errcode);
508 }
509
510
511 /******************************************************************************/
512 /* SCSI layer interfacing routines */
513 /******************************************************************************/
514
515
516 int
517 adw_init(sc)
518 ADW_SOFTC *sc;
519 {
520 u_int16_t warn_code;
521
522
523 sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
524 ADW_LIB_VERSION_MINOR;
525 sc->cfg.chip_version =
526 ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);
527
528 /*
529 * Reset the chip to start and allow register writes.
530 */
531 if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
532 panic("adw_init: adw_find_signature failed");
533 } else {
534 AdvResetChip(sc->sc_iot, sc->sc_ioh);
535
536 warn_code = (sc->chip_type == ADV_CHIP_ASC3550)?
537 AdvInitFrom3550EEP(sc) :
538 AdvInitFrom38C0800EEP(sc);
539
540 if (warn_code & ASC_WARN_EEPROM_CHKSUM)
541 printf("%s: Bad checksum found. "
542 "Setting default values\n",
543 sc->sc_dev.dv_xname);
544 if (warn_code & ASC_WARN_EEPROM_TERMINATION)
545 printf("%s: Bad bus termination setting."
546 "Using automatic termination.\n",
547 sc->sc_dev.dv_xname);
548 }
549
550 sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
551 sc->async_callback = (ADW_CALLBACK) adw_async_callback;
552
553 return (0);
554 }
555
556
/*
 * Attach the adapter: build the CCB and Carrier pools, run the
 * chip-specific microcode initialization, fill in the scsipi glue,
 * and probe the SCSI bus for children.
 *
 * NOTE(review): allocation failures only print a diagnostic and
 * return, leaving the device half-configured; no error can be
 * propagated because attach routines return void.
 */
void
adw_attach(sc)
	ADW_SOFTC      *sc;
{
	int             i, error;


	/* Initialize the free-CCB, waiting-CCB, and deferred-xfer queues. */
	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create Control Blocks\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CCB) {
		/* Partial success: run with fewer CCBs. */
		printf("%s: WARNING: only %d of %d Control Blocks"
		       " created\n",
		       sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control->carriers, ADW_CARRIER_SIZE * ADW_MAX_CARRIER);

	i = adw_create_carriers(sc);
	if (i == 0) {
		printf("%s: unable to create Carriers\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CARRIER) {
		printf("%s: WARNING: only %d of %d Carriers created\n",
		       sc->sc_dev.dv_xname, i, ADW_MAX_CARRIER);
	}


	/*
	 * Initialize the ASC3550.
	 * Fatal cabling/microcode errors panic; a bus-reset error is
	 * only warned about.
	 */
	error = (sc->chip_type == ADV_CHIP_ASC3550)?
	    AdvInitAsc3550Driver(sc) :
	    AdvInitAsc38C0800Driver(sc);
	switch (error) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		      " one of the connectors",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_NO_CARRIER:
		panic("%s: no carrier",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		      sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	/* Probe and attach the devices on the SCSI bus. */
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}
673
674
/*
 * Clamp a transfer to the largest size the per-CCB scatter/gather DMA
 * map can describe ((ADW_MAX_SG_LIST - 1) full pages -- the same bound
 * used in adw_init_ccb()), then apply the generic minphys() clamp.
 */
static void
adwminphys(bp)
	struct buf     *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}
684
685
686 /*
687 * start a scsi operation given the command and the data address.
688 * Also needs the unit, target and lu.
689 */
690 static int
691 adw_scsi_cmd(xs)
692 struct scsipi_xfer *xs;
693 {
694 struct scsipi_link *sc_link = xs->sc_link;
695 ADW_SOFTC *sc = sc_link->adapter_softc;
696 ADW_CCB *ccb;
697 int s, fromqueue = 1, dontqueue = 0, nowait = 0, retry = 0;
698 int flags;
699
700 s = splbio(); /* protect the queue */
701
702 /*
703 * If we're running the queue from adw_done(), we've been
704 * called with the first queue entry as our argument.
705 */
706 if (xs == TAILQ_FIRST(&sc->sc_queue)) {
707 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
708 fromqueue = 1;
709 nowait = 1;
710 } else {
711
712 /* Polled requests can't be queued for later. */
713 dontqueue = xs->xs_control & XS_CTL_POLL;
714
715 /*
716 * If there are jobs in the queue, run them first.
717 */
718 if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
719 /*
720 * If we can't queue, we have to abort, since
721 * we have to preserve order.
722 */
723 if (dontqueue) {
724 splx(s);
725 xs->error = XS_DRIVER_STUFFUP;
726 return (TRY_AGAIN_LATER);
727 }
728 /*
729 * Swap with the first queue entry.
730 */
731 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
732 xs = TAILQ_FIRST(&sc->sc_queue);
733 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
734 fromqueue = 1;
735 }
736 }
737
738
739 /*
740 * get a ccb to use. If the transfer
741 * is from a buf (possibly from interrupt time)
742 * then we can't allow it to sleep
743 */
744
745 flags = xs->xs_control;
746 if (nowait)
747 flags |= XS_CTL_NOSLEEP;
748 if ((ccb = adw_get_ccb(sc, flags)) == NULL) {
749 /*
750 * If we can't queue, we lose.
751 */
752 if (dontqueue) {
753 splx(s);
754 xs->error = XS_DRIVER_STUFFUP;
755 return (TRY_AGAIN_LATER);
756 }
757 /*
758 * Stuff ourselves into the queue, in front
759 * if we came off in the first place.
760 */
761 if (fromqueue)
762 TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
763 else
764 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
765 splx(s);
766 return (SUCCESSFULLY_QUEUED);
767 }
768 splx(s); /* done playing with the queue */
769
770 ccb->xs = xs;
771 ccb->timeout = xs->timeout;
772
773 if (adw_build_req(xs, ccb, flags)) {
774 retryagain:
775 s = splbio();
776 retry = adw_queue_ccb(sc, ccb, retry);
777 splx(s);
778
779 switch(retry) {
780 case ADW_BUSY:
781 goto retryagain;
782
783 case ADW_ERROR:
784 xs->error = XS_DRIVER_STUFFUP;
785 return (COMPLETE);
786
787 }
788
789 /*
790 * Usually return SUCCESSFULLY QUEUED
791 */
792 if ((xs->xs_control & XS_CTL_POLL) == 0)
793 return (SUCCESSFULLY_QUEUED);
794
795 /*
796 * If we can't use interrupts, poll on completion
797 */
798 if (adw_poll(sc, xs, ccb->timeout)) {
799 adw_timeout(ccb);
800 if (adw_poll(sc, xs, ccb->timeout))
801 adw_timeout(ccb);
802 }
803 }
804 return (COMPLETE);
805 }
806
807
/*
 * Build a request structure for the Wide Boards.
 *
 * Fills in ccb->scsiq (the ADW_SCSI_REQ_Q handed to the microcode) from
 * the scsipi_xfer: CDB, target/lun, sense buffer addresses, and -- for
 * data transfers -- the DMA map and hardware scatter/gather chain.
 *
 * Returns 1 on success.  Returns 0 when DMA mapping fails, in which
 * case xs->error is set and the CCB has already been freed.
 */
static int
adw_build_req(xs, ccb, flags)
	struct scsipi_xfer *xs;
	ADW_CCB        *ccb;
	int             flags;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC      *sc = sc_link->adapter_softc;
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int             error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	/*
	 * Sense data lands in the CCB itself; give the chip its bus
	 * address (control-map base + CCB offset + member offset).
	 */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
/*	scsiqp->sense_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, scsi_sense);
 */	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif				/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			/* EFBIG means more segments than the map allows. */
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				       " segments\n",
				       sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				       " dma map\n",
				       sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		/* Sync the buffer for the upcoming device access. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		     BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
912
913
914 /*
915 * Build scatter-gather list for Wide Boards.
916 */
917 static void
918 adw_build_sglist(ccb, scsiqp, sg_block)
919 ADW_CCB *ccb;
920 ADW_SCSI_REQ_Q *scsiqp;
921 ADW_SG_BLOCK *sg_block;
922 {
923 u_long sg_block_next_addr; /* block and its next */
924 u_int32_t sg_block_physical_addr;
925 int i; /* how many SG entries */
926 bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
927 int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;
928
929
930 sg_block_next_addr = (u_long) sg_block; /* allow math operation */
931 sg_block_physical_addr = ccb->hashkey +
932 offsetof(struct adw_ccb, sg_block[0]);
933 scsiqp->sg_real_addr = sg_block_physical_addr;
934
935 /*
936 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
937 * then split the request into multiple sg-list blocks.
938 */
939
940 do {
941 for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
942 sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
943 sg_block->sg_list[i].sg_count = sg_list->ds_len;
944
945 if (--sg_elem_cnt == 0) {
946 /* last entry, get out */
947 sg_block->sg_cnt = i + i;
948 sg_block->sg_ptr = NULL; /* next link = NULL */
949 return;
950 }
951 sg_list++;
952 }
953 sg_block_next_addr += sizeof(ADW_SG_BLOCK);
954 sg_block_physical_addr += sizeof(ADW_SG_BLOCK);
955
956 sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
957 sg_block->sg_ptr = sg_block_physical_addr;
958 sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
959 } while (1);
960 }
961
962
963 int
964 adw_intr(arg)
965 void *arg;
966 {
967 ADW_SOFTC *sc = arg;
968 struct scsipi_xfer *xs;
969
970
971 if(AdvISR(sc) != ADW_FALSE) {
972 /*
973 * If there are queue entries in the software queue, try to
974 * run the first one. We should be more or less guaranteed
975 * to succeed, since we just freed a CCB.
976 *
977 * NOTE: adw_scsi_cmd() relies on our calling it with
978 * the first entry in the queue.
979 */
980 if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
981 (void) adw_scsi_cmd(xs);
982 }
983
984 return (1);
985 }
986
987
988 /*
989 * Poll a particular unit, looking for a particular xs
990 */
991 static int
992 adw_poll(sc, xs, count)
993 ADW_SOFTC *sc;
994 struct scsipi_xfer *xs;
995 int count;
996 {
997
998 /* timeouts are in msec, so we loop in 1000 usec cycles */
999 while (count) {
1000 adw_intr(sc);
1001 if (xs->xs_status & XS_STS_DONE)
1002 return (0);
1003 delay(1000); /* only happens in boot so ok */
1004 count--;
1005 }
1006 return (1);
1007 }
1008
1009
/*
 * Watchdog for a command that failed to complete in time.
 *
 * First expiry: mark the CCB aborting and ask the chip to abort it
 * (the abort completes through adw_isr_callback()).  Second expiry
 * (CCB_ABORTED already set, i.e. the abort itself timed out): give up
 * and reset the whole SCSI bus.
 */
static void
adw_timeout(arg)
	void           *arg;
{
	ADW_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORTED) {
	/*
	 * Abort Timed Out
	 * Lets try resetting the bus!
	 */
		printf(" AGAIN. Resetting SCSI Bus\n");
		ccb->flags &= ~CCB_ABORTED;
		/* AdvResetSCSIBus() will call sbreset_callback() */
		AdvResetSCSIBus(sc);
	} else {
	/*
	 * Abort the operation that has timed out
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
		/* ADW_ABORT_CCB() will implicitly call isr_callback() */
		ADW_ABORT_CCB(sc, ccb);
	}

	splx(s);
}
1051
1052
1053 /******************************************************************************/
1054 /* WIDE boards Interrupt callbacks */
1055 /******************************************************************************/
1056
1057
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Completes one request: looks up the CCB from the bus address the
 * chip reported, disarms its watchdog, unloads the data DMA map,
 * translates the chip's done/host/scsi status into an xs->error code,
 * frees the CCB, and hands the transfer back to the scsipi midlayer.
 */
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC      *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADW_CCB        *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;
//	int s;


	/* Map the chip-reported bus address back to our CCB. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	untimeout(adw_timeout, ccb);

/*	if(ccb->flags & CCB_ABORTING) {
		printf("Retrying request\n");
		ccb->flags &= ~CCB_ABORTING;
		ccb->flags |= CCB_ABORTED;
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);
		return;
	}
*/
	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	/* A completion for a CCB we never handed out indicates corruption. */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 * ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 * scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			/* Host side OK; decode the SCSI status byte. */
			switch (scsiq->scsi_status) {
			case SS_CHK_CONDITION:
			case SS_CMD_TERMINATED:
				/* Copy the autosense data up to the midlayer. */
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SS_TARGET_BUSY:
			case SS_RSERV_CONFLICT:
			case SS_QUEUE_FULL:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_CONDITION_MET:
			case SS_INTERMID:
			case SS_INTERMID_COND_MET:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_GOOD:
				break;
			}
			break;

		case QHSTA_M_SEL_TIMEOUT:
			/* Target did not respond to selection. */
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	adw_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}
1182
1183
1184 /*
1185 * adv_async_callback() - Adv Library asynchronous event callback function.
1186 */
1187 static void
1188 adw_async_callback(sc, code)
1189 ADW_SOFTC *sc;
1190 u_int8_t code;
1191 {
1192 switch (code) {
1193 case ADV_ASYNC_SCSI_BUS_RESET_DET:
1194 /*
1195 * The firmware detected a SCSI Bus reset.
1196 */
1197 break;
1198
1199 case ADV_ASYNC_RDMA_FAILURE:
1200 /*
1201 * Handle RDMA failure by resetting the SCSI Bus and
1202 * possibly the chip if it is unresponsive. Log the error
1203 * with a unique code.
1204 */
1205 AdvResetSCSIBus(sc);
1206 break;
1207
1208 case ADV_HOST_SCSI_BUS_RESET:
1209 /*
1210 * Host generated SCSI bus reset occurred.
1211 */
1212 break;
1213
1214 default:
1215 break;
1216 }
1217 }
1218