adw.c revision 1.16 1 /* $NetBSD: adw.c,v 1.16 2000/04/30 18:52:15 dante Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/kernel.h>
45 #include <sys/errno.h>
46 #include <sys/ioctl.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <sys/buf.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52
53 #include <machine/bus.h>
54 #include <machine/intr.h>
55
56 #include <vm/vm.h>
57 #include <vm/vm_param.h>
58 #include <vm/pmap.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsiconf.h>
63
64 #include <dev/ic/adwlib.h>
65 #include <dev/ic/adw.h>
66
67 #ifndef DDB
68 #define Debugger() panic("should call debugger here (adw.c)")
69 #endif /* ! DDB */
70
71 /******************************************************************************/
72
73
74 static int adw_alloc_controls __P((ADW_SOFTC *));
75 static int adw_alloc_carriers __P((ADW_SOFTC *));
76 static int adw_create_carriers __P((ADW_SOFTC *));
77 static int adw_init_carrier __P((ADW_SOFTC *, ADW_CARRIER *));
78 static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
79 static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
80 static void adw_reset_ccb __P((ADW_CCB *));
81 static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
82 static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
83 static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *, int));
84
85 static int adw_scsi_cmd __P((struct scsipi_xfer *));
86 static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *, int));
87 static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
88 static void adwminphys __P((struct buf *));
89 static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
90 static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));
91
92 static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
93 static void adw_timeout __P((void *));
94
95
96 /******************************************************************************/
97
98
99 /* the below structure is so we have a default dev struct for out link struct */
/*
 * Default scsipi_device template hung off sc_link in adw_attach();
 * every field is NULL so the scsipi midlayer supplies its own
 * default handlers.
 */
struct scsipi_device adw_dev =
{
	NULL,	/* Use default error handler */
	NULL,	/* have a queue, served by this */
	NULL,	/* have no async handler */
	NULL,	/* Use default 'done' routine */
};
107
108
109 #define ADW_ABORT_TIMEOUT 10000 /* time to wait for abort (mSec) */
110 #define ADW_WATCH_TIMEOUT 10000 /* time to wait for watchdog (mSec) */
111
112
113 /******************************************************************************/
114 /* Control Blocks routines */
115 /******************************************************************************/
116
117
118 static int
119 adw_alloc_controls(sc)
120 ADW_SOFTC *sc;
121 {
122 bus_dma_segment_t seg;
123 int error, rseg;
124
125 /*
126 * Allocate the control structure.
127 */
128 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
129 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
130 printf("%s: unable to allocate control structures,"
131 " error = %d\n", sc->sc_dev.dv_xname, error);
132 return (error);
133 }
134 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
135 sizeof(struct adw_control), (caddr_t *) & sc->sc_control,
136 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
137 printf("%s: unable to map control structures, error = %d\n",
138 sc->sc_dev.dv_xname, error);
139 return (error);
140 }
141
142 /*
143 * Create and load the DMA map used for the control blocks.
144 */
145 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
146 1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
147 &sc->sc_dmamap_control)) != 0) {
148 printf("%s: unable to create control DMA map, error = %d\n",
149 sc->sc_dev.dv_xname, error);
150 return (error);
151 }
152 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
153 sc->sc_control, sizeof(struct adw_control), NULL,
154 BUS_DMA_NOWAIT)) != 0) {
155 printf("%s: unable to load control DMA map, error = %d\n",
156 sc->sc_dev.dv_xname, error);
157 return (error);
158 }
159
160 return (0);
161 }
162
163
164 static int
165 adw_alloc_carriers(sc)
166 ADW_SOFTC *sc;
167 {
168 bus_dma_segment_t seg;
169 int error, rseg;
170
171 /*
172 * Allocate the control structure.
173 */
174 sc->sc_control->carriers = malloc(ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
175 M_DEVBUF, M_WAITOK);
176 if(!sc->sc_control->carriers) {
177 printf("%s: malloc() failed in allocating carrier structures,"
178 " error = %d\n", sc->sc_dev.dv_xname, error);
179 return (error);
180 }
181
182 if ((error = bus_dmamem_alloc(sc->sc_dmat,
183 ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
184 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
185 printf("%s: unable to allocate carrier structures,"
186 " error = %d\n", sc->sc_dev.dv_xname, error);
187 return (error);
188 }
189 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
190 ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
191 (caddr_t *) &sc->sc_control->carriers,
192 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
193 printf("%s: unable to map carrier structures,"
194 " error = %d\n", sc->sc_dev.dv_xname, error);
195 return (error);
196 }
197
198 /*
199 * Create and load the DMA map used for the control blocks.
200 */
201 if ((error = bus_dmamap_create(sc->sc_dmat,
202 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 1,
203 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
204 &sc->sc_dmamap_carrier)) != 0) {
205 printf("%s: unable to create carriers DMA map,"
206 " error = %d\n", sc->sc_dev.dv_xname, error);
207 return (error);
208 }
209 if ((error = bus_dmamap_load(sc->sc_dmat,
210 sc->sc_dmamap_carrier, sc->sc_control->carriers,
211 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, NULL,
212 BUS_DMA_NOWAIT)) != 0) {
213 printf("%s: unable to load carriers DMA map,"
214 " error = %d\n", sc->sc_dev.dv_xname, error);
215 return (error);
216 }
217
218 error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE* ADW_MAX_CARRIER,
219 1, ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
220 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
221 &sc->sc_control->dmamap_xfer);
222 if (error) {
223 printf("%s: unable to create Carrier DMA map, error = %d\n",
224 sc->sc_dev.dv_xname, error);
225 return (error);
226 }
227
228 return (0);
229 }
230
231
232 /*
233 * Create a set of Carriers and add them to the free list. Called once
234 * by adw_init(). We return the number of Carriers successfully created.
235 */
236 static int
237 adw_create_carriers(sc)
238 ADW_SOFTC *sc;
239 {
240 ADW_CARRIER *carr;
241 u_int32_t carr_next = NULL;
242 int i, error;
243
244 for(i=0; i < ADW_MAX_CARRIER; i++) {
245 carr = (ADW_CARRIER *)(((u_int8_t *)sc->sc_control->carriers) +
246 (ADW_CARRIER_SIZE * i));
247 if ((error = adw_init_carrier(sc, carr)) != 0) {
248 printf("%s: unable to initialize carrier, error = %d\n",
249 sc->sc_dev.dv_xname, error);
250 return (i);
251 }
252 carr->next_vpa = carr_next;
253 carr_next = carr->carr_pa;
254 carr->id = i;
255 }
256 sc->carr_freelist = carr;
257 return (i);
258 }
259
260
261 static int
262 adw_init_carrier(sc, carr)
263 ADW_SOFTC *sc;
264 ADW_CARRIER *carr;
265 {
266 u_int32_t carr_pa;
267 int /*error, */hashnum;
268
269 /*
270 * Create the DMA map for all of the Carriers.
271 */
272 /* error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE,
273 1, ADW_CARRIER_SIZE,
274 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
275 &carr->dmamap_xfer);
276 if (error) {
277 printf("%s: unable to create Carrier DMA map, error = %d\n",
278 sc->sc_dev.dv_xname, error);
279 return (error);
280 }
281 */
282 /*
283 * put in the phystokv hash table
284 * Never gets taken out.
285 */
286 carr_pa = ADW_CARRIER_ADDR(sc, carr);
287 carr->carr_pa = carr_pa;
288 hashnum = CARRIER_HASH(carr_pa);
289 carr->nexthash = sc->sc_carrhash[hashnum];
290 sc->sc_carrhash[hashnum] = carr;
291
292 return(0);
293 }
294
295
296 /*
297 * Given a physical address, find the Carrier that it corresponds to.
298 */
299 ADW_CARRIER *
300 adw_carrier_phys_kv(sc, carr_phys)
301 ADW_SOFTC *sc;
302 u_int32_t carr_phys;
303 {
304 int hashnum = CARRIER_HASH(carr_phys);
305 ADW_CARRIER *carr = sc->sc_carrhash[hashnum];
306
307 while (carr) {
308 if (carr->carr_pa == carr_phys)
309 break;
310 carr = carr->nexthash;
311 }
312 return (carr);
313 }
314
315
316 /*
317 * Create a set of ccbs and add them to the free list. Called once
318 * by adw_init(). We return the number of CCBs successfully created.
319 */
320 static int
321 adw_create_ccbs(sc, ccbstore, count)
322 ADW_SOFTC *sc;
323 ADW_CCB *ccbstore;
324 int count;
325 {
326 ADW_CCB *ccb;
327 int i, error;
328
329 for (i = 0; i < count; i++) {
330 ccb = &ccbstore[i];
331 if ((error = adw_init_ccb(sc, ccb)) != 0) {
332 printf("%s: unable to initialize ccb, error = %d\n",
333 sc->sc_dev.dv_xname, error);
334 return (i);
335 }
336 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
337 }
338
339 return (i);
340 }
341
342
343 /*
344 * A ccb is put onto the free list.
345 */
346 static void
347 adw_free_ccb(sc, ccb)
348 ADW_SOFTC *sc;
349 ADW_CCB *ccb;
350 {
351 int s;
352
353 s = splbio();
354
355 adw_reset_ccb(ccb);
356 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
357
358 /*
359 * If there were none, wake anybody waiting for one to come free,
360 * starting with queued entries.
361 */
362 if (ccb->chain.tqe_next == 0)
363 wakeup(&sc->sc_free_ccb);
364
365 splx(s);
366 }
367
368
369 static void
370 adw_reset_ccb(ccb)
371 ADW_CCB *ccb;
372 {
373
374 ccb->flags = 0;
375 }
376
377
378 static int
379 adw_init_ccb(sc, ccb)
380 ADW_SOFTC *sc;
381 ADW_CCB *ccb;
382 {
383 int hashnum, error;
384
385 /*
386 * Create the DMA map for this CCB.
387 */
388 error = bus_dmamap_create(sc->sc_dmat,
389 (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
390 ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
391 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
392 if (error) {
393 printf("%s: unable to create CCB DMA map, error = %d\n",
394 sc->sc_dev.dv_xname, error);
395 return (error);
396 }
397
398 /*
399 * put in the phystokv hash table
400 * Never gets taken out.
401 */
402 ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
403 ADW_CCB_OFF(ccb);
404 hashnum = CCB_HASH(ccb->hashkey);
405 ccb->nexthash = sc->sc_ccbhash[hashnum];
406 sc->sc_ccbhash[hashnum] = ccb;
407 adw_reset_ccb(ccb);
408 return (0);
409 }
410
411
412 /*
413 * Get a free ccb
414 *
415 * If there are none, see if we can allocate a new one
416 */
417 static ADW_CCB *
418 adw_get_ccb(sc, flags)
419 ADW_SOFTC *sc;
420 int flags;
421 {
422 ADW_CCB *ccb = 0;
423 int s;
424
425 s = splbio();
426
427 /*
428 * If we can and have to, sleep waiting for one to come free
429 * but only if we can't allocate a new one.
430 */
431 for (;;) {
432 ccb = sc->sc_free_ccb.tqh_first;
433 if (ccb) {
434 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
435 break;
436 }
437 if ((flags & XS_CTL_NOSLEEP) != 0)
438 goto out;
439
440 tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
441 }
442
443 ccb->flags |= CCB_ALLOC;
444
445 out:
446 splx(s);
447 return (ccb);
448 }
449
450
451 /*
452 * Given a physical address, find the ccb that it corresponds to.
453 */
454 ADW_CCB *
455 adw_ccb_phys_kv(sc, ccb_phys)
456 ADW_SOFTC *sc;
457 u_int32_t ccb_phys;
458 {
459 int hashnum = CCB_HASH(ccb_phys);
460 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
461
462 while (ccb) {
463 if (ccb->hashkey == ccb_phys)
464 break;
465 ccb = ccb->nexthash;
466 }
467 return (ccb);
468 }
469
470
471 /*
472 * Queue a CCB to be sent to the controller, and send it if possible.
473 */
474 static int
475 adw_queue_ccb(sc, ccb, retry)
476 ADW_SOFTC *sc;
477 ADW_CCB *ccb;
478 int retry;
479 {
480 int errcode;
481
482 if(!retry)
483 TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
484
485 while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
486
487 errcode = AdvExeScsiQueue(sc, &ccb->scsiq);
488 switch(errcode) {
489 case ADW_SUCCESS:
490 break;
491
492 case ADW_BUSY:
493 printf("ADW_BUSY\n");
494 return(ADW_BUSY);
495
496 case ADW_ERROR:
497 printf("ADW_ERROR\n");
498 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
499 return(ADW_ERROR);
500 }
501
502 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
503
504 if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
505 callout_reset(&ccb->xs->xs_callout,
506 (ccb->timeout * hz) / 1000, adw_timeout, ccb);
507 }
508
509 return(errcode);
510 }
511
512
513 /******************************************************************************/
514 /* SCSI layer interfacing routines */
515 /******************************************************************************/
516
517
518 int
519 adw_init(sc)
520 ADW_SOFTC *sc;
521 {
522 u_int16_t warn_code;
523
524
525 sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
526 ADW_LIB_VERSION_MINOR;
527 sc->cfg.chip_version =
528 ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);
529
530 /*
531 * Reset the chip to start and allow register writes.
532 */
533 if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
534 panic("adw_init: adw_find_signature failed");
535 } else {
536 AdvResetChip(sc->sc_iot, sc->sc_ioh);
537
538 switch(sc->chip_type) {
539 case ADV_CHIP_ASC3550:
540 warn_code = AdvInitFrom3550EEP(sc);
541 break;
542
543 case ADV_CHIP_ASC38C0800:
544 warn_code = AdvInitFrom38C0800EEP(sc);
545 break;
546
547 case ADV_CHIP_ASC38C1600:
548 warn_code = AdvInitFrom38C1600EEP(sc);
549 break;
550
551 default:
552 return -1;
553 }
554
555 if (warn_code & ASC_WARN_EEPROM_CHKSUM)
556 printf("%s: Bad checksum found. "
557 "Setting default values\n",
558 sc->sc_dev.dv_xname);
559 if (warn_code & ASC_WARN_EEPROM_TERMINATION)
560 printf("%s: Bad bus termination setting."
561 "Using automatic termination.\n",
562 sc->sc_dev.dv_xname);
563 }
564
565 sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
566 sc->async_callback = (ADW_CALLBACK) adw_async_callback;
567
568 return 0;
569 }
570
571
/*
 * Attach-time initialization: build the CCB and Carrier pools, run the
 * chip-specific Adv library driver initialization, fill in the scsipi
 * adapter/link templates and attach the SCSI bus.
 *
 * NOTE(review): the early-return error paths below do not release DMA
 * resources already allocated; attach simply aborts -- confirm this is
 * acceptable for this driver's lifecycle.
 */
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 * A short count is only a warning; zero is fatal.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create Control Blocks\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d Control Blocks"
		       " created\n",
		       sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers (microcode request list
	 * elements).  Again, a short count is only a warning.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control->carriers, ADW_CARRIER_SIZE * ADW_MAX_CARRIER);

	i = adw_create_carriers(sc);
	if (i == 0) {
		printf("%s: unable to create Carriers\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CARRIER) {
		printf("%s: WARNING: only %d of %d Carriers created\n",
		       sc->sc_dev.dv_xname, i, ADW_MAX_CARRIER);
	}


	/*
	 * Initialize the adapter firmware/driver state for the specific
	 * chip variant.
	 */
	switch(sc->chip_type) {
	case ADV_CHIP_ASC3550:
		error = AdvInitAsc3550Driver(sc);
		break;

	case ADV_CHIP_ASC38C0800:
		error = AdvInitAsc38C0800Driver(sc);
		break;

	case ADV_CHIP_ASC38C1600:
		error = AdvInitAsc38C1600Driver(sc);
		break;

	default:
		return;
	}

	/* Fatal initialization errors panic; warnings are just logged. */
	switch (error) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		      " one of the connectors",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_NO_CARRIER:
		panic("%s: no carrier",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		      sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * Fill in the prototype scsipi_link and attach the SCSI bus.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}
702
703
704 static void
705 adwminphys(bp)
706 struct buf *bp;
707 {
708
709 if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
710 bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
711 minphys(bp);
712 }
713
714
/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 *
 * This is the scsipi entry point; it is also re-entered from
 * adw_intr() with the FIRST entry of the software queue as argument
 * (that case is detected below).  Returns SUCCESSFULLY_QUEUED,
 * COMPLETE, or TRY_AGAIN_LATER.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	/*
	 * NOTE(review): fromqueue is initialized to 1, so the
	 * "insert at head" requeue path below always triggers even for
	 * fresh requests; similar drivers start it at 0 -- confirm intent.
	 */
	int s, fromqueue = 1, dontqueue = 0, nowait = 0, retry = 0;
	int flags;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->xs_control & XS_CTL_POLL;

		/*
		 * If there are jobs in the queue, run them first to
		 * preserve ordering.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->xs_control;
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = adw_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb, flags)) {
		/* Re-submit while the controller reports ADW_BUSY. */
retryagain:
		s = splbio();
		retry = adw_queue_ccb(sc, ccb, retry);
		splx(s);

		switch(retry) {
		case ADW_BUSY:
			goto retryagain;

		case ADW_ERROR:
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);

		}

		/*
		 * Usually return SUCCESSFULLY QUEUED
		 */
		if ((xs->xs_control & XS_CTL_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion;
		 * on timeout, fire the abort/reset path by hand.
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}
835
836
/*
 * Build a request structure for the Wide Boards.
 *
 * Fills in ccb->scsiq (the ADW_SCSI_REQ_Q consumed by the microcode):
 * CDB, target/lun, sense buffer addresses, and -- when there is data
 * to move -- loads the data DMA map and builds the scatter/gather
 * block list.  Returns 1 on success; on DMA mapping failure frees the
 * CCB, sets xs->error and returns 0.
 */
static int
adw_build_req(xs, ccb, flags)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
	int flags;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported: the first 12 bytes go in 'cdb', any
	 * remainder in 'cdb16'.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		bcopy(&(xs->cmd[12]), &scsiqp->cdb16, xs->cmdlen - 12);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	/*
	 * Sense data is DMAed straight into the CCB; sense_addr is the
	 * CCB's bus address plus the offset of its sense buffer.
	 */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif		/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		/* Sync the buffer for the upcoming device access. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
946
947
948 /*
949 * Build scatter-gather list for Wide Boards.
950 */
951 static void
952 adw_build_sglist(ccb, scsiqp, sg_block)
953 ADW_CCB *ccb;
954 ADW_SCSI_REQ_Q *scsiqp;
955 ADW_SG_BLOCK *sg_block;
956 {
957 u_long sg_block_next_addr; /* block and its next */
958 u_int32_t sg_block_physical_addr;
959 int i; /* how many SG entries */
960 bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
961 int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;
962
963
964 sg_block_next_addr = (u_long) sg_block; /* allow math operation */
965 sg_block_physical_addr = ccb->hashkey +
966 offsetof(struct adw_ccb, sg_block[0]);
967 scsiqp->sg_real_addr = sg_block_physical_addr;
968
969 /*
970 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
971 * then split the request into multiple sg-list blocks.
972 */
973
974 do {
975 for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
976 sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
977 sg_block->sg_list[i].sg_count = sg_list->ds_len;
978
979 if (--sg_elem_cnt == 0) {
980 /* last entry, get out */
981 sg_block->sg_cnt = i + i;
982 sg_block->sg_ptr = NULL; /* next link = NULL */
983 return;
984 }
985 sg_list++;
986 }
987 sg_block_next_addr += sizeof(ADW_SG_BLOCK);
988 sg_block_physical_addr += sizeof(ADW_SG_BLOCK);
989
990 sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
991 sg_block->sg_ptr = sg_block_physical_addr;
992 sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
993 } while (1);
994 }
995
996
997 int
998 adw_intr(arg)
999 void *arg;
1000 {
1001 ADW_SOFTC *sc = arg;
1002 struct scsipi_xfer *xs;
1003
1004
1005 if(AdvISR(sc) != ADW_FALSE) {
1006 /*
1007 * If there are queue entries in the software queue, try to
1008 * run the first one. We should be more or less guaranteed
1009 * to succeed, since we just freed a CCB.
1010 *
1011 * NOTE: adw_scsi_cmd() relies on our calling it with
1012 * the first entry in the queue.
1013 */
1014 if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
1015 (void) adw_scsi_cmd(xs);
1016
1017 return (1);
1018 }
1019
1020 return (0);
1021 }
1022
1023
1024 /*
1025 * Poll a particular unit, looking for a particular xs
1026 */
1027 static int
1028 adw_poll(sc, xs, count)
1029 ADW_SOFTC *sc;
1030 struct scsipi_xfer *xs;
1031 int count;
1032 {
1033
1034 /* timeouts are in msec, so we loop in 1000 usec cycles */
1035 while (count) {
1036 adw_intr(sc);
1037 if (xs->xs_status & XS_STS_DONE)
1038 return (0);
1039 delay(1000); /* only happens in boot so ok */
1040 count--;
1041 }
1042 return (1);
1043 }
1044
1045
/*
 * Per-command watchdog (armed in adw_queue_ccb(), also called directly
 * from the polling path).  First expiry aborts the timed-out command;
 * if the abort itself times out (CCB_ABORTED already set), escalate to
 * a full SCSI bus reset.
 */
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 * Lets try resetting the bus!
		 */
		printf(" AGAIN. Resetting SCSI Bus\n");
		ccb->flags &= ~CCB_ABORTED;
		/* AdvResetSCSIBus() will call sbreset_callback() */
		AdvResetSCSIBus(sc);
	} else {
		/*
		 * Abort the operation that has timed out
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
		/* ADW_ABORT_CCB() will implicitly call isr_callback() */
		ADW_ABORT_CCB(sc, ccb);
	}

	splx(s);
}
1087
1088
1089 /******************************************************************************/
1090 /* WIDE boards Interrupt callbacks */
1091 /******************************************************************************/
1092
1093
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library: completes
 * the scsipi transfer attached to the finished ADW_SCSI_REQ_Q.  Stops
 * the watchdog, unloads the data DMA map, translates the request's
 * done/host/scsi status into an xs->error code, frees the CCB and
 * calls scsipi_done().
 */
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;
	/* int s; */


	/* Translate the request's bus address back to our CCB. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

/*	if(ccb->flags & CCB_ABORTING) {
		printf("Retrying request\n");
		ccb->flags &= ~CCB_ABORTING;
		ccb->flags |= CCB_ABORTED;
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);
		return;
	}
*/
	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 * ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 * scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status; refine it
	 * with host_status and, for QD_WITH_ERROR, the SCSI status byte.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			switch(scsiq->scsi_status) {
			case SS_CHK_CONDITION:
			case SS_CMD_TERMINATED:
				/* Copy the DMAed sense data up to scsipi. */
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SS_TARGET_BUSY:
			case SS_RSERV_CONFLICT:
			case SS_QUEUE_FULL:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_CONDITION_MET:
			case SS_INTERMID:
			case SS_INTERMID_COND_MET:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_GOOD:
				break;
			}
			break;

		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	adw_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}
1218
1219
1220 /*
1221 * adv_async_callback() - Adv Library asynchronous event callback function.
1222 */
1223 static void
1224 adw_async_callback(sc, code)
1225 ADW_SOFTC *sc;
1226 u_int8_t code;
1227 {
1228 switch (code) {
1229 case ADV_ASYNC_SCSI_BUS_RESET_DET:
1230 /*
1231 * The firmware detected a SCSI Bus reset.
1232 */
1233 break;
1234
1235 case ADV_ASYNC_RDMA_FAILURE:
1236 /*
1237 * Handle RDMA failure by resetting the SCSI Bus and
1238 * possibly the chip if it is unresponsive. Log the error
1239 * with a unique code.
1240 */
1241 AdvResetSCSIBus(sc);
1242 break;
1243
1244 case ADV_HOST_SCSI_BUS_RESET:
1245 /*
1246 * Host generated SCSI bus reset occurred.
1247 */
1248 break;
1249
1250 default:
1251 break;
1252 }
1253 }
1254