/*	$NetBSD: adw.c,v 1.18 2000/05/03 19:15:27 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif	/* ! DDB */

/******************************************************************************/


static int adw_alloc_controls __P((ADW_SOFTC *));
static int adw_alloc_carriers __P((ADW_SOFTC *));
static int adw_create_carriers __P((ADW_SOFTC *));
static int adw_init_carrier __P((ADW_SOFTC *, ADW_CARRIER *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *, int));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *, int));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
static void adwminphys __P((struct buf *));
static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));


/******************************************************************************/


/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
	NULL,	/* Use default error handler */
	NULL,	/* have a queue, served by this */
	NULL,	/* have no async handler */
	NULL,	/* Use default 'done' routine */
};


#define ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                         Control Blocks routines                           */
/******************************************************************************/


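/*
 * Allocate and map DMA-safe memory for the driver's control structure
 * (struct adw_control), then create and load the DMA map that describes
 * it.  Returns 0 on success or an errno value.
 */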
static int
adw_alloc_controls(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


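/*
 * Allocate and map DMA-safe memory for the carrier area, then create
 * and load the DMA maps that describe it.  Returns 0 on success or an
 * errno value.
 */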
static int
adw_alloc_carriers(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the carrier structures.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
	    (caddr_t *) &sc->sc_control->carriers,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the carriers.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 1,
	    ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_carrier)) != 0) {
		printf("%s: unable to create carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->sc_dmamap_carrier, sc->sc_control->carriers,
	    ADW_CARRIER_SIZE * ADW_MAX_CARRIER, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	error = bus_dmamap_create(sc->sc_dmat,
	    ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
	    1, ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_control->dmamap_xfer);
	if (error) {
		printf("%s: unable to create Carrier DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


/*
 * Create a set of Carriers and add them to the free list.  Called once
 * by adw_init().  We return the number of Carriers successfully created.
 */
static int
adw_create_carriers(sc)
	ADW_SOFTC *sc;
{
	ADW_CARRIER *carr;
	u_int32_t carr_next = 0;
	int i, error;

	for (i = 0; i < ADW_MAX_CARRIER; i++) {
		carr = (ADW_CARRIER *)(((u_int8_t *)sc->sc_control->carriers) +
		    (ADW_CARRIER_SIZE * i));
		if ((error = adw_init_carrier(sc, carr)) != 0) {
			printf("%s: unable to initialize carrier, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		carr->next_vpa = carr_next;
		carr_next = carr->carr_pa;
		carr->id = i;
	}
	sc->carr_freelist = carr;
	return (i);
}


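/*
 * Initialize a single Carrier: record its bus address and enter it into
 * the physical-to-virtual hash table.
 */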
static int
adw_init_carrier(sc, carr)
	ADW_SOFTC *sc;
	ADW_CARRIER *carr;
{
	u_int32_t carr_pa;
	int /*error, */hashnum;

	/*
	 * Create the DMA map for all of the Carriers.
	 */
/*	error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE,
	    1, ADW_CARRIER_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &carr->dmamap_xfer);
	if (error) {
		printf("%s: unable to create Carrier DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
*/
	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	carr_pa = ADW_CARRIER_ADDR(sc, carr);
	carr->carr_pa = carr_pa;
	hashnum = CARRIER_HASH(carr_pa);
	carr->nexthash = sc->sc_carrhash[hashnum];
	sc->sc_carrhash[hashnum] = carr;

	return (0);
}


/*
 * Given a physical address, find the Carrier that it corresponds to.
 */
ADW_CARRIER *
adw_carrier_phys_kv(sc, carr_phys)
	ADW_SOFTC *sc;
	u_int32_t carr_phys;
{
	int hashnum = CARRIER_HASH(carr_phys);
	ADW_CARRIER *carr = sc->sc_carrhash[hashnum];

	while (carr) {
		if (carr->carr_pa == carr_phys)
			break;
		carr = carr->nexthash;
	}
	return (carr);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_init().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


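/*
 * Return a CCB to its initial, unallocated state.
 */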
static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}


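/*
 * Initialize a CCB: create the DMA map used for its data transfers and
 * enter the CCB into the physical-to-virtual hash table.
 */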
static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & XS_CTL_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(sc, ccb_phys)
	ADW_SOFTC *sc;
	u_int32_t ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static int
adw_queue_ccb(sc, ccb, retry)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
	int retry;
{
	int errcode;

	if (!retry)
		TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	errcode = ADW_SUCCESS;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		errcode = AdvExeScsiQueue(sc, &ccb->scsiq);
		switch (errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return (ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
			return (ADW_ERROR);
		}

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	return (errcode);
}


/******************************************************************************/
/*                      SCSI layer interfacing routines                      */
/******************************************************************************/


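/*
 * Initialize the board: check the chip signature, reset the chip and
 * load the configuration from the EEPROM of the detected chip type.
 * Returns 0 on success, -1 on an unknown chip type.
 */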
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		switch (sc->chip_type) {
		case ADV_CHIP_ASC3550:
			warn_code = AdvInitFrom3550EEP(sc);
			break;

		case ADV_CHIP_ASC38C0800:
			warn_code = AdvInitFrom38C0800EEP(sc);
			break;

		case ADV_CHIP_ASC38C1600:
			warn_code = AdvInitFrom38C1600EEP(sc);
			break;

		default:
			return -1;
		}

		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values.\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);
	}

	sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
	sc->async_callback = (ADW_CALLBACK) adw_async_callback;

	return 0;
}


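/*
 * Attach the adapter: allocate the control structures, create and
 * initialize the CCBs and Carriers, run the chip-specific driver
 * initialization, then fill in the scsipi adapter/link and attach the
 * SCSI bus.
 */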
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create Control Blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d Control Blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */

	bzero(sc->sc_control->carriers, ADW_CARRIER_SIZE * ADW_MAX_CARRIER);

	i = adw_create_carriers(sc);
	if (i == 0) {
		printf("%s: unable to create Carriers\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADW_MAX_CARRIER) {
		printf("%s: WARNING: only %d of %d Carriers created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CARRIER);
	}


	/*
	 * Initialize the adapter
	 */
	switch (sc->chip_type) {
	case ADV_CHIP_ASC3550:
		error = AdvInitAsc3550Driver(sc);
		break;

	case ADV_CHIP_ASC38C0800:
		error = AdvInitAsc38C0800Driver(sc);
		break;

	case ADV_CHIP_ASC38C1600:
		error = AdvInitAsc38C1600Driver(sc);
		break;

	default:
		return;
	}

	switch (error) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_NO_CARRIER:
		panic("%s: no carrier",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


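/*
 * Clamp a transfer to the largest size the per-CCB scatter/gather map
 * can handle, then apply the generic minphys().
 */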
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}




/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 1, dontqueue = 0, nowait = 0, retry = 0;
	int flags;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->xs_control & XS_CTL_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->xs_control;
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = adw_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb, flags)) {
retryagain:
		s = splbio();
		retry = adw_queue_ccb(sc, ccb, retry);
		splx(s);

		switch (retry) {
		case ADW_BUSY:
			goto retryagain;

		case ADW_ERROR:
			xs->error = XS_DRIVER_STUFFUP;
			return (COMPLETE);
		}

		/*
		 * Usually return SUCCESSFULLY QUEUED
		 */
		if ((xs->xs_control & XS_CTL_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}




/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb, flags)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
	int flags;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, ((scsiqp->cdb_len = xs->cmdlen) <= 12) ?
	    xs->cmdlen : 12);
	if (xs->cmdlen > 12)
		bcopy(&(xs->cmd[12]), &scsiqp->cdb16, xs->cmdlen - 12);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
/*	scsiqp->sense_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, scsi_sense);
*/
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif		/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp, sg_block)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
	ADW_SG_BLOCK *sg_block;
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;				/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	sg_block_physical_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = sg_block_physical_addr;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}


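/*
 * Hardware interrupt handler.  Let the Adv Library service the chip,
 * then restart the first entry of the software queue, if any.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */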
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	if (AdvISR(sc) != ADW_FALSE) {
		/*
		 * If there are queue entries in the software queue, try to
		 * run the first one.  We should be more or less guaranteed
		 * to succeed, since we just freed a CCB.
		 *
		 * NOTE: adw_scsi_cmd() relies on our calling it with
		 * the first entry in the queue.
		 */
		if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
			(void) adw_scsi_cmd(xs);

		return (1);
	}

	return (0);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


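/*
 * Timeout handler for a command.  On the first timeout abort the CCB;
 * if the abort itself times out, reset the SCSI bus.
 */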
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 * Let's try resetting the bus!
		 */
		printf(" AGAIN. Resetting SCSI Bus\n");
		ccb->flags &= ~CCB_ABORTED;
		/* AdvResetSCSIBus() will call sbreset_callback() */
		AdvResetSCSIBus(sc);
	} else {
		/*
		 * Abort the operation that has timed out
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
		/* ADW_ABORT_CCB() will implicitly call isr_callback() */
		ADW_ABORT_CCB(sc, ccb);
	}

	splx(s);
}


/******************************************************************************/
/*                      WIDE boards Interrupt callbacks                       */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;
/*	int s; */


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

/*	if (ccb->flags & CCB_ABORTING) {
		printf("Retrying request\n");
		ccb->flags &= ~CCB_ABORTING;
		ccb->flags |= CCB_ABORTED;
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);
		return;
	}
*/
	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 *	ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 *	    scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			switch (scsiq->scsi_status) {
			case SS_CHK_CONDITION:
			case SS_CMD_TERMINATED:
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SS_TARGET_BUSY:
			case SS_RSERV_CONFLICT:
			case SS_QUEUE_FULL:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_CONDITION_MET:
			case SS_INTERMID:
			case SS_INTERMID_COND_MET:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_GOOD:
				break;
			}
			break;

		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	adw_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}


/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(sc, code)
	ADW_SOFTC *sc;
	u_int8_t code;
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/*
		 * The firmware detected a SCSI Bus reset.
		 */
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive.  Log the error
		 * with a unique code.
		 */
		AdvResetSCSIBus(sc);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/*
		 * Host generated SCSI bus reset occurred.
		 */
		break;

	default:
		break;
	}
}