adw.c revision 1.15 1 /* $NetBSD: adw.c,v 1.15 2000/03/23 07:01:28 thorpej Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/callout.h>
44 #include <sys/kernel.h>
45 #include <sys/errno.h>
46 #include <sys/ioctl.h>
47 #include <sys/device.h>
48 #include <sys/malloc.h>
49 #include <sys/buf.h>
50 #include <sys/proc.h>
51 #include <sys/user.h>
52
53 #include <machine/bus.h>
54 #include <machine/intr.h>
55
56 #include <vm/vm.h>
57 #include <vm/vm_param.h>
58 #include <vm/pmap.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsiconf.h>
63
64 #include <dev/ic/adwlib.h>
65 #include <dev/ic/adw.h>
66
67 #ifndef DDB
68 #define Debugger() panic("should call debugger here (adw.c)")
69 #endif /* ! DDB */
70
71 /******************************************************************************/
72
73
74 static int adw_alloc_controls __P((ADW_SOFTC *));
75 static int adw_alloc_carriers __P((ADW_SOFTC *));
76 static int adw_create_carriers __P((ADW_SOFTC *));
77 static int adw_init_carrier __P((ADW_SOFTC *, ADW_CARRIER *));
78 static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
79 static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
80 static void adw_reset_ccb __P((ADW_CCB *));
81 static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
82 static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
83 static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *, int));
84
85 static int adw_scsi_cmd __P((struct scsipi_xfer *));
86 static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *, int));
87 static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
88 static void adwminphys __P((struct buf *));
89 static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
90 static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));
91
92 static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
93 static void adw_timeout __P((void *));
94
95
96 /******************************************************************************/
97
98
/*
 * Default scsipi_device template for our scsipi_link.  All entries are
 * NULL, so the scsipi layer falls back to its default handlers.
 */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
107
108
109 #define ADW_ABORT_TIMEOUT 10000 /* time to wait for abort (mSec) */
110 #define ADW_WATCH_TIMEOUT 10000 /* time to wait for watchdog (mSec) */
111
112
113 /******************************************************************************/
114 /* Control Blocks routines */
115 /******************************************************************************/
116
117
/*
 * Allocate and map the DMA-safe memory holding the driver's control
 * structure (struct adw_control), then create and load a DMA map
 * covering it so its bus address is known.
 *
 * Returns 0 on success or a bus_dma error code.  On failure the
 * partially-allocated resources are not released; the caller
 * (adw_attach) simply aborts the attach.
 */
static int
adw_alloc_controls(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		       " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Map it into kernel virtual space.  BUS_DMA_COHERENT because
	 * both the CPU and the adapter's microcode access these
	 * structures.
	 */
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
		   sizeof(struct adw_control), (caddr_t *) & sc->sc_control,
			    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
			   1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
				       &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
			   sc->sc_control, sizeof(struct adw_control), NULL,
				     BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}
162
163
164 static int
165 adw_alloc_carriers(sc)
166 ADW_SOFTC *sc;
167 {
168 bus_dma_segment_t seg;
169 int error, rseg;
170
171 /*
172 * Allocate the control structure.
173 */
174 sc->sc_control->carriers = malloc(ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
175 M_DEVBUF, M_WAITOK);
176 if(!sc->sc_control->carriers) {
177 printf("%s: malloc() failed in allocating carrier structures,"
178 " error = %d\n", sc->sc_dev.dv_xname, error);
179 return (error);
180 }
181
182 if ((error = bus_dmamem_alloc(sc->sc_dmat,
183 ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
184 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
185 printf("%s: unable to allocate carrier structures,"
186 " error = %d\n", sc->sc_dev.dv_xname, error);
187 return (error);
188 }
189 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
190 ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
191 (caddr_t *) &sc->sc_control->carriers,
192 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
193 printf("%s: unable to map carrier structures,"
194 " error = %d\n", sc->sc_dev.dv_xname, error);
195 return (error);
196 }
197
198 /*
199 * Create and load the DMA map used for the control blocks.
200 */
201 if ((error = bus_dmamap_create(sc->sc_dmat,
202 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 1,
203 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
204 &sc->sc_dmamap_carrier)) != 0) {
205 printf("%s: unable to create carriers DMA map,"
206 " error = %d\n", sc->sc_dev.dv_xname, error);
207 return (error);
208 }
209 if ((error = bus_dmamap_load(sc->sc_dmat,
210 sc->sc_dmamap_carrier, sc->sc_control->carriers,
211 ADW_CARRIER_SIZE * ADW_MAX_CARRIER, NULL,
212 BUS_DMA_NOWAIT)) != 0) {
213 printf("%s: unable to load carriers DMA map,"
214 " error = %d\n", sc->sc_dev.dv_xname, error);
215 return (error);
216 }
217
218 error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE* ADW_MAX_CARRIER,
219 1, ADW_CARRIER_SIZE * ADW_MAX_CARRIER,
220 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
221 &sc->sc_control->dmamap_xfer);
222 if (error) {
223 printf("%s: unable to create Carrier DMA map, error = %d\n",
224 sc->sc_dev.dv_xname, error);
225 return (error);
226 }
227
228 return (0);
229 }
230
231
232 /*
233 * Create a set of Carriers and add them to the free list. Called once
234 * by adw_init(). We return the number of Carriers successfully created.
235 */
236 static int
237 adw_create_carriers(sc)
238 ADW_SOFTC *sc;
239 {
240 ADW_CARRIER *carr;
241 u_int32_t carr_next = NULL;
242 int i, error;
243
244 for(i=0; i < ADW_MAX_CARRIER; i++) {
245 carr = (ADW_CARRIER *)(((u_int8_t *)sc->sc_control->carriers) +
246 (ADW_CARRIER_SIZE * i));
247 if ((error = adw_init_carrier(sc, carr)) != 0) {
248 printf("%s: unable to initialize carrier, error = %d\n",
249 sc->sc_dev.dv_xname, error);
250 return (i);
251 }
252 carr->next_vpa = carr_next;
253 carr_next = carr->carr_pa;
254 carr->id = i;
255 }
256 sc->carr_freelist = carr;
257 return (i);
258 }
259
260
/*
 * One-time initialization of a Carrier: record its bus address and
 * enter it in the physical-to-virtual carrier hash table (it is never
 * removed).  Always returns 0.
 */
static int
adw_init_carrier(sc, carr)
	ADW_SOFTC *sc;
	ADW_CARRIER *carr;
{
	u_int32_t carr_pa;
	int /*error, */hashnum;

	/*
	 * Create the DMA map for all of the Carriers.
	 * (Dead code: a single map covering the whole carrier area is
	 * created in adw_alloc_carriers() instead.)
	 */
/*	error = bus_dmamap_create(sc->sc_dmat, ADW_CARRIER_SIZE,
			  1, ADW_CARRIER_SIZE,
		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
			  &carr->dmamap_xfer);
	if (error) {
		printf("%s: unable to create Carrier DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
*/
	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	carr_pa = ADW_CARRIER_ADDR(sc, carr);	/* bus address of carrier */
	carr->carr_pa = carr_pa;
	hashnum = CARRIER_HASH(carr_pa);
	carr->nexthash = sc->sc_carrhash[hashnum];
	sc->sc_carrhash[hashnum] = carr;

	return(0);
}
294
295
296 /*
297 * Given a physical address, find the Carrier that it corresponds to.
298 */
299 ADW_CARRIER *
300 adw_carrier_phys_kv(sc, carr_phys)
301 ADW_SOFTC *sc;
302 u_int32_t carr_phys;
303 {
304 int hashnum = CARRIER_HASH(carr_phys);
305 ADW_CARRIER *carr = sc->sc_carrhash[hashnum];
306
307 while (carr) {
308 if (carr->carr_pa == carr_phys)
309 break;
310 carr = carr->nexthash;
311 }
312 return (carr);
313 }
314
315
316 /*
317 * Create a set of ccbs and add them to the free list. Called once
318 * by adw_init(). We return the number of CCBs successfully created.
319 */
320 static int
321 adw_create_ccbs(sc, ccbstore, count)
322 ADW_SOFTC *sc;
323 ADW_CCB *ccbstore;
324 int count;
325 {
326 ADW_CCB *ccb;
327 int i, error;
328
329 for (i = 0; i < count; i++) {
330 ccb = &ccbstore[i];
331 if ((error = adw_init_ccb(sc, ccb)) != 0) {
332 printf("%s: unable to initialize ccb, error = %d\n",
333 sc->sc_dev.dv_xname, error);
334 return (i);
335 }
336 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
337 }
338
339 return (i);
340 }
341
342
/*
 * A ccb is put back onto the free list.  If the list was empty before
 * the insert, wake any process sleeping in adw_get_ccb() waiting for
 * one.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none (i.e. this CCB, inserted at the head, has
	 * no successor, so the list was empty a moment ago), wake
	 * anybody waiting for one to come free, starting with queued
	 * entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}
367
368
/*
 * Clear the per-command state of a CCB before it is (re)used.
 */
static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}
376
377
/*
 * One-time initialization of a CCB: create its data-transfer DMA map
 * and enter the CCB in the physical-to-virtual hash table (it is
 * never removed).  Returns 0 or a bus_dmamap_create() error code.
 */
static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.  It can describe up to
	 * (ADW_MAX_SG_LIST - 1) pages in ADW_MAX_SG_LIST segments.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
				  (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
			 ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	/* hashkey is the bus address of this CCB within the control area */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}
410
411
/*
 * Get a free ccb.
 *
 * If none are available, sleep until one is freed by adw_free_ccb(),
 * unless XS_CTL_NOSLEEP is set in 'flags', in which case return NULL
 * immediately.
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & XS_CTL_NOSLEEP) != 0)
			goto out;	/* ccb is NULL here */

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}
449
450
451 /*
452 * Given a physical address, find the ccb that it corresponds to.
453 */
454 ADW_CCB *
455 adw_ccb_phys_kv(sc, ccb_phys)
456 ADW_SOFTC *sc;
457 u_int32_t ccb_phys;
458 {
459 int hashnum = CCB_HASH(ccb_phys);
460 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
461
462 while (ccb) {
463 if (ccb->hashkey == ccb_phys)
464 break;
465 ccb = ccb->nexthash;
466 }
467 return (ccb);
468 }
469
470
471 /*
472 * Queue a CCB to be sent to the controller, and send it if possible.
473 */
474 static int
475 adw_queue_ccb(sc, ccb, retry)
476 ADW_SOFTC *sc;
477 ADW_CCB *ccb;
478 int retry;
479 {
480 int errcode;
481
482 if(!retry)
483 TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
484
485 while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
486
487 errcode = AdvExeScsiQueue(sc, &ccb->scsiq);
488 switch(errcode) {
489 case ADW_SUCCESS:
490 break;
491
492 case ADW_BUSY:
493 printf("ADW_BUSY\n");
494 return(ADW_BUSY);
495
496 case ADW_ERROR:
497 printf("ADW_ERROR\n");
498 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
499 return(ADW_ERROR);
500 }
501
502 TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
503
504 if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
505 callout_reset(&ccb->xs->xs_callout,
506 (ccb->timeout * hz) / 1000, adw_timeout, ccb);
507 }
508
509 return(errcode);
510 }
511
512
513 /******************************************************************************/
514 /* SCSI layer interfacing routines */
515 /******************************************************************************/
516
517
/*
 * Chip bring-up: verify the chip signature, reset it, load the
 * configuration from the on-board EEPROM (3550 or 38C0800 variant,
 * selected by sc->chip_type), and hook up the second-level interrupt
 * callbacks.  A missing signature is fatal (panic).  EEPROM warnings
 * are printed but non-fatal.  Always returns 0.
 */
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		/* Load EEPROM settings for the specific chip variant. */
		warn_code = (sc->chip_type == ADV_CHIP_ASC3550)?
		    AdvInitFrom3550EEP(sc) :
		    AdvInitFrom38C0800EEP(sc);

		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			       "Setting default values\n",
			       sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting."
			       "Using automatic termination.\n",
			       sc->sc_dev.dv_xname);
	}

	/* Second-level handlers invoked from AdvISR(). */
	sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
	sc->async_callback = (ADW_CALLBACK) adw_async_callback;

	return (0);
}
557
558
/*
 * Attach-time initialization: allocate and initialize the control
 * structures, CCBs and carriers, run the chip-specific driver
 * initialization, then fill in the scsipi adapter/link templates and
 * attach the SCSI bus.  Resource-allocation failures abort the attach
 * quietly (the helpers have already printed a diagnostic).
 */
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create Control Blocks\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CCB) {
		/* Not fatal: run with however many CCBs we got. */
		printf("%s: WARNING: only %d of %d Control Blocks"
		       " created\n",
		       sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control->carriers, ADW_CARRIER_SIZE * ADW_MAX_CARRIER);

	i = adw_create_carriers(sc);
	if (i == 0) {
		printf("%s: unable to create Carriers\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADW_MAX_CARRIER) {
		printf("%s: WARNING: only %d of %d Carriers created\n",
		       sc->sc_dev.dv_xname, i, ADW_MAX_CARRIER);
	}


	/*
	 * Initialize the ASC3550 (or ASC38C0800) microcode and driver
	 * state.  The ASC_IERR_* conditions are fatal hardware or
	 * cabling problems.
	 */
	error = (sc->chip_type == ADV_CHIP_ASC3550)?
	    AdvInitAsc3550Driver(sc) :
	    AdvInitAsc38C0800Driver(sc);
	switch (error) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		      " one of the connectors",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_NO_CARRIER:
		panic("%s: no carrier",
		      sc->sc_dev.dv_xname);
		break;

	case ASC_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		      sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}
675
676
/*
 * Clamp a transfer to the largest size our scatter-gather list can
 * describe, then apply the system-wide minphys limit.
 */
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}
686
687
688 /*
689 * start a scsi operation given the command and the data address.
690 * Also needs the unit, target and lu.
691 */
692 static int
693 adw_scsi_cmd(xs)
694 struct scsipi_xfer *xs;
695 {
696 struct scsipi_link *sc_link = xs->sc_link;
697 ADW_SOFTC *sc = sc_link->adapter_softc;
698 ADW_CCB *ccb;
699 int s, fromqueue = 1, dontqueue = 0, nowait = 0, retry = 0;
700 int flags;
701
702 s = splbio(); /* protect the queue */
703
704 /*
705 * If we're running the queue from adw_done(), we've been
706 * called with the first queue entry as our argument.
707 */
708 if (xs == TAILQ_FIRST(&sc->sc_queue)) {
709 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
710 fromqueue = 1;
711 nowait = 1;
712 } else {
713
714 /* Polled requests can't be queued for later. */
715 dontqueue = xs->xs_control & XS_CTL_POLL;
716
717 /*
718 * If there are jobs in the queue, run them first.
719 */
720 if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
721 /*
722 * If we can't queue, we have to abort, since
723 * we have to preserve order.
724 */
725 if (dontqueue) {
726 splx(s);
727 xs->error = XS_DRIVER_STUFFUP;
728 return (TRY_AGAIN_LATER);
729 }
730 /*
731 * Swap with the first queue entry.
732 */
733 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
734 xs = TAILQ_FIRST(&sc->sc_queue);
735 TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
736 fromqueue = 1;
737 }
738 }
739
740
741 /*
742 * get a ccb to use. If the transfer
743 * is from a buf (possibly from interrupt time)
744 * then we can't allow it to sleep
745 */
746
747 flags = xs->xs_control;
748 if (nowait)
749 flags |= XS_CTL_NOSLEEP;
750 if ((ccb = adw_get_ccb(sc, flags)) == NULL) {
751 /*
752 * If we can't queue, we lose.
753 */
754 if (dontqueue) {
755 splx(s);
756 xs->error = XS_DRIVER_STUFFUP;
757 return (TRY_AGAIN_LATER);
758 }
759 /*
760 * Stuff ourselves into the queue, in front
761 * if we came off in the first place.
762 */
763 if (fromqueue)
764 TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
765 else
766 TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
767 splx(s);
768 return (SUCCESSFULLY_QUEUED);
769 }
770 splx(s); /* done playing with the queue */
771
772 ccb->xs = xs;
773 ccb->timeout = xs->timeout;
774
775 if (adw_build_req(xs, ccb, flags)) {
776 retryagain:
777 s = splbio();
778 retry = adw_queue_ccb(sc, ccb, retry);
779 splx(s);
780
781 switch(retry) {
782 case ADW_BUSY:
783 goto retryagain;
784
785 case ADW_ERROR:
786 xs->error = XS_DRIVER_STUFFUP;
787 return (COMPLETE);
788
789 }
790
791 /*
792 * Usually return SUCCESSFULLY QUEUED
793 */
794 if ((xs->xs_control & XS_CTL_POLL) == 0)
795 return (SUCCESSFULLY_QUEUED);
796
797 /*
798 * If we can't use interrupts, poll on completion
799 */
800 if (adw_poll(sc, xs, ccb->timeout)) {
801 adw_timeout(ccb);
802 if (adw_poll(sc, xs, ccb->timeout))
803 adw_timeout(ccb);
804 }
805 }
806 return (COMPLETE);
807 }
808
809
/*
 * Build a request structure for the Wide Boards.
 *
 * Fills in the CCB's ADW_SCSI_REQ_Q from the scsipi_xfer: CDB,
 * target/lun, sense-buffer addresses, and -- for data transfers --
 * loads the data DMA map and builds the scatter-gather block list.
 *
 * Returns 1 on success; 0 if the DMA map could not be loaded, in
 * which case the xfer is marked XS_DRIVER_STUFFUP and the CCB has
 * already been freed.
 */
static int
adw_build_req(xs, ccb, flags)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
	int flags;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	/* Sense data is DMA'd straight into the CCB's sense buffer. */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
/*	scsiqp->sense_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, scsi_sense);
 */	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
				ccb->dmamap_xfer, (struct uio *) xs->data,
				(flags & XS_CTL_NOSLEEP) ?
				BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif		/* TFS */
		{
			error = bus_dmamap_load(dmat,
			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
				(flags & XS_CTL_NOSLEEP) ?
				BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				       " segments\n",
				       sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				       " dma map\n",
				       sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		/* Make the mapped buffer visible to the device. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
				(xs->xs_control & XS_CTL_DATA_IN) ?
			      BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
914
915
916 /*
917 * Build scatter-gather list for Wide Boards.
918 */
919 static void
920 adw_build_sglist(ccb, scsiqp, sg_block)
921 ADW_CCB *ccb;
922 ADW_SCSI_REQ_Q *scsiqp;
923 ADW_SG_BLOCK *sg_block;
924 {
925 u_long sg_block_next_addr; /* block and its next */
926 u_int32_t sg_block_physical_addr;
927 int i; /* how many SG entries */
928 bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
929 int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;
930
931
932 sg_block_next_addr = (u_long) sg_block; /* allow math operation */
933 sg_block_physical_addr = ccb->hashkey +
934 offsetof(struct adw_ccb, sg_block[0]);
935 scsiqp->sg_real_addr = sg_block_physical_addr;
936
937 /*
938 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
939 * then split the request into multiple sg-list blocks.
940 */
941
942 do {
943 for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
944 sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
945 sg_block->sg_list[i].sg_count = sg_list->ds_len;
946
947 if (--sg_elem_cnt == 0) {
948 /* last entry, get out */
949 sg_block->sg_cnt = i + i;
950 sg_block->sg_ptr = NULL; /* next link = NULL */
951 return;
952 }
953 sg_list++;
954 }
955 sg_block_next_addr += sizeof(ADW_SG_BLOCK);
956 sg_block_physical_addr += sizeof(ADW_SG_BLOCK);
957
958 sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
959 sg_block->sg_ptr = sg_block_physical_addr;
960 sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
961 } while (1);
962 }
963
964
/*
 * Hardware interrupt handler.  AdvISR() does the real work and invokes
 * adw_isr_callback() for each completed request; afterwards we try to
 * restart the first software-queued command, if any.
 * Always returns 1 (interrupt claimed).
 */
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	if(AdvISR(sc) != ADW_FALSE) {
		/*
		 * If there are queue entries in the software queue, try to
		 * run the first one. We should be more or less guaranteed
		 * to succeed, since we just freed a CCB.
		 *
		 * NOTE: adw_scsi_cmd() relies on our calling it with
		 * the first entry in the queue.
		 */
		if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
			(void) adw_scsi_cmd(xs);
	}

	return (1);
}
988
989
990 /*
991 * Poll a particular unit, looking for a particular xs
992 */
993 static int
994 adw_poll(sc, xs, count)
995 ADW_SOFTC *sc;
996 struct scsipi_xfer *xs;
997 int count;
998 {
999
1000 /* timeouts are in msec, so we loop in 1000 usec cycles */
1001 while (count) {
1002 adw_intr(sc);
1003 if (xs->xs_status & XS_STS_DONE)
1004 return (0);
1005 delay(1000); /* only happens in boot so ok */
1006 count--;
1007 }
1008 return (1);
1009 }
1010
1011
/*
 * Per-command watchdog, armed by adw_queue_ccb() and also called
 * directly from adw_scsi_cmd() when polling times out.  The first
 * expiration aborts the timed-out CCB; if the abort itself times out
 * (CCB_ABORTED already set), the SCSI bus is reset instead.
 */
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 * Lets try resetting the bus!
		 */
		printf(" AGAIN. Resetting SCSI Bus\n");
		ccb->flags &= ~CCB_ABORTED;
		/* AdvResetSCSIBus() will call sbreset_callback() */
		AdvResetSCSIBus(sc);
	} else {
		/*
		 * Abort the operation that has timed out
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
		/* ADW_ABORT_CCB() will implicitly call isr_callback() */
		ADW_ABORT_CCB(sc, ccb);
	}

	splx(s);
}
1053
1054
1055 /******************************************************************************/
1056 /* WIDE boards Interrupt callbacks */
1057 /******************************************************************************/
1058
1059
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 * Looks up the CCB from the request's physical address, stops its
 * watchdog, unloads its data DMA map, translates the firmware
 * completion status into a scsipi error code, frees the CCB and
 * notifies the scsipi layer.
 */
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;
//	int s;


	/* 'ccb_ptr' carries the CCB's bus address; map it back to kv. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	/* Disabled retry-on-abort path, kept for reference. */
/*	if(ccb->flags & CCB_ABORTING) {
		printf("Retrying request\n");
		ccb->flags &= ~CCB_ABORTING;
		ccb->flags |= CCB_ABORTED;
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);
		return;
	}
*/
	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
			     (xs->xs_control & XS_CTL_DATA_IN) ?
			      BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	/* Completing a CCB we never handed out indicates corruption. */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 * ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 * scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			/* Host side was clean: decode the SCSI status. */
			switch(scsiq->scsi_status) {
			case SS_CHK_CONDITION:
			case SS_CMD_TERMINATED:
				/* Copy the DMA'd sense data to the xfer. */
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SS_TARGET_BUSY:
			case SS_RSERV_CONFLICT:
			case SS_QUEUE_FULL:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_CONDITION_MET:
			case SS_INTERMID:
			case SS_INTERMID_COND_MET:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_GOOD:
				break;
			}
			break;

		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	adw_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}
1184
1185
1186 /*
1187 * adv_async_callback() - Adv Library asynchronous event callback function.
1188 */
1189 static void
1190 adw_async_callback(sc, code)
1191 ADW_SOFTC *sc;
1192 u_int8_t code;
1193 {
1194 switch (code) {
1195 case ADV_ASYNC_SCSI_BUS_RESET_DET:
1196 /*
1197 * The firmware detected a SCSI Bus reset.
1198 */
1199 break;
1200
1201 case ADV_ASYNC_RDMA_FAILURE:
1202 /*
1203 * Handle RDMA failure by resetting the SCSI Bus and
1204 * possibly the chip if it is unresponsive. Log the error
1205 * with a unique code.
1206 */
1207 AdvResetSCSIBus(sc);
1208 break;
1209
1210 case ADV_HOST_SCSI_BUS_RESET:
1211 /*
1212 * Host generated SCSI bus reset occurred.
1213 */
1214 break;
1215
1216 default:
1217 break;
1218 }
1219 }
1220