/*	$NetBSD: adw.c,v 1.12.2.7 2001/03/12 13:30:12 bouyer Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adwmcode.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif /* ! DDB */

/******************************************************************************/


static int	adw_alloc_controls __P((ADW_SOFTC *));
static int	adw_alloc_carriers __P((ADW_SOFTC *));
static int	adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void	adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void	adw_reset_ccb __P((ADW_CCB *));
static int	adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *));
static int	adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));

static void	adw_scsipi_request __P((struct scsipi_channel *,
		    scsipi_adapter_req_t, void *));
static int	adw_build_req __P((ADW_SOFTC *, ADW_CCB *));
static void	adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
static void	adwminphys __P((struct buf *));
static void	adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
static void	adw_async_callback __P((ADW_SOFTC *, u_int8_t));

static void	adw_print_info __P((ADW_SOFTC *, int));

static int	adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void	adw_timeout __P((void *));
static void	adw_reset_bus __P((ADW_SOFTC *));


/******************************************************************************/
/*                       DMA Mapping for Control Blocks                       */
/******************************************************************************/


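/*
 * All control structures (the CCB array in particular) live inside a
 * single "struct adw_control" that is allocated DMA-safe below.  The map
 * created here (sc_dmamap_control) is later used to derive the bus
 * address of each individual CCB (see adw_init_ccb()).
 */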
static int
adw_alloc_controls(sc)
	ADW_SOFTC      *sc;
{
	bus_dma_segment_t seg;
	int             error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) & sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


static int
adw_alloc_carriers(sc)
	ADW_SOFTC      *sc;
{
	bus_dma_segment_t seg;
	int             error, rseg;

	/*
	 * Allocate the carrier structures.
	 */
	sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    M_DEVBUF, M_WAITOK);
	if(!sc->sc_control->carriers) {
		printf("%s: malloc() failed in allocating carrier structures\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    (caddr_t *) &sc->sc_control->carriers,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the carrier structures.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_carrier)) != 0) {
		printf("%s: unable to create carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->sc_dmamap_carrier, sc->sc_control->carriers,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


/******************************************************************************/
/*                          Control Blocks routines                           */
/******************************************************************************/


/*
 * Create a set of CCBs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC      *sc;
	ADW_CCB        *ccbstore;
	int             count;
{
	ADW_CCB        *ccb;
	int             i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC      *sc;
	ADW_CCB        *ccb;
{
	int             s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB        *ccb;
{

	ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC      *sc;
	ADW_CCB        *ccb;
{
	int		hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, return NULL.
 */
static ADW_CCB *
adw_get_ccb(sc)
	ADW_SOFTC      *sc;
{
	ADW_CCB        *ccb = 0;
	int             s;

	s = splbio();

	ccb = sc->sc_free_ccb.tqh_first;
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}


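/*
 * Each CCB was entered into sc_ccbhash by adw_init_ccb(), keyed on its
 * bus (DMA) address.  The microcode hands that bus address back in the
 * ADW_SCSI_REQ_Q 'ccb_ptr' field, and the lookup below translates it to
 * the kernel-virtual CCB.  Entries are never removed from the hash.
 */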
/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(sc, ccb_phys)
	ADW_SOFTC      *sc;
	u_int32_t       ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB        *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
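/*
 * New CCBs are appended to sc_waiting_ccb; the loop below then drains
 * the whole waiting queue into the microcode via AdwExeScsiQueue().
 * CCBs accepted by the board move to sc_pending_ccb, and a per-transfer
 * timeout callout is armed unless the transfer is being polled.
 */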
static int
adw_queue_ccb(sc, ccb)
	ADW_SOFTC      *sc;
	ADW_CCB        *ccb;
{
	int		errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch(errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return(ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return(ADW_ERROR);
		}

		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	return(errcode);
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


int
adw_init(sc)
	ADW_SOFTC      *sc;
{
	u_int16_t       warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdwResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdwInitFromEEPROM(sc);

		if (warn_code & ADW_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ADW_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);
	}

	sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
	sc->async_callback = (ADW_CALLBACK) adw_async_callback;

	return 0;
}


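/*
 * adw_attach() builds the per-adapter state: it allocates the DMA-safe
 * control area, creates the CCB free list and the carriers, initializes
 * the adapter through AdwInitDriver(), and finally fills in and
 * registers the scsipi adapter/channel with config_found().
 */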
void
adw_attach(sc)
	ADW_SOFTC      *sc;
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int             ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		printf("%s: unable to create Control Blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (ncontrols != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d Control Blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	/*
	 * Zero the freeze_device status
	 */
	bzero(sc->sc_freeze_dev, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}


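/*
 * Clamp transfers so that even a non-page-aligned buffer never needs
 * more than ADW_MAX_SG_LIST scatter/gather segments:
 * (ADW_MAX_SG_LIST - 1) * PAGE_SIZE bytes can touch at most
 * ADW_MAX_SG_LIST pages.  This matches the size of the DMA maps created
 * in adw_init_ccb().
 */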
static void
adwminphys(bp)
	struct buf     *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static void
adw_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	ADW_SOFTC      *sc = (void *)chan->chan_adapter->adapt_dev;
	ADW_CCB        *ccb;
	int             s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * get a ccb to use. If the transfer
		 * is from a buf (possibly from interrupt time)
		 * then we can't allow it to sleep
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch(retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}


/*
 * Build a request structure for the Wide Boards.
 */
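/*
 * adw_build_req() fills in the CCB's ADW_SCSI_REQ_Q: it records the CDB
 * (the first 12 bytes in 'cdb', bytes 12-15 in 'cdb16' for 16-byte
 * commands), the target/LUN, the sense buffer addresses, and then loads
 * the transfer DMA map and builds the scatter/gather block chain.
 * It returns non-zero on success; on a DMA mapping failure it completes
 * the transfer itself and returns 0.
 */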
static int
adw_build_req(sc, ccb)
	ADW_SOFTC      *sc;
	ADW_CCB        *ccb;
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int             error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		bcopy(&(xs->cmd[12]), &scsiqp->cdb16, xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING);
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			printf("%s: error %d loading DMA map\n",
			    sc->sc_dev.dv_xname, error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return(0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp, sg_block)
	ADW_CCB        *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
	ADW_SG_BLOCK   *sg_block;
{
	u_long          sg_block_next_addr;	/* block and its next */
	u_int32_t       sg_block_physical_addr;
	int             i;	/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int             sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	sg_block_physical_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

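	/*
	 * Each ADW_SG_BLOCK holds up to NO_OF_SG_PER_BLOCK entries plus the
	 * physical address of the next block; the blocks therefore form a
	 * chain that is terminated by setting sg_ptr to NULL in the block
	 * holding the last segment.
	 */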
	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = sg_block_physical_addr;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr;	/* virt. addr */
	} while (1);
}


/******************************************************************************/
/*                       Interrupts and TimeOut routines                      */
/******************************************************************************/


int
adw_intr(arg)
	void           *arg;
{
	ADW_SOFTC      *sc = arg;


	if(AdwISR(sc) != ADW_FALSE) {
		return (1);
	}

	return (0);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC      *sc;
	struct scsipi_xfer *xs;
	int             count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


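/*
 * adw_timeout() escalates in three steps: the first expiry marks the CCB
 * CCB_ABORTING, the second marks it CCB_ABORTED, and the third resets the
 * SCSI bus.  Since aborting a CCB is disabled below (broken 3.3a
 * microcode), the callout is simply re-armed at each step.
 */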
static void
adw_timeout(arg)
	void           *arg;
{
	ADW_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC      *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int             s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 *
		 * No more opportunities. Let's try resetting the bus and
		 * reinitializing the host adapter.
		 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
		/*
		 * Abort the operation that has timed out.
		 *
		 * Second opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next timeout, otherwise a
		 * Bus Reset will inevitably follow.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt.
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * callout_reset() is one-shot, so restart it by hand; the
		 * next timeout event will then reset the bus.
		 */
		callout_reset(&xs->xs_callout,
		    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	} else {
		/*
		 * Abort the operation that has timed out.
		 *
		 * First opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next 2 timeouts, otherwise a
		 * Bus Reset will inevitably follow.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt.
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * callout_reset() is one-shot, so restart it by hand to
		 * give the command that timed out a second opportunity.
		 */
		callout_reset(&xs->xs_callout,
		    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	splx(s);
}


static void
adw_reset_bus(sc)
	ADW_SOFTC      *sc;
{
	ADW_CCB	       *ccb;
	int             s;
	struct scsipi_xfer *xs;

	s = splbio();
	AdwResetSCSIBus(sc);
	while((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
	    adw_pending_ccb)) != NULL) {
		callout_stop(&ccb->xs->xs_callout);
		TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
		xs = ccb->xs;
		adw_free_ccb(sc, ccb);
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
	}
	splx(s);
}


/******************************************************************************/
/*              Host Adapter and Peripherals Information Routines             */
/******************************************************************************/


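/*
 * Print the wide/synchronous transfer parameters for a target, as read
 * from the microcode's handshake configuration table.  Called from
 * adw_isr_callback() after a successful INQUIRY to LUN 0.
 */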
static void
adw_print_info(sc, tid)
	ADW_SOFTC      *sc;
	int             tid;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t	wdtr_able, wdtr_done, wdtr;
	u_int16_t	sdtr_able, sdtr_done, sdtr, period;
	static int	wdtr_reneg = 0, sdtr_reneg = 0;

	if (tid == 0){
		wdtr_reneg = sdtr_reneg = 0;
	}

	printf("%s: target %d ", sc->sc_dev.dv_xname, tid);

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_ABLE, wdtr_able);
	if(wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_DONE, wdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), wdtr);
		printf("using %d-bits wide, ", (wdtr & 0x8000)? 16 : 8);
		if((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			wdtr_reneg = 1;
	} else {
		printf("wide transfers disabled, ");
	}

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
	if(sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), sdtr);
		sdtr &= ~0x8000;
		if((sdtr & 0x1F) != 0) {
			if((sdtr & 0x1F00) == 0x1100){
				printf("80.0 MHz");
			} else if((sdtr & 0x1F00) == 0x1000){
				printf("40.0 MHz");
			} else {
				/* <= 20.0 MHz */
				period = (((sdtr >> 8) * 25) + 50)/4;
				if(period == 0) {
					/* Should never happen. */
					printf("? MHz");
				} else {
					printf("%d.%d MHz", 250/period,
					    ADW_TENTHS(250, period));
				}
			}
			printf(" synchronous transfers\n");
		} else {
			printf("asynchronous transfers\n");
		}
		if((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			sdtr_reneg = 1;
	} else {
		printf("synchronous transfers disabled\n");
	}

	if(wdtr_reneg || sdtr_reneg) {
		printf("%s: target %d %s", sc->sc_dev.dv_xname, tid,
		    (wdtr_reneg)? ((sdtr_reneg)? "wide/sync" : "wide") :
		    ((sdtr_reneg)? "sync" : "") );
		printf(" renegotiation pending before next command.\n");
	}
}


/******************************************************************************/
/*                       WIDE boards Interrupt callbacks                      */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 *	Interrupts are disabled by the caller (AdwISR() function), and will be
 *	enabled at the end of the caller.
 */
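/*
 * The handler below looks up the CCB from the bus address in 'ccb_ptr',
 * syncs and unloads the data DMA map, translates the done/host/scsi
 * status into an xs->error value, and completes the transfer with
 * scsipi_done().  Fatal DMA or watchdog conditions trigger a full bus
 * reset through adw_reset_bus().
 */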
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC      *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADW_CCB        *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the scsi peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	   ((scsiq->done_status == QD_NO_ERROR) ||
	    (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			xs->resid = scsiq->data_cnt;
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			printf("%s: Overrun/Overflow/Underflow condition\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			printf("%s: Unexpected BUS free\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			printf("%s: BUS Reset\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			printf("%s: Device Reset\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			printf("%s: Queue Aborted\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error. This should *NEVER* happen!
			 *
			 * Let's try resetting the bus and reinitializing
			 * the host adapter.
			 */
			printf("%s: DMA Error. Resetting bus\n",
			    sc->sc_dev.dv_xname);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Resetting bus\n",
			    sc->sc_dev.dv_xname);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			printf("%s: Transfer Error\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			printf("%s: Auto Sense Failed\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			printf("%s: Invalid Device\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			printf("%s: Unexpected Check Condition\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			printf("%s: Unknown Error\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			    sc->sc_dev.dv_xname, scsiq->host_status);
		}
	}

	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}


/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(sc, code)
	ADW_SOFTC      *sc;
	u_int8_t        code;
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/* The firmware detected a SCSI Bus reset. */
		printf("%s: SCSI Bus reset detected\n", sc->sc_dev.dv_xname);
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive.
		 */
		printf("%s: RDMA failure. Resetting the SCSI Bus and"
		    " the adapter\n", sc->sc_dev.dv_xname);
		AdwResetSCSIBus(sc);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/* Host generated SCSI bus reset occurred. */
		printf("%s: Host generated SCSI bus reset occurred\n",
		    sc->sc_dev.dv_xname);
		break;

	case ADV_ASYNC_CARRIER_READY_FAILURE:
		/* Carrier Ready failure. */
		printf("%s: Carrier Ready failure!\n", sc->sc_dev.dv_xname);
		break;

	default:
		break;
	}
}