/*	$NetBSD: adw.c,v 1.12.2.6 2000/11/22 16:03:10 bouyer Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adwmcode.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif /* ! DDB */

/******************************************************************************/


static int adw_alloc_controls __P((ADW_SOFTC *));
static int adw_alloc_carriers __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *));
static int adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));

static void adw_scsipi_request __P((struct scsipi_channel *,
	scsipi_adapter_req_t, void *));
static int adw_build_req __P((ADW_SOFTC *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
static void adwminphys __P((struct buf *));
static void adw_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
static void adw_async_callback __P((ADW_SOFTC *, u_int8_t));

static void adw_print_info __P((ADW_SOFTC *, int));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_reset_bus __P((ADW_SOFTC *));


/******************************************************************************/
/* DMA Mapping for Control Blocks */
/******************************************************************************/

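/*
 * The adw_control structure (the CCB array plus the carrier area pointer)
 * must be visible to both the CPU and the chip, so it is set up through
 * bus_dma(9): bus_dmamem_alloc() obtains DMA-safe memory, bus_dmamem_map()
 * gives the kernel a virtual mapping of it, and bus_dmamap_create() plus
 * bus_dmamap_load() build the map whose bus address is later used for
 * 'ccb_ptr' and the sense/SG-list pointers handed to the microcode (see
 * adw_init_ccb() and adw_build_req() below).
 */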
static int
adw_alloc_controls(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


static int
adw_alloc_carriers(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the carrier structures.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    (caddr_t *) &sc->sc_control->carriers,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the carriers.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_carrier)) != 0) {
		printf("%s: unable to create carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->sc_dmamap_carrier, sc->sc_control->carriers,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


/******************************************************************************/
/* Control Blocks routines */
/******************************************************************************/


/*
 * Create a set of CCBs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * Put a CCB back onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Put the CCB into the phystokv hash table.
	 * Entries are never removed.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free CCB.
 *
 * Returns NULL if none are available; the mid-layer is expected to
 * throttle requests to the number of openings we reported.
 */
static ADW_CCB *
adw_get_ccb(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	ccb = sc->sc_free_ccb.tqh_first;
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}


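/*
 * The chip hands completed requests back by their bus address (the
 * ADW_SCSI_REQ_Q 'ccb_ptr' field), so the driver keeps a small hash table
 * mapping physical CCB addresses back to kernel virtual ones.  A lookup is
 * CCB_HASH(ccb_phys) to pick a bucket, then a walk of the 'nexthash' chain
 * comparing 'hashkey', e.g. in adw_isr_callback():
 *
 *	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);
 */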
/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(sc, ccb_phys)
	ADW_SOFTC *sc;
	u_int32_t ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


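/*
 * Note on queueing: adw_queue_ccb() below always appends the new CCB to
 * sc_waiting_ccb first and then drains that list towards the microcode via
 * AdwExeScsiQueue(), so requests are started strictly in FIFO order.  Each
 * CCB successfully handed to the chip moves to sc_pending_ccb and, unless
 * the transfer is polled (XS_CTL_POLL), arms its per-xfer callout with the
 * timeout converted from milliseconds to ticks ((timeout * hz) / 1000).
 */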
/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static int
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch (errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return (ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return (ADW_ERROR);
		}

		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	return (errcode);
}


/******************************************************************************/
/* SCSI layer interfacing routines */
/******************************************************************************/


int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdwResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdwInitFromEEPROM(sc);

		if (warn_code & ADW_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ADW_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);
	}

	sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
	sc->async_callback = (ADW_CALLBACK) adw_async_callback;

	return 0;
}


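/*
 * adw_attach() glue: allocate the DMA-able control area, carve it into
 * CCBs, allocate the carrier area, let the Adv library bring the adapter
 * up (AdwInitDriver()), and finally register a single scsipi channel whose
 * number of openings equals the number of CCBs actually created.
 */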
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */

	bzero(sc->sc_control, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		printf("%s: unable to create Control Blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (ncontrols != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d Control Blocks created\n",
		    sc->sc_dev.dv_xname, ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */

	/*
	 * Zero the freeze_device status array.
	 */
	bzero(sc->sc_freeze_dev, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter.
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD device attached to LVD connector",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 7;
	chan->chan_id = sc->chip_scsi_id;

	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}


static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}

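/*
 * The cap applied by adwminphys() above, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE
 * bytes per transfer, matches the maximum size the per-CCB DMA maps were
 * created with in adw_init_ccb(), so bus_dmamap_load() is never asked to
 * map more than the hardware scatter/gather list can describe.
 */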

/*
 * Start a SCSI operation given the command and the data address.
 * Also needs the unit, target and lun.
 */
static void
adw_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	ADW_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
	ADW_CCB *ccb;
	int s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * Get a CCB to use.  If the transfer is from a buf
		 * (possibly from interrupt time) then we can't allow
		 * it to sleep.
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch (retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set the CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes is
	 * supported: the first 12 bytes go into 'cdb', any
	 * remainder into 'cdb16'.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, ((scsiqp->cdb_len = xs->cmdlen) <= 12) ?
	    xs->cmdlen : 12);
	if (xs->cmdlen > 12)
		bcopy(&(xs->cmd[12]), &scsiqp->cdb16, xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    BUS_DMA_NOWAIT);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    BUS_DMA_NOWAIT);
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			printf("%s: error %d loading DMA map\n",
			    sc->sc_dev.dv_xname, error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return (0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data transfer, use non scatter-gather values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp, sg_block)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
	ADW_SG_BLOCK *sg_block;
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;				/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	sg_block_physical_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK DMA segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = sg_block_physical_addr;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}

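/*
 * Resulting layout, sketched for a transfer that needs more DMA segments
 * than fit in one block (NO_OF_SG_PER_BLOCK entries each):
 *
 *	sg_block[0]: sg_cnt = NO_OF_SG_PER_BLOCK, sg_ptr -> sg_block[1] (bus)
 *	sg_block[1]: sg_cnt = NO_OF_SG_PER_BLOCK, sg_ptr -> sg_block[2] (bus)
 *	sg_block[2]: sg_cnt = <remaining>,        sg_ptr = NULL
 *
 * The chip follows the physical sg_ptr chain on its own; the driver only
 * stores the first block's bus address in scsiqp->sg_real_addr.
 */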

/******************************************************************************/
/* Interrupts and TimeOut routines */
/******************************************************************************/


int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;


	if (AdwISR(sc) != ADW_FALSE) {
		return (1);
	}

	return (0);
}


/*
 * Poll a particular unit, looking for a particular xs.
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


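/*
 * Timeout handling is a three-strike escalation driven by the per-xfer
 * callout: the first expiry marks the CCB CCB_ABORTING and re-arms the
 * callout, the second marks it CCB_ABORTED and re-arms it once more, and
 * the third resets the SCSI bus via adw_reset_bus().  The ADW_ABORT_CCB()
 * calls stay disabled below because the 3.3a microcode cannot abort an
 * individual CCB reliably.
 */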
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 *
		 * No more opportunities.  Let's try resetting the bus and
		 * reinitializing the host adapter.
		 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
		/*
		 * Abort the operation that has timed out.
		 *
		 * Second opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next timeout; otherwise a
		 * Bus Reset will inevitably follow.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt.
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again.
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * Instead of waiting for a multishot callout_reset(),
		 * restart it by hand so that the next timeout event
		 * will reset the bus.
		 */
		callout_reset(&xs->xs_callout,
		    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	} else {
		/*
		 * Abort the operation that has timed out.
		 *
		 * First opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next two timeouts; otherwise a
		 * Bus Reset will inevitably follow.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt.
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again.
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * Instead of waiting for a multishot callout_reset(),
		 * restart it by hand so as to give the command which
		 * timed out a second opportunity.
		 */
		callout_reset(&xs->xs_callout,
		    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	splx(s);
}


static void
adw_reset_bus(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;
	int s;
	struct scsipi_xfer *xs;

	s = splbio();
	AdwResetSCSIBus(sc);
	while ((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
	    adw_pending_ccb)) != NULL) {
		callout_stop(&ccb->xs->xs_callout);
		TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
		xs = ccb->xs;
		adw_free_ccb(sc, ccb);
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
	}
	splx(s);
}


/******************************************************************************/
/* Host Adapter and Peripherals Information Routines */
/******************************************************************************/


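/*
 * adw_print_info() reports the negotiated transfer parameters for one
 * target by reading the microcode's LRAM tables: transfer width from the
 * wide-negotiation able/done bitmaps, speed from the per-target handshake
 * table.  For the slower synchronous rates the period factor in the upper
 * byte is converted with period = ((factor * 25) + 50) / 4 and printed as
 * 250 / period MHz; a factor of 2, for example, yields period 25, i.e.
 * 10.0 MHz.
 */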
static void
adw_print_info(sc, tid)
	ADW_SOFTC *sc;
	int tid;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t wdtr_able, wdtr_done, wdtr;
	u_int16_t sdtr_able, sdtr_done, sdtr, period;
	static int wdtr_reneg = 0, sdtr_reneg = 0;

	if (tid == 0) {
		wdtr_reneg = sdtr_reneg = 0;
	}

	printf("%s: target %d ", sc->sc_dev.dv_xname, tid);

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_ABLE, wdtr_able);
	if (wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_DONE, wdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), wdtr);
		printf("using %d-bit wide, ", (wdtr & 0x8000) ? 16 : 8);
		if ((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			wdtr_reneg = 1;
	} else {
		printf("wide transfers disabled, ");
	}

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
	if (sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), sdtr);
		sdtr &= ~0x8000;
		if ((sdtr & 0x1F) != 0) {
			if ((sdtr & 0x1F00) == 0x1100) {
				printf("80.0 MHz");
			} else if ((sdtr & 0x1F00) == 0x1000) {
				printf("40.0 MHz");
			} else {
				/* <= 20.0 MHz */
				period = (((sdtr >> 8) * 25) + 50) / 4;
				if (period == 0) {
					/* Should never happen. */
					printf("? MHz");
				} else {
					printf("%d.%d MHz", 250 / period,
					    ADW_TENTHS(250, period));
				}
			}
			printf(" synchronous transfers\n");
		} else {
			printf("asynchronous transfers\n");
		}
		if ((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			sdtr_reneg = 1;
	} else {
		printf("synchronous transfers disabled\n");
	}

	if (wdtr_reneg || sdtr_reneg) {
		printf("%s: target %d %s", sc->sc_dev.dv_xname, tid,
		    (wdtr_reneg) ? ((sdtr_reneg) ? "wide/sync" : "wide") :
		    ((sdtr_reneg) ? "sync" : ""));
		printf(" renegotiation pending before next command.\n");
	}
}


/******************************************************************************/
/* WIDE boards Interrupt callbacks */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 *	Interrupts are disabled by the caller (the AdwISR() function), and
 *	will be re-enabled at the end of the caller.
 */
static void
adw_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the SCSI peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	    ((scsiq->done_status == QD_NO_ERROR) ||
	     (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			xs->resid = scsiq->data_cnt;
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			printf("%s: Overrun/Overflow/Underflow condition\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			printf("%s: Unexpected BUS free\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			printf("%s: BUS Reset\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			printf("%s: Device Reset\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			printf("%s: Queue Aborted\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error.  This should *NEVER* happen!
			 *
			 * Let's try resetting the bus and reinitializing
			 * the host adapter.
			 */
			printf("%s: DMA Error. Resetting bus\n",
			    sc->sc_dev.dv_xname);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Resetting bus\n",
			    sc->sc_dev.dv_xname);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			printf("%s: Transfer Error\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			printf("%s: Auto Sense Failed\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			printf("%s: Invalid Device\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			printf("%s: Unexpected Check Condition\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			printf("%s: Unknown Error\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			    sc->sc_dev.dv_xname, scsiq->host_status);
		}
	}

	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}

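/*
 * Asynchronous (not request-bound) events reported by the library end up
 * in adw_async_callback() below.  Most of them are only logged; the one
 * case the driver acts on here is an RDMA failure, which is answered with
 * a SCSI bus reset through AdwResetSCSIBus().
 */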

/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(sc, code)
	ADW_SOFTC *sc;
	u_int8_t code;
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/* The firmware detected a SCSI Bus reset. */
		printf("%s: SCSI Bus reset detected\n", sc->sc_dev.dv_xname);
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive.
		 */
		printf("%s: RDMA failure. Resetting the SCSI Bus and"
		    " the adapter\n", sc->sc_dev.dv_xname);
		AdwResetSCSIBus(sc);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/* Host generated SCSI bus reset occurred. */
		printf("%s: Host generated SCSI bus reset occurred\n",
		    sc->sc_dev.dv_xname);
		break;

	case ADV_ASYNC_CARRIER_READY_FAILURE:
		/* Carrier Ready failure. */
		printf("%s: Carrier Ready failure!\n", sc->sc_dev.dv_xname);
		break;

	default:
		break;
	}
}