/*	$NetBSD: adw.c,v 1.33 2001/07/19 16:25:23 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adwmcode.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define Debugger() panic("should call debugger here (adw.c)")
#endif /* ! DDB */

/******************************************************************************/


static int adw_alloc_controls(ADW_SOFTC *);
static int adw_alloc_carriers(ADW_SOFTC *);
static int adw_create_ccbs(ADW_SOFTC *, ADW_CCB *, int);
static void adw_free_ccb(ADW_SOFTC *, ADW_CCB *);
static void adw_reset_ccb(ADW_CCB *);
static int adw_init_ccb(ADW_SOFTC *, ADW_CCB *);
static ADW_CCB *adw_get_ccb(ADW_SOFTC *);
static int adw_queue_ccb(ADW_SOFTC *, ADW_CCB *);

static void adw_scsipi_request(struct scsipi_channel *,
	scsipi_adapter_req_t, void *);
static int adw_build_req(ADW_SOFTC *, ADW_CCB *);
static void adw_build_sglist(ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *);
static void adwminphys(struct buf *);
static void adw_isr_callback(ADW_SOFTC *, ADW_SCSI_REQ_Q *);
static void adw_async_callback(ADW_SOFTC *, u_int8_t);

static void adw_print_info(ADW_SOFTC *, int);

static int adw_poll(ADW_SOFTC *, struct scsipi_xfer *, int);
static void adw_timeout(void *);
static void adw_reset_bus(ADW_SOFTC *);


/******************************************************************************/
/* DMA Mapping for Control Blocks */
/******************************************************************************/

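/*
 * Allocate and map a single DMA-safe chunk of memory holding the
 * adw_control structure (the CCB array and related per-controller data),
 * then create and load the one-segment DMA map that describes it.
 * CCB bus addresses are later derived from this map's base address.
 */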
static int
adw_alloc_controls(ADW_SOFTC *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}

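/*
 * Allocate and map DMA-safe memory for the carrier structures used to
 * pass requests to the RISC microcode, then create and load the DMA map
 * that describes them.
 */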
static int
adw_alloc_carriers(ADW_SOFTC *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the carrier structures.
	 */
	sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    M_DEVBUF, M_WAITOK);
	if(!sc->sc_control->carriers) {
		printf("%s: malloc() failed in allocating carrier structures\n",
		    sc->sc_dev.dv_xname);
		return (ENOMEM);
	}

	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
	    (caddr_t *) &sc->sc_control->carriers,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map carrier structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the carriers.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_carrier)) != 0) {
		printf("%s: unable to create carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat,
	    sc->sc_dmamap_carrier, sc->sc_control->carriers,
	    sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load carriers DMA map,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}

	return (0);
}


/******************************************************************************/
/* Control Blocks routines */
/******************************************************************************/


/*
 * Create a set of ccbs and add them to the free list. Called once
 * by adw_init(). We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(ADW_SOFTC *sc, ADW_CCB *ccbstore, int count)
{
	ADW_CCB *ccb;
	int i, error;

	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	splx(s);
}


static void
adw_reset_ccb(ADW_CCB *ccb)
{

	ccb->flags = 0;
}

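/*
 * Initialize a single CCB: create the DMA map used for its data
 * transfers and enter the CCB in the physical-to-virtual hash table,
 * keyed on its bus address, so that completions (which report only the
 * bus address) can be mapped back to the CCB.
 */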
static int
adw_init_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Insert the CCB into the phystokv hash table; entries are
	 * never removed.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADW_CCB *
adw_get_ccb(ADW_SOFTC *sc)
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	ccb = sc->sc_free_ccb.tqh_first;
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(ADW_SOFTC *sc, u_int32_t ccb_phys)
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static int
adw_queue_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch(errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return(ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return(ADW_ERROR);
		}

		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	return(errcode);
}


/******************************************************************************/
/* SCSI layer interfacing routines */
/******************************************************************************/

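/*
 * First-stage initialization: verify the chip signature, reset the chip,
 * load the configuration from the EEPROM and hook up the interrupt and
 * asynchronous event callbacks.
 */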
int
adw_init(ADW_SOFTC *sc)
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdwResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdwInitFromEEPROM(sc);

		if (warn_code & ADW_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ADW_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);
	}

	sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
	sc->async_callback = (ADW_CALLBACK) adw_async_callback;

	return 0;
}

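/*
 * Second-stage attach: allocate the control blocks and carriers,
 * initialize the adapter and its microcode via AdwInitDriver(), then
 * register the adapter and channel with the scsipi layer.
 */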
void
adw_attach(ADW_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */

	memset(sc->sc_control, 0, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		printf("%s: unable to create Control Blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (ncontrols != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d Control Blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */

	/*
	 * Zero the freeze_device status.
	 */
	memset(sc->sc_freeze_dev, 0, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    sc->sc_dev.dv_xname);
		break;

	case ADW_WARN_BUSRESET_ERROR:
		printf("%s: WARNING: Bus Reset Error\n",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 7;
	chan->chan_id = sc->chip_scsi_id;

	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

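/*
 * Clamp a transfer to the largest size the per-CCB scatter/gather list
 * can map.
 */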
static void
adwminphys(struct buf *bp)
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static void
adw_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	ADW_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
	ADW_CCB *ccb;
	int s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * get a ccb to use. If the transfer
		 * is from a buf (possibly from interrupt time)
		 * then we can't allow it to sleep
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch(retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	memset(scsiqp, 0, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported.
	 */
	memcpy(&scsiqp->cdb, xs->cmd, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		memcpy(&scsiqp->cdb16, &(xs->cmd[12]), xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMA_READ : BUS_DMA_WRITE));
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMA_READ : BUS_DMA_WRITE));
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			printf("%s: error %d loading DMA map\n",
			    sc->sc_dev.dv_xname, error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return(0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		memset(ccb->sg_block, 0,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ADW_CCB *ccb, ADW_SCSI_REQ_Q *scsiqp, ADW_SG_BLOCK *sg_block)
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;	/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	sg_block_physical_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = sg_block_physical_addr;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}


/******************************************************************************/
/* Interrupts and TimeOut routines */
/******************************************************************************/

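/*
 * Hardware interrupt handler: the real work is done by AdwISR(), which
 * in turn invokes adw_isr_callback() for each completed request.
 */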
int
adw_intr(void *arg)
{
	ADW_SOFTC *sc = arg;


	if(AdwISR(sc) != ADW_FALSE) {
		return (1);
	}

	return (0);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(ADW_SOFTC *sc, struct scsipi_xfer *xs, int count)
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adw_timeout(void *arg)
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 *
		 * No more opportunities. Let's try resetting the bus and
		 * reinitializing the host adapter.
		 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
		/*
		 * Abort the operation that has timed out.
		 *
		 * Second opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next timeout; otherwise a
		 * Bus Reset will inexorably follow.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt.
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]) when
		 *         ADW_ABORT_CCB() works again.
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * There is no multishot callout_reset(), so restart the
		 * callout by hand; the next time a timeout event occurs
		 * we will reset the bus.
		 */
		callout_reset(&xs->xs_callout,
		    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	} else {
		/*
		 * Abort the operation that has timed out.
		 *
		 * First opportunity.
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * gets completed before the next two timeouts; otherwise a
		 * Bus Reset will inexorably follow.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board generate an interrupt.
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]) when
		 *         ADW_ABORT_CCB() works again.
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * There is no multishot callout_reset(), so restart the
		 * callout by hand to give the timed-out command a second
		 * opportunity.
		 */
		callout_reset(&xs->xs_callout,
		    (ccb->timeout * hz) / 1000, adw_timeout, ccb);
	}

	splx(s);
}


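/*
 * Reset the SCSI bus and terminate every pending CCB, handing the
 * corresponding transfers back to the scsipi layer.
 */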
static void
adw_reset_bus(ADW_SOFTC *sc)
{
	ADW_CCB *ccb;
	int s;
	struct scsipi_xfer *xs;

	s = splbio();
	AdwResetSCSIBus(sc);
	while((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
	    adw_pending_ccb)) != NULL) {
		callout_stop(&ccb->xs->xs_callout);
		TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
		xs = ccb->xs;
		adw_free_ccb(sc, ccb);
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
	}
	splx(s);
}


/******************************************************************************/
/* Host Adapter and Peripherals Information Routines */
/******************************************************************************/

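/*
 * Report the wide/synchronous transfer parameters negotiated with a
 * target, as read from the microcode's LRAM variables. Called after a
 * successful INQUIRY on LUN 0.
 */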
static void
adw_print_info(ADW_SOFTC *sc, int tid)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t wdtr_able, wdtr_done, wdtr;
	u_int16_t sdtr_able, sdtr_done, sdtr, period;
	static int wdtr_reneg = 0, sdtr_reneg = 0;

	if (tid == 0){
		wdtr_reneg = sdtr_reneg = 0;
	}

	printf("%s: target %d ", sc->sc_dev.dv_xname, tid);

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_ABLE, wdtr_able);
	if(wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_WDTR_DONE, wdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), wdtr);
		printf("using %d-bit wide, ", (wdtr & 0x8000)? 16 : 8);
		if((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			wdtr_reneg = 1;
	} else {
		printf("wide transfers disabled, ");
	}

	ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
	if(sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
		ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
		    (2 * tid), sdtr);
		sdtr &= ~0x8000;
		if((sdtr & 0x1F) != 0) {
			if((sdtr & 0x1F00) == 0x1100){
				printf("80.0 MHz");
			} else if((sdtr & 0x1F00) == 0x1000){
				printf("40.0 MHz");
			} else {
				/* <= 20.0 MHz */
				period = (((sdtr >> 8) * 25) + 50)/4;
				if(period == 0) {
					/* Should never happen. */
					printf("? MHz");
				} else {
					printf("%d.%d MHz", 250/period,
					    ADW_TENTHS(250, period));
				}
			}
			printf(" synchronous transfers\n");
		} else {
			printf("asynchronous transfers\n");
		}
		if((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
			sdtr_reneg = 1;
	} else {
		printf("synchronous transfers disabled\n");
	}

	if(wdtr_reneg || sdtr_reneg) {
		printf("%s: target %d %s", sc->sc_dev.dv_xname, tid,
		    (wdtr_reneg)? ((sdtr_reneg)? "wide/sync" : "wide") :
		    ((sdtr_reneg)? "sync" : "") );
		printf(" renegotiation pending before next command.\n");
	}
}


/******************************************************************************/
/* WIDE boards Interrupt callbacks */
/******************************************************************************/


/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 *
 * Notice:
 * Interrupts are disabled by the caller (AdwISR() function), and will be
 * enabled at the end of the caller.
 */
static void
adw_isr_callback(ADW_SOFTC *sc, ADW_SCSI_REQ_Q *scsiq)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the scsi peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	   ((scsiq->done_status == QD_NO_ERROR) ||
	    (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			xs->resid = scsiq->data_cnt;
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			printf("%s: Overrun/Overflow/Underflow condition\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			printf("%s: Unexpected BUS free\n",sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			printf("%s: BUS Reset\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			printf("%s: Device Reset\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			printf("%s: Queue Aborted\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error. This should *NEVER* happen!
			 *
			 * Let's try resetting the bus and reinitializing
			 * the host adapter.
			 */
			printf("%s: DMA Error. Resetting bus\n",
			    sc->sc_dev.dv_xname);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Resetting bus\n",
			    sc->sc_dev.dv_xname);
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			printf("%s: Transfer Error\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			printf("%s: Auto Sense Failed\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			printf("%s: Invalid Device\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			printf("%s: Unexpected Check Condition\n",
			    sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			printf("%s: Unknown Error\n", sc->sc_dev.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			    sc->sc_dev.dv_xname, scsiq->host_status);
		}
	}

	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}


/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 */
static void
adw_async_callback(ADW_SOFTC *sc, u_int8_t code)
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/* The firmware detected a SCSI Bus reset. */
		printf("%s: SCSI Bus reset detected\n", sc->sc_dev.dv_xname);
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive.
		 */
		printf("%s: RDMA failure. Resetting the SCSI Bus and"
		    " the adapter\n", sc->sc_dev.dv_xname);
		AdwResetSCSIBus(sc);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/* Host generated SCSI bus reset occurred. */
		printf("%s: Host generated SCSI bus reset occurred\n",
		    sc->sc_dev.dv_xname);
		break;

	case ADV_ASYNC_CARRIER_READY_FAILURE:
		/* Carrier Ready failure. */
		printf("%s: Carrier Ready failure!\n", sc->sc_dev.dv_xname);
		break;

	default:
		break;
	}
}
