adw.c revision 1.52 1 /* $NetBSD: adw.c,v 1.52 2012/10/27 17:18:18 chs Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: adw.c,v 1.52 2012/10/27 17:18:18 chs Exp $");
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
51 #include <sys/buf.h>
52 #include <sys/proc.h>
53
54 #include <sys/bus.h>
55 #include <sys/intr.h>
56
57 #include <dev/scsipi/scsi_all.h>
58 #include <dev/scsipi/scsipi_all.h>
59 #include <dev/scsipi/scsiconf.h>
60
61 #include <dev/ic/adwlib.h>
62 #include <dev/ic/adwmcode.h>
63 #include <dev/ic/adw.h>
64
65 #ifndef DDB
66 #define Debugger() panic("should call debugger here (adw.c)")
67 #endif /* ! DDB */
68
69 /******************************************************************************/
70
71
72 static int adw_alloc_controls(ADW_SOFTC *);
73 static int adw_alloc_carriers(ADW_SOFTC *);
74 static int adw_create_ccbs(ADW_SOFTC *, ADW_CCB *, int);
75 static void adw_free_ccb(ADW_SOFTC *, ADW_CCB *);
76 static void adw_reset_ccb(ADW_CCB *);
77 static int adw_init_ccb(ADW_SOFTC *, ADW_CCB *);
78 static ADW_CCB *adw_get_ccb(ADW_SOFTC *);
79 static int adw_queue_ccb(ADW_SOFTC *, ADW_CCB *);
80
81 static void adw_scsipi_request(struct scsipi_channel *,
82 scsipi_adapter_req_t, void *);
83 static int adw_build_req(ADW_SOFTC *, ADW_CCB *);
84 static void adw_build_sglist(ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *);
85 static void adwminphys(struct buf *);
86 static void adw_isr_callback(ADW_SOFTC *, ADW_SCSI_REQ_Q *);
87 static void adw_async_callback(ADW_SOFTC *, u_int8_t);
88
89 static void adw_print_info(ADW_SOFTC *, int);
90
91 static int adw_poll(ADW_SOFTC *, struct scsipi_xfer *, int);
92 static void adw_timeout(void *);
93 static void adw_reset_bus(ADW_SOFTC *);
94
95
96 /******************************************************************************/
97 /* DMA Mapping for Control Blocks */
98 /******************************************************************************/
99
100
101 static int
102 adw_alloc_controls(ADW_SOFTC *sc)
103 {
104 bus_dma_segment_t seg;
105 int error, rseg;
106
107 /*
108 * Allocate the control structure.
109 */
110 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
111 PAGE_SIZE, 0, &seg, 1, &rseg,
112 BUS_DMA_NOWAIT)) != 0) {
113 aprint_error_dev(sc->sc_dev, "unable to allocate control structures,"
114 " error = %d\n", error);
115 return (error);
116 }
117 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
118 sizeof(struct adw_control), (void **) & sc->sc_control,
119 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
120 aprint_error_dev(sc->sc_dev, "unable to map control structures, error = %d\n",
121 error);
122 return (error);
123 }
124
125 /*
126 * Create and load the DMA map used for the control blocks.
127 */
128 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
129 1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
130 &sc->sc_dmamap_control)) != 0) {
131 aprint_error_dev(sc->sc_dev, "unable to create control DMA map, error = %d\n",
132 error);
133 return (error);
134 }
135 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
136 sc->sc_control, sizeof(struct adw_control), NULL,
137 BUS_DMA_NOWAIT)) != 0) {
138 aprint_error_dev(sc->sc_dev, "unable to load control DMA map, error = %d\n",
139 error);
140 return (error);
141 }
142
143 return (0);
144 }
145
146
147 static int
148 adw_alloc_carriers(ADW_SOFTC *sc)
149 {
150 bus_dma_segment_t seg;
151 int error, rseg;
152
153 /*
154 * Allocate the control structure.
155 */
156 sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
157 M_DEVBUF, M_WAITOK);
158 if(!sc->sc_control->carriers) {
159 aprint_error_dev(sc->sc_dev,
160 "malloc() failed in allocating carrier structures\n");
161 return (ENOMEM);
162 }
163
164 if ((error = bus_dmamem_alloc(sc->sc_dmat,
165 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
166 0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
167 aprint_error_dev(sc->sc_dev, "unable to allocate carrier structures,"
168 " error = %d\n", error);
169 return (error);
170 }
171 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
172 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
173 (void **) &sc->sc_control->carriers,
174 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
175 aprint_error_dev(sc->sc_dev, "unable to map carrier structures,"
176 " error = %d\n", error);
177 return (error);
178 }
179
180 /*
181 * Create and load the DMA map used for the control blocks.
182 */
183 if ((error = bus_dmamap_create(sc->sc_dmat,
184 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
185 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0,BUS_DMA_NOWAIT,
186 &sc->sc_dmamap_carrier)) != 0) {
187 aprint_error_dev(sc->sc_dev, "unable to create carriers DMA map,"
188 " error = %d\n", error);
189 return (error);
190 }
191 if ((error = bus_dmamap_load(sc->sc_dmat,
192 sc->sc_dmamap_carrier, sc->sc_control->carriers,
193 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
194 BUS_DMA_NOWAIT)) != 0) {
195 aprint_error_dev(sc->sc_dev, "unable to load carriers DMA map,"
196 " error = %d\n", error);
197 return (error);
198 }
199
200 return (0);
201 }
202
203
204 /******************************************************************************/
205 /* Control Blocks routines */
206 /******************************************************************************/
207
208
209 /*
210 * Create a set of ccbs and add them to the free list. Called once
211 * by adw_init(). We return the number of CCBs successfully created.
212 */
213 static int
214 adw_create_ccbs(ADW_SOFTC *sc, ADW_CCB *ccbstore, int count)
215 {
216 ADW_CCB *ccb;
217 int i, error;
218
219 for (i = 0; i < count; i++) {
220 ccb = &ccbstore[i];
221 if ((error = adw_init_ccb(sc, ccb)) != 0) {
222 aprint_error_dev(sc->sc_dev, "unable to initialize ccb, error = %d\n",
223 error);
224 return (i);
225 }
226 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
227 }
228
229 return (i);
230 }
231
232
233 /*
234 * A ccb is put onto the free list.
235 */
236 static void
237 adw_free_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
238 {
239 int s;
240
241 s = splbio();
242
243 adw_reset_ccb(ccb);
244 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
245
246 splx(s);
247 }
248
249
250 static void
251 adw_reset_ccb(ADW_CCB *ccb)
252 {
253
254 ccb->flags = 0;
255 }
256
257
258 static int
259 adw_init_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
260 {
261 int hashnum, error;
262
263 /*
264 * Create the DMA map for this CCB.
265 */
266 error = bus_dmamap_create(sc->sc_dmat,
267 (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
268 ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
269 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
270 if (error) {
271 aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
272 error);
273 return (error);
274 }
275
276 /*
277 * put in the phystokv hash table
278 * Never gets taken out.
279 */
280 ccb->hashkey = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
281 ADW_CCB_OFF(ccb));
282 hashnum = CCB_HASH(ccb->hashkey);
283 ccb->nexthash = sc->sc_ccbhash[hashnum];
284 sc->sc_ccbhash[hashnum] = ccb;
285 adw_reset_ccb(ccb);
286 return (0);
287 }
288
289
290 /*
291 * Get a free ccb
292 *
293 * If there are none, see if we can allocate a new one
294 */
295 static ADW_CCB *
296 adw_get_ccb(ADW_SOFTC *sc)
297 {
298 ADW_CCB *ccb = 0;
299 int s;
300
301 s = splbio();
302
303 ccb = sc->sc_free_ccb.tqh_first;
304 if (ccb != NULL) {
305 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
306 ccb->flags |= CCB_ALLOC;
307 }
308 splx(s);
309 return (ccb);
310 }
311
312
313 /*
314 * Given a physical address, find the ccb that it corresponds to.
315 */
316 ADW_CCB *
317 adw_ccb_phys_kv(ADW_SOFTC *sc, u_int32_t ccb_phys)
318 {
319 int hashnum = CCB_HASH(ccb_phys);
320 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
321
322 while (ccb) {
323 if (ccb->hashkey == ccb_phys)
324 break;
325 ccb = ccb->nexthash;
326 }
327 return (ccb);
328 }
329
330
331 /*
332 * Queue a CCB to be sent to the controller, and send it if possible.
333 */
/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 *
 * Appends the CCB to the waiting queue, then drains the whole queue to
 * the adapter in FIFO order.  Returns ADW_SUCCESS, or ADW_BUSY/ADW_ERROR
 * from AdwExeScsiQueue() — in which case the failing CCB has already
 * been removed from the waiting queue and is left for the caller
 * (adw_scsipi_request()) to free.
 */
static int
adw_queue_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch(errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			/* Adapter temporarily out of resources. */
			printf("ADW_BUSY\n");
			return(ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return(ADW_ERROR);
		}

		/* Track the CCB until adw_isr_callback() completes it. */
		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		/* Arm the per-command timeout unless the caller polls. */
		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	return(errcode);
}
367
368
369 /******************************************************************************/
370 /* SCSI layer interfacing routines */
371 /******************************************************************************/
372
373
374 int
375 adw_init(ADW_SOFTC *sc)
376 {
377 u_int16_t warn_code;
378
379
380 sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
381 ADW_LIB_VERSION_MINOR;
382 sc->cfg.chip_version =
383 ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);
384
385 /*
386 * Reset the chip to start and allow register writes.
387 */
388 if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
389 panic("adw_init: adw_find_signature failed");
390 } else {
391 AdwResetChip(sc->sc_iot, sc->sc_ioh);
392
393 warn_code = AdwInitFromEEPROM(sc);
394
395 if (warn_code & ADW_WARN_EEPROM_CHKSUM)
396 aprint_error_dev(sc->sc_dev, "Bad checksum found. "
397 "Setting default values\n");
398 if (warn_code & ADW_WARN_EEPROM_TERMINATION)
399 aprint_error_dev(sc->sc_dev, "Bad bus termination setting."
400 "Using automatic termination.\n");
401 }
402
403 sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
404 sc->async_callback = (ADW_CALLBACK) adw_async_callback;
405
406 return 0;
407 }
408
409
/*
 * Second-stage attach: allocate and initialize control blocks and
 * carriers, run AdwInitDriver() (microcode load / BIST), then register
 * with the scsipi mid-layer and probe the bus.  Allocation failures
 * abort the attach silently (the device simply does not configure);
 * fatal hardware errors panic.
 */
void
adw_attach(ADW_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	memset(sc->sc_control, 0, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		aprint_error_dev(sc->sc_dev, "unable to create Control Blocks\n");
		return; /* (ENOMEM) */ ;
	} else if (ncontrols != ADW_MAX_CCB) {
		aprint_error_dev(sc->sc_dev, "WARNING: only %d of %d Control Blocks"
		    " created\n",
		    ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	/*
	 * Zero's the freeze_device status
	 */
	memset(sc->sc_freeze_dev, 0, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter.  All of these conditions are fatal
	 * hardware/cabling errors, hence the panics.
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    device_xname(sc->sc_dev));
		break;

	case ADW_WARN_BUSRESET_ERROR:
		/* Non-fatal: warn and continue the attach. */
		aprint_error_dev(sc->sc_dev, "WARNING: Bus Reset Error\n");
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	/* Probe and attach the SCSI bus below us. */
	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}
529
530
/*
 * Clamp a transfer to the largest size our scatter/gather list can
 * map ((ADW_MAX_SG_LIST - 1) pages), then apply the generic minphys.
 */
static void
adwminphys(struct buf *bp)
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}
539
540
541 /*
542 * start a scsi operation given the command and the data address.
543 * Also needs the unit, target and lu.
544 */
/*
 * scsipi mid-layer entry point: start a scsi operation given the
 * command and the data address.  Also needs the unit, target and lu.
 *
 * For ADAPTER_REQ_RUN_XFER a CCB is taken from the free list (the
 * mid-layer guarantees one is available via adapt_openings), the
 * request is built and queued, and for polled transfers we spin here
 * until completion.  GROW_RESOURCES and SET_XFER_MODE are not
 * implemented.
 */
static void
adw_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	ADW_SOFTC *sc = device_private(chan->chan_adapter->adapt_dev);
	ADW_CCB *ccb;
	int s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * get a ccb to use. If the transfer
		 * is from a buf (possibly from interrupt time)
		 * then we can't allow it to sleep
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * adw_build_req() returns 0 only when it already failed
		 * the xfer (DMA load error) and called scsipi_done().
		 */
		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch(retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 * On timeout, run the timeout handler and give the
			 * command one more full timeout period.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}
620
621
622 /*
623 * Build a request structure for the Wide Boards.
624 */
/*
 * Build a request structure (ADW_SCSI_REQ_Q) for the Wide Boards.
 *
 * Fills in the CDB (split across the 12-byte cdb[] and the cdb16[]
 * extension for longer commands), the target/lun, the auto-sense
 * buffer addresses, and — for data transfers — loads the DMA map and
 * builds the scatter/gather list.
 *
 * Returns 1 on success; returns 0 when the DMA load failed, in which
 * case xs->error is set and scsipi_done() has already been called.
 */
static int
adw_build_req(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	memset(scsiqp, 0, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported.  The first 12 bytes go into cdb[]; any
	 * remainder goes into the cdb16[] extension.
	 */
	memcpy(&scsiqp->cdb, xs->cmd, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		memcpy(&scsiqp->cdb16, &(xs->cmd[12]), xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	/* Auto-request-sense buffer: both KVA and bus address. */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense));
	scsiqp->sense_len = sizeof(struct scsi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
			     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
			     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
			      BUS_DMA_WRITE));
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			     BUS_DMA_STREAMING |
			     ((xs->xs_control & XS_CTL_DATA_IN) ?
			      BUS_DMA_READ : BUS_DMA_WRITE));
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			/* Transient shortage: mid-layer may retry. */
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
			    error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return(0);
		}

		/* Make the buffer visible to the device before starting. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = htole32(xs->datalen);
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = htole32(ccb->dmamap_xfer->dm_segs[0].ds_addr);
		memset(ccb->sg_block, 0,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
735
736
737 /*
738 * Build scatter-gather list for Wide Boards.
739 */
/*
 * Build scatter-gather list for Wide Boards.
 *
 * Copies the bus_dma segment list into the CCB's chain of hardware
 * ADW_SG_BLOCKs (NO_OF_SG_PER_BLOCK entries each), linking successive
 * blocks by bus address.  The blocks are contiguous in the CCB, so the
 * next block's virtual/bus addresses are derived by simple offsetting.
 */
static void
adw_build_sglist(ADW_CCB *ccb, ADW_SCSI_REQ_Q *scsiqp, ADW_SG_BLOCK *sg_block)
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;			/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	/* hashkey is the CCB's bus address (stored little-endian). */
	sg_block_physical_addr = le32toh(ccb->hashkey) +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = htole32(sg_block_physical_addr);

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK DMA segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = htole32(sg_list->ds_addr);
			sg_block->sg_list[i].sg_count = htole32(sg_list->ds_len);

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = 0; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		/* Advance to the next (contiguous) block in the CCB. */
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = htole32(sg_block_physical_addr);
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr;	/* virt. addr */
	} while (1);
}
781
782
783 /******************************************************************************/
784 /* Interrupts and TimeOut routines */
785 /******************************************************************************/
786
787
788 int
789 adw_intr(void *arg)
790 {
791 ADW_SOFTC *sc = arg;
792
793
794 if(AdwISR(sc) != ADW_FALSE) {
795 return (1);
796 }
797
798 return (0);
799 }
800
801
802 /*
803 * Poll a particular unit, looking for a particular xs
804 */
805 static int
806 adw_poll(ADW_SOFTC *sc, struct scsipi_xfer *xs, int count)
807 {
808
809 /* timeouts are in msec, so we loop in 1000 usec cycles */
810 while (count) {
811 adw_intr(sc);
812 if (xs->xs_status & XS_STS_DONE)
813 return (0);
814 delay(1000); /* only happens in boot so ok */
815 count--;
816 }
817 return (1);
818 }
819
820
/*
 * Per-command timeout handler (callout, also called directly from the
 * polled path).  Escalates over three invocations for the same CCB:
 *   1st: mark CCB_ABORTING, re-arm the callout;
 *   2nd: mark CCB_ABORTED, re-arm the callout;
 *   3rd: give up and reset the SCSI bus.
 * The actual ADW_ABORT_CCB() calls are disabled because the 3.3a
 * microcode cannot abort a CCB (see the #if 0 comments below).
 */
static void
adw_timeout(void *arg)
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC *sc =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
	/*
	 * Abort Timed Out
	 *
	 * No more opportunities. Lets try resetting the bus and
	 * reinitialize the host adapter.
	 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
	/*
	 * Abort the operation that has timed out.
	 *
	 * Second opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so the next time a timeout event will occur
		 * we will reset the bus.
		 */
		callout_reset(&xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	} else {
	/*
	 * Abort the operation that has timed out.
	 *
	 * First opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next 2 timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so to give a second opportunity to the command
		 * which timed-out.
		 */
		callout_reset(&xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	splx(s);
}
918
919
920 static void
921 adw_reset_bus(ADW_SOFTC *sc)
922 {
923 ADW_CCB *ccb;
924 int s;
925 struct scsipi_xfer *xs;
926
927 s = splbio();
928 AdwResetSCSIBus(sc);
929 while((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
930 adw_pending_ccb)) != NULL) {
931 callout_stop(&ccb->xs->xs_callout);
932 TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
933 xs = ccb->xs;
934 adw_free_ccb(sc, ccb);
935 xs->error = XS_RESOURCE_SHORTAGE;
936 scsipi_done(xs);
937 }
938 splx(s);
939 }
940
941
942 /******************************************************************************/
943 /* Host Adapter and Peripherals Information Routines */
944 /******************************************************************************/
945
946
947 static void
948 adw_print_info(ADW_SOFTC *sc, int tid)
949 {
950 bus_space_tag_t iot = sc->sc_iot;
951 bus_space_handle_t ioh = sc->sc_ioh;
952 u_int16_t wdtr_able, wdtr_done, wdtr;
953 u_int16_t sdtr_able, sdtr_done, sdtr, period;
954 static int wdtr_reneg = 0, sdtr_reneg = 0;
955
956 if (tid == 0){
957 wdtr_reneg = sdtr_reneg = 0;
958 }
959
960 printf("%s: target %d ", device_xname(sc->sc_dev), tid);
961
962 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, wdtr_able);
963 if(wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
964 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, wdtr_done);
965 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
966 (2 * tid), wdtr);
967 printf("using %d-bits wide, ", (wdtr & 0x8000)? 16 : 8);
968 if((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
969 wdtr_reneg = 1;
970 } else {
971 printf("wide transfers disabled, ");
972 }
973
974 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
975 if(sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
976 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
977 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
978 (2 * tid), sdtr);
979 sdtr &= ~0x8000;
980 if((sdtr & 0x1F) != 0) {
981 if((sdtr & 0x1F00) == 0x1100){
982 printf("80.0 MHz");
983 } else if((sdtr & 0x1F00) == 0x1000){
984 printf("40.0 MHz");
985 } else {
986 /* <= 20.0 MHz */
987 period = (((sdtr >> 8) * 25) + 50)/4;
988 if(period == 0) {
989 /* Should never happen. */
990 printf("? MHz");
991 } else {
992 printf("%d.%d MHz", 250/period,
993 ADW_TENTHS(250, period));
994 }
995 }
996 printf(" synchronous transfers\n");
997 } else {
998 printf("asynchronous transfers\n");
999 }
1000 if((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
1001 sdtr_reneg = 1;
1002 } else {
1003 printf("synchronous transfers disabled\n");
1004 }
1005
1006 if(wdtr_reneg || sdtr_reneg) {
1007 printf("%s: target %d %s", device_xname(sc->sc_dev), tid,
1008 (wdtr_reneg)? ((sdtr_reneg)? "wide/sync" : "wide") :
1009 ((sdtr_reneg)? "sync" : "") );
1010 printf(" renegotiation pending before next command.\n");
1011 }
1012 }
1013
1014
1015 /******************************************************************************/
1016 /* WIDE boards Interrupt callbacks */
1017 /******************************************************************************/
1018
1019
1020 /*
1021 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
1022 *
1023 * Interrupt callback function for the Wide SCSI Adv Library.
1024 *
1025 * Notice:
1026 * Interrupts are disabled by the caller (AdwISR() function), and will be
1027 * enabled at the end of the caller.
1028 */
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 * Completes the scsipi_xfer associated with the finished ADW_SCSI_REQ_Q:
 * unloads the data DMA map, translates the done/host/scsi status into
 * an xs->error code, removes the CCB from the pending queue and hands
 * the xfer back to the mid-layer.
 *
 * Notice:
 * Interrupts are disabled by the caller (AdwISR() function), and will be
 * enabled at the end of the caller.
 */
static void
adw_isr_callback(ADW_SOFTC *sc, ADW_SCSI_REQ_Q *scsiq)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsi_sense_data *s1, *s2;


	/* Map the request's bus address back to our CCB. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	/* Sanity: the CCB must still be marked in-use. */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the scsi peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	   ((scsiq->done_status == QD_NO_ERROR) ||
	    (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			/* Report negotiation results after INQUIRY LUN 0. */
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			xs->resid = le32toh(scsiq->data_cnt);
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			/* Copy the auto-sense data into the xfer. */
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			aprint_error_dev(sc->sc_dev, "Overrun/Overflow/Underflow condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			aprint_error_dev(sc->sc_dev, "Unexpected BUS free\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			aprint_error_dev(sc->sc_dev, "BUS Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			aprint_error_dev(sc->sc_dev, "Device Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			aprint_error_dev(sc->sc_dev, "Queue Aborted\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error. This should *NEVER* happen!
			 *
			 * Lets try resetting the bus and reinitialize
			 * the host adapter.
			 */
			aprint_error_dev(sc->sc_dev, "DMA Error. Reseting bus\n");
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Reseting bus\n",
			    device_xname(sc->sc_dev));
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			aprint_error_dev(sc->sc_dev, "Transfer Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    device_xname(sc->sc_dev));
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			aprint_error_dev(sc->sc_dev, "Auto Sense Failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			aprint_error_dev(sc->sc_dev, "Invalid Device\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			aprint_error_dev(sc->sc_dev, "Unexpected Check Condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			aprint_error_dev(sc->sc_dev, "Unknown Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			      device_xname(sc->sc_dev), scsiq->host_status);
		}
	}

	/*
	 * Bus-reset paths jump past this REMOVE: adw_reset_bus() has
	 * already emptied the pending queue.
	 */
	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}
1203
1204
1205 /*
1206 * adw_async_callback() - Adv Library asynchronous event callback function.
1207 */
/*
 * adw_async_callback() - Adv Library asynchronous event callback function.
 *
 * Handles events not tied to a specific request: bus resets (detected
 * or self-generated), RDMA failures and carrier-ready failures.
 * Mostly just logs; an RDMA failure additionally resets the SCSI bus.
 */
static void
adw_async_callback(ADW_SOFTC *sc, u_int8_t code)
{
	switch (code) {
	case ADV_ASYNC_SCSI_BUS_RESET_DET:
		/* The firmware detected a SCSI Bus reset. */
		printf("%s: SCSI Bus reset detected\n", device_xname(sc->sc_dev));
		break;

	case ADV_ASYNC_RDMA_FAILURE:
		/*
		 * Handle RDMA failure by resetting the SCSI Bus and
		 * possibly the chip if it is unresponsive.
		 */
		printf("%s: RDMA failure. Resetting the SCSI Bus and"
		    " the adapter\n", device_xname(sc->sc_dev));
		AdwResetSCSIBus(sc);
		break;

	case ADV_HOST_SCSI_BUS_RESET:
		/* Host generated SCSI bus reset occurred. */
		printf("%s: Host generated SCSI bus reset occurred\n",
		    device_xname(sc->sc_dev));
		break;

	case ADV_ASYNC_CARRIER_READY_FAILURE:
		/* Carrier Ready failure. */
		printf("%s: Carrier Ready failure!\n", device_xname(sc->sc_dev));
		break;

	default:
		/* Unknown event codes are silently ignored. */
		break;
	}
}
1242