adw.c revision 1.53 1 /* $NetBSD: adw.c,v 1.53 2016/07/07 06:55:41 msaitoh Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: adw.c,v 1.53 2016/07/07 06:55:41 msaitoh Exp $");
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
51 #include <sys/buf.h>
52 #include <sys/proc.h>
53
54 #include <sys/bus.h>
55 #include <sys/intr.h>
56
57 #include <dev/scsipi/scsi_all.h>
58 #include <dev/scsipi/scsipi_all.h>
59 #include <dev/scsipi/scsiconf.h>
60
61 #include <dev/ic/adwlib.h>
62 #include <dev/ic/adwmcode.h>
63 #include <dev/ic/adw.h>
64
65 #ifndef DDB
66 #define Debugger() panic("should call debugger here (adw.c)")
67 #endif /* ! DDB */
68
69 /******************************************************************************/
70
71
72 static int adw_alloc_controls(ADW_SOFTC *);
73 static int adw_alloc_carriers(ADW_SOFTC *);
74 static int adw_create_ccbs(ADW_SOFTC *, ADW_CCB *, int);
75 static void adw_free_ccb(ADW_SOFTC *, ADW_CCB *);
76 static void adw_reset_ccb(ADW_CCB *);
77 static int adw_init_ccb(ADW_SOFTC *, ADW_CCB *);
78 static ADW_CCB *adw_get_ccb(ADW_SOFTC *);
79 static int adw_queue_ccb(ADW_SOFTC *, ADW_CCB *);
80
81 static void adw_scsipi_request(struct scsipi_channel *,
82 scsipi_adapter_req_t, void *);
83 static int adw_build_req(ADW_SOFTC *, ADW_CCB *);
84 static void adw_build_sglist(ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *);
85 static void adwminphys(struct buf *);
86 static void adw_isr_callback(ADW_SOFTC *, ADW_SCSI_REQ_Q *);
87 static void adw_async_callback(ADW_SOFTC *, u_int8_t);
88
89 static void adw_print_info(ADW_SOFTC *, int);
90
91 static int adw_poll(ADW_SOFTC *, struct scsipi_xfer *, int);
92 static void adw_timeout(void *);
93 static void adw_reset_bus(ADW_SOFTC *);
94
95
96 /******************************************************************************/
97 /* DMA Mapping for Control Blocks */
98 /******************************************************************************/
99
100
101 static int
102 adw_alloc_controls(ADW_SOFTC *sc)
103 {
104 bus_dma_segment_t seg;
105 int error, rseg;
106
107 /*
108 * Allocate the control structure.
109 */
110 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
111 PAGE_SIZE, 0, &seg, 1, &rseg,
112 BUS_DMA_NOWAIT)) != 0) {
113 aprint_error_dev(sc->sc_dev, "unable to allocate control structures,"
114 " error = %d\n", error);
115 return (error);
116 }
117 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
118 sizeof(struct adw_control), (void **) & sc->sc_control,
119 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
120 aprint_error_dev(sc->sc_dev, "unable to map control structures, error = %d\n",
121 error);
122 return (error);
123 }
124
125 /*
126 * Create and load the DMA map used for the control blocks.
127 */
128 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
129 1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
130 &sc->sc_dmamap_control)) != 0) {
131 aprint_error_dev(sc->sc_dev, "unable to create control DMA map, error = %d\n",
132 error);
133 return (error);
134 }
135 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
136 sc->sc_control, sizeof(struct adw_control), NULL,
137 BUS_DMA_NOWAIT)) != 0) {
138 aprint_error_dev(sc->sc_dev, "unable to load control DMA map, error = %d\n",
139 error);
140 return (error);
141 }
142
143 return (0);
144 }
145
146
147 static int
148 adw_alloc_carriers(ADW_SOFTC *sc)
149 {
150 bus_dma_segment_t seg;
151 int error, rseg;
152
153 /*
154 * Allocate the control structure.
155 */
156 sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
157 M_DEVBUF, M_WAITOK);
158 if(!sc->sc_control->carriers) {
159 aprint_error_dev(sc->sc_dev,
160 "malloc() failed in allocating carrier structures\n");
161 return (ENOMEM);
162 }
163
164 if ((error = bus_dmamem_alloc(sc->sc_dmat,
165 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
166 0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
167 aprint_error_dev(sc->sc_dev, "unable to allocate carrier structures,"
168 " error = %d\n", error);
169 return (error);
170 }
171 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
172 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
173 (void **) &sc->sc_control->carriers,
174 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
175 aprint_error_dev(sc->sc_dev, "unable to map carrier structures,"
176 " error = %d\n", error);
177 return (error);
178 }
179
180 /*
181 * Create and load the DMA map used for the control blocks.
182 */
183 if ((error = bus_dmamap_create(sc->sc_dmat,
184 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
185 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0,BUS_DMA_NOWAIT,
186 &sc->sc_dmamap_carrier)) != 0) {
187 aprint_error_dev(sc->sc_dev, "unable to create carriers DMA map,"
188 " error = %d\n", error);
189 return (error);
190 }
191 if ((error = bus_dmamap_load(sc->sc_dmat,
192 sc->sc_dmamap_carrier, sc->sc_control->carriers,
193 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
194 BUS_DMA_NOWAIT)) != 0) {
195 aprint_error_dev(sc->sc_dev, "unable to load carriers DMA map,"
196 " error = %d\n", error);
197 return (error);
198 }
199
200 return (0);
201 }
202
203
204 /******************************************************************************/
205 /* Control Blocks routines */
206 /******************************************************************************/
207
208
209 /*
210 * Create a set of ccbs and add them to the free list. Called once
211 * by adw_init(). We return the number of CCBs successfully created.
212 */
213 static int
214 adw_create_ccbs(ADW_SOFTC *sc, ADW_CCB *ccbstore, int count)
215 {
216 ADW_CCB *ccb;
217 int i, error;
218
219 for (i = 0; i < count; i++) {
220 ccb = &ccbstore[i];
221 if ((error = adw_init_ccb(sc, ccb)) != 0) {
222 aprint_error_dev(sc->sc_dev, "unable to initialize ccb, error = %d\n",
223 error);
224 return (i);
225 }
226 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
227 }
228
229 return (i);
230 }
231
232
233 /*
234 * A ccb is put onto the free list.
235 */
236 static void
237 adw_free_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
238 {
239 int s;
240
241 s = splbio();
242
243 adw_reset_ccb(ccb);
244 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
245
246 splx(s);
247 }
248
249
/*
 * Reset a CCB to its idle state by clearing all status flags
 * (CCB_ALLOC, CCB_ABORTING, CCB_ABORTED).
 */
static void
adw_reset_ccb(ADW_CCB *ccb)
{

	ccb->flags = 0;
}
256
257
258 static int
259 adw_init_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
260 {
261 int hashnum, error;
262
263 /*
264 * Create the DMA map for this CCB.
265 */
266 error = bus_dmamap_create(sc->sc_dmat,
267 (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
268 ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
269 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
270 if (error) {
271 aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
272 error);
273 return (error);
274 }
275
276 /*
277 * put in the phystokv hash table
278 * Never gets taken out.
279 */
280 ccb->hashkey = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
281 ADW_CCB_OFF(ccb));
282 hashnum = CCB_HASH(ccb->hashkey);
283 ccb->nexthash = sc->sc_ccbhash[hashnum];
284 sc->sc_ccbhash[hashnum] = ccb;
285 adw_reset_ccb(ccb);
286 return (0);
287 }
288
289
290 /*
291 * Get a free ccb
292 *
293 * If there are none, see if we can allocate a new one
294 */
295 static ADW_CCB *
296 adw_get_ccb(ADW_SOFTC *sc)
297 {
298 ADW_CCB *ccb = 0;
299 int s;
300
301 s = splbio();
302
303 ccb = sc->sc_free_ccb.tqh_first;
304 if (ccb != NULL) {
305 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
306 ccb->flags |= CCB_ALLOC;
307 }
308 splx(s);
309 return (ccb);
310 }
311
312
313 /*
314 * Given a physical address, find the ccb that it corresponds to.
315 */
316 ADW_CCB *
317 adw_ccb_phys_kv(ADW_SOFTC *sc, u_int32_t ccb_phys)
318 {
319 int hashnum = CCB_HASH(ccb_phys);
320 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
321
322 while (ccb) {
323 if (ccb->hashkey == ccb_phys)
324 break;
325 ccb = ccb->nexthash;
326 }
327 return (ccb);
328 }
329
330
331 /*
332 * Queue a CCB to be sent to the controller, and send it if possible.
333 */
/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 *
 * The CCB is appended to the software waiting queue, then the queue is
 * drained toward the adapter via AdwExeScsiQueue().  CCBs accepted by
 * the adapter are moved to the pending queue and, unless the transfer
 * is polled, a per-xfer timeout callout is armed.
 *
 * Returns ADW_SUCCESS, or ADW_BUSY/ADW_ERROR from AdwExeScsiQueue().
 * NOTE(review): on ADW_BUSY the failing CCB has already been removed
 * from the waiting queue and is NOT re-queued here; the caller
 * (adw_scsipi_request) maps ADW_BUSY to XS_RESOURCE_SHORTAGE and frees
 * the CCB.
 */
static int
adw_queue_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	/* Drain everything waiting, not just the CCB passed in. */
	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch(errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return(ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return(ADW_ERROR);
		}

		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		/* Polled transfers complete synchronously; no callout. */
		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	return(errcode);
}
367
368
369 /******************************************************************************/
370 /* SCSI layer interfacing routines */
371 /******************************************************************************/
372
373
374 int
375 adw_init(ADW_SOFTC *sc)
376 {
377 u_int16_t warn_code;
378
379
380 sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
381 ADW_LIB_VERSION_MINOR;
382 sc->cfg.chip_version =
383 ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);
384
385 /*
386 * Reset the chip to start and allow register writes.
387 */
388 if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
389 panic("adw_init: adw_find_signature failed");
390 } else {
391 AdwResetChip(sc->sc_iot, sc->sc_ioh);
392
393 warn_code = AdwInitFromEEPROM(sc);
394
395 if (warn_code & ADW_WARN_EEPROM_CHKSUM)
396 aprint_error_dev(sc->sc_dev, "Bad checksum found. "
397 "Setting default values\n");
398 if (warn_code & ADW_WARN_EEPROM_TERMINATION)
399 aprint_error_dev(sc->sc_dev, "Bad bus termination setting."
400 "Using automatic termination.\n");
401 }
402
403 sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
404 sc->async_callback = (ADW_CALLBACK) adw_async_callback;
405
406 return 0;
407 }
408
409
/*
 * Second-stage attach: allocate and initialize the control structure,
 * CCBs, and carriers, run AdwInitDriver() to download the microcode,
 * then register the adapter/channel with the scsipi midlayer and probe
 * for devices.  Allocation failures return silently (the device simply
 * does not attach); fatal firmware/cabling errors panic.
 */
void
adw_attach(ADW_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	memset(sc->sc_control, 0, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create Control Blocks\n");
		return; /* (ENOMEM) */ ;
	} else if (ncontrols != ADW_MAX_CCB) {
		aprint_error_dev(sc->sc_dev,
		    "WARNING: only %d of %d Control Blocks created\n",
		    ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	/*
	 * Zero's the freeze_device status
	 */
	memset(sc->sc_freeze_dev, 0, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter.  Most failure codes indicate a
	 * hardware/cabling problem that leaves the bus unusable, hence
	 * the panics.
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    device_xname(sc->sc_dev));
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    device_xname(sc->sc_dev));
		break;

	case ADW_WARN_BUSRESET_ERROR:
		/* Non-fatal: warn and continue the attach. */
		aprint_error_dev(sc->sc_dev, "WARNING: Bus Reset Error\n");
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* One midlayer opening per CCB actually created. */
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	/* Attach the SCSI bus and probe for devices. */
	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
}
530
531
/*
 * Clamp a transfer's byte count to the largest size the controller's
 * scatter/gather list can describe, then apply the system default
 * minphys().
 */
static void
adwminphys(struct buf *bp)
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}
540
541
542 /*
543 * start a scsi operation given the command and the data address.
544 * Also needs the unit, target and lu.
545 */
/*
 * scsipi midlayer request entry point.
 *
 * For ADAPTER_REQ_RUN_XFER: grab a free CCB, build the adapter request
 * from the xfer, and queue it to the controller.  Queueing failures are
 * reported back through scsipi_done() with an appropriate xs->error.
 * Polled transfers (XS_CTL_POLL) are spun on here until completion,
 * with adw_timeout() invoked by hand if the poll expires.
 *
 * ADAPTER_REQ_GROW_RESOURCES and ADAPTER_REQ_SET_XFER_MODE are not
 * implemented.
 */
static void
adw_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	ADW_SOFTC *sc = device_private(chan->chan_adapter->adapt_dev);
	ADW_CCB *ccb;
	int s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * get a ccb to use. If the transfer
		 * is from a buf (possibly from interrupt time)
		 * then we can't allow it to sleep
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer (adapt_openings == number of CCBs).
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * adw_build_req() returns 0 only when it has already
		 * completed the xfer with an error (DMA load failure).
		 */
		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch(retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 * A second poll/timeout round gives the abort
			 * machinery in adw_timeout() a chance to run.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}
621
622
623 /*
624 * Build a request structure for the Wide Boards.
625 */
/*
 * Build an ADW_SCSI_REQ_Q request (for the Wide Boards) from the
 * scsipi_xfer attached to the CCB: copy the CDB, fill in target/lun
 * and auto-request-sense addresses, DMA-load the data buffer, and
 * build the scatter/gather block chain.
 *
 * Returns 1 on success.  Returns 0 when the DMA map load failed; in
 * that case the xfer has already been completed via scsipi_done() and
 * the CCB freed, so the caller must not touch either again.
 */
static int
adw_build_req(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	memset(scsiqp, 0, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported: the first 12 bytes go in 'cdb', any
	 * remainder in 'cdb16'.
	 */
	memcpy(&scsiqp->cdb, xs->cmd, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		memcpy(&scsiqp->cdb16, &(xs->cmd[12]), xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	/* Virtual and device-visible addresses of the sense buffer
	 * embedded in the CCB (for auto-request-sense). */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense));
	scsiqp->sense_len = sizeof(struct scsi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		/* NOTE(review): this branch references 'flags', which is
		 * not declared in this function — it cannot compile with
		 * TFS defined.  Verify before enabling TFS. */
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
			     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
			    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
			     BUS_DMA_WRITE));
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			     BUS_DMA_READ : BUS_DMA_WRITE));
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			/* Transient: the midlayer may retry later. */
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
			    error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return(0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = htole32(xs->datalen);
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = htole32(ccb->dmamap_xfer->dm_segs[0].ds_addr);
		memset(ccb->sg_block, 0,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
736
737
738 /*
739 * Build scatter-gather list for Wide Boards.
740 */
/*
 * Build scatter-gather list for Wide Boards.
 *
 * Translates the bus_dma segment list in ccb->dmamap_xfer into the
 * adapter's chained ADW_SG_BLOCK format.  Each block holds up to
 * NO_OF_SG_PER_BLOCK entries; blocks are linked through sg_ptr by
 * physical address, with the final block's sg_ptr set to 0.
 */
static void
adw_build_sglist(ADW_CCB *ccb, ADW_SCSI_REQ_Q *scsiqp, ADW_SG_BLOCK *sg_block)
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;	/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	/* hashkey is the CCB's physical address stored little-endian;
	 * the SG blocks live inside the CCB at a fixed offset. */
	sg_block_physical_addr = le32toh(ccb->hashkey) +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = htole32(sg_block_physical_addr);

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK DMA segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = htole32(sg_list->ds_addr);
			sg_block->sg_list[i].sg_count = htole32(sg_list->ds_len);

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = 0; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		/* Block full: advance to the next block (virtually and
		 * physically) and chain it in. */
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = htole32(sg_block_physical_addr);
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr;	/* virt. addr */
	} while (1);
}
782
783
784 /******************************************************************************/
785 /* Interrupts and TimeOut routines */
786 /******************************************************************************/
787
788
789 int
790 adw_intr(void *arg)
791 {
792 ADW_SOFTC *sc = arg;
793
794
795 if(AdwISR(sc) != ADW_FALSE) {
796 return (1);
797 }
798
799 return (0);
800 }
801
802
803 /*
804 * Poll a particular unit, looking for a particular xs
805 */
806 static int
807 adw_poll(ADW_SOFTC *sc, struct scsipi_xfer *xs, int count)
808 {
809
810 /* timeouts are in msec, so we loop in 1000 usec cycles */
811 while (count) {
812 adw_intr(sc);
813 if (xs->xs_status & XS_STS_DONE)
814 return (0);
815 delay(1000); /* only happens in boot so ok */
816 count--;
817 }
818 return (1);
819 }
820
821
/*
 * Per-xfer timeout handler (callout, also called by hand for polled
 * transfers).  Implements a three-strike escalation driven by the
 * CCB flags:
 *
 *   1st timeout: mark CCB_ABORTING and re-arm the callout;
 *   2nd timeout: mark CCB_ABORTED and re-arm again;
 *   3rd timeout: give up and reset the SCSI bus.
 *
 * The actual ADW_ABORT_CCB() calls are compiled out because the 3.3a
 * microcode cannot abort a CCB; we can only hope the command completes
 * before the escalation reaches the bus reset.
 */
static void
adw_timeout(void *arg)
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC *sc =
	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
	/*
	 * Abort Timed Out
	 *
	 * No more opportunities. Lets try resetting the bus and
	 * reinitialize the host adapter.
	 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
	/*
	 * Abort the operation that has timed out.
	 *
	 * Second opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so the next time a timeout event will occur
		 * we will reset the bus.
		 */
		callout_reset(&xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	} else {
	/*
	 * Abort the operation that has timed out.
	 *
	 * First opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next 2 timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so to give a second opportunity to the command
		 * which timed-out.
		 */
		callout_reset(&xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	splx(s);
}
919
920
921 static void
922 adw_reset_bus(ADW_SOFTC *sc)
923 {
924 ADW_CCB *ccb;
925 int s;
926 struct scsipi_xfer *xs;
927
928 s = splbio();
929 AdwResetSCSIBus(sc);
930 while((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
931 adw_pending_ccb)) != NULL) {
932 callout_stop(&ccb->xs->xs_callout);
933 TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
934 xs = ccb->xs;
935 adw_free_ccb(sc, ccb);
936 xs->error = XS_RESOURCE_SHORTAGE;
937 scsipi_done(xs);
938 }
939 splx(s);
940 }
941
942
943 /******************************************************************************/
944 /* Host Adapter and Peripherals Information Routines */
945 /******************************************************************************/
946
947
948 static void
949 adw_print_info(ADW_SOFTC *sc, int tid)
950 {
951 bus_space_tag_t iot = sc->sc_iot;
952 bus_space_handle_t ioh = sc->sc_ioh;
953 u_int16_t wdtr_able, wdtr_done, wdtr;
954 u_int16_t sdtr_able, sdtr_done, sdtr, period;
955 static int wdtr_reneg = 0, sdtr_reneg = 0;
956
957 if (tid == 0){
958 wdtr_reneg = sdtr_reneg = 0;
959 }
960
961 printf("%s: target %d ", device_xname(sc->sc_dev), tid);
962
963 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, wdtr_able);
964 if(wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
965 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, wdtr_done);
966 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
967 (2 * tid), wdtr);
968 printf("using %d-bits wide, ", (wdtr & 0x8000)? 16 : 8);
969 if((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
970 wdtr_reneg = 1;
971 } else {
972 printf("wide transfers disabled, ");
973 }
974
975 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
976 if(sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
977 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
978 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
979 (2 * tid), sdtr);
980 sdtr &= ~0x8000;
981 if((sdtr & 0x1F) != 0) {
982 if((sdtr & 0x1F00) == 0x1100){
983 printf("80.0 MHz");
984 } else if((sdtr & 0x1F00) == 0x1000){
985 printf("40.0 MHz");
986 } else {
987 /* <= 20.0 MHz */
988 period = (((sdtr >> 8) * 25) + 50)/4;
989 if(period == 0) {
990 /* Should never happen. */
991 printf("? MHz");
992 } else {
993 printf("%d.%d MHz", 250/period,
994 ADW_TENTHS(250, period));
995 }
996 }
997 printf(" synchronous transfers\n");
998 } else {
999 printf("asynchronous transfers\n");
1000 }
1001 if((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
1002 sdtr_reneg = 1;
1003 } else {
1004 printf("synchronous transfers disabled\n");
1005 }
1006
1007 if(wdtr_reneg || sdtr_reneg) {
1008 printf("%s: target %d %s", device_xname(sc->sc_dev), tid,
1009 (wdtr_reneg)? ((sdtr_reneg)? "wide/sync" : "wide") :
1010 ((sdtr_reneg)? "sync" : "") );
1011 printf(" renegotiation pending before next command.\n");
1012 }
1013 }
1014
1015
1016 /******************************************************************************/
1017 /* WIDE boards Interrupt callbacks */
1018 /******************************************************************************/
1019
1020
1021 /*
1022 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
1023 *
1024 * Interrupt callback function for the Wide SCSI Adv Library.
1025 *
1026 * Notice:
1027 * Interrupts are disabled by the caller (AdwISR() function), and will be
1028 * enabled at the end of the caller.
1029 */
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library: completes
 * the scsipi_xfer attached to the finished request.  Unloads the data
 * DMA map, maps the adapter's done/host/scsi status triple to an
 * xs->error code, and hands the xfer back via scsipi_done().
 *
 * Notice:
 * 	Interrupts are disabled by the caller (AdwISR() function), and will be
 * 	enabled at the end of the caller.
 */
static void
adw_isr_callback(ADW_SOFTC *sc, ADW_SCSI_REQ_Q *scsiq)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsi_sense_data *s1, *s2;


	/* Recover the CCB from the physical address echoed back by
	 * the adapter. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	/* Completion for a CCB we never handed out indicates driver or
	 * firmware confusion. */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the scsi peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	    ((scsiq->done_status == QD_NO_ERROR) ||
	    (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			/* Report negotiated parameters after a
			 * successful INQUIRY on LUN 0. */
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			xs->resid = le32toh(scsiq->data_cnt);
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			/* Copy the auto-request-sense data out of the
			 * CCB into the xfer. */
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		/* Host adapter detected an error: decode host_status. */
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			aprint_error_dev(sc->sc_dev, "Overrun/Overflow/Underflow condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			aprint_error_dev(sc->sc_dev, "Unexpected BUS free\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			aprint_error_dev(sc->sc_dev, "BUS Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			aprint_error_dev(sc->sc_dev, "Device Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			aprint_error_dev(sc->sc_dev, "Queue Aborted\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error. This should *NEVER* happen!
			 *
			 * Lets try resetting the bus and reinitialize
			 * the host adapter.
			 */
			aprint_error_dev(sc->sc_dev, "DMA Error. Reseting bus\n");
			/* adw_reset_bus() completes all pending CCBs,
			 * so take this one off the pending queue first
			 * and skip the queue removal at 'done'. */
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Reseting bus\n",
			    device_xname(sc->sc_dev));
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			aprint_error_dev(sc->sc_dev, "Transfer Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    device_xname(sc->sc_dev));
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			aprint_error_dev(sc->sc_dev, "Auto Sense Failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			aprint_error_dev(sc->sc_dev, "Invalid Device\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			aprint_error_dev(sc->sc_dev, "Unexpected Check Condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			aprint_error_dev(sc->sc_dev, "Unknown Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			    device_xname(sc->sc_dev), scsiq->host_status);
		}
	}

	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}
1204
1205
1206 /*
1207 * adw_async_callback() - Adv Library asynchronous event callback function.
1208 */
1209 static void
1210 adw_async_callback(ADW_SOFTC *sc, u_int8_t code)
1211 {
1212 switch (code) {
1213 case ADV_ASYNC_SCSI_BUS_RESET_DET:
1214 /* The firmware detected a SCSI Bus reset. */
1215 printf("%s: SCSI Bus reset detected\n", device_xname(sc->sc_dev));
1216 break;
1217
1218 case ADV_ASYNC_RDMA_FAILURE:
1219 /*
1220 * Handle RDMA failure by resetting the SCSI Bus and
1221 * possibly the chip if it is unresponsive.
1222 */
1223 printf("%s: RDMA failure. Resetting the SCSI Bus and"
1224 " the adapter\n", device_xname(sc->sc_dev));
1225 AdwResetSCSIBus(sc);
1226 break;
1227
1228 case ADV_HOST_SCSI_BUS_RESET:
1229 /* Host generated SCSI bus reset occurred. */
1230 printf("%s: Host generated SCSI bus reset occurred\n",
1231 device_xname(sc->sc_dev));
1232 break;
1233
1234 case ADV_ASYNC_CARRIER_READY_FAILURE:
1235 /* Carrier Ready failure. */
1236 printf("%s: Carrier Ready failure!\n", device_xname(sc->sc_dev));
1237 break;
1238
1239 default:
1240 break;
1241 }
1242 }
1243