/*	$NetBSD: adw.c,v 1.2 1998/09/26 19:54:22 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif	/* ! DDB */

/******************************************************************************/


static void adw_enqueue __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static struct scsipi_xfer *adw_dequeue __P((ADW_SOFTC *));

static int adw_alloc_ccbs __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static void adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_start_ccbs __P((ADW_SOFTC *));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *));
static void adwminphys __P((struct buf *));
static void adw_wide_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_watchdog __P((void *));


/******************************************************************************/


struct scsipi_adapter adw_switch =
{
	adw_scsi_cmd,		/* called to start/enqueue a SCSI command */
	adwminphys,		/* to limit the transfer to max device can do */
	0,			/* IT SEEMS IT IS NOT USED YET */
	0,			/* as above... */
};


/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                         scsipi_xfer queue routines                         */
/******************************************************************************/

/*
 * Insert a scsipi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adw_enqueue(sc, xs, infront)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int infront;
{

	if (infront || sc->sc_queue.lh_first == NULL) {
		if (sc->sc_queue.lh_first == NULL)
			sc->sc_queuelast = xs;
		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
	sc->sc_queuelast = xs;
}


/*
 * Pull a scsipi_xfer off the front of the software queue.
 */
static struct scsipi_xfer *
adw_dequeue(sc)
	ADW_SOFTC *sc;
{
	struct scsipi_xfer *xs;

	xs = sc->sc_queue.lh_first;
	LIST_REMOVE(xs, free_list);

	if (sc->sc_queue.lh_first == NULL)
		sc->sc_queuelast = NULL;

	return (xs);
}


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/


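/*
 * Allocate the DMA-safe memory holding the controller control blocks,
 * map it into kernel virtual space and load it into a single DMA map,
 * so the CCBs can later be handed to the chip by bus address.
 */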
static int
adw_alloc_ccbs(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_init().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADW_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adw_start_ccbs(sc);
}


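/*
 * Feed CCBs from the waiting queue to the controller.  If the board
 * reports itself busy, the CCB is left at the head of the queue and a
 * watchdog timeout is scheduled to retry later.
 */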
static void
adw_start_ccbs(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adw_watchdog, ccb);

		if (AdvExeScsiQueue(sc, &ccb->scsiq) == ADW_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adw_watchdog, ccb,
			    (ADW_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


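/*
 * Per-board initialization: check the chip signature, reset the chip,
 * load the configuration from the EEPROM and, if the EEPROM asks for it,
 * reset the SCSI bus.
 */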
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdvInitFromEEP(sc);
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);

		/*
		 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
		 * Resets should be performed.
		 */
		if (sc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
			AdvResetSCSIBus(sc);
	}

	sc->isr_callback = (ulong) adw_wide_isr_callback;

	return (0);
}


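/*
 * Finish bringing the board up: initialize the ASC3550 microcode, fill in
 * the prototype scsipi_link, allocate and create the control blocks and
 * attach the SCSI bus.
 */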
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	/*
	 * Initialize the ASC3550.
	 */
	switch (AdvInitAsc3550Driver(sc)) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;
	}


	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &adw_switch;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_ccbs(sc);
	if (error)
		return;		/* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;		/* (ENOMEM) */
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


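/*
 * Clamp a transfer to the largest size the per-CCB scatter-gather map
 * can describe, then apply the generic minphys() limit.
 */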
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 1, dontqueue = 0;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_intr(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adw_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adw_enqueue(sc, xs, 0);
			xs = adw_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	if ((ccb = adw_get_ccb(sc, xs->flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		adw_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb)) {
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);

		/*
		 * Usually return SUCCESSFULLY QUEUED
		 */
		if ((xs->flags & SCSI_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the CCB structure.
	 */
	scsiqp->ccb_ptr = (ulong) ccb;


	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = (ulong) &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif	/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = (ulong) xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		scsiqp->sg_list_ptr = &ccb->sg_block[0];
		bzero(scsiqp->sg_list_ptr,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
		scsiqp->sg_list_ptr = NULL;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
{
	struct scsipi_xfer *xs = ccb->xs;
	ADW_SOFTC *sc = xs->sc_link->adapter_softc;
	ADW_SG_BLOCK *sg_block = scsiqp->sg_list_ptr;
	ulong sg_block_next_addr;	/* block and its next */
	ulong sg_block_physical_addr;
	int sg_block_index, i;	/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (ulong) sg_block;	/* allow math operation */
	sg_block_physical_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	sg_block_index = 0;
	do {
		sg_block->first_entry_no = sg_block_index;
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				scsiqp->sg_entry_cnt = sg_block_index + i + 1;
				sg_block->last_entry_no = sg_block_index + i;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block_index += NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = (ADW_SG_BLOCK *) sg_block_physical_addr;
		sg_block->last_entry_no = sg_block_index - 1;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}


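/*
 * Interrupt handler: let the Adv Library service the chip (completed
 * requests come back through adw_wide_isr_callback()), then kick the
 * software queue in case requests were waiting for a free CCB.
 */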
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	AdvISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adw_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adw_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


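/*
 * A command has timed out: abort it and requeue it with a short timeout.
 * If the abort itself times out, reset the SCSI bus instead.
 */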
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through here before, a previous abort has failed;
	 * don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		AdvResetSCSIBus(sc);
		ccb->timeout = ADW_ABORT_TIMEOUT;
		adw_queue_ccb(sc, ccb);
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ADW_ABORT_CCB(sc, ccb);
		xs->error = XS_TIMEOUT;
		ccb->timeout = ADW_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adw_queue_ccb(sc, ccb);
	}

	splx(s);
}


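/*
 * Watchdog: the board was busy when we last tried to start CCBs, so try
 * again now that some time has passed.
 */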
static void
adw_watchdog(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adw_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                  NARROW and WIDE boards Interrupt callbacks                */
/******************************************************************************/


/*
 * adw_wide_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
static void
adw_wide_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb = (ADW_CCB *) scsiq->ccb_ptr;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_sense_data *s1, *s2;
	/* int underrun = ASC_FALSE; */


	untimeout(adw_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 * ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 * scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/*
		 * If there was an underrun without any other error,
		 * set DID_ERROR to indicate the underrun error.
		 *
		 * Note: There is no way yet to indicate the number
		 * of underrun bytes.
		 */
		/*
		 * if (xs->error == XS_NOERROR && underrun == ASC_TRUE) {
		 * scp->result = HOST_BYTE(DID_UNDERRUN); }
		 */
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiq->scsi_status == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adw_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}