/*	$NetBSD: adw.c,v 1.1 1998/09/26 16:10:40 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif	/* ! DDB */

/******************************************************************************/


static void adw_enqueue __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static struct scsipi_xfer *adw_dequeue __P((ADW_SOFTC *));

static int adw_alloc_ccbs __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static void adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_start_ccbs __P((ADW_SOFTC *));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *));
static void adwminphys __P((struct buf *));
static void adw_wide_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_watchdog __P((void *));


/******************************************************************************/


struct scsipi_adapter adw_switch =
{
	adw_scsi_cmd,		/* called to start/enqueue a SCSI command */
	adwminphys,		/* limit a transfer to what the device can do */
	0,			/* unused */
	0,			/* unused */
};


/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */
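/*
 * Both values are in milliseconds; they are converted to clock ticks
 * with (msec * hz) / 1000 before being handed to timeout() below.
 */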


/******************************************************************************/
/* scsipi_xfer queue routines */
/******************************************************************************/

/*
 * Insert a scsipi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adw_enqueue(sc, xs, infront)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int infront;
{

	if (infront || sc->sc_queue.lh_first == NULL) {
		if (sc->sc_queue.lh_first == NULL)
			sc->sc_queuelast = xs;
		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
	sc->sc_queuelast = xs;
}


/*
 * Pull a scsipi_xfer off the front of the software queue.
 */
static struct scsipi_xfer *
adw_dequeue(sc)
	ADW_SOFTC *sc;
{
	struct scsipi_xfer *xs;

	xs = sc->sc_queue.lh_first;
	LIST_REMOVE(xs, free_list);

	if (sc->sc_queue.lh_first == NULL)
		sc->sc_queuelast = NULL;

	return (xs);
}


/******************************************************************************/
/* Control Blocks routines */
/******************************************************************************/


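/*
 * Allocate, map and load the single DMA-safe region that holds all the
 * control blocks.  Per-CCB bus addresses (used for sense data and the
 * scatter-gather blocks) are later derived from
 * sc_dmamap_control->dm_segs[0].ds_addr plus ADW_CCB_OFF(ccb).
 */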
static int
adw_alloc_ccbs(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADW_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb.
 *
 * If none are available, sleep until one comes free, unless the caller
 * cannot sleep (SCSI_NOSLEEP), in which case return NULL.
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we have to, and are allowed to, sleep waiting for one
	 * to come free.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adw_start_ccbs(sc);
}


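/*
 * Feed CCBs from the waiting queue to the adapter.  If AdvExeScsiQueue()
 * reports ADW_BUSY the CCB is left at the head of the queue and retried
 * later from adw_watchdog(); otherwise it is removed and, unless the
 * request is polled, a per-CCB timeout is armed.
 */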
static void
adw_start_ccbs(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adw_watchdog, ccb);

		if (AdvExeScsiQueue(sc, &ccb->scsiq) == ADW_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adw_watchdog, ccb,
			    (ADW_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/* SCSI layer interfacing routines */
/******************************************************************************/


int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdvInitFromEEP(sc);
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);

		/*
		 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
		 * Resets should be performed.
		 */
		if (sc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
			AdvResetSCSIBus(sc);
	}

	sc->isr_callback = (ulong) adw_wide_isr_callback;

	return (0);
}


void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	/*
	 * Initialize the ASC3550.
	 */
	switch (AdvInitAsc3550Driver(sc)) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;
	}


	/*
	 * Fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &adw_switch;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_ccbs(sc);
	if (error)
		return;		/* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;		/* (ENOMEM) */
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}

	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


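/*
 * Clamp a transfer to the largest size a CCB's DMA map can describe;
 * this matches the (ADW_MAX_SG_LIST - 1) * PAGE_SIZE limit passed to
 * bus_dmamap_create() in adw_init_ccb().
 */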
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * Start a SCSI operation given the command and the data address.
 * Also needs the unit, target and LUN.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 0, dontqueue = 0;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_intr(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adw_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adw_enqueue(sc, xs, 0);
			xs = adw_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * Get a ccb to use.  If the transfer is from a buf (possibly
	 * from interrupt time) then we can't allow it to sleep.
	 */

	if ((ccb = adw_get_ccb(sc, xs->flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off it in the first place.
		 */
		adw_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb)) {
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);

		/*
		 * Usually return SUCCESSFULLY_QUEUED.
		 */
		if ((xs->flags & SCSI_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion.
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}

	return (COMPLETE);
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the CCB structure.
	 */
	scsiqp->ccb_ptr = (ulong) ccb;


	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = (ulong) &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (xs->flags & SCSI_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif	/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (xs->flags & SCSI_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = (ulong) xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		scsiqp->sg_list_ptr = &ccb->sg_block[0];
		bzero(scsiqp->sg_list_ptr,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
		scsiqp->sg_list_ptr = NULL;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
{
	struct scsipi_xfer *xs = ccb->xs;
	ADW_SOFTC *sc = xs->sc_link->adapter_softc;
	ADW_SG_BLOCK *sg_block = scsiqp->sg_list_ptr;
	ulong sg_block_next_addr;	/* block and its next */
	ulong sg_block_physical_addr;
	int sg_block_index, i;		/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (ulong) sg_block;	/* allow math operation */
	sg_block_physical_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */
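	/*
	 * Each ADW_SG_BLOCK holds up to NO_OF_SG_PER_BLOCK entries; blocks
	 * are chained through sg_ptr, which holds the bus (physical) address
	 * of the next block, and the final block's sg_ptr is set to NULL.
	 */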

	sg_block_index = 0;
	do {
		sg_block->first_entry_no = sg_block_index;
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				scsiqp->sg_entry_cnt = sg_block_index + i + 1;
				sg_block->last_entry_no = sg_block_index + i;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block_index += NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = (ADW_SG_BLOCK *) sg_block_physical_addr;
		sg_block->last_entry_no = sg_block_index - 1;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virtual addr */
	} while (1);
}


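/*
 * Hardware interrupt handler.  AdvISR() services the chip and invokes
 * adw_wide_isr_callback() for each completed request; afterwards, if the
 * software queue is non-empty, restart it by handing its first entry back
 * to adw_scsi_cmd().
 */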
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	AdvISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adw_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adw_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		AdvResetSCSIBus(sc);
		ccb->timeout = ADW_ABORT_TIMEOUT;
		adw_queue_ccb(sc, ccb);
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ADW_ABORT_CCB(sc, ccb);
		xs->error = XS_TIMEOUT;
		ccb->timeout = ADW_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adw_queue_ccb(sc, ccb);
	}

	splx(s);
}


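/*
 * adw_watchdog() fires after AdvExeScsiQueue() reported ADW_BUSY in
 * adw_start_ccbs(); it clears CCB_WATCHDOG and tries to start the
 * waiting CCBs again.
 */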
static void
adw_watchdog(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adw_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/* NARROW and WIDE boards Interrupt callbacks */
/******************************************************************************/


/*
 * adw_wide_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
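/*
 * On entry the adapter has completed 'scsiq' and ccb_ptr gives back our
 * CCB: unload the data DMA map, translate the adapter's done/host/scsi
 * status into a scsipi error code (copying sense data on a check
 * condition), then free the CCB and call scsipi_done().
 */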
static void
adw_wide_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb = (ADW_CCB *) scsiq->ccb_ptr;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_sense_data *s1, *s2;
	/* int underrun = ASC_FALSE; */


	untimeout(adw_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}


	/*
	 * Check for an underrun condition.
	 */
/*	if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
		ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
		    scsiqp->data_cnt);
		underrun = ASC_TRUE;
	}
*/
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/*
		 * If there was an underrun without any other error,
		 * set DID_ERROR to indicate the underrun error.
		 *
		 * Note: There is no way yet to indicate the number
		 * of underrun bytes.
		 */
/*		if (xs->error == XS_NOERROR && underrun == ASC_TRUE) {
			scp->result = HOST_BYTE(DID_UNDERRUN);
		}
*/
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiq->scsi_status == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adw_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}