/*	$NetBSD: adw.c,v 1.8 1999/03/04 20:15:53 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif				/* ! DDB */

/******************************************************************************/


static int adw_alloc_ccbs __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static void adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_start_ccbs __P((ADW_SOFTC *));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
static void adwminphys __P((struct buf *));
static void adw_wide_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));
static void adw_watchdog __P((void *));


/******************************************************************************/


/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define	ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define	ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                         Control Blocks routines                            */
/******************************************************************************/


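/*
 * Allocate and map the DMA-safe memory that holds the driver's control
 * structure (struct adw_control, which contains all the CCBs), then create
 * and load a DMA map for it so its bus address is known to the controller.
 */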
static int
adw_alloc_ccbs(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_attach().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADW_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If this was the only free CCB, wake anybody waiting for one
	 * to come free, starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb.
 *
 * If there are none, either sleep until one comes free or, if the
 * caller cannot sleep (SCSI_NOSLEEP), return NULL.
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(sc, ccb_phys)
	ADW_SOFTC *sc;
	u_long ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adw_start_ccbs(sc);
}


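/*
 * Feed the waiting-CCB queue to the controller.  If the microcode reports
 * ADW_BUSY the CCB stays queued and a watchdog timer is armed to retry it
 * later; otherwise the CCB is dequeued and, for non-polled requests, a
 * completion timeout is started.
 */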
static void
adw_start_ccbs(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adw_watchdog, ccb);

		if (AdvExeScsiQueue(sc, &ccb->scsiq) == ADW_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adw_watchdog, ccb,
			    (ADW_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


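/*
 * Bring the chip to a known state: verify the chip signature, reset the
 * chip, load the configuration from the EEPROM (falling back to defaults
 * on a bad checksum), optionally reset the SCSI bus, and hook up the
 * interrupt callback used by the Adv Library.
 */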
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdvInitFromEEP(sc);
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);

		/*
		 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
		 * Resets should be performed.
		 */
		if (sc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
			AdvResetSCSIBus(sc);
	}

	sc->isr_callback = (ADW_CALLBACK) adw_wide_isr_callback;

	return (0);
}


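/*
 * Finish attachment: initialize the ASC3550 microcode, fill in the scsipi
 * adapter and link structures, set up the CCB queues and free list, carve
 * the control structure into CCBs, and attach the SCSI bus.
 */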
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	/*
	 * Initialize the ASC3550.
	 */
	switch (AdvInitAsc3550Driver(sc)) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_ccbs(sc);
	if (error)
		return;	/* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;	/* (ENOMEM) */
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 1, dontqueue = 0;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	if ((ccb = adw_get_ccb(sc, xs->flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb)) {
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);

		/*
		 * Usually return SUCCESSFULLY QUEUED
		 */
		if ((xs->flags & SCSI_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);


	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif				/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp, sg_block)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
	ADW_SG_BLOCK *sg_block;
{
	struct scsipi_xfer *xs = ccb->xs;
	ADW_SOFTC *sc = xs->sc_link->adapter_softc;
	ulong sg_block_next_addr;	/* block and its next */
	ulong sg_block_physical_addr;
	int sg_block_index, i;	/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (ulong) sg_block;	/* allow math operation */
	sg_block_physical_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	sg_block_index = 0;
	do {
		sg_block->first_entry_no = sg_block_index;
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				scsiqp->sg_entry_cnt = sg_block_index + i + 1;
				sg_block->last_entry_no = sg_block_index + i;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
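		/*
		 * More segments remain than fit in this block: link the
		 * current block to the next one (physical address for the
		 * microcode, virtual address for this loop) and continue.
		 */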
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block_index += NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = (ADW_SG_BLOCK *) sg_block_physical_addr;
		sg_block->last_entry_no = sg_block_index - 1;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	}
	while (1);
}


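/*
 * Interrupt handler: let the Adv Library service the chip (completed
 * requests are delivered through adw_wide_isr_callback()), then try to
 * start the first request waiting in the software queue.
 */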
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	AdvISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adw_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		(void) adw_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


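/*
 * A request took too long to complete.  On the first timeout try to abort
 * the CCB; if the abort itself times out, reset the SCSI bus.  In both
 * cases the CCB is re-queued with the (shorter) abort timeout.
 */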
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		AdvResetSCSIBus(sc);
		ccb->timeout = ADW_ABORT_TIMEOUT;
		adw_queue_ccb(sc, ccb);
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ADW_ABORT_CCB(sc, ccb);
		xs->error = XS_TIMEOUT;
		ccb->timeout = ADW_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adw_queue_ccb(sc, ccb);
	}

	splx(s);
}


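/*
 * Watchdog for CCBs the controller refused with ADW_BUSY: clear the
 * watchdog flag and try to start the waiting CCBs again.
 */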
static void
adw_watchdog(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adw_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                      WIDE boards Interrupt callbacks                       */
/******************************************************************************/


/*
 * adw_wide_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
static void
adw_wide_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;
	/* int underrun = ASC_FALSE; */


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);
	xs = ccb->xs;

	untimeout(adw_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 *	ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 *	scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/*
		 * If there was an underrun without any other error,
		 * set DID_ERROR to indicate the underrun error.
		 *
		 * Note: There is no way yet to indicate the number
		 * of underrun bytes.
		 */
		/*
		 * if (xs->error == XS_NOERROR && underrun == ASC_TRUE) {
		 *	scp->result = HOST_BYTE(DID_UNDERRUN); }
		 */
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			if (scsiq->scsi_status == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adw_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}