/*	$NetBSD: adw.c,v 1.11 1999/09/11 15:34:45 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adwlib.h>
#include <dev/ic/adw.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adw.c)")
#endif	/* ! DDB */

/******************************************************************************/


static int adw_alloc_ccbs __P((ADW_SOFTC *));
static int adw_create_ccbs __P((ADW_SOFTC *, ADW_CCB *, int));
static void adw_free_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_reset_ccb __P((ADW_CCB *));
static int adw_init_ccb __P((ADW_SOFTC *, ADW_CCB *));
static ADW_CCB *adw_get_ccb __P((ADW_SOFTC *, int));
static void adw_queue_ccb __P((ADW_SOFTC *, ADW_CCB *));
static void adw_start_ccbs __P((ADW_SOFTC *));

static int adw_scsi_cmd __P((struct scsipi_xfer *));
static int adw_build_req __P((struct scsipi_xfer *, ADW_CCB *));
static void adw_build_sglist __P((ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *));
static void adwminphys __P((struct buf *));
static void adw_wide_isr_callback __P((ADW_SOFTC *, ADW_SCSI_REQ_Q *));
static void adw_sbreset_callback __P((ADW_SOFTC *));

static int adw_poll __P((ADW_SOFTC *, struct scsipi_xfer *, int));
static void adw_timeout __P((void *));


/******************************************************************************/

/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adw_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADW_ABORT_TIMEOUT	10000	/* time to wait for abort (mSec) */
#define ADW_WATCH_TIMEOUT	10000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/


static int
adw_alloc_ccbs(sc)
	ADW_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adw_init().  We return the number of CCBs successfully created.
 */
static int
adw_create_ccbs(sc, ccbstore, count)
	ADW_SOFTC *sc;
	ADW_CCB *ccbstore;
	int count;
{
	ADW_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADW_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adw_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adw_free_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();

	adw_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adw_reset_ccb(ccb)
	ADW_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adw_init_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Put it in the phystokv hash table.
	 * It never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADW_CCB *
adw_get_ccb(sc, flags)
	ADW_SOFTC *sc;
	int flags;
{
	ADW_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "adwccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADW_CCB *
adw_ccb_phys_kv(sc, ccb_phys)
	ADW_SOFTC *sc;
	u_int32_t ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADW_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adw_queue_ccb(sc, ccb)
	ADW_SOFTC *sc;
	ADW_CCB *ccb;
{
	int s;

	s = splbio();
	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	splx(s);

	adw_start_ccbs(sc);
}

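/*
 * Pull CCBs off the waiting queue and hand them to the microcode,
 * arming a timeout for every request that is not being polled.
 */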
static void
adw_start_ccbs(sc)
	ADW_SOFTC *sc;
{
	ADW_CCB *ccb;
	int s;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		while (AdvExeScsiQueue(sc, &ccb->scsiq) == ADW_BUSY)
			;

		s = splbio();
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		splx(s);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adw_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/

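/*
 * First level initialization: verify the chip signature, reset the chip,
 * load the configuration from the EEPROM and, if requested there, reset
 * the SCSI bus.
 */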
int
adw_init(sc)
	ADW_SOFTC *sc;
{
	u_int16_t warn_code;


	sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
	    ADW_LIB_VERSION_MINOR;
	sc->cfg.chip_version =
	    ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);

	/*
	 * Reset the chip to start and allow register writes.
	 */
	if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
		panic("adw_init: adw_find_signature failed");
	} else {
		AdvResetChip(sc->sc_iot, sc->sc_ioh);

		warn_code = AdvInitFromEEP(sc);
		if (warn_code & ASC_WARN_EEPROM_CHKSUM)
			printf("%s: Bad checksum found. "
			    "Setting default values.\n",
			    sc->sc_dev.dv_xname);
		if (warn_code & ASC_WARN_EEPROM_TERMINATION)
			printf("%s: Bad bus termination setting. "
			    "Using automatic termination.\n",
			    sc->sc_dev.dv_xname);

		/*
		 * Reset the SCSI Bus if the EEPROM indicates that SCSI Bus
		 * Resets should be performed.
		 */
		if (sc->bios_ctrl & BIOS_CTRL_RESET_SCSI_BUS)
			AdvResetSCSIBus(sc);
	}

	sc->isr_callback = (ADW_CALLBACK) adw_wide_isr_callback;
	sc->sbreset_callback = (ADW_CALLBACK) adw_sbreset_callback;

	return (0);
}

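/*
 * Second level initialization: load the ASC3550 microcode, fill in the
 * scsipi adapter and link structures, allocate and create the control
 * blocks, then attach the SCSI bus.
 */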
void
adw_attach(sc)
	ADW_SOFTC *sc;
{
	int i, error;


	/*
	 * Initialize the ASC3550.
	 */
	switch (AdvInitAsc3550Driver(sc)) {
	case ASC_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    sc->sc_dev.dv_xname);
		break;

	case ASC_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    sc->sc_dev.dv_xname);
		break;
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adw_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = adwminphys;

	/*
	 * Fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adw_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ADW_MAX_TID;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_ccbs(sc);
	if (error)
		return;		/* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;		/* (ENOMEM) */
	} else if (i != ADW_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks"
		    " created\n",
		    sc->sc_dev.dv_xname, i, ADW_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}

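/*
 * Clamp a transfer to what a full scatter/gather list can map.
 */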
static void
adwminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.
 * Also needs the unit, target and lu.
 */
static int
adw_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	ADW_CCB *ccb;
	int s, fromqueue = 1, dontqueue = 0;

	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adw_intr(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}

	/*
	 * Get a CCB to use.  If the transfer is from a buf (possibly
	 * from interrupt time), we can't allow it to sleep.
	 */

	if ((ccb = adw_get_ccb(sc, xs->flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	if (adw_build_req(xs, ccb)) {
//		s = splbio();
		adw_queue_ccb(sc, ccb);
//		splx(s);

		/*
		 * Usually return SUCCESSFULLY_QUEUED
		 */
		if ((xs->flags & SCSI_POLL) == 0)
			return (SUCCESSFULLY_QUEUED);

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (adw_poll(sc, xs, ccb->timeout)) {
			adw_timeout(ccb);
			if (adw_poll(sc, xs, ccb->timeout))
				adw_timeout(ccb);
		}
	}
	return (COMPLETE);
}


/*
 * Build a request structure for the Wide Boards.
 */
static int
adw_build_req(xs, ccb)
	struct scsipi_xfer *xs;
	ADW_CCB *ccb;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	bzero(scsiqp, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure.
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 */
	bcopy(xs->cmd, &scsiqp->cdb, scsiqp->cdb_len = xs->cmdlen);

	scsiqp->target_id = sc_link->scsipi_scsi.target;
	scsiqp->target_lun = sc_link->scsipi_scsi.lun;

	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, scsi_sense);
	scsiqp->sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (xs->flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		} else
#endif	/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adw_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ADW_MAX_SG_LIST);
			} else {
				printf("%s: adw_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adw_free_ccb(sc, ccb);
			return (0);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = xs->datalen;
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = ccb->dmamap_xfer->dm_segs[0].ds_addr;
		bzero(ccb->sg_block, sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}


/*
 * Build scatter-gather list for Wide Boards.
 */
static void
adw_build_sglist(ccb, scsiqp, sg_block)
	ADW_CCB *ccb;
	ADW_SCSI_REQ_Q *scsiqp;
	ADW_SG_BLOCK *sg_block;
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int sg_block_index, i;		/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	sg_block_physical_addr = ccb->hashkey +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = sg_block_physical_addr;

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK dma segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	sg_block_index = 0;
	do {
		sg_block->first_entry_no = sg_block_index;
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = sg_list->ds_addr;
			sg_block->sg_list[i].sg_count = sg_list->ds_len;

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				scsiqp->sg_entry_cnt = sg_block_index + i + 1;
				sg_block->last_entry_no = sg_block_index + i;
				sg_block->sg_ptr = NULL; /* next link = NULL */
				return;
			}
			sg_list++;
		}
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block_index += NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = sg_block_physical_addr;
		sg_block->last_entry_no = sg_block_index - 1;
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr; /* virt. addr */
	} while (1);
}

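/*
 * Hardware interrupt handler: let the Adv Library service the chip,
 * then try to restart the first request waiting in the software queue.
 */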
int
adw_intr(arg)
	void *arg;
{
	ADW_SOFTC *sc = arg;
	struct scsipi_xfer *xs;


	AdvISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adw_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		(void) adw_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adw_poll(sc, xs, count)
	ADW_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adw_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

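/*
 * A request has timed out: abort it on the first occurrence, and reset
 * the SCSI bus if the abort itself times out.
 */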
static void
adw_timeout(arg)
	void *arg;
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ADW_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORTED) {
		/*
		 * Abort Timed Out
		 * Lets try resetting the bus!
		 */
		printf(" AGAIN. Resetting SCSI Bus\n");
		ccb->flags &= ~CCB_ABORTED;
		/* AdvResetSCSIBus() will call sbreset_callback() */
		AdvResetSCSIBus(sc);
	} else {
		/*
		 * Abort the operation that has timed out
		 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
		/* ADW_ABORT_CCB() will implicitly call isr_callback() */
		ADW_ABORT_CCB(sc, ccb);
	}

	splx(s);
}


/******************************************************************************/
/*                      WIDE boards Interrupt callbacks                       */
/******************************************************************************/


/*
 * adw_wide_isr_callback() - Second Level Interrupt Handler called by AdvISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library.
 */
static void
adw_wide_isr_callback(sc, scsiq)
	ADW_SOFTC *sc;
	ADW_SCSI_REQ_Q *scsiq;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;
	int s;
//	int underrun = ASC_FALSE;


	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	untimeout(adw_timeout, ccb);

	if (ccb->flags & CCB_ABORTING) {
		printf("Retrying request\n");
		ccb->flags &= ~CCB_ABORTING;
		ccb->flags |= CCB_ABORTED;
		s = splbio();
		adw_queue_ccb(sc, ccb);
		splx(s);
		return;
	}

	xs = ccb->xs;


	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * Check for an underrun condition.
	 */
	/*
	 * if (xs->request_bufflen != 0 && scsiqp->data_cnt != 0) {
	 *	ASC_DBG1(1, "adw_isr_callback: underrun condition %lu bytes\n",
	 *	    scsiqp->data_cnt); underrun = ASC_TRUE; }
	 */
	/*
	 * 'done_status' contains the command's ending status.
	 */
	switch (scsiq->done_status) {
	case QD_NO_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;
		case QHSTA_M_SEL_TIMEOUT:
		default:
			/* QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/*
		 * If there was an underrun without any other error,
		 * set DID_ERROR to indicate the underrun error.
		 *
		 * Note: There is no way yet to indicate the number
		 * of underrun bytes.
		 */
		/*
		 * if (xs->error == XS_NOERROR && underrun == ASC_TRUE) {
		 *	scp->result = HOST_BYTE(DID_UNDERRUN); }
		 */
		break;

	case QD_WITH_ERROR:
		switch (scsiq->host_status) {
		case QHSTA_NO_ERROR:
			switch (scsiq->scsi_status) {
			case SS_CHK_CONDITION:
			case SS_CMD_TERMINATED:
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
				break;
			case SS_TARGET_BUSY:
			case SS_RSERV_CONFLICT:
			case SS_QUEUE_FULL:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_CONDITION_MET:
			case SS_INTERMID:
			case SS_INTERMID_COND_MET:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			case SS_GOOD:
				break;
			}
			break;

		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* Some other QHSTA error occurred. */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QD_ABORTED_BY_HOST:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	adw_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}

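/*
 * adw_sbreset_callback() - called by AdvResetSCSIBus() after a SCSI bus
 * reset.  Nothing to do here yet.
 */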
static void
adw_sbreset_callback(sc)
	ADW_SOFTC *sc;
{
}