/*	$NetBSD: adv.c,v 1.15 2000/02/12 19:12:52 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define Debugger() panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_control_data __P((ASC_SOFTC *));
static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_reset_ccb __P((ADV_CCB *));
static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_start_ccbs __P((ASC_SOFTC *));


static int adv_scsi_cmd __P((struct scsipi_xfer *));
static void advminphys __P((struct buf *));
static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));

static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static void adv_timeout __P((void *));
static void adv_watchdog __P((void *));


/******************************************************************************/

/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define	ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define	ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                           Control Blocks routines                         */
/******************************************************************************/

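/*
 * Allocate the DMA-safe memory holding the shared control blocks and the
 * overrun buffer, map it into kernel virtual space and load the control
 * DMA map so the bus addresses of the CCBs are known.
 */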
static int
adv_alloc_control_data(sc)
	ASC_SOFTC      *sc;
{
	bus_dma_segment_t seg;
	int             error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
			NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		       " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
			sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
			BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
			1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
			&sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
			sc->sc_control, sizeof(struct adv_control), NULL,
			BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Initialize the overrun_buf address.
	 */
	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    offsetof(struct adv_control, overrun_buf);

	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_init().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccbstore;
	int             count;
{
	ADV_CCB        *ccb;
	int             i, error;

	bzero(ccbstore, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			       sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int             s;

	s = splbio();

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB        *ccb;
{

	ccb->flags = 0;
}

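/*
 * Initialize a single CCB: create its data-transfer DMA map and enter it
 * in the physical-to-kernel-virtual hash table used by adv_ccb_phys_kv().
 */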
static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int             hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
			(ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
			ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
			0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC      *sc;
	int             flags;
{
	ADV_CCB        *ccb = 0;
	int             s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & XS_CTL_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADV_CCB *
adv_ccb_phys_kv(sc, ccb_phys)
	ASC_SOFTC      *sc;
	u_long          ccb_phys;
{
	int             hashnum = CCB_HASH(ccb_phys);
	ADV_CCB        *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}

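/*
 * Feed CCBs from the waiting queue to the chip.  If the chip reports busy,
 * leave the CCB at the head of the queue and arm a watchdog timeout so the
 * queue is retried later.
 */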
static void
adv_start_ccbs(sc)
	ASC_SOFTC      *sc;
{
	ADV_CCB        *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adv_watchdog, ccb);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adv_watchdog, ccb,
				(ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                     */
/******************************************************************************/

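/*
 * Probe for the chip signature, read the board configuration from the
 * EEPROM (reporting any warnings), apply the driver's own settings and
 * install the narrow-board interrupt callback.
 */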
int
adv_init(sc)
	ASC_SOFTC      *sc;
{
	int             warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		printf("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			       " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

	return (0);
}

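/*
 * Initialize the board RISC chip, fill in the scsipi adapter and link
 * structures, allocate the control data and CCBs, and attach the SCSI bus.
 */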
void
adv_attach(sc)
	ASC_SOFTC      *sc;
{
	int             i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* AllOK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		      sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		      sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		      sc->sc_dev.dv_xname);
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adv_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = advminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adv_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = 7;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks and the overrun buffer.
	 */
	error = adv_alloc_control_data(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */ ;
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}

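/*
 * Clamp a transfer to the largest size the per-CCB scatter/gather list can
 * describe, then apply the generic minphys().
 */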
static void
advminphys(bp)
	struct buf     *bp;
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */
static int
adv_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	int             s, flags, error, nsegs;
	int             fromqueue = 0, dontqueue = 0, nowait = 0;


	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adv_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
		nowait = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->xs_control & XS_CTL_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use.  If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->xs_control;
	if (nowait)
		flags |= XS_CTL_NOSLEEP;
	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	ccb->scsiq.q2.ccb_ptr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);

	ccb->scsiq.cdbptr = &xs->cmd->opcode;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
					sc_link->scsipi_scsi.lun);
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request.  This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation.  255 is the max number of tags or pending
	 * commands a device may have outstanding.
	 */
	sc->reqcnt[sc_link->scsipi_scsi.target]++;
	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
				ccb->dmamap_xfer, (struct uio *) xs->data,
				(flags & XS_CTL_NOSLEEP) ?
				BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
				ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
				(flags & XS_CTL_NOSLEEP) ?
				BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				       " segments\n",
				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				       " dma map\n",
				       sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adv_free_ccb(sc, ccb);
			return (COMPLETE);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
				(flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
				BUS_DMASYNC_PREWRITE);


		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

#ifdef ASC_DEBUG
	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
	       sc_link->scsipi_scsi.target,
	       sc_link->scsipi_scsi.lun, xs->cmd->opcode,
	       (unsigned long)ccb);
#endif
	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & XS_CTL_POLL) == 0)
		return (SUCCESSFULLY_QUEUED);

	/*
	 * If we can't use interrupts, poll on completion
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
	return (COMPLETE);
}

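/*
 * Interrupt handler: let the Asc Library service the chip, then try to
 * start the first request waiting in the software queue.
 */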
int
adv_intr(arg)
	void           *arg;
{
	ASC_SOFTC      *sc = arg;
	struct scsipi_xfer *xs;

#ifdef ASC_DEBUG
	int             int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adv_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		(void) adv_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC      *sc;
	struct scsipi_xfer *xs;
	int             count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

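/*
 * A request has timed out.  On the first pass abort the command and requeue
 * the CCB; if the abort itself times out, reset the SCSI bus instead.
 */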
static void
adv_timeout(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}

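/*
 * Watchdog fired because the chip was busy when this CCB was last started:
 * clear the flag and retry the waiting queue.
 */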
static void
adv_watchdog(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                      NARROW boards Interrupt callbacks                    */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC      *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	       (unsigned long)ccb,
	       xs->sc_link->scsipi_scsi.target,
	       xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
#endif
	untimeout(adv_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
				(xs->xs_control & XS_CTL_DATA_IN) ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->scsipi_scsi.lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
					xs->sc_link->scsipi_scsi.target & 0x7,
					(ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}