/*	$NetBSD: adv.c,v 1.14 1999/09/30 23:04:40 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define Debugger()      panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_control_data __P((ASC_SOFTC *));
static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_reset_ccb __P((ADV_CCB *));
static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_start_ccbs __P((ASC_SOFTC *));


static int adv_scsi_cmd __P((struct scsipi_xfer *));
static void advminphys __P((struct buf *));
static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));

static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static void adv_timeout __P((void *));
static void adv_watchdog __P((void *));


/******************************************************************************/

/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adv_dev =
{
        NULL,                   /* Use default error handler */
        NULL,                   /* have a queue, served by this */
        NULL,                   /* have no async handler */
        NULL,                   /* Use default 'done' routine */
};


#define ADV_ABORT_TIMEOUT       2000    /* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT       1000    /* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                           Control Blocks routines                          */
/******************************************************************************/

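/*
 * Allocate the DMA-safe memory that holds the driver's control blocks and
 * the overrun buffer, map it into kernel virtual space and load it into a
 * DMA map so the board can address it.
 */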
static int
adv_alloc_control_data(sc)
        ASC_SOFTC *sc;
{
        bus_dma_segment_t seg;
        int error, rseg;

        /*
         * Allocate the control blocks.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
            NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to allocate control structures,"
                    " error = %d\n", sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
                printf("%s: unable to map control structures, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        /*
         * Create and load the DMA map used for the control blocks.
         */
        if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
            1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
            &sc->sc_dmamap_control)) != 0) {
                printf("%s: unable to create control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }
        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
            sc->sc_control, sizeof(struct adv_control), NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf("%s: unable to load control DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * Initialize the overrun_buf address.
         */
        sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            offsetof(struct adv_control, overrun_buf);

        return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_init().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
        ASC_SOFTC *sc;
        ADV_CCB *ccbstore;
        int count;
{
        ADV_CCB *ccb;
        int i, error;

        bzero(ccbstore, sizeof(ADV_CCB) * count);
        for (i = 0; i < count; i++) {
                ccb = &ccbstore[i];
                if ((error = adv_init_ccb(sc, ccb)) != 0) {
                        printf("%s: unable to initialize ccb, error = %d\n",
                            sc->sc_dev.dv_xname, error);
                        return (i);
                }
                TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
        }

        return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
        ASC_SOFTC *sc;
        ADV_CCB *ccb;
{
        int s;

        s = splbio();

        adv_reset_ccb(ccb);
        TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

        /*
         * If there were none, wake anybody waiting for one to come free,
         * starting with queued entries.
         */
        if (ccb->chain.tqe_next == 0)
                wakeup(&sc->sc_free_ccb);

        splx(s);
}


static void
adv_reset_ccb(ccb)
        ADV_CCB *ccb;
{

        ccb->flags = 0;
}

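/*
 * Initialize a ccb: create the DMA map used for data transfers and enter
 * the ccb's bus address in the physical-to-virtual hash table so it can be
 * looked up again at interrupt time.
 */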
static int
adv_init_ccb(sc, ccb)
        ASC_SOFTC *sc;
        ADV_CCB *ccb;
{
        int hashnum, error;

        /*
         * Create the DMA map for this CCB.
         */
        error = bus_dmamap_create(sc->sc_dmat,
            (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
            ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
            0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
        if (error) {
                printf("%s: unable to create DMA map, error = %d\n",
                    sc->sc_dev.dv_xname, error);
                return (error);
        }

        /*
         * put in the phystokv hash table
         * Never gets taken out.
         */
        ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADV_CCB_OFF(ccb);
        hashnum = CCB_HASH(ccb->hashkey);
        ccb->nexthash = sc->sc_ccbhash[hashnum];
        sc->sc_ccbhash[hashnum] = ccb;

        adv_reset_ccb(ccb);
        return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
        ASC_SOFTC *sc;
        int flags;
{
        ADV_CCB *ccb = 0;
        int s;

        s = splbio();

        /*
         * If we can and have to, sleep waiting for one to come free
         * but only if we can't allocate a new one.
         */
        for (;;) {
                ccb = sc->sc_free_ccb.tqh_first;
                if (ccb) {
                        TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
                        break;
                }
                if ((flags & XS_CTL_NOSLEEP) != 0)
                        goto out;

                tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
        }

        ccb->flags |= CCB_ALLOC;

out:
        splx(s);
        return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADV_CCB *
adv_ccb_phys_kv(sc, ccb_phys)
        ASC_SOFTC *sc;
        u_long ccb_phys;
{
        int hashnum = CCB_HASH(ccb_phys);
        ADV_CCB *ccb = sc->sc_ccbhash[hashnum];

        while (ccb) {
                if (ccb->hashkey == ccb_phys)
                        break;
                ccb = ccb->nexthash;
        }
        return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
        ASC_SOFTC *sc;
        ADV_CCB *ccb;
{

        TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

        adv_start_ccbs(sc);
}

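/*
 * Hand waiting CCBs to the controller.  If the board reports ASC_BUSY the
 * CCB stays at the head of the waiting queue and a watchdog timeout is
 * armed to retry later.
 */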
static void
adv_start_ccbs(sc)
        ASC_SOFTC *sc;
{
        ADV_CCB *ccb;

        while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
                if (ccb->flags & CCB_WATCHDOG)
                        untimeout(adv_watchdog, ccb);

                if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
                        ccb->flags |= CCB_WATCHDOG;
                        timeout(adv_watchdog, ccb,
                            (ADV_WATCH_TIMEOUT * hz) / 1000);
                        break;
                }
                TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

                if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
                        timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
        }
}


/******************************************************************************/
/*                        SCSI layer interfacing routines                     */
/******************************************************************************/

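/*
 * Find the board, read its configuration from the EEPROM (reporting any
 * warnings), then write the possibly adjusted configuration back to the
 * chip and install the narrow-board interrupt callback.
 */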
int
adv_init(sc)
        ASC_SOFTC *sc;
{
        int warn;

        if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
                printf("adv_init: failed to find signature\n");
                return (1);
        }

        /*
         * Read the board configuration
         */
        AscInitASC_SOFTC(sc);
        warn = AscInitFromEEP(sc);
        if (warn) {
                printf("%s -get: ", sc->sc_dev.dv_xname);
                switch (warn) {
                case -1:
                        printf("Chip is not halted\n");
                        break;

                case -2:
                        printf("Couldn't get MicroCode Start"
                            " address\n");
                        break;

                case ASC_WARN_IO_PORT_ROTATE:
                        printf("I/O port address modified\n");
                        break;

                case ASC_WARN_AUTO_CONFIG:
                        printf("I/O port increment switch enabled\n");
                        break;

                case ASC_WARN_EEPROM_CHKSUM:
                        printf("EEPROM checksum error\n");
                        break;

                case ASC_WARN_IRQ_MODIFIED:
                        printf("IRQ modified\n");
                        break;

                case ASC_WARN_CMD_QNG_CONFLICT:
                        printf("tag queuing enabled w/o disconnects\n");
                        break;

                default:
                        printf("unknown warning %d\n", warn);
                }
        }
        if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
                sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

        /*
         * Modify the board configuration
         */
        warn = AscInitFromASC_SOFTC(sc);
        if (warn) {
                printf("%s -set: ", sc->sc_dev.dv_xname);
                switch (warn) {
                case ASC_WARN_CMD_QNG_CONFLICT:
                        printf("tag queuing enabled w/o disconnects\n");
                        break;

                case ASC_WARN_AUTO_CONFIG:
                        printf("I/O port increment switch enabled\n");
                        break;

                default:
                        printf("unknown warning %d\n", warn);
                }
        }
        sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

        return (0);
}

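/*
 * Start the board's RISC chip and microcode, fill in the scsipi adapter and
 * prototype scsipi_link, allocate the control blocks and finally attach the
 * SCSI bus.
 */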
void
adv_attach(sc)
        ASC_SOFTC *sc;
{
        int i, error;

        /*
         * Initialize board RISC chip and enable interrupts.
         */
        switch (AscInitDriver(sc)) {
        case 0:
                /* AllOK */
                break;

        case 1:
                panic("%s: bad signature", sc->sc_dev.dv_xname);
                break;

        case 2:
                panic("%s: unable to load MicroCode",
                    sc->sc_dev.dv_xname);
                break;

        case 3:
                panic("%s: unable to initialize MicroCode",
                    sc->sc_dev.dv_xname);
                break;

        default:
                panic("%s: unable to initialize board RISC chip",
                    sc->sc_dev.dv_xname);
        }

        /*
         * Fill in the adapter.
         */
        sc->sc_adapter.scsipi_cmd = adv_scsi_cmd;
        sc->sc_adapter.scsipi_minphys = advminphys;

        /*
         * fill in the prototype scsipi_link.
         */
        sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
        sc->sc_link.adapter_softc = sc;
        sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
        sc->sc_link.adapter = &sc->sc_adapter;
        sc->sc_link.device = &adv_dev;
        sc->sc_link.openings = 4;
        sc->sc_link.scsipi_scsi.max_target = 7;
        sc->sc_link.scsipi_scsi.max_lun = 7;
        sc->sc_link.type = BUS_SCSI;


        TAILQ_INIT(&sc->sc_free_ccb);
        TAILQ_INIT(&sc->sc_waiting_ccb);
        TAILQ_INIT(&sc->sc_queue);


        /*
         * Allocate the Control Blocks and the overrun buffer.
         */
        error = adv_alloc_control_data(sc);
        if (error)
                return; /* (error) */

        /*
         * Create and initialize the Control Blocks.
         */
        i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
        if (i == 0) {
                printf("%s: unable to create control blocks\n",
                    sc->sc_dev.dv_xname);
                return; /* (ENOMEM) */
        } else if (i != ADV_MAX_CCB) {
                printf("%s: WARNING: only %d of %d control blocks created\n",
                    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
        }
        config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}

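/*
 * Clamp a transfer to the largest size a single CCB's scatter/gather list
 * can map, then apply the machine-dependent minphys().
 */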
static void
advminphys(bp)
        struct buf *bp;
{

        if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
                bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
        minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */
static int
adv_scsi_cmd(xs)
        struct scsipi_xfer *xs;
{
        struct scsipi_link *sc_link = xs->sc_link;
        ASC_SOFTC *sc = sc_link->adapter_softc;
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADV_CCB *ccb;
        int s, flags, error, nsegs;
        int fromqueue = 0, dontqueue = 0;

        s = splbio();           /* protect the queue */

        /*
         * If we're running the queue from adv_intr(), we've been
         * called with the first queue entry as our argument.
         */
        if (xs == TAILQ_FIRST(&sc->sc_queue)) {
                TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
                fromqueue = 1;
        } else {

                /* Polled requests can't be queued for later. */
                dontqueue = xs->xs_control & XS_CTL_POLL;

                /*
                 * If there are jobs in the queue, run them first.
                 */
                if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
                        /*
                         * If we can't queue, we have to abort, since
                         * we have to preserve order.
                         */
                        if (dontqueue) {
                                splx(s);
                                xs->error = XS_DRIVER_STUFFUP;
                                return (TRY_AGAIN_LATER);
                        }
                        /*
                         * Swap with the first queue entry.
                         */
                        TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
                        xs = TAILQ_FIRST(&sc->sc_queue);
                        TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
                        fromqueue = 1;
                }
        }


        /*
         * get a ccb to use.  If the transfer
         * is from a buf (possibly from interrupt time)
         * then we can't allow it to sleep
         */

        flags = xs->xs_control;
        if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
                /*
                 * If we can't queue, we lose.
                 */
                if (dontqueue) {
                        splx(s);
                        xs->error = XS_DRIVER_STUFFUP;
                        return (TRY_AGAIN_LATER);
                }
                /*
                 * Stuff ourselves into the queue, in front
                 * if we came off in the first place.
                 */
                if (fromqueue)
                        TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
                else
                        TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
                splx(s);
                return (SUCCESSFULLY_QUEUED);
        }
        splx(s);                /* done playing with the queue */

        ccb->xs = xs;
        ccb->timeout = xs->timeout;

        /*
         * Build up the request
         */
        memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

        ccb->scsiq.q2.ccb_ptr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADV_CCB_OFF(ccb);

        ccb->scsiq.cdbptr = &xs->cmd->opcode;
        ccb->scsiq.q2.cdb_len = xs->cmdlen;
        ccb->scsiq.q1.target_id =
            ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
        ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
        ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
            sc_link->scsipi_scsi.lun);
        ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
            ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
        ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

        /*
         * If there are any outstanding requests for the current target,
         * then send an ORDERED request on every 255th request.  This
         * heuristic tries to retain the benefit of request sorting while
         * preventing request starvation.  255 is the maximum number of tags
         * or pending commands a device may have outstanding.
         */
        sc->reqcnt[sc_link->scsipi_scsi.target]++;
        if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
            (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
                ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
        } else {
                ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
        }


        if (xs->datalen) {
                /*
                 * Map the DMA transfer.
                 */
#ifdef TFS
                if (flags & XS_CTL_DATA_UIO) {
                        error = bus_dmamap_load_uio(dmat,
                            ccb->dmamap_xfer, (struct uio *) xs->data,
                            (flags & XS_CTL_NOSLEEP) ?
                            BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
                } else
#endif /* TFS */
                {
                        error = bus_dmamap_load(dmat,
                            ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
                            (flags & XS_CTL_NOSLEEP) ?
                            BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
                }

                if (error) {
                        if (error == EFBIG) {
                                printf("%s: adv_scsi_cmd, more than %d dma"
                                    " segments\n",
                                    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
                        } else {
                                printf("%s: adv_scsi_cmd, error %d loading"
                                    " dma map\n",
                                    sc->sc_dev.dv_xname, error);
                        }

                        xs->error = XS_DRIVER_STUFFUP;
                        adv_free_ccb(sc, ccb);
                        return (COMPLETE);
                }
                bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
                    ccb->dmamap_xfer->dm_mapsize,
                    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
                    BUS_DMASYNC_PREWRITE);


                memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

                for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

                        ccb->sghead.sg_list[nsegs].addr =
                            ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
                        ccb->sghead.sg_list[nsegs].bytes =
                            ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
                }

                ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
                    ccb->dmamap_xfer->dm_nsegs;

                ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
                ccb->scsiq.sg_head = &ccb->sghead;
                ccb->scsiq.q1.data_addr = 0;
                ccb->scsiq.q1.data_cnt = 0;
        } else {
                /*
                 * No data xfer, use non S/G values.
                 */
                ccb->scsiq.q1.data_addr = 0;
                ccb->scsiq.q1.data_cnt = 0;
        }

#ifdef ASC_DEBUG
        printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX\n",
            sc_link->scsipi_scsi.target,
            sc_link->scsipi_scsi.lun, xs->cmd->opcode,
            (unsigned long)ccb);
#endif
        s = splbio();
        adv_queue_ccb(sc, ccb);
        splx(s);

        /*
         * Usually return SUCCESSFULLY QUEUED
         */
        if ((flags & XS_CTL_POLL) == 0)
                return (SUCCESSFULLY_QUEUED);

        /*
         * If we can't use interrupts, poll on completion
         */
        if (adv_poll(sc, xs, ccb->timeout)) {
                adv_timeout(ccb);
                if (adv_poll(sc, xs, ccb->timeout))
                        adv_timeout(ccb);
        }
        return (COMPLETE);
}

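/*
 * Interrupt handler: let the Asc library service the board, then kick the
 * software queue (see the note below).
 */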
int
adv_intr(arg)
        void *arg;
{
        ASC_SOFTC *sc = arg;
        struct scsipi_xfer *xs;

#ifdef ASC_DEBUG
        int int_pend = FALSE;

        if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
                int_pend = TRUE;
                printf("ISR - ");
        }
#endif
        AscISR(sc);
#ifdef ASC_DEBUG
        if (int_pend)
                printf("\n");
#endif

        /*
         * If there are queue entries in the software queue, try to
         * run the first one.  We should be more or less guaranteed
         * to succeed, since we just freed a CCB.
         *
         * NOTE: adv_scsi_cmd() relies on our calling it with
         * the first entry in the queue.
         */
        if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
                (void) adv_scsi_cmd(xs);

        return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
        ASC_SOFTC *sc;
        struct scsipi_xfer *xs;
        int count;
{

        /* timeouts are in msec, so we loop in 1000 usec cycles */
        while (count) {
                adv_intr(sc);
                if (xs->xs_status & XS_STS_DONE)
                        return (0);
                delay(1000);    /* only happens in boot so ok */
                count--;
        }
        return (1);
}

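/*
 * A command failed to complete within its timeout: try to abort it, and if
 * an earlier abort has already timed out, reset the SCSI bus instead.
 */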
static void
adv_timeout(arg)
        void *arg;
{
        ADV_CCB *ccb = arg;
        struct scsipi_xfer *xs = ccb->xs;
        struct scsipi_link *sc_link = xs->sc_link;
        ASC_SOFTC *sc = sc_link->adapter_softc;
        int s;

        scsi_print_addr(sc_link);
        printf("timed out");

        s = splbio();

        /*
         * If it has been through before, then a previous abort has failed,
         * don't try abort again, reset the bus instead.
         */
        if (ccb->flags & CCB_ABORT) {
                /* abort timed out */
                printf(" AGAIN. Resetting Bus\n");
                /* Lets try resetting the bus! */
                if (AscResetBus(sc) == ASC_ERROR) {
                        ccb->timeout = sc->scsi_reset_wait;
                        adv_queue_ccb(sc, ccb);
                }
        } else {
                /* abort the operation that has timed out */
                printf("\n");
                AscAbortCCB(sc, ccb);
                ccb->xs->error = XS_TIMEOUT;
                ccb->timeout = ADV_ABORT_TIMEOUT;
                ccb->flags |= CCB_ABORT;
                adv_queue_ccb(sc, ccb);
        }

        splx(s);
}

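/*
 * Watchdog fired after the board reported ASC_BUSY: clear the flag and try
 * again to hand the waiting CCBs to the controller.
 */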
static void
adv_watchdog(arg)
        void *arg;
{
        ADV_CCB *ccb = arg;
        struct scsipi_xfer *xs = ccb->xs;
        struct scsipi_link *sc_link = xs->sc_link;
        ASC_SOFTC *sc = sc_link->adapter_softc;
        int s;

        s = splbio();

        ccb->flags &= ~CCB_WATCHDOG;
        adv_start_ccbs(sc);

        splx(s);
}


/******************************************************************************/
/*                     NARROW boards Interrupt callbacks                      */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
        ASC_SOFTC *sc;
        ASC_QDONE_INFO *qdonep;
{
        bus_dma_tag_t dmat = sc->sc_dmat;
        ADV_CCB *ccb;
        struct scsipi_xfer *xs;
        struct scsipi_sense_data *s1, *s2;


        ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
        xs = ccb->xs;

#ifdef ASC_DEBUG
        printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
            (unsigned long)ccb,
            xs->sc_link->scsipi_scsi.target,
            xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
#endif
        untimeout(adv_timeout, ccb);

        /*
         * If we were a data transfer, unload the map that described
         * the data buffer.
         */
        if (xs->datalen) {
                bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
                    ccb->dmamap_xfer->dm_mapsize,
                    (xs->xs_control & XS_CTL_DATA_IN) ?
                    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(dmat, ccb->dmamap_xfer);
        }
        if ((ccb->flags & CCB_ALLOC) == 0) {
                printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
                Debugger();
                return;
        }
        /*
         * 'qdonep' contains the command's ending status.
         */
#ifdef ASC_DEBUG
        printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
        switch (qdonep->d3.done_stat) {
        case ASC_QD_NO_ERROR:
                switch (qdonep->d3.host_stat) {
                case ASC_QHSTA_NO_ERROR:
                        xs->error = XS_NOERROR;
                        xs->resid = 0;
                        break;

                default:
                        /* QHSTA error occurred */
                        xs->error = XS_DRIVER_STUFFUP;
                        break;
                }

                /*
                 * If an INQUIRY command completed successfully, then call
                 * the AscInquiryHandling() function to patch bugged boards.
                 */
                if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
                    (xs->sc_link->scsipi_scsi.lun == 0) &&
                    (xs->datalen - qdonep->remain_bytes) >= 8) {
                        AscInquiryHandling(sc,
                            xs->sc_link->scsipi_scsi.target & 0x7,
                            (ASC_SCSI_INQUIRY *) xs->data);
                }
                break;

        case ASC_QD_WITH_ERROR:
                switch (qdonep->d3.host_stat) {
                case ASC_QHSTA_NO_ERROR:
                        if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
                                s1 = &ccb->scsi_sense;
                                s2 = &xs->sense.scsi_sense;
                                *s2 = *s1;
                                xs->error = XS_SENSE;
                        } else {
                                xs->error = XS_DRIVER_STUFFUP;
                        }
                        break;

                default:
                        /* QHSTA error occurred */
                        xs->error = XS_DRIVER_STUFFUP;
                        break;
                }
                break;

        case ASC_QD_ABORTED_BY_HOST:
        default:
                xs->error = XS_DRIVER_STUFFUP;
                break;
        }


        adv_free_ccb(sc, ccb);
        xs->xs_status |= XS_STS_DONE;
        scsipi_done(xs);
}