/*	$NetBSD: adv.c,v 1.14.2.1 1999/10/19 17:47:28 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adv.c)")
#endif				/* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_control_data __P((ASC_SOFTC *));
static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_reset_ccb __P((ADV_CCB *));
static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_start_ccbs __P((ASC_SOFTC *));


static void adv_scsipi_request __P((struct scsipi_channel *,
	scsipi_adapter_req_t, void *));
static void advminphys __P((struct buf *));
static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));

static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static void adv_timeout __P((void *));
static void adv_watchdog __P((void *));


/******************************************************************************/

#define ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */

/******************************************************************************/
/*                          Control Blocks routines                           */
/******************************************************************************/

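/*
 * Allocate, map and load the DMA-safe control area shared with the
 * chip: a single struct adv_control holding the CCB array and the
 * SCSI overrun buffer.  Returns 0 on success or an errno value.
 */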
static int
adv_alloc_control_data(sc)
	ASC_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Initialize the overrun_buf address.
	 */
	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    offsetof(struct adv_control, overrun_buf);

	return (0);
}
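/*
 * Note that sc->overrun_buf is a bus (DMA) address, not a kernel
 * pointer: it locates the overrun area inside the control structure
 * and is meant to be handed to the chip by the ASC library as a
 * landing zone for data overruns.
 */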


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_attach().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC *sc;
	ADV_CCB *ccbstore;
	int count;
{
	ADV_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * Put a CCB back onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int s;

	s = splbio();

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If the free list was empty before this CCB was returned,
	 * wake anybody waiting for one to come free.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Put the CCB into the phys-to-kv hash table so it can be found
	 * again from its bus address.  It never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}
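/*
 * The hash set up above is what lets the interrupt path map the chip's
 * view of a CCB back to a kernel pointer: a request is queued with the
 * CCB's bus address in scsiq.q2.ccb_ptr, and on completion
 * adv_narrow_isr_callback() gets that address back in qdonep->d2.ccb_ptr
 * and resolves it with adv_ccb_phys_kv().
 */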


/*
 * Get a free CCB.
 *
 * If none are available, sleep waiting for one (unless the caller
 * asked not to sleep, in which case NULL is returned).
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC *sc;
	int flags;
{
	ADV_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we are allowed to, sleep waiting for a CCB to come free;
	 * with XS_CTL_NOSLEEP we fail immediately instead.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & XS_CTL_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADV_CCB *
adv_ccb_phys_kv(sc, ccb_phys)
	ASC_SOFTC *sc;
	u_long ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


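/*
 * Feed waiting CCBs to the chip.  If AscExeScsiQueue() reports
 * ASC_BUSY, the CCB is left at the head of the waiting queue and
 * adv_watchdog() is scheduled to retry after ADV_WATCH_TIMEOUT ms.
 */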
static void
adv_start_ccbs(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adv_watchdog, ccb);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adv_watchdog, ccb,
			    (ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/


int
adv_init(sc)
	ASC_SOFTC *sc;
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		printf("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			    " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

	return (0);
}
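/*
 * Note: adv_init() only locates the chip and reads/adjusts the board
 * configuration in the softc.  The microcode is not loaded and
 * interrupts are not enabled until adv_attach() calls AscInitDriver()
 * below.
 */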


void
adv_attach(sc)
	ASC_SOFTC *sc;
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* All OK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	/* adapt_max_periph initialized below */
	adapt->adapt_request = adv_scsipi_request;
	adapt->adapt_minphys = advminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = 8;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);

	/*
	 * Allocate the Control Blocks and the overrun buffer.
	 */
	error = adv_alloc_control_data(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}

	adapt->adapt_openings = i;
	adapt->adapt_max_periph = adapt->adapt_openings;

	config_found(&sc->sc_dev, chan, scsiprint);
}
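/*
 * Rough sketch of how a bus front-end (PCI, EISA, ... glue) is expected
 * to use the routines above -- illustrative only; the register-mapping
 * and interrupt-establishment calls belong to the bus attachment code,
 * not to this file:
 *
 *	sc->sc_iot  = <bus space tag from the bus attachment>;
 *	sc->sc_ioh  = <bus space handle mapping the chip's registers>;
 *	sc->sc_dmat = <bus DMA tag from the bus attachment>;
 *	if (adv_init(sc) != 0)
 *		return;				<probe/config failed>
 *	<establish the interrupt with adv_intr() as handler, sc as arg>;
 *	adv_attach(sc);
 */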
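/*
 * Clamp a transfer to the largest size the per-CCB scatter/gather list
 * can map, then apply the generic minphys() limit.
 */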
static void
advminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * Start a SCSI operation given the command and the data address; also
 * needs the unit, target and LUN.  This is the scsipi request entry
 * point for the adapter.
 */
static void
adv_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	ASC_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		/*
		 * Get a CCB to use.
		 */
		ccb = adv_get_ccb(sc, flags);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("adv_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Build up the request
		 */
		memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

		ccb->scsiq.q2.ccb_ptr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb);

		ccb->scsiq.cdbptr = &xs->cmd->opcode;
		ccb->scsiq.q2.cdb_len = xs->cmdlen;
		ccb->scsiq.q1.target_id =
		    ASC_TID_TO_TARGET_ID(periph->periph_target);
		ccb->scsiq.q1.target_lun = periph->periph_lun;
		ccb->scsiq.q2.target_ix =
		    ASC_TIDLUN_TO_IX(periph->periph_target,
		    periph->periph_lun);
		ccb->scsiq.q1.sense_addr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
		ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

		/*
		 * If there are any outstanding requests for the current
		 * target, then every 255th request send an ORDERED request.
		 * This heuristic tries to retain the benefit of request
		 * sorting while preventing request starvation. 255 is the
		 * max number of tags or pending commands a device may have
		 * outstanding.
		 */
		sc->reqcnt[periph->periph_target]++;
		if ((sc->reqcnt[periph->periph_target] > 0) &&
		    (sc->reqcnt[periph->periph_target] % 255) == 0) {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
		} else {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & SCSI_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *) xs->data,
				    (flags & XS_CTL_NOSLEEP) ?
				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat, ccb->dmamap_xfer,
				    xs->data, xs->datalen, NULL,
				    (flags & XS_CTL_NOSLEEP) ?
				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
			}

			if (error) {
				if (error == EFBIG) {
					printf("%s: adv_scsipi_request, "
					    "more than %d dma segments\n",
					    sc->sc_dev.dv_xname,
					    ASC_MAX_SG_LIST);
				} else {
					printf("%s: adv_scsipi_request, "
					    "error %d loading dma map\n",
					    sc->sc_dev.dv_xname, error);
				}

				adv_free_ccb(sc, ccb);
				xs->error = XS_DRIVER_STUFFUP;
				scsipi_done(xs);
				return;
			}
			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

			memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

			for (nsegs = 0;
			     nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
				ccb->sghead.sg_list[nsegs].addr =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
				ccb->sghead.sg_list[nsegs].bytes =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
			}

			ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			    ccb->dmamap_xfer->dm_nsegs;

			ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
			ccb->scsiq.sg_head = &ccb->sghead;
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		}

#ifdef ASC_DEBUG
		printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
		    periph->periph_target,
		    periph->periph_lun, xs->cmd->opcode,
		    (unsigned long)ccb);
#endif
		s = splbio();
		adv_queue_ccb(sc, ccb);
		splx(s);

		if ((flags & XS_CTL_POLL) == 0)
			return;

		/* Not allowed to use interrupts, poll for completion. */
		if (adv_poll(sc, xs, ccb->timeout)) {
			adv_timeout(ccb);
			if (adv_poll(sc, xs, ccb->timeout))
				adv_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;

	case ADAPTER_REQ_GET_XFER_MODE:
	    {
		u_int8_t sdtr_data;
		ASC_SCSI_BIT_ID_TYPE tid_bit;

		periph = arg;
		tid_bit = ASC_TIX_TO_TARGET_ID(periph->periph_target);

		periph->periph_mode = 0;
		periph->periph_period = 0;
		periph->periph_offset = 0;

		if (sc->init_sdtr & tid_bit) {
			periph->periph_mode |= PERIPH_CAP_SYNC;
			sdtr_data = sc->sdtr_data[periph->periph_target];
			periph->periph_period =
			    sc->sdtr_period_tbl[(sdtr_data >> 4) &
			    (sc->max_sdtr_index - 1)];
			periph->periph_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
		}

		if (sc->use_tagged_qng & tid_bit)
			periph->periph_mode |= PERIPH_CAP_TQING;

		periph->periph_flags |= PERIPH_MODE_VALID;
		return;
	    }
	}
}

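/*
 * Interrupt handler.  The real work is done by the ASC library:
 * AscISR() services the chip and calls sc->isr_callback (set to
 * adv_narrow_isr_callback() in adv_init()) for each completed request.
 */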
int
adv_intr(arg)
	void *arg;
{
	ASC_SOFTC *sc = arg;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


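/*
 * A request (or a previously issued abort of it) has timed out.  On the
 * first pass try to abort the CCB; if the abort itself times out, fall
 * back to resetting the SCSI bus.
 */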
static void
adv_timeout(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	/*
	 * If this CCB has been through here before, the previous abort
	 * has failed; don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


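/*
 * Watchdog for CCBs the chip refused with ASC_BUSY: clear the watchdog
 * flag and try to restart the waiting queue.
 */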
static void
adv_watchdog(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                     NARROW boards Interrupt callbacks                      */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->xs_periph->periph_target,
	    xs->xs_periph->periph_lun, xs->cmd->opcode);
#endif
	untimeout(adv_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, call
		 * AscInquiryHandling() so the library can work around
		 * known-buggy devices.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->xs_periph->periph_lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->xs_periph->periph_target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->xs_status |= XS_STS_DONE;
	scsipi_done(xs);
}