/*	$NetBSD: adv.c,v 1.26 2001/06/09 18:08:20 briggs Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_control_data __P((ASC_SOFTC *));
static void adv_free_control_data __P((ASC_SOFTC *));
static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_reset_ccb __P((ADV_CCB *));
static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *));
static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_start_ccbs __P((ASC_SOFTC *));


static void adv_scsipi_request __P((struct scsipi_channel *,
	scsipi_adapter_req_t, void *));
static void advminphys __P((struct buf *));
static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));

static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static void adv_timeout __P((void *));
static void adv_watchdog __P((void *));


/******************************************************************************/

#define	ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define	ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */

/******************************************************************************/
/*                          Control Blocks routines                           */
/******************************************************************************/

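/*
 * Allocate DMA-safe memory for the shared control structure (the CCBs and
 * the overrun buffer), map it into kernel virtual space, then create and
 * load a DMA map for it so both the driver and the board can address it.
 */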
static int
adv_alloc_control_data(sc)
	ASC_SOFTC *sc;
{
	int error;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    PAGE_SIZE, 0, &sc->sc_control_seg, 1,
	    &sc->sc_control_nsegs, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_control_seg,
	    sc->sc_control_nsegs, sizeof(struct adv_control),
	    (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Initialize the overrun_buf address.
	 */
	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    offsetof(struct adv_control, overrun_buf);

	return (0);
}

static void
adv_free_control_data(sc)
	ASC_SOFTC *sc;
{

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_control);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_control);
	sc->sc_dmamap_control = NULL;

	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct adv_control));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_control_seg,
	    sc->sc_control_nsegs);
}

/*
 * Create a set of CCBs and add them to the free list.  Called once from
 * adv_attach().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC *sc;
	ADV_CCB *ccbstore;
	int count;
{
	ADV_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int s;

	s = splbio();
	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB *ccb;
{

	ccb->flags = 0;
}

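/*
 * One-time initialization of a CCB: create its data-transfer DMA map and
 * enter the CCB in the physical-to-virtual hash table, so the interrupt
 * callback can recover the CCB from the bus address the firmware returns.
 */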
static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int hashnum, error;

	callout_init(&ccb->ccb_watchdog);

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * Insert the CCB into the phystokv hash table.
	 * Entries are never removed.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}

/*
 * Get a free ccb from the free list, or NULL if none are available.
 */
static ADV_CCB *
adv_get_ccb(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb = 0;
	int s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
	if (ccb != NULL) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
		ccb->flags |= CCB_ALLOC;
	}
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
ADV_CCB *
adv_ccb_phys_kv(sc, ccb_phys)
	ASC_SOFTC *sc;
	u_long ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}

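/*
 * Hand waiting CCBs to the chip.  If the chip reports ASC_BUSY the CCB
 * stays at the head of the queue and a watchdog is armed to retry later;
 * otherwise the CCB is removed and, unless this is a polled command, the
 * per-transfer timeout callout is started.
 */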
static void
adv_start_ccbs(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			callout_stop(&ccb->ccb_watchdog);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			callout_reset(&ccb->ccb_watchdog,
			    (ADV_WATCH_TIMEOUT * hz) / 1000,
			    adv_watchdog, ccb);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    ((u_int64_t)ccb->timeout * (u_int64_t)hz) / 1000,
			    adv_timeout, ccb);
	}
}


/******************************************************************************/
/*                       SCSI layer interfacing routines                      */
/******************************************************************************/

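/*
 * Find and initialize the board: check for the chip signature, read the
 * configuration from the EEPROM, then write back the (possibly adjusted)
 * configuration.  Warnings from the Asc library are reported but are not
 * fatal.  Returns 0 on success, 1 if the chip signature is not found.
 */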
int
adv_init(sc)
	ASC_SOFTC *sc;
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		printf("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			    " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

	return (0);
}

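/*
 * Attach the adapter: initialize the RISC chip and load its microcode,
 * fill in the scsipi adapter and channel structures, allocate and
 * initialize the CCBs, then attach the SCSI bus.
 */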
void
adv_attach(sc)
	ASC_SOFTC *sc;
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* All OK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	/* adapt_max_periph initialized below */
	adapt->adapt_request = adv_scsipi_request;
	adapt->adapt_minphys = advminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = 8;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);

	/*
	 * Allocate the Control Blocks and the overrun buffer.
	 */
	error = adv_alloc_control_data(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}

	adapt->adapt_openings = i;
	adapt->adapt_max_periph = adapt->adapt_openings;

	sc->sc_child = config_found(&sc->sc_dev, chan, scsiprint);
}

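/*
 * Detach the adapter: detach any child busses and release the control
 * structure DMA resources.
 */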
int
adv_detach(sc, flags)
	ASC_SOFTC *sc;
	int flags;
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	adv_free_control_data(sc);

	return (rv);
}

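/*
 * Clamp the transfer size to what one CCB's DMA map can describe:
 * (ASC_MAX_SG_LIST - 1) pages, so that a misaligned buffer still fits
 * into ASC_MAX_SG_LIST scatter/gather segments.
 */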
static void
advminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}

/*
 * Start a SCSI operation given the command and the data address.  Also
 * needs the unit, target and lun.
 */

static void
adv_scsipi_request(chan, req, arg)
	struct scsipi_channel *chan;
	scsipi_adapter_req_t req;
	void *arg;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	ASC_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		/*
		 * Get a CCB to use.
		 */
		ccb = adv_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("adv_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Build up the request
		 */
		memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

		ccb->scsiq.q2.ccb_ptr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb);

		ccb->scsiq.cdbptr = &xs->cmd->opcode;
		ccb->scsiq.q2.cdb_len = xs->cmdlen;
		ccb->scsiq.q1.target_id =
		    ASC_TID_TO_TARGET_ID(periph->periph_target);
		ccb->scsiq.q1.target_lun = periph->periph_lun;
		ccb->scsiq.q2.target_ix =
		    ASC_TIDLUN_TO_IX(periph->periph_target,
		    periph->periph_lun);
		ccb->scsiq.q1.sense_addr =
		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
		    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
		ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

		/*
		 * If there are any outstanding requests for the current
		 * target, then send an ORDERED request every 255th request.
		 * This heuristic tries to retain the benefit of request
		 * sorting while preventing request starvation.  255 is the
		 * max number of tags or pending commands a device may have
		 * outstanding.
		 */
		sc->reqcnt[periph->periph_target]++;
		if (((sc->reqcnt[periph->periph_target] > 0) &&
		    (sc->reqcnt[periph->periph_target] % 255) == 0) ||
		    xs->bp == NULL || (xs->bp->b_flags & B_ASYNC) == 0) {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
		} else {
			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & SCSI_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *) xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING);
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat, ccb->dmamap_xfer,
				    xs->data, xs->datalen, NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING);
			}

			switch (error) {
			case 0:
				break;


			case ENOMEM:
			case EAGAIN:
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				if (error == EFBIG) {
					printf("%s: adv_scsi_cmd, more than %d"
					    " dma segments\n",
					    sc->sc_dev.dv_xname,
					    ASC_MAX_SG_LIST);
				} else {
					printf("%s: adv_scsi_cmd, error %d"
					    " loading dma map\n",
					    sc->sc_dev.dv_xname, error);
				}

out_bad:
				adv_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

			memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

			for (nsegs = 0;
			     nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
				ccb->sghead.sg_list[nsegs].addr =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
				ccb->sghead.sg_list[nsegs].bytes =
				    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
			}

			ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			    ccb->dmamap_xfer->dm_nsegs;

			ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
			ccb->scsiq.sg_head = &ccb->sghead;
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ccb->scsiq.q1.data_addr = 0;
			ccb->scsiq.q1.data_cnt = 0;
		}

#ifdef ASC_DEBUG
		printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lx\n",
		    periph->periph_target,
		    periph->periph_lun, xs->cmd->opcode,
		    (unsigned long)ccb);
#endif
		s = splbio();
		adv_queue_ccb(sc, ccb);
		splx(s);

		if ((flags & XS_CTL_POLL) == 0)
			return;

		/* Not allowed to use interrupts, poll for completion. */
		if (adv_poll(sc, xs, ccb->timeout)) {
			adv_timeout(ccb);
			if (adv_poll(sc, xs, ccb->timeout))
				adv_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		/*
		 * We can't really set the mode, but we know how to
		 * query what the firmware negotiated.
		 */
		struct scsipi_xfer_mode *xm = arg;
		u_int8_t sdtr_data;
		ASC_SCSI_BIT_ID_TYPE tid_bit;

		tid_bit = ASC_TIX_TO_TARGET_ID(xm->xm_target);

		xm->xm_mode = 0;
		xm->xm_period = 0;
		xm->xm_offset = 0;

		if (sc->init_sdtr & tid_bit) {
			xm->xm_mode |= PERIPH_CAP_SYNC;
			sdtr_data = sc->sdtr_data[xm->xm_target];
			xm->xm_period =
			    sc->sdtr_period_tbl[(sdtr_data >> 4) &
			    (sc->max_sdtr_index - 1)];
			xm->xm_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
		}

		if (sc->use_tagged_qng & tid_bit)
			xm->xm_mode |= PERIPH_CAP_TQING;

		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	    }
	}
}

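/*
 * Hardware interrupt handler.  The Asc library does the actual work and
 * invokes the isr_callback set up in adv_init() for each completed request.
 */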
int
adv_intr(arg)
	void *arg;
{
	ASC_SOFTC *sc = arg;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}

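/*
 * Command timeout handler.  On the first timeout the offending CCB is
 * aborted and requeued with a short timeout; if the abort itself times
 * out (CCB_ABORT is already set) the SCSI bus is reset instead.
 */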
static void
adv_timeout(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	/*
	 * If this CCB has been here before, a previous abort has failed;
	 * don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}

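/*
 * Watchdog armed by adv_start_ccbs() when the chip reported ASC_BUSY;
 * clear the flag and try again to hand the waiting CCBs to the chip.
 */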
static void
adv_watchdog(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ASC_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                      NARROW boards Interrupt callbacks                     */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->xs_periph->periph_target,
	    xs->xs_periph->periph_lun, xs->cmd->opcode);
#endif
	callout_stop(&ccb->xs->xs_callout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->xs_periph->periph_lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->xs_periph->periph_target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		case ASC_QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	scsipi_done(xs);
}