/*	$NetBSD: adv.c,v 1.12 1999/06/06 17:33:18 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/advlib.h>
#include <dev/ic/adv.h>

#ifndef DDB
#define Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static int adv_alloc_ccbs __P((ASC_SOFTC *));
static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_reset_ccb __P((ADV_CCB *));
static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_start_ccbs __P((ASC_SOFTC *));

static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));

static int adv_scsi_cmd __P((struct scsipi_xfer *));
static void advminphys __P((struct buf *));
static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));

static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static void adv_timeout __P((void *));
static void adv_watchdog __P((void *));


/******************************************************************************/

/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */
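/*
 * Note: both values are in milliseconds; the code below converts them to
 * clock ticks with (msec * hz) / 1000 before passing them to timeout().
 */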


/******************************************************************************/
/*                            Control Blocks routines                         */
/******************************************************************************/


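/*
 * All the control blocks live in a single struct adv_control.  The helper
 * below runs the usual bus_dma sequence for that area: bus_dmamem_alloc()
 * the memory, bus_dmamem_map() it into kernel virtual space, then
 * bus_dmamap_create() and bus_dmamap_load() a map covering the whole
 * structure.  Individual CCBs are later addressed inside this area via
 * ADV_CCB_OFF().
 */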
static int
adv_alloc_ccbs(sc)
	ASC_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adv_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_attach().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC *sc;
	ADV_CCB *ccbstore;
	int count;
{
	ADV_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int s;

	s = splbio();

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 */
	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);
	hashnum = CCB_HASH(ccb->hashkey);
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;

	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC *sc;
	int flags;
{
	ADV_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Given a physical address, find the ccb that it corresponds to.
 */
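/*
 * The key is the bus address stored in ccb->hashkey by adv_init_ccb();
 * CCB_HASH() selects the sc_ccbhash[] bucket and the nexthash pointers
 * chain the entries within a bucket.
 */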
ADV_CCB *
adv_ccb_phys_kv(sc, ccb_phys)
	ASC_SOFTC *sc;
	u_long ccb_phys;
{
	int hashnum = CCB_HASH(ccb_phys);
	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];

	while (ccb) {
		if (ccb->hashkey == ccb_phys)
			break;
		ccb = ccb->nexthash;
	}
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adv_watchdog, ccb);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adv_watchdog, ccb,
			    (ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                       DMA able memory allocation routines                  */
/******************************************************************************/


/*
 * Allocate DMA-able memory for the overrun buffer.
 * This memory can be safely shared among all the AdvanSys boards.
 */
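/*
 * Note that the buffer pointer is kept in a function-local static, so the
 * first successful allocation is reused by every board and is never freed;
 * the dmamap created here is only used to load the buffer and its handle is
 * not kept after this function returns.
 */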
u_int8_t *
adv_alloc_overrunbuf(dvname, dmat)
	char *dvname;
	bus_dma_tag_t dmat;
{
	static u_int8_t *overrunbuf = NULL;

	bus_dmamap_t ovrbuf_dmamap;
	bus_dma_segment_t seg;
	int rseg, error;


	/*
	 * If an overrun buffer has already been allocated, don't allocate it
	 * again.  Instead return the address of the existing buffer.
	 */
	if (overrunbuf)
		return (overrunbuf);


	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate overrun buffer, error = %d\n",
		    dvname, error);
		return (0);
	}
	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
	    (caddr_t *)&overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map overrun buffer, error = %d\n",
		    dvname, error);

		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
	    ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
		printf("%s: unable to create overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
	    ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	return (overrunbuf);
}


/******************************************************************************/
/*                        SCSI layer interfacing routines                     */
/******************************************************************************/


int
adv_init(sc)
	ASC_SOFTC *sc;
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
		printf("adv_init: failed to find signature\n");
		return (1);
	}

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			    " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;

	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
	    sc->sc_dmat))) {
		panic("adv_init: adv_alloc_overrunbuf failed");
	}

	return (0);
}


void
adv_attach(sc)
	ASC_SOFTC *sc;
{
	int i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* All OK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		    sc->sc_dev.dv_xname);
	}

	/*
	 * Fill in the adapter.
	 */
	sc->sc_adapter.scsipi_cmd = adv_scsi_cmd;
	sc->sc_adapter.scsipi_minphys = advminphys;

	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &sc->sc_adapter;
	sc->sc_link.device = &adv_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = 7;
	sc->sc_link.scsipi_scsi.max_lun = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adv_alloc_ccbs(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


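/*
 * Clamp transfers to what a single CCB's dmamap can describe: the map is
 * created in adv_init_ccb() with (ASC_MAX_SG_LIST - 1) * PAGE_SIZE bytes
 * spread over at most ASC_MAX_SG_LIST segments.
 */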
static void
advminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */
static int
adv_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;
	int fromqueue = 1, dontqueue = 0;


	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adv_intr(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == TAILQ_FIRST(&sc->sc_queue)) {
		TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
			xs = TAILQ_FIRST(&sc->sc_queue);
			TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use.  If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->flags;
	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		if (fromqueue)
			TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
		else
			TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	ccb->scsiq.q2.ccb_ptr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb);

	ccb->scsiq.cdbptr = &xs->cmd->opcode;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
	    sc_link->scsipi_scsi.lun);
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * If there are any outstanding requests for the current target, then
	 * send an ORDERED request every 255th request.  This heuristic tries
	 * to retain the benefit of request sorting while preventing request
	 * starvation.  255 is the maximum number of tags or pending commands
	 * a device may have outstanding.
	 */
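	/*
	 * E.g. with the per-target reqcnt[] counter below, requests 255,
	 * 510, 765, ... to a target are tagged ORDERED and all others SIMPLE.
	 */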
	sc->reqcnt[sc_link->scsipi_scsi.target]++;
	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif	/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adv_free_ccb(sc, ccb);
			return (COMPLETE);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);


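		/*
		 * Copy the dmamap segments into the ASC_SG_HEAD handed to
		 * the library; data_addr/data_cnt are left zero and
		 * ASC_QC_SG_HEAD is set so the S/G list is used instead.
		 */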
		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
		    ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

#ifdef ASC_DEBUG
	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
	    sc_link->scsipi_scsi.target,
	    sc_link->scsipi_scsi.lun, xs->cmd->opcode,
	    (unsigned long)ccb);
#endif
	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY_QUEUED
	 */
	if ((flags & SCSI_POLL) == 0)
		return (SUCCESSFULLY_QUEUED);

	/*
	 * If we can't use interrupts, poll on completion
	 */
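	/*
	 * If the first poll times out, adv_timeout() aborts the CCB and we
	 * poll once more; a second timeout makes adv_timeout() escalate to a
	 * bus reset (see the CCB_ABORT handling there).
	 */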
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
	return (COMPLETE);
}


int
adv_intr(arg)
	void *arg;
{
	ASC_SOFTC *sc = arg;
	struct scsipi_xfer *xs;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adv_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = TAILQ_FIRST(&sc->sc_queue)) != NULL)
		(void) adv_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC *sc;
	struct scsipi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through here before, then a previous abort has
	 * failed; don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


static void
adv_watchdog(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                       NARROW boards Interrupt callbacks                    */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsipi_sense_data *s1, *s2;


	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
	xs = ccb->xs;

#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->sc_link->scsipi_scsi.target,
	    xs->sc_link->scsipi_scsi.lun, xs->cmd->opcode);
#endif
	untimeout(adv_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to work around buggy
		 * boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->scsipi_scsi.lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->sc_link->scsipi_scsi.target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}