adv.c revision 1.2 1 /* $NetBSD: adv.c,v 1.2 1998/08/29 13:45:56 dante Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/types.h>
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/errno.h>
45 #include <sys/ioctl.h>
46 #include <sys/device.h>
47 #include <sys/malloc.h>
48 #include <sys/buf.h>
49 #include <sys/proc.h>
50 #include <sys/user.h>
51
52 #include <machine/bus.h>
53 #include <machine/intr.h>
54
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62
63 #include <dev/ic/adv.h>
64 #include <dev/ic/advlib.h>
65
66 /******************************************************************************/
67
68
69 static void adv_enqueue __P((ASC_SOFTC *, struct scsipi_xfer *, int));
70 static struct scsipi_xfer *adv_dequeue __P((ASC_SOFTC *));
71
72 static int adv_alloc_ccbs __P((ASC_SOFTC *));
73 static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
74 static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
75 static void adv_reset_ccb __P((ADV_CCB *));
76 static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
77 static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
78 static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
79 static void adv_start_ccbs __P((ASC_SOFTC *));
80
81 static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));
82
83 static int adv_scsi_cmd __P((struct scsipi_xfer *));
84 static void advminphys __P((struct buf *));
85 static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));
86
87 static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
88 static void adv_timeout __P((void *));
89 static void adv_watchdog __P((void *));
90
91
92 /******************************************************************************/
93
94
/*
 * Adapter entry points handed to the scsipi midlayer: only the command
 * submission and minphys hooks are implemented; the remaining two slots
 * are unused by this driver (see comments below).
 */
struct scsipi_adapter adv_switch =
{
	adv_scsi_cmd,		/* called to start/enqueue a SCSI command */
	advminphys,		/* to limit the transfer to max device can do */
	0,			/* IT SEEMS IT IS NOT USED YET */
	0,			/* as above... */
};
102
103
/* the below structure is so we have a default dev struct for out link struct */
/*
 * Prototype scsipi_device: all four hooks are NULL, so the midlayer's
 * default error/done handling is used for every target on this adapter.
 */
struct scsipi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};
112
113
114 #define ADV_ABORT_TIMEOUT 2000 /* time to wait for abort (mSec) */
115 #define ADV_WATCH_TIMEOUT 1000 /* time to wait for watchdog (mSec) */
116
117
118 /******************************************************************************/
119 /* scsipi_xfer queue routines */
120 /******************************************************************************/
121
122
123 /*
124 * Insert a scsipi_xfer into the software queue. We overload xs->free_list
125 * to avoid having to allocate additional resources (since we're used
126 * only during resource shortages anyhow.
127 */
128 static void
129 adv_enqueue(sc, xs, infront)
130 ASC_SOFTC *sc;
131 struct scsipi_xfer *xs;
132 int infront;
133 {
134
135 if (infront || sc->sc_queue.lh_first == NULL) {
136 if (sc->sc_queue.lh_first == NULL)
137 sc->sc_queuelast = xs;
138 LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
139 return;
140 }
141 LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
142 sc->sc_queuelast = xs;
143 }
144
145
146 /*
147 * Pull a scsipi_xfer off the front of the software queue.
148 */
149 static struct scsipi_xfer *
150 adv_dequeue(sc)
151 ASC_SOFTC *sc;
152 {
153 struct scsipi_xfer *xs;
154
155 xs = sc->sc_queue.lh_first;
156 LIST_REMOVE(xs, free_list);
157
158 if (sc->sc_queue.lh_first == NULL)
159 sc->sc_queuelast = NULL;
160
161 return (xs);
162 }
163
164
165 /******************************************************************************/
166 /* Control Blocks routines */
167 /******************************************************************************/
168
169
/*
 * Allocate and map the DMA-safe memory holding all control blocks
 * (struct adv_control), then create and load the DMA map describing it.
 * The four bus_dma steps must happen in exactly this order:
 * alloc -> map -> map_create -> map_load.
 *
 * Returns 0 on success, or the bus_dma error code on failure.
 * NOTE(review): on a mid-sequence failure the earlier allocations are not
 * rolled back — presumably acceptable since attach is aborted; confirm.
 */
static int
adv_alloc_ccbs(sc)
	ASC_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	/* Map the segment into kernel VA; COHERENT so no explicit syncs are
	 * needed for the control structures themselves. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}
212
213
214 /*
215 * Create a set of ccbs and add them to the free list. Called once
216 * by adv_init(). We return the number of CCBs successfully created.
217 */
218 static int
219 adv_create_ccbs(sc, ccbstore, count)
220 ASC_SOFTC *sc;
221 ADV_CCB *ccbstore;
222 int count;
223 {
224 ADV_CCB *ccb;
225 int i, error;
226
227 bzero(ccbstore, sizeof(ADV_CCB) * count);
228 for (i = 0; i < count; i++) {
229 ccb = &ccbstore[i];
230 if ((error = adv_init_ccb(sc, ccb)) != 0) {
231 printf("%s: unable to initialize ccb, error = %d\n",
232 sc->sc_dev.dv_xname, error);
233 return (i);
234 }
235 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
236 }
237
238 return (i);
239 }
240
241
242 /*
243 * A ccb is put onto the free list.
244 */
245 static void
246 adv_free_ccb(sc, ccb)
247 ASC_SOFTC *sc;
248 ADV_CCB *ccb;
249 {
250 int s;
251
252 s = splbio();
253
254 adv_reset_ccb(ccb);
255 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
256
257 /*
258 * If there were none, wake anybody waiting for one to come free,
259 * starting with queued entries.
260 */
261 if (ccb->chain.tqe_next == 0)
262 wakeup(&sc->sc_free_ccb);
263
264 splx(s);
265 }
266
267
/*
 * Reset a CCB to its quiescent state by clearing all its flag bits
 * (CCB_ALLOC, CCB_ABORT, CCB_WATCHDOG, ...).
 */
static void
adv_reset_ccb(ccb)
	ADV_CCB *ccb;
{

	ccb->flags = 0;
}
275
276
277 static int
278 adv_init_ccb(sc, ccb)
279 ASC_SOFTC *sc;
280 ADV_CCB *ccb;
281 {
282 int error;
283
284 /*
285 * Create the DMA map for this CCB.
286 */
287 error = bus_dmamap_create(sc->sc_dmat,
288 (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
289 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
290 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
291 if (error) {
292 printf("%s: unable to create DMA map, error = %d\n",
293 sc->sc_dev.dv_xname, error);
294 return (error);
295 }
296 adv_reset_ccb(ccb);
297 return (0);
298 }
299
300
301 /*
302 * Get a free ccb
303 *
304 * If there are none, see if we can allocate a new one
305 */
306 static ADV_CCB *
307 adv_get_ccb(sc, flags)
308 ASC_SOFTC *sc;
309 int flags;
310 {
311 ADV_CCB *ccb = 0;
312 int s;
313
314 s = splbio();
315
316 /*
317 * If we can and have to, sleep waiting for one to come free
318 * but only if we can't allocate a new one.
319 */
320 for (;;) {
321 ccb = sc->sc_free_ccb.tqh_first;
322 if (ccb) {
323 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
324 break;
325 }
326 if ((flags & SCSI_NOSLEEP) != 0)
327 goto out;
328
329 tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
330 }
331
332 ccb->flags |= CCB_ALLOC;
333
334 out:
335 splx(s);
336 return (ccb);
337 }
338
339
340 /*
341 * Queue a CCB to be sent to the controller, and send it if possible.
342 */
343 static void
344 adv_queue_ccb(sc, ccb)
345 ASC_SOFTC *sc;
346 ADV_CCB *ccb;
347 {
348
349 TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
350
351 adv_start_ccbs(sc);
352 }
353
354
/*
 * Feed waiting CCBs to the controller in FIFO order.  If the chip
 * reports ASC_BUSY, leave the CCB at the head of the waiting queue and
 * arm a watchdog to retry later; otherwise remove it from the queue and
 * (for non-polled requests) arm the per-command timeout.
 */
static void
adv_start_ccbs(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		/* A pending watchdog retry is superseded by this attempt. */
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adv_watchdog, ccb);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			/* Chip can't take more work now: retry via
			 * adv_watchdog() and stop draining the queue. */
			ccb->flags |= CCB_WATCHDOG;
			timeout(adv_watchdog, ccb,
			    (ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		/* Polled commands are timed by adv_poll(), not timeout(). */
		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}
377
378
379 /******************************************************************************/
380 /* DMA able memory allocation routines */
381 /******************************************************************************/
382
383
384 /*
385 * Allocate a DMA able memory for overrun_buffer.
386 * This memory can be safely shared among all the AdvanSys boards.
387 */
388 u_int8_t *
389 adv_alloc_overrunbuf(dvname, dmat)
390 char *dvname;
391 bus_dma_tag_t dmat;
392 {
393 static u_int8_t *overrunbuf = NULL;
394
395 bus_dmamap_t ovrbuf_dmamap;
396 bus_dma_segment_t seg;
397 int rseg, error;
398
399
400 /*
401 * if an overrun buffer has been already allocated don't allocate it
402 * again. Instead return the address of the allocated buffer.
403 */
404 if (overrunbuf)
405 return (overrunbuf);
406
407
408 if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
409 NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
410 printf("%s: unable to allocate overrun buffer, error = %d\n",
411 dvname, error);
412 return (0);
413 }
414 if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
415 (caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
416 printf("%s: unable to map overrun buffer, error = %d\n",
417 dvname, error);
418
419 bus_dmamem_free(dmat, &seg, 1);
420 return (0);
421 }
422 if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
423 ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
424 printf("%s: unable to create overrun buffer DMA map,"
425 " error = %d\n", dvname, error);
426
427 bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
428 bus_dmamem_free(dmat, &seg, 1);
429 return (0);
430 }
431 if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
432 ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
433 printf("%s: unable to load overrun buffer DMA map,"
434 " error = %d\n", dvname, error);
435
436 bus_dmamap_destroy(dmat, ovrbuf_dmamap);
437 bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
438 bus_dmamem_free(dmat, &seg, 1);
439 return (0);
440 }
441 return (overrunbuf);
442 }
443
444
445 /******************************************************************************/
446 /* SCSI layer interfacing routines */
447 /******************************************************************************/
448
449
/*
 * First-stage initialization of a board: find the chip, read and then
 * write back the (possibly adjusted) configuration, and hook up the
 * narrow-board interrupt callback plus the shared overrun buffer.
 * Warnings from the AscLib init routines are decoded and printed but do
 * not abort initialization.  Returns 0 on success, 1 on failure (wide
 * boards are not supported yet).
 */
int
adv_init(sc)
	ASC_SOFTC *sc;
{
	int warn;

	if (ASC_IS_NARROW_BOARD(sc)) {
		if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
			panic("adv_init: adv_find_signature failed");

		/*
		 * Read the board configuration
		 */
		AscInitASC_SOFTC(sc);
		warn = AscInitFromEEP(sc);
		if (warn) {
			printf("%s -get: ", sc->sc_dev.dv_xname);
			switch (warn) {
			case -1:
				printf("Chip is not halted\n");
				break;

			case -2:
				printf("Couldn't get MicroCode Start"
				    " address\n");
				break;

			case ASC_WARN_IO_PORT_ROTATE:
				printf("I/O port address modified\n");
				break;

			case ASC_WARN_AUTO_CONFIG:
				printf("I/O port increment switch enabled\n");
				break;

			case ASC_WARN_EEPROM_CHKSUM:
				printf("EEPROM checksum error\n");
				break;

			case ASC_WARN_IRQ_MODIFIED:
				printf("IRQ modified\n");
				break;

			case ASC_WARN_CMD_QNG_CONFLICT:
				printf("tag queuing enabled w/o disconnects\n");
				break;

			default:
				printf("unknown warning %d\n", warn);
			}
		}
		/* Clamp the EEPROM-supplied reset wait to the sane maximum. */
		if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
			sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

		/*
		 * Modify the board configuration
		 */
		warn = AscInitFromASC_SOFTC(sc);
		if (warn) {
			printf("%s -set: ", sc->sc_dev.dv_xname);
			switch (warn) {
			case ASC_WARN_CMD_QNG_CONFLICT:
				printf("tag queuing enabled w/o disconnects\n");
				break;

			case ASC_WARN_AUTO_CONFIG:
				printf("I/O port increment switch enabled\n");
				break;

			default:
				printf("unknown warning %d\n", warn);
			}
		}
		/* AscISR() calls back through this (stored as an integer;
		 * cast pattern inherited from the AscLib interface). */
		sc->isr_callback = (ulong) adv_narrow_isr_callback;

		/* Overrun buffer is shared by all boards; see
		 * adv_alloc_overrunbuf(). */
		if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
		    sc->sc_dmat))) {
			return (1);
		}
	} else
		//IS_WIDE_BOARD
	{
		printf("%s: Wide boards are not supported yet\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}

	return (0);
}
539
540
/*
 * Second-stage attach: start the board's RISC firmware, fill in the
 * prototype scsipi_link, set up the CCB free/waiting queues, allocate
 * and create the control blocks, and attach the SCSI bus.  Firmware
 * load failures panic; CCB allocation failures just abort the attach.
 * NOTE(review): the error returns below do not free resources already
 * allocated — presumably acceptable at autoconf time; confirm.
 */
void
adv_attach(sc)
	ASC_SOFTC *sc;
{
	int i, error;

	if (ASC_IS_NARROW_BOARD(sc)) {
		/*
		 * Initialize board RISC chip and enable interrupts.
		 */
		switch (AscInitDriver(sc)) {
		case 0:
			/* AllOK */
			break;

		case 1:
			panic("%s: bad signature", sc->sc_dev.dv_xname);
			break;

		case 2:
			panic("%s: unable to load MicroCode",
			    sc->sc_dev.dv_xname);
			break;

		case 3:
			panic("%s: unable to initialize MicroCode",
			    sc->sc_dev.dv_xname);
			break;

		default:
			panic("%s: unable to initialize board RISC chip",
			    sc->sc_dev.dv_xname);
		}
	} else
		//Wide Boards
	{
		/* ToDo */
	}


	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &adv_switch;
	sc->sc_link.device = &adv_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = ASC_IS_NARROW_BOARD(sc) ? 7 : 15;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adv_alloc_ccbs(sc);
	if (error)
		return;	/* (error) */ ;

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;	/* (ENOMEM) */ ;
	} else if (i != ADV_MAX_CCB) {
		/* Partial success is tolerated: run with fewer CCBs. */
		printf("%s: WARNING: only %d of %d control blocks created\n",
		    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}
620
621
622 static void
623 advminphys(bp)
624 struct buf *bp;
625 {
626
627 if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
628 bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
629 minphys(bp);
630 }
631
632
/*
 * start a scsi operation given the command and the data address. Also needs
 * the unit, target and lu.
 *
 * Entry point called by the scsipi midlayer (and re-entered from
 * adv_intr() with the head of the software queue).  Returns
 * SUCCESSFULLY_QUEUED, COMPLETE, or TRY_AGAIN_LATER.
 */
static int
adv_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;
	/*
	 * NOTE(review): fromqueue is initialized to 1 here, where sibling
	 * NetBSD HBA drivers use 0.  It only matters if adv_get_ccb() fails
	 * below, and on that path the software queue is empty (the swap
	 * branch was not taken), so inserting "in front" of an empty queue
	 * is equivalent to appending — harmless, but worth confirming.
	 */
	int fromqueue = 1, dontqueue = 0;


	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adv_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adv_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adv_enqueue(sc, xs, 0);
			xs = adv_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->flags;
	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		adv_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	/* Stash the CCB pointer so the ISR callback can recover it from
	 * the chip's completion info (see adv_narrow_isr_callback()). */
	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;

	ccb->scsiq.cdbptr = &xs->cmd->opcode;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
	    sc_link->scsipi_scsi.lun);
	/* Sense data lands in the CCB itself, inside the DMA-mapped
	 * control-block area; compute its bus address. */
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request. This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation. 255 is the max number of tags or pending commands
	 * a device may have outstanding.
	 */
	sc->reqcnt[sc_link->scsipi_scsi.target]++;
	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif				/* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adv_free_ccb(sc, ccb);
			return (COMPLETE);
		}
		/* Sync for device access before handing the buffer over. */
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);


		/* Translate the dmamap segments into the chip's S/G list. */
		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
		    ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & SCSI_POLL) == 0)
		return (SUCCESSFULLY_QUEUED);

	/*
	 * If we can't use interrupts, poll on completion
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		/* First poll timed out: abort, then poll once more for the
		 * abort itself (adv_timeout escalates to a bus reset). */
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
	return (COMPLETE);
}
831
832
833 int
834 adv_intr(arg)
835 void *arg;
836 {
837 ASC_SOFTC *sc = arg;
838 struct scsipi_xfer *xs;
839
840 if (ASC_IS_NARROW_BOARD(sc)) {
841 AscISR(sc);
842 } else
843 //Wide Boards
844 {
845 /* ToDo AdvISR */
846 }
847
848 /*
849 * If there are queue entries in the software queue, try to
850 * run the first one. We should be more or less guaranteed
851 * to succeed, since we just freed a CCB.
852 *
853 * NOTE: adv_scsi_cmd() relies on our calling it with
854 * the first entry in the queue.
855 */
856 if ((xs = sc->sc_queue.lh_first) != NULL)
857 (void) adv_scsi_cmd(xs);
858
859 return (1);
860 }
861
862
863 /*
864 * Poll a particular unit, looking for a particular xs
865 */
866 static int
867 adv_poll(sc, xs, count)
868 ASC_SOFTC *sc;
869 struct scsipi_xfer *xs;
870 int count;
871 {
872
873 /* timeouts are in msec, so we loop in 1000 usec cycles */
874 while (count) {
875 adv_intr(sc);
876 if (xs->flags & ITSDONE)
877 return (0);
878 delay(1000); /* only happens in boot so ok */
879 count--;
880 }
881 return (1);
882 }
883
884
/*
 * Per-command timeout handler (also called directly from the polled
 * path in adv_scsi_cmd()).  First expiry: abort the command on the chip
 * and requeue the CCB with a short abort timeout.  Second expiry
 * (CCB_ABORT already set): the abort itself timed out, so reset the
 * SCSI bus; if even the reset fails, requeue the CCB once more.
 */
static void
adv_timeout(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	int s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Lets try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			/* Reset failed: give the command one more try with
			 * the post-reset settle time as its timeout. */
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, (u_int32_t) ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}
924
925
926 static void
927 adv_watchdog(arg)
928 void *arg;
929 {
930 ADV_CCB *ccb = arg;
931 struct scsipi_xfer *xs = ccb->xs;
932 struct scsipi_link *sc_link = xs->sc_link;
933 ASC_SOFTC *sc = sc_link->adapter_softc;
934 int s;
935
936 s = splbio();
937
938 ccb->flags &= ~CCB_WATCHDOG;
939 adv_start_ccbs(sc);
940
941 splx(s);
942 }
943
944
945 /******************************************************************************/
946 /* NARROW and WIDE boards Interrupt callbacks */
947 /******************************************************************************/
948
949
/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 * Recovers the CCB stashed in the completion info, tears down the data
 * DMA mapping, translates the chip's ending status into xs->error
 * (copying sense data on CHECK CONDITION), frees the CCB and completes
 * the xfer via scsipi_done().
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	/* ccb_ptr was loaded with the CCB address in adv_scsi_cmd(). */
	ADV_CCB *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_sense_data *s1, *s2;


	/* Command completed: disarm its adv_timeout(). */
	untimeout(adv_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	/* Sanity check: completing a CCB we never handed out is a driver
	 * bug; drop into the kernel debugger. */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->scsipi_scsi.lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->sc_link->scsipi_scsi.target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				/* Copy the chip-deposited sense bytes from
				 * the CCB into the xfer for the midlayer. */
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else
				xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}
1044