/*	$NetBSD: adv.c,v 1.5 1998/10/10 00:28:32 thorpej Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante (at) mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/adv.h>
#include <dev/ic/advlib.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */

/******************************************************************************/


static void adv_enqueue __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static struct scsipi_xfer *adv_dequeue __P((ASC_SOFTC *));

static int adv_alloc_ccbs __P((ASC_SOFTC *));
static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_reset_ccb __P((ADV_CCB *));
static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
static void adv_start_ccbs __P((ASC_SOFTC *));

static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));

static int adv_scsi_cmd __P((struct scsipi_xfer *));
static void advminphys __P((struct buf *));
static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));

static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
static void adv_timeout __P((void *));
static void adv_watchdog __P((void *));


/******************************************************************************/


struct scsipi_adapter adv_switch =
{
	adv_scsi_cmd,		/* called to start/enqueue a SCSI command */
	advminphys,		/* to limit the transfer to max device can do */
	NULL,			/* scsipi_ioctl */
};


/* the below structure is so we have a default dev struct for our link struct */
struct scsipi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                            scsipi_xfer queue routines                      */
/******************************************************************************/


/*
 * Insert a scsipi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adv_enqueue(sc, xs, infront)
	ASC_SOFTC      *sc;
	struct scsipi_xfer *xs;
	int             infront;
{

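	/*
	 * sc_queuelast tracks the tail of the list so we can append
	 * without having to walk it.
	 */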
	if (infront || sc->sc_queue.lh_first == NULL) {
		if (sc->sc_queue.lh_first == NULL)
			sc->sc_queuelast = xs;
		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
	sc->sc_queuelast = xs;
}


/*
 * Pull a scsipi_xfer off the front of the software queue.
 */
static struct scsipi_xfer *
adv_dequeue(sc)
	ASC_SOFTC      *sc;
{
	struct scsipi_xfer *xs;

	xs = sc->sc_queue.lh_first;
	LIST_REMOVE(xs, free_list);

	if (sc->sc_queue.lh_first == NULL)
		sc->sc_queuelast = NULL;

	return (xs);
}


/******************************************************************************/
/*                             Control Blocks routines                        */
/******************************************************************************/


static int
adv_alloc_ccbs(sc)
	ASC_SOFTC      *sc;
{
	bus_dma_segment_t seg;
	int             error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		       " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
		   sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
				       &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
			   sc->sc_control, sizeof(struct adv_control), NULL,
				     BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_init().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccbstore;
	int             count;
{
	ADV_CCB        *ccb;
	int             i, error;

	bzero(ccbstore, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			       sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int             s;

	s = splbio();

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB        *ccb;
{

	ccb->flags = 0;
}


static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{
	int             error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		       sc->sc_dev.dv_xname, error);
		return (error);
	}
	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb.
 *
 * If there are none, wait for one to come free, unless the caller
 * cannot sleep.
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC      *sc;
	int             flags;
{
	ADV_CCB        *ccb = 0;
	int             s;

	s = splbio();

	/*
	 * If none are free, sleep waiting for one to come free,
	 * but only if the caller is allowed to sleep.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC      *sc;
	ADV_CCB        *ccb;
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(sc)
	ASC_SOFTC      *sc;
{
	ADV_CCB        *ccb;

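	/*
	 * Hand waiting CCBs to the chip in order.  If the microcode cannot
	 * accept a request right now (ASC_BUSY), leave the CCB at the head
	 * of the waiting list and let the watchdog retry it shortly.
	 */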
	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
		if (ccb->flags & CCB_WATCHDOG)
			untimeout(adv_watchdog, ccb);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout(adv_watchdog, ccb,
				(ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0)
			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
	}
}


/******************************************************************************/
/*                      DMA-able memory allocation routines                   */
/******************************************************************************/


/*
 * Allocate DMA-able memory for the overrun buffer.
 * This memory can be safely shared among all the AdvanSys boards.
 */
u_int8_t       *
adv_alloc_overrunbuf(dvname, dmat)
	char           *dvname;
	bus_dma_tag_t   dmat;
{
	static u_int8_t *overrunbuf = NULL;

	bus_dmamap_t    ovrbuf_dmamap;
	bus_dma_segment_t seg;
	int             rseg, error;


	/*
	 * If an overrun buffer has already been allocated, don't allocate
	 * another one; instead return the address of the existing buffer.
	 */
	if (overrunbuf)
		return (overrunbuf);


	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate overrun buffer, error = %d\n",
		       dvname, error);
		return (0);
	}
	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
	(caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map overrun buffer, error = %d\n",
		       dvname, error);

		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
	      ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
		printf("%s: unable to create overrun buffer DMA map,"
		       " error = %d\n", dvname, error);

		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
			   ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load overrun buffer DMA map,"
		       " error = %d\n", dvname, error);

		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	return (overrunbuf);
}


/******************************************************************************/
/*                         SCSI layer interfacing routines                    */
/******************************************************************************/


int
adv_init(sc)
	ASC_SOFTC      *sc;
{
	int             warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
		panic("adv_init: AscFindSignature failed");

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			       " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
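	/*
	 * Register the per-request completion callback that AscISR()
	 * invokes when the microcode reports a command as done.
	 */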
	sc->isr_callback = (ulong) adv_narrow_isr_callback;

	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
						     sc->sc_dmat))) {
		return (1);
	}

	return (0);
}


void
adv_attach(sc)
	ASC_SOFTC      *sc;
{
	int             i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* AllOK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		      sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		      sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		      sc->sc_dev.dv_xname);
	}


	/*
	 * fill in the prototype scsipi_link.
	 */
	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &adv_switch;
	sc->sc_link.device = &adv_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.scsipi_scsi.max_target = 7;
	sc->sc_link.type = BUS_SCSI;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adv_alloc_ccbs(sc);
	if (error)
		return; /* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}
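	/*
	 * Attach the SCSI bus behind this adapter; devices on the bus are
	 * then found and attached by the autoconfiguration machinery.
	 */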
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


static void
advminphys(bp)
	struct buf     *bp;
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */
static int
adv_scsi_cmd(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb;
	int             s, flags, error, nsegs;
	int             fromqueue = 1, dontqueue = 0;


	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adv_intr(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adv_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adv_enqueue(sc, xs, 0);
			xs = adv_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * Get a CCB to use.  If the transfer is from a buf (possibly
	 * from interrupt time), we can't allow it to sleep.
	 */

	flags = xs->flags;
	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		adv_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

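	/*
	 * Stash the CCB pointer in the request itself, so that the
	 * interrupt callback can recover it from qdonep->d2.ccb_ptr.
	 */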
	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;

	ccb->scsiq.cdbptr = &xs->cmd->opcode;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
						   sc_link->scsipi_scsi.lun);
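	/*
	 * The sense buffer lives inside the CCB, which is part of the
	 * control-block DMA area, so its bus address can be computed
	 * from the control DMA map.
	 */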
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
		ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request.  This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation.  255 is the max number of tags or pending
	 * commands a device may have outstanding.
	 */
	sc->reqcnt[sc_link->scsipi_scsi.target]++;
	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
				  ccb->dmamap_xfer, (struct uio *) xs->data,
						    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif				/* TFS */
		{
			error = bus_dmamap_load(dmat,
			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
						(flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				       " segments\n",
				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				       " dma map\n",
				       sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adv_free_ccb(sc, ccb);
			return (COMPLETE);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
			      (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
				BUS_DMASYNC_PREWRITE);


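		/*
		 * Build the scatter/gather list for the chip from the
		 * segments that bus_dmamap_load() just filled in.
		 */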
		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
			ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & SCSI_POLL) == 0)
		return (SUCCESSFULLY_QUEUED);

	/*
	 * If we can't use interrupts, poll on completion
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
	return (COMPLETE);
}


int
adv_intr(arg)
	void           *arg;
{
	ASC_SOFTC      *sc = arg;
	struct scsipi_xfer *xs;

	AscISR(sc);

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adv_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adv_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC      *sc;
	struct scsipi_xfer *xs;
	int             count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	scsi_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through here before, a previous abort has already
	 * failed; don't try to abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, (u_int32_t) ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


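/*
 * Watchdog set by adv_start_ccbs() when the chip reported ASC_BUSY:
 * clear the flag and try again to start the waiting CCBs.
 */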
static void
adv_watchdog(arg)
	void           *arg;
{
	ADV_CCB        *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_link *sc_link = xs->sc_link;
	ASC_SOFTC      *sc = sc_link->adapter_softc;
	int             s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                  NARROW and WIDE boards Interrupt callbacks                */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC      *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t   dmat = sc->sc_dmat;
	ADV_CCB        *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_sense_data *s1, *s2;


	untimeout(adv_timeout, ccb);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
				ccb->dmamap_xfer->dm_mapsize,
			 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->scsipi_scsi.lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
				      xs->sc_link->scsipi_scsi.target & 0x7,
					   (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense.scsi_sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsipi_done(xs);
}
   1028