adv.c revision 1.3
      1 /*	$NetBSD: adv.c,v 1.3 1998/09/09 05:28:58 thorpej Exp $	*/
      2 
      3 /*
      4  * Generic driver for the Advanced Systems Inc. SCSI controllers
      5  *
      6  * Copyright (c) 1998 The NetBSD Foundation, Inc.
      7  * All rights reserved.
      8  *
      9  * Author: Baldassare Dante Profeta <dante (at) mclink.it>
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *    This product includes software developed by the NetBSD
     22  *    Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include <sys/types.h>
     41 #include <sys/param.h>
     42 #include <sys/systm.h>
     43 #include <sys/kernel.h>
     44 #include <sys/errno.h>
     45 #include <sys/ioctl.h>
     46 #include <sys/device.h>
     47 #include <sys/malloc.h>
     48 #include <sys/buf.h>
     49 #include <sys/proc.h>
     50 #include <sys/user.h>
     51 
     52 #include <machine/bus.h>
     53 #include <machine/intr.h>
     54 
     55 #include <vm/vm.h>
     56 #include <vm/vm_param.h>
     57 #include <vm/pmap.h>
     58 
     59 #include <dev/scsipi/scsi_all.h>
     60 #include <dev/scsipi/scsipi_all.h>
     61 #include <dev/scsipi/scsiconf.h>
     62 
     63 #include <dev/ic/adv.h>
     64 #include <dev/ic/advlib.h>
     65 
     66 #ifndef DDB
     67 #define	Debugger()	panic("should call debugger here (adv.c)")
     68 #endif /* ! DDB */
     69 
     70 /******************************************************************************/
     71 
     72 
     73 static void adv_enqueue __P((ASC_SOFTC *, struct scsipi_xfer *, int));
     74 static struct scsipi_xfer *adv_dequeue __P((ASC_SOFTC *));
     75 
     76 static int adv_alloc_ccbs __P((ASC_SOFTC *));
     77 static int adv_create_ccbs __P((ASC_SOFTC *, ADV_CCB *, int));
     78 static void adv_free_ccb __P((ASC_SOFTC *, ADV_CCB *));
     79 static void adv_reset_ccb __P((ADV_CCB *));
     80 static int adv_init_ccb __P((ASC_SOFTC *, ADV_CCB *));
     81 static ADV_CCB *adv_get_ccb __P((ASC_SOFTC *, int));
     82 static void adv_queue_ccb __P((ASC_SOFTC *, ADV_CCB *));
     83 static void adv_start_ccbs __P((ASC_SOFTC *));
     84 
     85 static u_int8_t *adv_alloc_overrunbuf __P((char *dvname, bus_dma_tag_t));
     86 
     87 static int adv_scsi_cmd __P((struct scsipi_xfer *));
     88 static void advminphys __P((struct buf *));
     89 static void adv_narrow_isr_callback __P((ASC_SOFTC *, ASC_QDONE_INFO *));
     90 
     91 static int adv_poll __P((ASC_SOFTC *, struct scsipi_xfer *, int));
     92 static void adv_timeout __P((void *));
     93 static void adv_watchdog __P((void *));
     94 
     95 
     96 /******************************************************************************/
     97 
     98 
     99 struct scsipi_adapter adv_switch =
    100 {
    101 	adv_scsi_cmd,		/* called to start/enqueue a SCSI command */
	advminphys,		/* limit a transfer to the max the adapter can do */
	0,			/* not used yet */
    104 	0,			/* as above... */
    105 };
    106 
    107 
/* The structure below gives us a default device struct for our link struct. */
    109 struct scsipi_device adv_dev =
    110 {
    111 	NULL,			/* Use default error handler */
    112 	NULL,			/* have a queue, served by this */
    113 	NULL,			/* have no async handler */
    114 	NULL,			/* Use default 'done' routine */
    115 };
    116 
    117 
    118 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
    119 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
    120 
    121 
    122 /******************************************************************************/
    123 /*                            scsipi_xfer queue routines                      */
    124 /******************************************************************************/
    125 
    126 
    127 /*
    128  * Insert a scsipi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since the queue is
 * used only during resource shortages anyhow).
    131  */
    132 static void
    133 adv_enqueue(sc, xs, infront)
    134 	ASC_SOFTC      *sc;
    135 	struct scsipi_xfer *xs;
    136 	int             infront;
    137 {
    138 
    139 	if (infront || sc->sc_queue.lh_first == NULL) {
    140 		if (sc->sc_queue.lh_first == NULL)
    141 			sc->sc_queuelast = xs;
    142 		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
    143 		return;
    144 	}
    145 	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
    146 	sc->sc_queuelast = xs;
    147 }
    148 
    149 
    150 /*
    151  * Pull a scsipi_xfer off the front of the software queue.
    152  */
    153 static struct scsipi_xfer *
    154 adv_dequeue(sc)
    155 	ASC_SOFTC      *sc;
    156 {
    157 	struct scsipi_xfer *xs;
    158 
    159 	xs = sc->sc_queue.lh_first;
    160 	LIST_REMOVE(xs, free_list);
    161 
    162 	if (sc->sc_queue.lh_first == NULL)
    163 		sc->sc_queuelast = NULL;
    164 
    165 	return (xs);
    166 }
    167 
    168 
    169 /******************************************************************************/
    170 /*                             Control Blocks routines                        */
    171 /******************************************************************************/
    172 
    173 
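/*
 * Allocate DMA-safe memory for the shared control structure (struct
 * adv_control), map it into kernel virtual space and create/load the
 * DMA map that describes it.  Called once from adv_attach().
 */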
    174 static int
    175 adv_alloc_ccbs(sc)
    176 	ASC_SOFTC      *sc;
    177 {
    178 	bus_dma_segment_t seg;
    179 	int             error, rseg;
    180 
    181 	/*
    182          * Allocate the control blocks.
    183          */
    184 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
    185 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    186 		printf("%s: unable to allocate control structures,"
    187 		       " error = %d\n", sc->sc_dev.dv_xname, error);
    188 		return (error);
    189 	}
    190 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
    191 		   sizeof(struct adv_control), (caddr_t *) & sc->sc_control,
    192 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    193 		printf("%s: unable to map control structures, error = %d\n",
    194 		       sc->sc_dev.dv_xname, error);
    195 		return (error);
    196 	}
    197 	/*
    198          * Create and load the DMA map used for the control blocks.
    199          */
    200 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
    201 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
    202 				       &sc->sc_dmamap_control)) != 0) {
    203 		printf("%s: unable to create control DMA map, error = %d\n",
    204 		       sc->sc_dev.dv_xname, error);
    205 		return (error);
    206 	}
    207 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
    208 			   sc->sc_control, sizeof(struct adv_control), NULL,
    209 				     BUS_DMA_NOWAIT)) != 0) {
    210 		printf("%s: unable to load control DMA map, error = %d\n",
    211 		       sc->sc_dev.dv_xname, error);
    212 		return (error);
    213 	}
    214 	return (0);
    215 }
    216 
    217 
    218 /*
    219  * Create a set of ccbs and add them to the free list.  Called once
 * by adv_attach().  We return the number of CCBs successfully created.
    221  */
    222 static int
    223 adv_create_ccbs(sc, ccbstore, count)
    224 	ASC_SOFTC      *sc;
    225 	ADV_CCB        *ccbstore;
    226 	int             count;
    227 {
    228 	ADV_CCB        *ccb;
    229 	int             i, error;
    230 
    231 	bzero(ccbstore, sizeof(ADV_CCB) * count);
    232 	for (i = 0; i < count; i++) {
    233 		ccb = &ccbstore[i];
    234 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
    235 			printf("%s: unable to initialize ccb, error = %d\n",
    236 			       sc->sc_dev.dv_xname, error);
    237 			return (i);
    238 		}
    239 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
    240 	}
    241 
    242 	return (i);
    243 }
    244 
    245 
    246 /*
    247  * A ccb is put onto the free list.
    248  */
    249 static void
    250 adv_free_ccb(sc, ccb)
    251 	ASC_SOFTC      *sc;
    252 	ADV_CCB        *ccb;
    253 {
    254 	int             s;
    255 
    256 	s = splbio();
    257 
    258 	adv_reset_ccb(ccb);
    259 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
    260 
    261 	/*
    262          * If there were none, wake anybody waiting for one to come free,
    263          * starting with queued entries.
    264          */
    265 	if (ccb->chain.tqe_next == 0)
    266 		wakeup(&sc->sc_free_ccb);
    267 
    268 	splx(s);
    269 }
    270 
    271 
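/*
 * Return a CCB to its initial state (currently this just clears the flags).
 */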
    272 static void
    273 adv_reset_ccb(ccb)
    274 	ADV_CCB        *ccb;
    275 {
    276 
    277 	ccb->flags = 0;
    278 }
    279 
    280 
    281 static int
    282 adv_init_ccb(sc, ccb)
    283 	ASC_SOFTC      *sc;
    284 	ADV_CCB        *ccb;
    285 {
    286 	int             error;
    287 
    288 	/*
    289          * Create the DMA map for this CCB.
    290          */
    291 	error = bus_dmamap_create(sc->sc_dmat,
    292 				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
    293 			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
    294 		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
    295 	if (error) {
    296 		printf("%s: unable to create DMA map, error = %d\n",
    297 		       sc->sc_dev.dv_xname, error);
    298 		return (error);
    299 	}
    300 	adv_reset_ccb(ccb);
    301 	return (0);
    302 }
    303 
    304 
    305 /*
    306  * Get a free ccb
    307  *
 * If there are none, sleep waiting for one to be freed (unless SCSI_NOSLEEP is set)
    309  */
    310 static ADV_CCB *
    311 adv_get_ccb(sc, flags)
    312 	ASC_SOFTC      *sc;
    313 	int             flags;
    314 {
    315 	ADV_CCB        *ccb = 0;
    316 	int             s;
    317 
    318 	s = splbio();
    319 
    320 	/*
         * If we can and have to, sleep waiting for a CCB to come free,
         * but fail immediately if the caller asked us not to sleep.
    323          */
    324 	for (;;) {
    325 		ccb = sc->sc_free_ccb.tqh_first;
    326 		if (ccb) {
    327 			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
    328 			break;
    329 		}
    330 		if ((flags & SCSI_NOSLEEP) != 0)
    331 			goto out;
    332 
    333 		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
    334 	}
    335 
    336 	ccb->flags |= CCB_ALLOC;
    337 
    338 out:
    339 	splx(s);
    340 	return (ccb);
    341 }
    342 
    343 
    344 /*
    345  * Queue a CCB to be sent to the controller, and send it if possible.
    346  */
    347 static void
    348 adv_queue_ccb(sc, ccb)
    349 	ASC_SOFTC      *sc;
    350 	ADV_CCB        *ccb;
    351 {
    352 
    353 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
    354 
    355 	adv_start_ccbs(sc);
    356 }
    357 
    358 
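/*
 * Feed waiting CCBs to the controller.  If the microcode reports ASC_BUSY,
 * leave the CCB at the head of the waiting queue and schedule adv_watchdog()
 * to retry later.
 */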
    359 static void
    360 adv_start_ccbs(sc)
    361 	ASC_SOFTC      *sc;
    362 {
    363 	ADV_CCB        *ccb;
    364 
    365 	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {
    366 		if (ccb->flags & CCB_WATCHDOG)
    367 			untimeout(adv_watchdog, ccb);
    368 
    369 		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
    370 			ccb->flags |= CCB_WATCHDOG;
    371 			timeout(adv_watchdog, ccb,
    372 				(ADV_WATCH_TIMEOUT * hz) / 1000);
    373 			break;
    374 		}
    375 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
    376 
    377 		if ((ccb->xs->flags & SCSI_POLL) == 0)
    378 			timeout(adv_timeout, ccb, (ccb->timeout * hz) / 1000);
    379 	}
    380 }
    381 
    382 
    383 /******************************************************************************/
    384 /*                      DMA able memory allocation routines                   */
    385 /******************************************************************************/
    386 
    387 
    388 /*
 * Allocate DMA-able memory for the overrun buffer.
    390  * This memory can be safely shared among all the AdvanSys boards.
    391  */
static u_int8_t *
    393 adv_alloc_overrunbuf(dvname, dmat)
    394 	char           *dvname;
    395 	bus_dma_tag_t   dmat;
    396 {
    397 	static u_int8_t *overrunbuf = NULL;
    398 
    399 	bus_dmamap_t    ovrbuf_dmamap;
    400 	bus_dma_segment_t seg;
    401 	int             rseg, error;
    402 
    403 
    404 	/*
         * If an overrun buffer has already been allocated, don't allocate it
         * again; instead, return the address of the existing buffer.
    407          */
    408 	if (overrunbuf)
    409 		return (overrunbuf);
    410 
    411 
    412 	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
    413 			   NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    414 		printf("%s: unable to allocate overrun buffer, error = %d\n",
    415 		       dvname, error);
    416 		return (0);
    417 	}
    418 	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
    419 	(caddr_t *) & overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    420 		printf("%s: unable to map overrun buffer, error = %d\n",
    421 		       dvname, error);
    422 
    423 		bus_dmamem_free(dmat, &seg, 1);
    424 		return (0);
    425 	}
    426 	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
    427 	      ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
    428 		printf("%s: unable to create overrun buffer DMA map,"
    429 		       " error = %d\n", dvname, error);
    430 
    431 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
    432 		bus_dmamem_free(dmat, &seg, 1);
    433 		return (0);
    434 	}
    435 	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
    436 			   ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
    437 		printf("%s: unable to load overrun buffer DMA map,"
    438 		       " error = %d\n", dvname, error);
    439 
    440 		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
    441 		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
    442 		bus_dmamem_free(dmat, &seg, 1);
    443 		return (0);
    444 	}
    445 	return (overrunbuf);
    446 }
    447 
    448 
    449 /******************************************************************************/
    450 /*                         SCSI layer interfacing routines                    */
    451 /******************************************************************************/
    452 
    453 
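/*
 * Initialize the board: find the chip, read the EEPROM configuration
 * (reporting any warnings), write the possibly modified configuration back,
 * and set up the interrupt callback and the shared overrun buffer.
 * Returns 0 on success, 1 on failure.
 */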
    454 int
    455 adv_init(sc)
    456 	ASC_SOFTC      *sc;
    457 {
    458 	int             warn;
    459 
    460 	if (ASC_IS_NARROW_BOARD(sc)) {
    461 		if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
			panic("adv_init: AscFindSignature failed");
    463 
    464 		/*
    465                  * Read the board configuration
    466                  */
    467 		AscInitASC_SOFTC(sc);
    468 		warn = AscInitFromEEP(sc);
    469 		if (warn) {
    470 			printf("%s -get: ", sc->sc_dev.dv_xname);
    471 			switch (warn) {
    472 			case -1:
    473 				printf("Chip is not halted\n");
    474 				break;
    475 
    476 			case -2:
    477 				printf("Couldn't get MicroCode Start"
    478 				       " address\n");
    479 				break;
    480 
    481 			case ASC_WARN_IO_PORT_ROTATE:
    482 				printf("I/O port address modified\n");
    483 				break;
    484 
    485 			case ASC_WARN_AUTO_CONFIG:
    486 				printf("I/O port increment switch enabled\n");
    487 				break;
    488 
    489 			case ASC_WARN_EEPROM_CHKSUM:
    490 				printf("EEPROM checksum error\n");
    491 				break;
    492 
    493 			case ASC_WARN_IRQ_MODIFIED:
    494 				printf("IRQ modified\n");
    495 				break;
    496 
    497 			case ASC_WARN_CMD_QNG_CONFLICT:
    498 				printf("tag queuing enabled w/o disconnects\n");
    499 				break;
    500 
    501 			default:
    502 				printf("unknown warning %d\n", warn);
    503 			}
    504 		}
    505 		if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
    506 			sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
    507 
    508 		/*
    509                  * Modify the board configuration
    510                  */
    511 		warn = AscInitFromASC_SOFTC(sc);
    512 		if (warn) {
    513 			printf("%s -set: ", sc->sc_dev.dv_xname);
    514 			switch (warn) {
    515 			case ASC_WARN_CMD_QNG_CONFLICT:
    516 				printf("tag queuing enabled w/o disconnects\n");
    517 				break;
    518 
    519 			case ASC_WARN_AUTO_CONFIG:
    520 				printf("I/O port increment switch enabled\n");
    521 				break;
    522 
    523 			default:
    524 				printf("unknown warning %d\n", warn);
    525 			}
    526 		}
    527 		sc->isr_callback = (ulong) adv_narrow_isr_callback;
    528 
    529 		if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
    530 							     sc->sc_dmat))) {
    531 			return (1);
    532 		}
    533 	} else
		/* Wide board */
    535 	{
    536 		printf("%s: Wide boards are not supported yet\n",
    537 		       sc->sc_dev.dv_xname);
    538 		return (1);
    539 	}
    540 
    541 	return (0);
    542 }
    543 
    544 
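/*
 * Finish the attachment: load and start the RISC microcode, fill in the
 * prototype scsipi_link, initialize the CCB lists, allocate and create the
 * control blocks, and attach the SCSI bus via config_found().
 */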
    545 void
    546 adv_attach(sc)
    547 	ASC_SOFTC      *sc;
    548 {
    549 	int             i, error;
    550 
    551 	if (ASC_IS_NARROW_BOARD(sc)) {
    552 		/*
    553                  * Initialize board RISC chip and enable interrupts.
    554                  */
    555 		switch (AscInitDriver(sc)) {
    556 		case 0:
    557 			/* AllOK */
    558 			break;
    559 
    560 		case 1:
    561 			panic("%s: bad signature", sc->sc_dev.dv_xname);
    562 			break;
    563 
    564 		case 2:
    565 			panic("%s: unable to load MicroCode",
    566 			      sc->sc_dev.dv_xname);
    567 			break;
    568 
    569 		case 3:
    570 			panic("%s: unable to initialize MicroCode",
    571 			      sc->sc_dev.dv_xname);
    572 			break;
    573 
    574 		default:
    575 			panic("%s: unable to initialize board RISC chip",
    576 			      sc->sc_dev.dv_xname);
    577 		}
    578 	} else
		/* Wide board */
    580 	{
    581 		/* ToDo */
    582 	}
    583 
    584 
    585 	/*
    586          * fill in the prototype scsipi_link.
    587          */
    588 	sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
    589 	sc->sc_link.adapter_softc = sc;
    590 	sc->sc_link.scsipi_scsi.adapter_target = sc->chip_scsi_id;
    591 	sc->sc_link.adapter = &adv_switch;
    592 	sc->sc_link.device = &adv_dev;
    593 	sc->sc_link.openings = 4;
    594 	sc->sc_link.scsipi_scsi.max_target = ASC_IS_NARROW_BOARD(sc) ? 7 : 15;
    595 	sc->sc_link.type = BUS_SCSI;
    596 
    597 
    598 	TAILQ_INIT(&sc->sc_free_ccb);
    599 	TAILQ_INIT(&sc->sc_waiting_ccb);
    600 	LIST_INIT(&sc->sc_queue);
    601 
    602 
    603 	/*
    604          * Allocate the Control Blocks.
    605          */
    606 	error = adv_alloc_ccbs(sc);
    607 	if (error)
		return; /* (error) */
    609 
    610 	/*
    611          * Create and initialize the Control Blocks.
    612          */
    613 	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
    614 	if (i == 0) {
    615 		printf("%s: unable to create control blocks\n",
    616 		       sc->sc_dev.dv_xname);
		return; /* (ENOMEM) */
    618 	} else if (i != ADV_MAX_CCB) {
    619 		printf("%s: WARNING: only %d of %d control blocks created\n",
    620 		       sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
    621 	}
    622 	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
    623 }
    624 
    625 
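/*
 * Clamp a transfer so it fits in a single CCB's scatter/gather list,
 * then apply the generic minphys() limit.
 */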
    626 static void
    627 advminphys(bp)
    628 	struct buf     *bp;
    629 {
    630 
    631 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
    632 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
    633 	minphys(bp);
    634 }
    635 
    636 
    637 /*
    638  * start a scsi operation given the command and the data address.  Also needs
    639  * the unit, target and lu.
    640  */
    641 static int
    642 adv_scsi_cmd(xs)
    643 	struct scsipi_xfer *xs;
    644 {
    645 	struct scsipi_link *sc_link = xs->sc_link;
    646 	ASC_SOFTC      *sc = sc_link->adapter_softc;
    647 	bus_dma_tag_t   dmat = sc->sc_dmat;
    648 	ADV_CCB        *ccb;
    649 	int             s, flags, error, nsegs;
	int             fromqueue = 0, dontqueue = 0;
    651 
    652 
    653 	s = splbio();		/* protect the queue */
    654 
    655 	/*
         * If we're running the queue from adv_intr(), we've been
    657          * called with the first queue entry as our argument.
    658          */
    659 	if (xs == sc->sc_queue.lh_first) {
    660 		xs = adv_dequeue(sc);
    661 		fromqueue = 1;
    662 	} else {
    663 
    664 		/* Polled requests can't be queued for later. */
    665 		dontqueue = xs->flags & SCSI_POLL;
    666 
    667 		/*
    668                  * If there are jobs in the queue, run them first.
    669                  */
    670 		if (sc->sc_queue.lh_first != NULL) {
    671 			/*
    672                          * If we can't queue, we have to abort, since
    673                          * we have to preserve order.
    674                          */
    675 			if (dontqueue) {
    676 				splx(s);
    677 				xs->error = XS_DRIVER_STUFFUP;
    678 				return (TRY_AGAIN_LATER);
    679 			}
    680 			/*
    681                          * Swap with the first queue entry.
    682                          */
    683 			adv_enqueue(sc, xs, 0);
    684 			xs = adv_dequeue(sc);
    685 			fromqueue = 1;
    686 		}
    687 	}
    688 
    689 
    690 	/*
    691          * get a ccb to use. If the transfer
    692          * is from a buf (possibly from interrupt time)
    693          * then we can't allow it to sleep
    694          */
    695 
    696 	flags = xs->flags;
    697 	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
    698 		/*
    699                  * If we can't queue, we lose.
    700                  */
    701 		if (dontqueue) {
    702 			splx(s);
    703 			xs->error = XS_DRIVER_STUFFUP;
    704 			return (TRY_AGAIN_LATER);
    705 		}
    706 		/*
    707                  * Stuff ourselves into the queue, in front
                 * if we came off it in the first place.
    709                  */
    710 		adv_enqueue(sc, xs, fromqueue);
    711 		splx(s);
    712 		return (SUCCESSFULLY_QUEUED);
    713 	}
    714 	splx(s);		/* done playing with the queue */
    715 
    716 	ccb->xs = xs;
    717 	ccb->timeout = xs->timeout;
    718 
    719 	/*
    720          * Build up the request
    721          */
    722 	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));
    723 
    724 	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;
    725 
    726 	ccb->scsiq.cdbptr = &xs->cmd->opcode;
    727 	ccb->scsiq.q2.cdb_len = xs->cmdlen;
    728 	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->scsipi_scsi.target);
    729 	ccb->scsiq.q1.target_lun = sc_link->scsipi_scsi.lun;
    730 	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->scsipi_scsi.target,
    731 						   sc_link->scsipi_scsi.lun);
    732 	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
    733 		ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
    734 	ccb->scsiq.q1.sense_len = sizeof(struct scsipi_sense_data);
    735 
    736 	/*
         * If there are any outstanding requests for the current target, then
         * every 255th request is sent as an ORDERED request.  This heuristic
         * tries to retain the benefit of request sorting while preventing
         * request starvation.  255 is the max number of tags or pending
         * commands a device may have outstanding.
    742          */
    743 	sc->reqcnt[sc_link->scsipi_scsi.target]++;
    744 	if ((sc->reqcnt[sc_link->scsipi_scsi.target] > 0) &&
    745 	    (sc->reqcnt[sc_link->scsipi_scsi.target] % 255) == 0) {
    746 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
    747 	} else {
    748 		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
    749 	}
    750 
    751 
    752 	if (xs->datalen) {
    753 		/*
    754                  * Map the DMA transfer.
    755                  */
    756 #ifdef TFS
    757 		if (flags & SCSI_DATA_UIO) {
    758 			error = bus_dmamap_load_uio(dmat,
    759 				  ccb->dmamap_xfer, (struct uio *) xs->data,
    760 						    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
    761 		} else
    762 #endif				/* TFS */
    763 		{
    764 			error = bus_dmamap_load(dmat,
    765 			      ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
    766 						(flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
    767 		}
    768 
    769 		if (error) {
    770 			if (error == EFBIG) {
    771 				printf("%s: adv_scsi_cmd, more than %d dma"
    772 				       " segments\n",
    773 				       sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
    774 			} else {
    775 				printf("%s: adv_scsi_cmd, error %d loading"
    776 				       " dma map\n",
    777 				       sc->sc_dev.dv_xname, error);
    778 			}
    779 
    780 			xs->error = XS_DRIVER_STUFFUP;
    781 			adv_free_ccb(sc, ccb);
    782 			return (COMPLETE);
    783 		}
    784 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
    785 				ccb->dmamap_xfer->dm_mapsize,
    786 			      (flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
    787 				BUS_DMASYNC_PREWRITE);
    788 
    789 
    790 		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));
    791 
    792 		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
    793 
    794 			ccb->sghead.sg_list[nsegs].addr =
    795 				ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
    796 			ccb->sghead.sg_list[nsegs].bytes =
    797 				ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
    798 		}
    799 
    800 		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
    801 			ccb->dmamap_xfer->dm_nsegs;
    802 
    803 		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
    804 		ccb->scsiq.sg_head = &ccb->sghead;
    805 		ccb->scsiq.q1.data_addr = 0;
    806 		ccb->scsiq.q1.data_cnt = 0;
    807 	} else {
    808 		/*
    809                  * No data xfer, use non S/G values.
    810                  */
    811 		ccb->scsiq.q1.data_addr = 0;
    812 		ccb->scsiq.q1.data_cnt = 0;
    813 	}
    814 
    815 	s = splbio();
    816 	adv_queue_ccb(sc, ccb);
    817 	splx(s);
    818 
    819 	/*
         * Usually return SUCCESSFULLY_QUEUED
    821          */
    822 	if ((flags & SCSI_POLL) == 0)
    823 		return (SUCCESSFULLY_QUEUED);
    824 
    825 	/*
    826          * If we can't use interrupts, poll on completion
    827          */
    828 	if (adv_poll(sc, xs, ccb->timeout)) {
    829 		adv_timeout(ccb);
    830 		if (adv_poll(sc, xs, ccb->timeout))
    831 			adv_timeout(ccb);
    832 	}
    833 	return (COMPLETE);
    834 }
    835 
    836 
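/*
 * Interrupt handler: let the Asc library service the chip, then run the
 * first entry of the software queue, if any, since a CCB has likely just
 * been freed.
 */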
    837 int
    838 adv_intr(arg)
    839 	void           *arg;
    840 {
    841 	ASC_SOFTC      *sc = arg;
    842 	struct scsipi_xfer *xs;
    843 
    844 	if (ASC_IS_NARROW_BOARD(sc)) {
    845 		AscISR(sc);
    846 	} else
		/* Wide board */
    848 	{
    849 		/* ToDo AdvISR */
    850 	}
    851 
    852 	/*
    853          * If there are queue entries in the software queue, try to
    854          * run the first one.  We should be more or less guaranteed
    855          * to succeed, since we just freed a CCB.
    856          *
    857          * NOTE: adv_scsi_cmd() relies on our calling it with
    858          * the first entry in the queue.
    859          */
    860 	if ((xs = sc->sc_queue.lh_first) != NULL)
    861 		(void) adv_scsi_cmd(xs);
    862 
    863 	return (1);
    864 }
    865 
    866 
    867 /*
    868  * Poll a particular unit, looking for a particular xs
    869  */
    870 static int
    871 adv_poll(sc, xs, count)
    872 	ASC_SOFTC      *sc;
    873 	struct scsipi_xfer *xs;
    874 	int             count;
    875 {
    876 
    877 	/* timeouts are in msec, so we loop in 1000 usec cycles */
    878 	while (count) {
    879 		adv_intr(sc);
    880 		if (xs->flags & ITSDONE)
    881 			return (0);
    882 		delay(1000);	/* only happens in boot so ok */
    883 		count--;
    884 	}
    885 	return (1);
    886 }
    887 
    888 
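/*
 * Command timeout handler: abort the timed-out command; if the abort itself
 * has already timed out, reset the SCSI bus instead.
 */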
    889 static void
    890 adv_timeout(arg)
    891 	void           *arg;
    892 {
    893 	ADV_CCB        *ccb = arg;
    894 	struct scsipi_xfer *xs = ccb->xs;
    895 	struct scsipi_link *sc_link = xs->sc_link;
    896 	ASC_SOFTC      *sc = sc_link->adapter_softc;
    897 	int             s;
    898 
    899 	scsi_print_addr(sc_link);
    900 	printf("timed out");
    901 
    902 	s = splbio();
    903 
    904 	/*
         * If it has been through here before, then a previous abort has
         * failed; don't try to abort again, reset the bus instead.
    907          */
    908 	if (ccb->flags & CCB_ABORT) {
    909 		/* abort timed out */
    910 		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
    912 		if (AscResetBus(sc) == ASC_ERROR) {
    913 			ccb->timeout = sc->scsi_reset_wait;
    914 			adv_queue_ccb(sc, ccb);
    915 		}
    916 	} else {
    917 		/* abort the operation that has timed out */
    918 		printf("\n");
    919 		AscAbortCCB(sc, (u_int32_t) ccb);
    920 		ccb->xs->error = XS_TIMEOUT;
    921 		ccb->timeout = ADV_ABORT_TIMEOUT;
    922 		ccb->flags |= CCB_ABORT;
    923 		adv_queue_ccb(sc, ccb);
    924 	}
    925 
    926 	splx(s);
    927 }
    928 
    929 
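/*
 * Watchdog timer: retry starting CCBs that were deferred because
 * AscExeScsiQueue() returned ASC_BUSY.
 */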
    930 static void
    931 adv_watchdog(arg)
    932 	void           *arg;
    933 {
    934 	ADV_CCB        *ccb = arg;
    935 	struct scsipi_xfer *xs = ccb->xs;
    936 	struct scsipi_link *sc_link = xs->sc_link;
    937 	ASC_SOFTC      *sc = sc_link->adapter_softc;
    938 	int             s;
    939 
    940 	s = splbio();
    941 
    942 	ccb->flags &= ~CCB_WATCHDOG;
    943 	adv_start_ccbs(sc);
    944 
    945 	splx(s);
    946 }
    947 
    948 
    949 /******************************************************************************/
    950 /*                  NARROW and WIDE boards Interrupt callbacks                */
    951 /******************************************************************************/
    952 
    953 
    954 /*
    955  * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
    956  *
    957  * Interrupt callback function for the Narrow SCSI Asc Library.
    958  */
    959 static void
    960 adv_narrow_isr_callback(sc, qdonep)
    961 	ASC_SOFTC      *sc;
    962 	ASC_QDONE_INFO *qdonep;
    963 {
    964 	bus_dma_tag_t   dmat = sc->sc_dmat;
    965 	ADV_CCB        *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
    966 	struct scsipi_xfer *xs = ccb->xs;
    967 	struct scsipi_sense_data *s1, *s2;
    968 
    969 
    970 	untimeout(adv_timeout, ccb);
    971 
    972 	/*
    973          * If we were a data transfer, unload the map that described
    974          * the data buffer.
    975          */
    976 	if (xs->datalen) {
    977 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
    978 				ccb->dmamap_xfer->dm_mapsize,
    979 			 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
    980 				BUS_DMASYNC_POSTWRITE);
    981 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
    982 	}
    983 	if ((ccb->flags & CCB_ALLOC) == 0) {
    984 		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
    985 		Debugger();
    986 		return;
    987 	}
    988 	/*
    989          * 'qdonep' contains the command's ending status.
    990          */
    991 	switch (qdonep->d3.done_stat) {
    992 	case ASC_QD_NO_ERROR:
    993 		switch (qdonep->d3.host_stat) {
    994 		case ASC_QHSTA_NO_ERROR:
    995 			xs->error = XS_NOERROR;
    996 			xs->resid = 0;
    997 			break;
    998 
    999 		default:
   1000 			/* QHSTA error occurred */
   1001 			xs->error = XS_DRIVER_STUFFUP;
   1002 			break;
   1003 		}
   1004 
   1005 		/*
   1006                  * If an INQUIRY command completed successfully, then call
                 * the AscInquiryHandling() function to work around buggy boards.
   1008                  */
   1009 		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
   1010 		    (xs->sc_link->scsipi_scsi.lun == 0) &&
   1011 		    (xs->datalen - qdonep->remain_bytes) >= 8) {
   1012 			AscInquiryHandling(sc,
   1013 				      xs->sc_link->scsipi_scsi.target & 0x7,
   1014 					   (ASC_SCSI_INQUIRY *) xs->data);
   1015 		}
   1016 		break;
   1017 
   1018 	case ASC_QD_WITH_ERROR:
   1019 		switch (qdonep->d3.host_stat) {
   1020 		case ASC_QHSTA_NO_ERROR:
   1021 			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
   1022 				s1 = &ccb->scsi_sense;
   1023 				s2 = &xs->sense.scsi_sense;
   1024 				*s2 = *s1;
   1025 				xs->error = XS_SENSE;
   1026 			} else
   1027 				xs->error = XS_DRIVER_STUFFUP;
   1028 			break;
   1029 
   1030 		default:
   1031 			/* QHSTA error occurred */
   1032 			xs->error = XS_DRIVER_STUFFUP;
   1033 			break;
   1034 		}
   1035 		break;
   1036 
   1037 	case ASC_QD_ABORTED_BY_HOST:
   1038 	default:
   1039 		xs->error = XS_DRIVER_STUFFUP;
   1040 		break;
   1041 	}
   1042 
   1043 
   1044 	adv_free_ccb(sc, ccb);
   1045 	xs->flags |= ITSDONE;
   1046 	scsipi_done(xs);
   1047 }
   1048