      1 /*	$NetBSD: adv.c,v 1.48 2018/08/29 16:51:51 rin Exp $	*/
      2 
      3 /*
      4  * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
      5  *
      6  * Copyright (c) 1998 The NetBSD Foundation, Inc.
      7  * All rights reserved.
      8  *
      9  * Author: Baldassare Dante Profeta <dante (at) mclink.it>
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *        This product includes software developed by the NetBSD
     22  *        Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 #include <sys/cdefs.h>
     41 __KERNEL_RCSID(0, "$NetBSD: adv.c,v 1.48 2018/08/29 16:51:51 rin Exp $");
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/callout.h>
     46 #include <sys/kernel.h>
     47 #include <sys/errno.h>
     48 #include <sys/ioctl.h>
     49 #include <sys/device.h>
     50 #include <sys/malloc.h>
     51 #include <sys/buf.h>
     52 #include <sys/proc.h>
     53 
     54 #include <sys/bus.h>
     55 #include <sys/intr.h>
     56 
     57 #include <dev/scsipi/scsi_all.h>
     58 #include <dev/scsipi/scsipi_all.h>
     59 #include <dev/scsipi/scsiconf.h>
     60 
     61 #include <dev/ic/advlib.h>
     62 #include <dev/ic/adv.h>
     63 
     64 #ifndef DDB
     65 #define	Debugger()	panic("should call debugger here (adv.c)")
     66 #endif /* ! DDB */
     67 
     68 
     69 /* #define ASC_DEBUG */
     70 
     71 /******************************************************************************/
     72 
     73 
     74 static int adv_alloc_control_data(ASC_SOFTC *);
     75 static void adv_free_control_data(ASC_SOFTC *);
     76 static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
     77 static void adv_free_ccb(ASC_SOFTC *, ADV_CCB *);
     78 static void adv_reset_ccb(ADV_CCB *);
     79 static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
     80 static ADV_CCB *adv_get_ccb(ASC_SOFTC *);
     81 static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
     82 static void adv_start_ccbs(ASC_SOFTC *);
     83 
     84 
     85 static void adv_scsipi_request(struct scsipi_channel *,
     86 	scsipi_adapter_req_t, void *);
     87 static void advminphys(struct buf *);
     88 static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);
     89 
     90 static int adv_poll(ASC_SOFTC *, struct scsipi_xfer *, int);
     91 static void adv_timeout(void *);
     92 static void adv_watchdog(void *);
     93 
     94 
     95 /******************************************************************************/
     96 
     97 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
     98 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
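        /*
         * Both values are in milliseconds and are converted to clock ticks
         * before being handed to callout_reset(), e.g.
         *
         *	callout_reset(&ccb->ccb_watchdog,
         *	    (ADV_WATCH_TIMEOUT * hz) / 1000, adv_watchdog, ccb);
         *
         * or via mstohz() for the per-transfer timeout.
         */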
     99 
    100 /******************************************************************************/
    101 /*                             Control Blocks routines                        */
    102 /******************************************************************************/
    103 
    104 
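        /*
         * Allocate and map the shared control structure (the CCBs plus the
         * overrun buffer) using the usual bus_dma(9) sequence:
         * bus_dmamem_alloc() -> bus_dmamem_map() -> bus_dmamap_create() ->
         * bus_dmamap_load().  The loaded map supplies the bus address used
         * below to locate individual CCBs and the overrun buffer.
         */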
    105 static int
    106 adv_alloc_control_data(ASC_SOFTC *sc)
    107 {
    108 	int error;
    109 
    110 	/*
    111 	 * Allocate the control blocks.
    112 	 */
    113 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
    114 			   PAGE_SIZE, 0, &sc->sc_control_seg, 1,
    115 			   &sc->sc_control_nsegs, BUS_DMA_NOWAIT)) != 0) {
    116 		aprint_error_dev(sc->sc_dev, "unable to allocate control "
    117 		    "structures, error = %d\n", error);
    118 		return (error);
    119 	}
    120 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_control_seg,
    121 			   sc->sc_control_nsegs, sizeof(struct adv_control),
    122 			   (void **) & sc->sc_control,
    123 			   BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    124 		aprint_error_dev(sc->sc_dev,
    125 		    "unable to map control structures, error = %d\n", error);
    126 		return (error);
    127 	}
    128 	/*
    129 	 * Create and load the DMA map used for the control blocks.
    130 	 */
    131 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
    132 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
    133 				       &sc->sc_dmamap_control)) != 0) {
    134 		aprint_error_dev(sc->sc_dev,
    135 		    "unable to create control DMA map, error = %d\n", error);
    136 		return (error);
    137 	}
    138 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
    139 			   sc->sc_control, sizeof(struct adv_control), NULL,
    140 				     BUS_DMA_NOWAIT)) != 0) {
    141 		aprint_error_dev(sc->sc_dev,
    142 		    "unable to load control DMA map, error = %d\n", error);
    143 		return (error);
    144 	}
    145 
    146 	/*
    147 	 * Record the bus address of the overrun buffer in the control structure.
    148 	 */
    149 	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
    150 	    offsetof(struct adv_control, overrun_buf);
    151 
    152 	return (0);
    153 }
    154 
    155 static void
    156 adv_free_control_data(ASC_SOFTC *sc)
    157 {
    158 
    159 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_control);
    160 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_control);
    161 	sc->sc_dmamap_control = NULL;
    162 
    163 	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control,
    164 	    sizeof(struct adv_control));
    165 	bus_dmamem_free(sc->sc_dmat, &sc->sc_control_seg,
    166 	    sc->sc_control_nsegs);
    167 }
    168 
    169 /*
    170  * Create a set of CCBs and add them to the free list.  Called once
    171  * by adv_attach().  We return the number of CCBs successfully created.
    172  */
    173 static int
    174 adv_create_ccbs(ASC_SOFTC *sc, ADV_CCB *ccbstore, int count)
    175 {
    176 	ADV_CCB        *ccb;
    177 	int             i, error;
    178 
    179 	memset(ccbstore, 0, sizeof(ADV_CCB) * count);
    180 	for (i = 0; i < count; i++) {
    181 		ccb = &ccbstore[i];
    182 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
    183 			aprint_error_dev(sc->sc_dev,
    184 			    "unable to initialize ccb, error = %d\n", error);
    185 			return (i);
    186 		}
    187 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
    188 	}
    189 
    190 	return (i);
    191 }
    192 
    193 
    194 /*
    195  * Return a CCB to the free list.
    196  */
    197 static void
    198 adv_free_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
    199 {
    200 	int             s;
    201 
    202 	s = splbio();
    203 	adv_reset_ccb(ccb);
    204 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
    205 	splx(s);
    206 }
    207 
    208 
    209 static void
    210 adv_reset_ccb(ADV_CCB *ccb)
    211 {
    212 
    213 	ccb->flags = 0;
    214 }
    215 
    216 
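        /*
         * Each CCB gets its own transfer DMA map, sized for up to
         * ASC_MAX_SG_LIST segments covering (ASC_MAX_SG_LIST - 1) pages of
         * data; the extra segment allows for a transfer that is not page
         * aligned.  advminphys() clamps requests to the same limit.
         */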
    217 static int
    218 adv_init_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
    219 {
    220 	int	hashnum, error;
    221 
    222 	callout_init(&ccb->ccb_watchdog, 0);
    223 
    224 	/*
    225 	 * Create the DMA map for this CCB.
    226 	 */
    227 	error = bus_dmamap_create(sc->sc_dmat,
    228 				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
    229 			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
    230 		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
    231 	if (error) {
    232 		aprint_error_dev(sc->sc_dev,
    233 		    "unable to create DMA map, error = %d\n", error);
    234 		return (error);
    235 	}
    236 
    237 	/*
    238 	 * Insert the CCB into the phys-to-kv hash table.
    239 	 * Entries are never removed.
    240 	 */
    241 	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
    242 	    ADV_CCB_OFF(ccb);
    243 	hashnum = CCB_HASH(ccb->hashkey);
    244 	ccb->nexthash = sc->sc_ccbhash[hashnum];
    245 	sc->sc_ccbhash[hashnum] = ccb;
    246 
    247 	adv_reset_ccb(ccb);
    248 	return (0);
    249 }
    250 
    251 
    252 /*
    253  * Get a free CCB.
    254  *
    255  * Returns NULL if none are available; new CCBs are never allocated here.
    256  */
    257 static ADV_CCB *
    258 adv_get_ccb(ASC_SOFTC *sc)
    259 {
    260 	ADV_CCB        *ccb = 0;
    261 	int             s;
    262 
    263 	s = splbio();
    264 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
    265 	if (ccb != NULL) {
    266 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
    267 		ccb->flags |= CCB_ALLOC;
    268 	}
    269 	splx(s);
    270 	return (ccb);
    271 }
    272 
    273 
    274 /*
    275  * Given a physical address, find the ccb that it corresponds to.
    276  */
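        /*
         * Illustrative use (this is what adv_narrow_isr_callback() does):
         * the firmware hands back the bus address stored in scsiq.q2.ccb_ptr
         * and the completing CCB is recovered with
         *
         *	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
         */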
    277 ADV_CCB *
    278 adv_ccb_phys_kv(ASC_SOFTC *sc, u_long ccb_phys)
    279 {
    280 	int hashnum = CCB_HASH(ccb_phys);
    281 	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];
    282 
    283 	while (ccb) {
    284 		if (ccb->hashkey == ccb_phys)
    285 			break;
    286 		ccb = ccb->nexthash;
    287 	}
    288 	return (ccb);
    289 }
    290 
    291 
    292 /*
    293  * Queue a CCB to be sent to the controller, and send it if possible.
    294  */
    295 static void
    296 adv_queue_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
    297 {
    298 
    299 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
    300 
    301 	adv_start_ccbs(sc);
    302 }
    303 
    304 
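        /*
         * Hand waiting CCBs to the firmware.  If AscExeScsiQueue() returns
         * ASC_BUSY, the CCB stays at the head of the queue and a watchdog
         * callout retries after ADV_WATCH_TIMEOUT ms; otherwise the CCB is
         * dequeued and, unless the transfer is polled, the per-transfer
         * timeout callout is armed.
         */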
    305 static void
    306 adv_start_ccbs(ASC_SOFTC *sc)
    307 {
    308 	ADV_CCB        *ccb;
    309 
    310 	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
    311 		if (ccb->flags & CCB_WATCHDOG)
    312 			callout_stop(&ccb->ccb_watchdog);
    313 
    314 		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
    315 			ccb->flags |= CCB_WATCHDOG;
    316 			callout_reset(&ccb->ccb_watchdog,
    317 			    (ADV_WATCH_TIMEOUT * hz) / 1000,
    318 			    adv_watchdog, ccb);
    319 			break;
    320 		}
    321 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
    322 
    323 		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
    324 			callout_reset(&ccb->xs->xs_callout,
    325 			    mstohz(ccb->timeout), adv_timeout, ccb);
    326 	}
    327 }
    328 
    329 
    330 /******************************************************************************/
    331 /*                         SCSI layer interfacing routines                    */
    332 /******************************************************************************/
    333 
    334 
    335 int
    336 adv_init(ASC_SOFTC *sc)
    337 {
    338 	int             warn;
    339 
    340 	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
    341 		aprint_error("adv_init: failed to find signature\n");
    342 		return (1);
    343 	}
    344 
    345 	/*
    346 	 * Read the board configuration
    347 	 */
    348 	AscInitASC_SOFTC(sc);
    349 	warn = AscInitFromEEP(sc);
    350 	if (warn) {
    351 		aprint_error_dev(sc->sc_dev, "-get: ");
    352 		switch (warn) {
    353 		case -1:
    354 			aprint_normal("Chip is not halted\n");
    355 			break;
    356 
    357 		case -2:
    358 			aprint_normal("Couldn't get MicroCode Start"
    359 			       " address\n");
    360 			break;
    361 
    362 		case ASC_WARN_IO_PORT_ROTATE:
    363 			aprint_normal("I/O port address modified\n");
    364 			break;
    365 
    366 		case ASC_WARN_AUTO_CONFIG:
    367 			aprint_normal("I/O port increment switch enabled\n");
    368 			break;
    369 
    370 		case ASC_WARN_EEPROM_CHKSUM:
    371 			aprint_normal("EEPROM checksum error\n");
    372 			break;
    373 
    374 		case ASC_WARN_IRQ_MODIFIED:
    375 			aprint_normal("IRQ modified\n");
    376 			break;
    377 
    378 		case ASC_WARN_CMD_QNG_CONFLICT:
    379 			aprint_normal("tag queuing enabled w/o disconnects\n");
    380 			break;
    381 
    382 		default:
    383 			aprint_normal("unknown warning %d\n", warn);
    384 		}
    385 	}
    386 	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
    387 		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
    388 
    389 	/*
    390 	 * Modify the board configuration
    391 	 */
    392 	warn = AscInitFromASC_SOFTC(sc);
    393 	if (warn) {
    394 		aprint_error_dev(sc->sc_dev, "-set: ");
    395 		switch (warn) {
    396 		case ASC_WARN_CMD_QNG_CONFLICT:
    397 			aprint_normal("tag queuing enabled w/o disconnects\n");
    398 			break;
    399 
    400 		case ASC_WARN_AUTO_CONFIG:
    401 			aprint_normal("I/O port increment switch enabled\n");
    402 			break;
    403 
    404 		default:
    405 			aprint_normal("unknown warning %d\n", warn);
    406 		}
    407 	}
    408 	sc->isr_callback = (ASC_CALLBACK) adv_narrow_isr_callback;
    409 
    410 	return (0);
    411 }
    412 
    413 
    414 void
    415 adv_attach(ASC_SOFTC *sc)
    416 {
    417 	struct scsipi_adapter *adapt = &sc->sc_adapter;
    418 	struct scsipi_channel *chan = &sc->sc_channel;
    419 	int             i, error;
    420 
    421 	/*
    422 	 * Initialize board RISC chip and enable interrupts.
    423 	 */
    424 	switch (AscInitDriver(sc)) {
    425 	case 0:
    426 		/* AllOK */
    427 		break;
    428 
    429 	case 1:
    430 		panic("%s: bad signature", device_xname(sc->sc_dev));
    431 		break;
    432 
    433 	case 2:
    434 		panic("%s: unable to load MicroCode",
    435 		      device_xname(sc->sc_dev));
    436 		break;
    437 
    438 	case 3:
    439 		panic("%s: unable to initialize MicroCode",
    440 		      device_xname(sc->sc_dev));
    441 		break;
    442 
    443 	default:
    444 		panic("%s: unable to initialize board RISC chip",
    445 		      device_xname(sc->sc_dev));
    446 	}
    447 
    448 	/*
    449 	 * Fill in the scsipi_adapter.
    450 	 */
    451 	memset(adapt, 0, sizeof(*adapt));
    452 	adapt->adapt_dev = sc->sc_dev;
    453 	adapt->adapt_nchannels = 1;
    454 	/* adapt_openings initialized below */
    455 	/* adapt_max_periph initialized below */
    456 	adapt->adapt_request = adv_scsipi_request;
    457 	adapt->adapt_minphys = advminphys;
    458 
    459 	/*
    460 	 * Fill in the scsipi_channel.
    461 	 */
    462 	memset(chan, 0, sizeof(*chan));
    463 	chan->chan_adapter = adapt;
    464 	chan->chan_bustype = &scsi_bustype;
    465 	chan->chan_channel = 0;
    466 	chan->chan_ntargets = 8;
    467 	chan->chan_nluns = 8;
    468 	chan->chan_id = sc->chip_scsi_id;
    469 
    470 	TAILQ_INIT(&sc->sc_free_ccb);
    471 	TAILQ_INIT(&sc->sc_waiting_ccb);
    472 
    473 	/*
    474 	 * Allocate the Control Blocks and the overrun buffer.
    475 	 */
    476 	error = adv_alloc_control_data(sc);
    477 	if (error)
    478 		return; /* (error) */
    479 
    480 	/*
    481 	 * Create and initialize the Control Blocks.
    482 	 */
    483 	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
    484 	if (i == 0) {
    485 		aprint_error_dev(sc->sc_dev,
    486 		    "unable to create control blocks\n");
    487 		return; /* (ENOMEM) */
    488 	} else if (i != ADV_MAX_CCB) {
    489 		aprint_error_dev(sc->sc_dev,
    490 		    "WARNING: only %d of %d control blocks created\n",
    491 		    i, ADV_MAX_CCB);
    492 	}
    493 
    494 	adapt->adapt_openings = i;
    495 	adapt->adapt_max_periph = adapt->adapt_openings;
    496 
    497 	sc->sc_child = config_found(sc->sc_dev, chan, scsiprint);
    498 }
    499 
    500 int
    501 adv_detach(ASC_SOFTC *sc, int flags)
    502 {
    503 	int rv = 0;
    504 
    505 	if (sc->sc_child != NULL)
    506 		rv = config_detach(sc->sc_child, flags);
    507 
    508 	adv_free_control_data(sc);
    509 
    510 	return (rv);
    511 }
    512 
    513 static void
    514 advminphys(struct buf *bp)
    515 {
    516 
    517 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
    518 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
    519 	minphys(bp);
    520 }
    521 
    522 
    523 /*
    524  * start a scsi operation given the command and the data address.  Also needs
    525  * the unit, target and lu.
    526  */
    527 
    528 static void
    529 adv_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    530     void *arg)
    531 {
    532  	struct scsipi_xfer *xs;
    533  	struct scsipi_periph *periph;
    534  	ASC_SOFTC      *sc = device_private(chan->chan_adapter->adapt_dev);
    535  	bus_dma_tag_t   dmat = sc->sc_dmat;
    536  	ADV_CCB        *ccb;
    537  	int             s, flags, error, nsegs;
    538 
    539  	switch (req) {
    540  	case ADAPTER_REQ_RUN_XFER:
    541  		xs = arg;
    542  		periph = xs->xs_periph;
    543  		flags = xs->xs_control;
    544 
    545  		/*
    546  		 * Get a CCB to use.
    547  		 */
    548  		ccb = adv_get_ccb(sc);
    549 #ifdef DIAGNOSTIC
    550  		/*
    551  		 * This should never happen as we track the resources
    552  		 * in the mid-layer.
    553  		 */
    554  		if (ccb == NULL) {
    555  			scsipi_printaddr(periph);
    556  			printf("unable to allocate ccb\n");
    557  			panic("adv_scsipi_request");
    558  		}
    559 #endif
    560 
    561  		ccb->xs = xs;
    562  		ccb->timeout = xs->timeout;
    563 
    564  		/*
    565  		 * Build up the request
    566  		 */
    567  		memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));
    568 
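         		/*
         		 * Addresses handed to the firmware are bus addresses
         		 * within the control-block DMA map: the segment's
         		 * ds_addr plus the CCB's offset (ADV_CCB_OFF), both for
         		 * the CCB itself and for its embedded sense buffer.
         		 */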
    569  		ccb->scsiq.q2.ccb_ptr =
    570  		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
    571  		    ADV_CCB_OFF(ccb);
    572 
    573  		ccb->scsiq.cdbptr = &xs->cmd->opcode;
    574  		ccb->scsiq.q2.cdb_len = xs->cmdlen;
    575  		ccb->scsiq.q1.target_id =
    576  		    ASC_TID_TO_TARGET_ID(periph->periph_target);
    577  		ccb->scsiq.q1.target_lun = periph->periph_lun;
    578  		ccb->scsiq.q2.target_ix =
    579  		    ASC_TIDLUN_TO_IX(periph->periph_target,
    580  		    periph->periph_lun);
    581  		ccb->scsiq.q1.sense_addr =
    582  		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
    583  		    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
    584  		ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);
    585 
    586  		/*
    587  		 * If there are any outstanding requests for the current
    588  		 * target, send every 255th request as an ORDERED tag (255 is
    589  		 * the max number of pending tagged commands per device), to
    590  		 * keep the benefit of request sorting while preventing
    591  		 * starvation.  Requests that are not asynchronous buffer I/O
    592  		 * (no struct buf, or B_ASYNC clear) also go out ORDERED.
    593  		 */
    594  		sc->reqcnt[periph->periph_target]++;
    595  		if (((sc->reqcnt[periph->periph_target] > 0) &&
    596  		    (sc->reqcnt[periph->periph_target] % 255) == 0) ||
    597 		    xs->bp == NULL || (xs->bp->b_flags & B_ASYNC) == 0) {
    598  			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
    599  		} else {
    600  			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
    601  		}
    602 
    603  		if (xs->datalen) {
    604  			/*
    605  			 * Map the DMA transfer.
    606  			 */
    607 #ifdef TFS
    608  			if (flags & SCSI_DATA_UIO) {
    609  				error = bus_dmamap_load_uio(dmat,
    610  				    ccb->dmamap_xfer, (struct uio *) xs->data,
    611 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
    612 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    613 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
    614 				      BUS_DMA_WRITE));
    615  			} else
    616 #endif /* TFS */
    617  			{
    618  				error = bus_dmamap_load(dmat, ccb->dmamap_xfer,
    619  				    xs->data, xs->datalen, NULL,
    620 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
    621 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    622 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
    623 				      BUS_DMA_WRITE));
    624  			}
    625 
    626  			switch (error) {
    627  			case 0:
    628  				break;
    629 
    630 
    631  			case ENOMEM:
    632  			case EAGAIN:
    633  				xs->error = XS_RESOURCE_SHORTAGE;
    634  				goto out_bad;
    635 
    636  			default:
    637  				xs->error = XS_DRIVER_STUFFUP;
    638 				if (error == EFBIG) {
    639 					aprint_error_dev(sc->sc_dev,
    640 					    "adv_scsi_cmd, more than %d"
    641 					    " DMA segments\n",
    642 					    ASC_MAX_SG_LIST);
    643 				} else {
    644 					aprint_error_dev(sc->sc_dev,
    645 					    "adv_scsi_cmd, error %d"
    646 					    " loading DMA map\n", error);
    647 				}
    648 
    649 out_bad:
    650  				adv_free_ccb(sc, ccb);
    651  				scsipi_done(xs);
    652  				return;
    653  			}
    654  			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
    655  			    ccb->dmamap_xfer->dm_mapsize,
    656  			    (flags & XS_CTL_DATA_IN) ?
    657  			     BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    658 
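         			/*
         			 * Copy the DMA segments into the firmware's
         			 * scatter/gather list and mark the request as S/G
         			 * (ASC_QC_SG_HEAD); data_addr and data_cnt are not
         			 * used in that case.
         			 */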
    659  			memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));
    660 
    661  			for (nsegs = 0;
    662  			     nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
    663  				ccb->sghead.sg_list[nsegs].addr =
    664  				    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
    665  				ccb->sghead.sg_list[nsegs].bytes =
    666  				    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
    667  			}
    668 
    669  			ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
    670  			    ccb->dmamap_xfer->dm_nsegs;
    671 
    672  			ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
    673  			ccb->scsiq.sg_head = &ccb->sghead;
    674  			ccb->scsiq.q1.data_addr = 0;
    675  			ccb->scsiq.q1.data_cnt = 0;
    676  		} else {
    677  			/*
    678  			 * No data xfer, use non S/G values.
    679  			 */
    680  			ccb->scsiq.q1.data_addr = 0;
    681  			ccb->scsiq.q1.data_cnt = 0;
    682  		}
    683 
    684 #ifdef ASC_DEBUG
    685  		printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX\n",
    686  		    periph->periph_target,
    687  		    periph->periph_lun, xs->cmd->opcode,
    688  		    (unsigned long)ccb);
    689 #endif
    690  		s = splbio();
    691  		adv_queue_ccb(sc, ccb);
    692  		splx(s);
    693 
    694  		if ((flags & XS_CTL_POLL) == 0)
    695  			return;
    696 
    697  		/* Not allowed to use interrupts, poll for completion. */
    698  		if (adv_poll(sc, xs, ccb->timeout)) {
    699  			adv_timeout(ccb);
    700  			if (adv_poll(sc, xs, ccb->timeout))
    701  				adv_timeout(ccb);
    702  		}
    703  		return;
    704 
    705  	case ADAPTER_REQ_GROW_RESOURCES:
    706  		/* XXX Not supported. */
    707  		return;
    708 
    709  	case ADAPTER_REQ_SET_XFER_MODE:
    710  	    {
    711  		/*
    712  		 * We can't really set the mode, but we know how to
    713  		 * query what the firmware negotiated.
    714  		 */
    715  		struct scsipi_xfer_mode *xm = arg;
    716  		u_int8_t sdtr_data;
    717  		ASC_SCSI_BIT_ID_TYPE tid_bit;
    718 
    719  		tid_bit = ASC_TIX_TO_TARGET_ID(xm->xm_target);
    720 
    721  		xm->xm_mode = 0;
    722  		xm->xm_period = 0;
    723  		xm->xm_offset = 0;
    724 
    725  		if (sc->init_sdtr & tid_bit) {
    726  			xm->xm_mode |= PERIPH_CAP_SYNC;
    727  			sdtr_data = sc->sdtr_data[xm->xm_target];
    728  			xm->xm_period =
    729  			    sc->sdtr_period_tbl[(sdtr_data >> 4) &
    730  			    (sc->max_sdtr_index - 1)];
    731  			xm->xm_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
    732  		}
    733 
    734  		if (sc->use_tagged_qng & tid_bit)
    735  			xm->xm_mode |= PERIPH_CAP_TQING;
    736 
    737  		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
    738  		return;
    739  	    }
    740  	}
    741 }
    742 
    743 int
    744 adv_intr(void *arg)
    745 {
    746 	ASC_SOFTC      *sc = arg;
    747 
    748 #ifdef ASC_DEBUG
    749 	int int_pend = FALSE;
    750 
    751 	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
    752 		int_pend = TRUE;
    753 		printf("ISR - ");
    754 	}
    755 #endif
    756 	AscISR(sc);
    757 #ifdef ASC_DEBUG
    758 	if (int_pend)
    759 		printf("\n");
    760 #endif
    761 
    762 	return (1);
    763 }
    764 
    765 
    766 /*
    767  * Poll a particular unit, looking for a particular xs
    768  */
    769 static int
    770 adv_poll(ASC_SOFTC *sc, struct scsipi_xfer *xs, int count)
    771 {
    772 
    773 	/* timeouts are in msec, so we loop in 1000 usec cycles */
    774 	while (count) {
    775 		adv_intr(sc);
    776 		if (xs->xs_status & XS_STS_DONE)
    777 			return (0);
    778 		delay(1000);	/* only happens in boot so ok */
    779 		count--;
    780 	}
    781 	return (1);
    782 }
    783 
    784 
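        /*
         * Per-transfer timeout handler.  The first expiry aborts the CCB
         * and requeues it with a short ADV_ABORT_TIMEOUT; if that expires
         * as well (CCB_ABORT already set), the SCSI bus is reset instead.
         */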
    785 static void
    786 adv_timeout(void *arg)
    787 {
    788 	ADV_CCB        *ccb = arg;
    789 	struct scsipi_xfer *xs = ccb->xs;
    790 	struct scsipi_periph *periph = xs->xs_periph;
    791 	ASC_SOFTC      *sc =
    792 	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
    793 	int             s;
    794 
    795 	scsipi_printaddr(periph);
    796 	printf("timed out");
    797 
    798 	s = splbio();
    799 
    800 	/*
    801 	 * If this CCB has been here before, a previous abort has failed;
    802 	 * don't try to abort again, reset the bus instead.
    803 	 */
    804 	if (ccb->flags & CCB_ABORT) {
    805 		/* abort timed out */
    806 		printf(" AGAIN. Resetting Bus\n");
    807 		/* Let's try resetting the bus! */
    808 		if (AscResetBus(sc) == ASC_ERROR) {
    809 			ccb->timeout = sc->scsi_reset_wait;
    810 			adv_queue_ccb(sc, ccb);
    811 		}
    812 	} else {
    813 		/* abort the operation that has timed out */
    814 		printf("\n");
    815 		AscAbortCCB(sc, ccb);
    816 		ccb->xs->error = XS_TIMEOUT;
    817 		ccb->timeout = ADV_ABORT_TIMEOUT;
    818 		ccb->flags |= CCB_ABORT;
    819 		adv_queue_ccb(sc, ccb);
    820 	}
    821 
    822 	splx(s);
    823 }
    824 
    825 
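        /*
         * Watchdog callout armed by adv_start_ccbs() when AscExeScsiQueue()
         * returned ASC_BUSY: clear the flag and try the waiting queue again.
         */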
    826 static void
    827 adv_watchdog(void *arg)
    828 {
    829 	ADV_CCB        *ccb = arg;
    830 	struct scsipi_xfer *xs = ccb->xs;
    831 	struct scsipi_periph *periph = xs->xs_periph;
    832 	ASC_SOFTC      *sc =
    833 	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
    834 	int             s;
    835 
    836 	s = splbio();
    837 
    838 	ccb->flags &= ~CCB_WATCHDOG;
    839 	adv_start_ccbs(sc);
    840 
    841 	splx(s);
    842 }
    843 
    844 
    845 /******************************************************************************/
    846 /*                      NARROW boards Interrupt callbacks                     */
    847 /******************************************************************************/
    848 
    849 
    850 /*
    851  * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
    852  *
    853  * Interrupt callback function for the Narrow SCSI Asc Library.
    854  */
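        /*
         * The firmware reports the bus address of the completed request in
         * qdonep->d2.ccb_ptr; adv_ccb_phys_kv() maps it back to the CCB,
         * the data map is synced and unloaded, the ending status is
         * translated into an XS_* error code and the transfer is finished
         * with scsipi_done().
         */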
    855 static void
    856 adv_narrow_isr_callback(ASC_SOFTC *sc, ASC_QDONE_INFO *qdonep)
    857 {
    858 	bus_dma_tag_t   dmat = sc->sc_dmat;
    859 	ADV_CCB        *ccb;
    860 	struct scsipi_xfer *xs;
    861 	struct scsi_sense_data *s1, *s2;
    862 
    863 
    864 	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
    865 	xs = ccb->xs;
    866 
    867 #ifdef ASC_DEBUG
    868 	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
    869 			(unsigned long)ccb,
    870 			xs->xs_periph->periph_target,
    871 			xs->xs_periph->periph_lun, xs->cmd->opcode);
    872 #endif
    873 	callout_stop(&ccb->xs->xs_callout);
    874 
    875 	/*
    876 	 * If we were a data transfer, unload the map that described
    877 	 * the data buffer.
    878 	 */
    879 	if (xs->datalen) {
    880 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
    881 				ccb->dmamap_xfer->dm_mapsize,
    882 			 (xs->xs_control & XS_CTL_DATA_IN) ?
    883 			 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    884 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
    885 	}
    886 	if ((ccb->flags & CCB_ALLOC) == 0) {
    887 		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
    888 		Debugger();
    889 		return;
    890 	}
    891 	/*
    892 	 * 'qdonep' contains the command's ending status.
    893 	 */
    894 #ifdef ASC_DEBUG
    895 	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
    896 #endif
    897 	switch (qdonep->d3.done_stat) {
    898 	case ASC_QD_NO_ERROR:
    899 		switch (qdonep->d3.host_stat) {
    900 		case ASC_QHSTA_NO_ERROR:
    901 			xs->error = XS_NOERROR;
    902 			/*
    903 			 * XXX
    904 			 * According to the original Linux driver, xs->resid
    905 			 * should be qdonep->remain_bytes. However, its value
    906 			 * is bogus, which seems like a H/W bug. The best thing
    907 			 * we can do would be to ignore it, assuming that all
    908 			 * data has been successfully transferred...
    909 			 */
    910 			xs->resid = 0;
    911 			break;
    912 
    913 		default:
    914 			/* QHSTA error occurred */
    915 			xs->error = XS_DRIVER_STUFFUP;
    916 			break;
    917 		}
    918 
    919 		/*
    920 		 * If an INQUIRY command completed successfully, then call
    921 		 * the AscInquiryHandling() function to work around buggy boards.
    922 		 */
    923 		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
    924 		    (xs->xs_periph->periph_lun == 0) &&
    925 		    (xs->datalen - qdonep->remain_bytes) >= 8) {
    926 			AscInquiryHandling(sc,
    927 				      xs->xs_periph->periph_target & 0x7,
    928 					   (ASC_SCSI_INQUIRY *) xs->data);
    929 		}
    930 		break;
    931 
    932 	case ASC_QD_WITH_ERROR:
    933 		switch (qdonep->d3.host_stat) {
    934 		case ASC_QHSTA_NO_ERROR:
    935 			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
    936 				s1 = &ccb->scsi_sense;
    937 				s2 = &xs->sense.scsi_sense;
    938 				*s2 = *s1;
    939 				xs->error = XS_SENSE;
    940 			} else {
    941 				xs->error = XS_DRIVER_STUFFUP;
    942 			}
    943 			break;
    944 
    945 		case ASC_QHSTA_M_SEL_TIMEOUT:
    946 			xs->error = XS_SELTIMEOUT;
    947 			break;
    948 
    949 		default:
    950 			/* QHSTA error occurred */
    951 			xs->error = XS_DRIVER_STUFFUP;
    952 			break;
    953 		}
    954 		break;
    955 
    956 	case ASC_QD_ABORTED_BY_HOST:
    957 	default:
    958 		xs->error = XS_DRIVER_STUFFUP;
    959 		break;
    960 	}
    961 
    962 	adv_free_ccb(sc, ccb);
    963 	scsipi_done(xs);
    964 }
    965