      1 /*	$NetBSD: adv.c,v 1.53 2022/09/25 18:43:32 thorpej Exp $	*/
      2 
      3 /*
      4  * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
      5  *
      6  * Copyright (c) 1998 The NetBSD Foundation, Inc.
      7  * All rights reserved.
      8  *
      9  * Author: Baldassare Dante Profeta <dante (at) mclink.it>
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: adv.c,v 1.53 2022/09/25 18:43:32 thorpej Exp $");
     35 
     36 #include <sys/param.h>
     37 #include <sys/systm.h>
     38 #include <sys/callout.h>
     39 #include <sys/kernel.h>
     40 #include <sys/errno.h>
     41 #include <sys/ioctl.h>
     42 #include <sys/device.h>
     43 #include <sys/buf.h>
     44 #include <sys/proc.h>
     45 
     46 #include <sys/bus.h>
     47 #include <sys/intr.h>
     48 
     49 #include <dev/scsipi/scsi_all.h>
     50 #include <dev/scsipi/scsipi_all.h>
     51 #include <dev/scsipi/scsiconf.h>
     52 
     53 #include <dev/ic/advlib.h>
     54 #include <dev/ic/adv.h>
     55 
     56 #ifndef DDB
     57 #define	Debugger()	panic("should call debugger here (adv.c)")
     58 #endif /* ! DDB */
     59 
     60 
     61 /* #define ASC_DEBUG */
     62 
     63 /******************************************************************************/
     64 
     65 
     66 static int adv_alloc_control_data(ASC_SOFTC *);
     67 static void adv_free_control_data(ASC_SOFTC *);
     68 static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
     69 static void adv_free_ccb(ASC_SOFTC *, ADV_CCB *);
     70 static void adv_reset_ccb(ADV_CCB *);
     71 static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
     72 static ADV_CCB *adv_get_ccb(ASC_SOFTC *);
     73 static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
     74 static void adv_start_ccbs(ASC_SOFTC *);
     75 
     76 
     77 static void adv_scsipi_request(struct scsipi_channel *,
     78 	scsipi_adapter_req_t, void *);
     79 static void advminphys(struct buf *);
     80 static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);
     81 
     82 static int adv_poll(ASC_SOFTC *, struct scsipi_xfer *, int);
     83 static void adv_timeout(void *);
     84 static void adv_watchdog(void *);
     85 
     86 
     87 /******************************************************************************/
     88 
     89 #define ADV_ABORT_TIMEOUT       2000	/* time to wait for abort (mSec) */
     90 #define ADV_WATCH_TIMEOUT       1000	/* time to wait for watchdog (mSec) */
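         /*
          * Both timeouts are in milliseconds; they are converted to clock
          * ticks before being handed to callout_reset(), e.g.
          * (ADV_WATCH_TIMEOUT * hz) / 1000 or mstohz(ccb->timeout) below.
          */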
     91 
     92 /******************************************************************************/
     93 /*                             Control Blocks routines                        */
     94 /******************************************************************************/
     95 
     96 
     97 static int
     98 adv_alloc_control_data(ASC_SOFTC *sc)
     99 {
    100 	int error;
    101 
    102 	/*
     103 	 * Allocate the control blocks.
    104 	 */
    105 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
    106 			   PAGE_SIZE, 0, &sc->sc_control_seg, 1,
    107 			   &sc->sc_control_nsegs, BUS_DMA_NOWAIT)) != 0) {
    108 		aprint_error_dev(sc->sc_dev, "unable to allocate control "
    109 		    "structures, error = %d\n", error);
    110 		return (error);
    111 	}
    112 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_control_seg,
    113 			   sc->sc_control_nsegs, sizeof(struct adv_control),
    114 			   (void **) & sc->sc_control,
    115 			   BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
    116 		aprint_error_dev(sc->sc_dev,
    117 		    "unable to map control structures, error = %d\n", error);
    118 		return (error);
    119 	}
    120 	/*
    121 	 * Create and load the DMA map used for the control blocks.
    122 	 */
    123 	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
    124 			   1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
    125 				       &sc->sc_dmamap_control)) != 0) {
    126 		aprint_error_dev(sc->sc_dev,
    127 		    "unable to create control DMA map, error = %d\n", error);
    128 		return (error);
    129 	}
    130 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
    131 			   sc->sc_control, sizeof(struct adv_control), NULL,
    132 				     BUS_DMA_NOWAIT)) != 0) {
    133 		aprint_error_dev(sc->sc_dev,
    134 		    "unable to load control DMA map, error = %d\n", error);
    135 		return (error);
    136 	}
    137 
    138 	/*
    139 	 * Initialize the overrun_buf address.
    140 	 */
    141 	sc->overrun_buf = sc->sc_dmamap_control->dm_segs[0].ds_addr +
    142 	    offsetof(struct adv_control, overrun_buf);
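         	/*
         	 * Note that this is the bus (DMA) address of the overrun
         	 * buffer within the shared control structure (ds_addr of the
         	 * loaded control map plus the member offset), not a kernel
         	 * virtual address.
         	 */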
    143 
    144 	return (0);
    145 }
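         /*
          * The sequence above is the usual bus_dma(9) idiom for memory shared
          * with the chip: bus_dmamem_alloc()/bus_dmamem_map() obtain and map
          * the memory, bus_dmamap_create()/bus_dmamap_load() obtain its bus
          * address for DMA.
          */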
    146 
    147 static void
    148 adv_free_control_data(ASC_SOFTC *sc)
    149 {
    150 
    151 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_control);
    152 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_control);
    153 	sc->sc_dmamap_control = NULL;
    154 
    155 	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control,
    156 	    sizeof(struct adv_control));
    157 	bus_dmamem_free(sc->sc_dmat, &sc->sc_control_seg,
    158 	    sc->sc_control_nsegs);
    159 }
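         /*
          * Teardown mirrors adv_alloc_control_data(): unload and destroy the
          * control DMA map, then unmap and free the control structure memory.
          */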
    160 
    161 /*
     162  * Create a set of CCBs and add them to the free list.  Called once
     163  * by adv_attach().  Returns the number of CCBs successfully created.
    164  */
    165 static int
    166 adv_create_ccbs(ASC_SOFTC *sc, ADV_CCB *ccbstore, int count)
    167 {
    168 	ADV_CCB        *ccb;
    169 	int             i, error;
    170 
    171 	memset(ccbstore, 0, sizeof(ADV_CCB) * count);
    172 	for (i = 0; i < count; i++) {
    173 		ccb = &ccbstore[i];
    174 		if ((error = adv_init_ccb(sc, ccb)) != 0) {
    175 			aprint_error_dev(sc->sc_dev,
    176 			    "unable to initialize ccb, error = %d\n", error);
    177 			return (i);
    178 		}
    179 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
    180 	}
    181 
    182 	return (i);
    183 }
    184 
    185 
    186 /*
     187  * Return a CCB to the free list.
    188  */
    189 static void
    190 adv_free_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
    191 {
    192 	int             s;
    193 
    194 	s = splbio();
    195 	adv_reset_ccb(ccb);
    196 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
    197 	splx(s);
    198 }
    199 
    200 
    201 static void
    202 adv_reset_ccb(ADV_CCB *ccb)
    203 {
    204 
    205 	ccb->flags = 0;
    206 }
    207 
    208 
    209 static int
    210 adv_init_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
    211 {
    212 	int	hashnum, error;
    213 
    214 	callout_init(&ccb->ccb_watchdog, 0);
    215 
    216 	/*
    217 	 * Create the DMA map for this CCB.
    218 	 */
    219 	error = bus_dmamap_create(sc->sc_dmat,
    220 				  (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
    221 			 ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
    222 		   0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
    223 	if (error) {
    224 		aprint_error_dev(sc->sc_dev,
    225 		    "unable to create DMA map, error = %d\n", error);
    226 		return (error);
    227 	}
    228 
    229 	/*
     230 	 * Put the CCB in the phystokv hash table.
     231 	 * It is never removed.
    232 	 */
    233 	ccb->hashkey = sc->sc_dmamap_control->dm_segs[0].ds_addr +
    234 	    ADV_CCB_OFF(ccb);
    235 	hashnum = CCB_HASH(ccb->hashkey);
    236 	ccb->nexthash = sc->sc_ccbhash[hashnum];
    237 	sc->sc_ccbhash[hashnum] = ccb;
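         	/*
         	 * The hash key is the CCB's bus address within the control
         	 * block map; adv_ccb_phys_kv() walks the same CCB_HASH()
         	 * bucket to translate the ccb_ptr reported at interrupt time
         	 * back into a kernel pointer.
         	 */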
    238 
    239 	adv_reset_ccb(ccb);
    240 	return (0);
    241 }
    242 
    243 
    244 /*
     245  * Get a free CCB.
     246  *
     247  * Returns NULL if none are available.
    248  */
    249 static ADV_CCB *
    250 adv_get_ccb(ASC_SOFTC *sc)
    251 {
     252 	ADV_CCB        *ccb = NULL;
    253 	int             s;
    254 
    255 	s = splbio();
    256 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
    257 	if (ccb != NULL) {
    258 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
    259 		ccb->flags |= CCB_ALLOC;
    260 	}
    261 	splx(s);
    262 	return (ccb);
    263 }
    264 
    265 
    266 /*
    267  * Given a physical address, find the ccb that it corresponds to.
    268  */
    269 ADV_CCB *
    270 adv_ccb_phys_kv(ASC_SOFTC *sc, u_long ccb_phys)
    271 {
    272 	int hashnum = CCB_HASH(ccb_phys);
    273 	ADV_CCB *ccb = sc->sc_ccbhash[hashnum];
    274 
    275 	while (ccb) {
    276 		if (ccb->hashkey == ccb_phys)
    277 			break;
    278 		ccb = ccb->nexthash;
    279 	}
    280 	return (ccb);
    281 }
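         /*
          * For example, the narrow ISR callback below recovers the CCB from
          * the bus address the chip hands back:
          *
          *	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
          */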
    282 
    283 
    284 /*
    285  * Queue a CCB to be sent to the controller, and send it if possible.
    286  */
    287 static void
    288 adv_queue_ccb(ASC_SOFTC *sc, ADV_CCB *ccb)
    289 {
    290 
    291 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
    292 
    293 	adv_start_ccbs(sc);
    294 }
    295 
    296 
    297 static void
    298 adv_start_ccbs(ASC_SOFTC *sc)
    299 {
    300 	ADV_CCB        *ccb;
    301 
     302 	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
    303 		if (ccb->flags & CCB_WATCHDOG)
    304 			callout_stop(&ccb->ccb_watchdog);
    305 
    306 		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
    307 			ccb->flags |= CCB_WATCHDOG;
    308 			callout_reset(&ccb->ccb_watchdog,
    309 			    (ADV_WATCH_TIMEOUT * hz) / 1000,
    310 			    adv_watchdog, ccb);
    311 			break;
    312 		}
    313 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
    314 
    315 		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
    316 			callout_reset(&ccb->xs->xs_callout,
    317 			    mstohz(ccb->timeout), adv_timeout, ccb);
    318 	}
    319 }
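         /*
          * If AscExeScsiQueue() returns ASC_BUSY the CCB stays at the head of
          * sc_waiting_ccb and a watchdog callout retries after
          * ADV_WATCH_TIMEOUT ms; otherwise the per-transfer timeout callout
          * is armed, except for polled (XS_CTL_POLL) transfers.
          */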
    320 
    321 
    322 /******************************************************************************/
    323 /*                         SCSI layer interfacing routines                    */
    324 /******************************************************************************/
    325 
    326 
    327 int
    328 adv_init(ASC_SOFTC *sc)
    329 {
    330 	int             warn;
    331 
    332 	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh)) {
    333 		aprint_error("adv_init: failed to find signature\n");
    334 		return (1);
    335 	}
    336 
    337 	/*
    338 	 * Read the board configuration
    339 	 */
    340 	AscInitASC_SOFTC(sc);
    341 	warn = AscInitFromEEP(sc);
    342 	if (warn) {
    343 		aprint_error_dev(sc->sc_dev, "-get: ");
    344 		switch (warn) {
    345 		case -1:
    346 			aprint_normal("Chip is not halted\n");
    347 			break;
    348 
    349 		case -2:
    350 			aprint_normal("Couldn't get MicroCode Start"
    351 			       " address\n");
    352 			break;
    353 
    354 		case ASC_WARN_IO_PORT_ROTATE:
    355 			aprint_normal("I/O port address modified\n");
    356 			break;
    357 
    358 		case ASC_WARN_AUTO_CONFIG:
    359 			aprint_normal("I/O port increment switch enabled\n");
    360 			break;
    361 
    362 		case ASC_WARN_EEPROM_CHKSUM:
    363 			aprint_normal("EEPROM checksum error\n");
    364 			break;
    365 
    366 		case ASC_WARN_IRQ_MODIFIED:
    367 			aprint_normal("IRQ modified\n");
    368 			break;
    369 
    370 		case ASC_WARN_CMD_QNG_CONFLICT:
    371 			aprint_normal("tag queuing enabled w/o disconnects\n");
    372 			break;
    373 
    374 		default:
    375 			aprint_normal("unknown warning %d\n", warn);
    376 		}
    377 	}
    378 	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
    379 		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;
    380 
    381 	/*
    382 	 * Modify the board configuration
    383 	 */
    384 	warn = AscInitFromASC_SOFTC(sc);
    385 	if (warn) {
    386 		aprint_error_dev(sc->sc_dev, "-set: ");
    387 		switch (warn) {
    388 		case ASC_WARN_CMD_QNG_CONFLICT:
    389 			aprint_normal("tag queuing enabled w/o disconnects\n");
    390 			break;
    391 
    392 		case ASC_WARN_AUTO_CONFIG:
    393 			aprint_normal("I/O port increment switch enabled\n");
    394 			break;
    395 
    396 		default:
    397 			aprint_normal("unknown warning %d\n", warn);
    398 		}
    399 	}
    400 	sc->isr_callback = adv_narrow_isr_callback;
    401 
    402 	return (0);
    403 }
    404 
    405 
    406 void
    407 adv_attach(ASC_SOFTC *sc)
    408 {
    409 	struct scsipi_adapter *adapt = &sc->sc_adapter;
    410 	struct scsipi_channel *chan = &sc->sc_channel;
    411 	int             i, error;
    412 
    413 	/*
    414 	 * Initialize board RISC chip and enable interrupts.
    415 	 */
    416 	switch (AscInitDriver(sc)) {
    417 	case 0:
     418 		/* All OK */
    419 		break;
    420 
    421 	case 1:
    422 		panic("%s: bad signature", device_xname(sc->sc_dev));
    423 		break;
    424 
    425 	case 2:
    426 		panic("%s: unable to load MicroCode",
    427 		      device_xname(sc->sc_dev));
    428 		break;
    429 
    430 	case 3:
    431 		panic("%s: unable to initialize MicroCode",
    432 		      device_xname(sc->sc_dev));
    433 		break;
    434 
    435 	default:
    436 		panic("%s: unable to initialize board RISC chip",
    437 		      device_xname(sc->sc_dev));
    438 	}
    439 
    440 	/*
    441 	 * Fill in the scsipi_adapter.
    442 	 */
    443 	memset(adapt, 0, sizeof(*adapt));
    444 	adapt->adapt_dev = sc->sc_dev;
    445 	adapt->adapt_nchannels = 1;
    446 	/* adapt_openings initialized below */
    447 	/* adapt_max_periph initialized below */
    448 	adapt->adapt_request = adv_scsipi_request;
    449 	adapt->adapt_minphys = advminphys;
    450 
    451 	/*
    452 	 * Fill in the scsipi_channel.
    453 	 */
    454 	memset(chan, 0, sizeof(*chan));
    455 	chan->chan_adapter = adapt;
    456 	chan->chan_bustype = &scsi_bustype;
    457 	chan->chan_channel = 0;
    458 	chan->chan_ntargets = 8;
    459 	chan->chan_nluns = 8;
    460 	chan->chan_id = sc->chip_scsi_id;
    461 
    462 	TAILQ_INIT(&sc->sc_free_ccb);
    463 	TAILQ_INIT(&sc->sc_waiting_ccb);
    464 
    465 	/*
    466 	 * Allocate the Control Blocks and the overrun buffer.
    467 	 */
    468 	error = adv_alloc_control_data(sc);
    469 	if (error)
    470 		return; /* (error) */
    471 
    472 	/*
    473 	 * Create and initialize the Control Blocks.
    474 	 */
    475 	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
    476 	if (i == 0) {
    477 		aprint_error_dev(sc->sc_dev,
    478 		    "unable to create control blocks\n");
     479 		return; /* (ENOMEM) */
    480 	} else if (i != ADV_MAX_CCB) {
    481 		aprint_error_dev(sc->sc_dev,
    482 		    "WARNING: only %d of %d control blocks created\n",
    483 		    i, ADV_MAX_CCB);
    484 	}
    485 
    486 	adapt->adapt_openings = i;
    487 	adapt->adapt_max_periph = adapt->adapt_openings;
    488 
    489 	sc->sc_child = config_found(sc->sc_dev, chan, scsiprint, CFARGS_NONE);
    490 }
    491 
    492 int
    493 adv_detach(ASC_SOFTC *sc, int flags)
    494 {
    495 	int rv = 0;
    496 
    497 	if (sc->sc_child != NULL)
    498 		rv = config_detach(sc->sc_child, flags);
    499 
    500 	adv_free_control_data(sc);
    501 
    502 	return (rv);
    503 }
    504 
    505 static void
    506 advminphys(struct buf *bp)
    507 {
    508 
    509 	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
    510 		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
    511 	minphys(bp);
    512 }
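         /*
          * advminphys() clamps transfers to what a CCB's scatter/gather list
          * can map: (ASC_MAX_SG_LIST - 1) pages, the same limit used when the
          * per-CCB DMA maps are created in adv_init_ccb().
          */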
    513 
    514 
    515 /*
     516  * Start a SCSI operation given the command and the data address.  Also needs
     517  * the unit, target and LUN.
    518  */
    519 
    520 static void
    521 adv_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    522     void *arg)
    523 {
    524  	struct scsipi_xfer *xs;
    525  	struct scsipi_periph *periph;
    526  	ASC_SOFTC      *sc = device_private(chan->chan_adapter->adapt_dev);
    527  	bus_dma_tag_t   dmat = sc->sc_dmat;
    528  	ADV_CCB        *ccb;
    529  	int             s, flags, error, nsegs;
    530 
    531  	switch (req) {
    532  	case ADAPTER_REQ_RUN_XFER:
    533  		xs = arg;
    534  		periph = xs->xs_periph;
    535  		flags = xs->xs_control;
    536 
    537  		/*
    538  		 * Get a CCB to use.
    539  		 */
    540  		ccb = adv_get_ccb(sc);
    541 #ifdef DIAGNOSTIC
    542  		/*
    543  		 * This should never happen as we track the resources
    544  		 * in the mid-layer.
    545  		 */
    546  		if (ccb == NULL) {
    547  			scsipi_printaddr(periph);
    548  			printf("unable to allocate ccb\n");
    549  			panic("adv_scsipi_request");
    550  		}
    551 #endif
    552 
    553  		ccb->xs = xs;
    554  		ccb->timeout = xs->timeout;
    555 
    556  		/*
    557  		 * Build up the request
    558  		 */
    559  		memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));
    560 
    561  		ccb->scsiq.q2.ccb_ptr =
    562  		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
    563  		    ADV_CCB_OFF(ccb);
    564 
    565  		ccb->scsiq.cdbptr = &xs->cmd->opcode;
    566  		ccb->scsiq.q2.cdb_len = xs->cmdlen;
    567  		ccb->scsiq.q1.target_id =
    568  		    ASC_TID_TO_TARGET_ID(periph->periph_target);
    569  		ccb->scsiq.q1.target_lun = periph->periph_lun;
    570  		ccb->scsiq.q2.target_ix =
    571  		    ASC_TIDLUN_TO_IX(periph->periph_target,
    572  		    periph->periph_lun);
    573  		ccb->scsiq.q1.sense_addr =
    574  		    sc->sc_dmamap_control->dm_segs[0].ds_addr +
    575  		    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
    576  		ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);
    577 
    578  		/*
    579  		 * If there are any outstanding requests for the current
    580  		 * target, then every 255th request send an ORDERED request.
    581  		 * This heuristic tries to retain the benefit of request
    582  		 * sorting while preventing request starvation. 255 is the
    583  		 * max number of tags or pending commands a device may have
    584  		 * outstanding.
    585  		 */
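         		/*
         		 * Note that the test below also forces an ORDERED tag
         		 * whenever there is no buf or the buf is not B_ASYNC, so
         		 * only asynchronous buffer I/O ever gets SIMPLE tags.
         		 */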
    586  		sc->reqcnt[periph->periph_target]++;
    587  		if (((sc->reqcnt[periph->periph_target] > 0) &&
    588  		    (sc->reqcnt[periph->periph_target] % 255) == 0) ||
    589 		    xs->bp == NULL || (xs->bp->b_flags & B_ASYNC) == 0) {
    590  			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
    591  		} else {
    592  			ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
    593  		}
    594 
    595  		if (xs->datalen) {
    596  			/*
    597  			 * Map the DMA transfer.
    598  			 */
    599 #ifdef TFS
    600  			if (flags & SCSI_DATA_UIO) {
    601  				error = bus_dmamap_load_uio(dmat,
    602  				    ccb->dmamap_xfer, (struct uio *) xs->data,
    603 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
    604 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    605 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
    606 				      BUS_DMA_WRITE));
    607  			} else
    608 #endif /* TFS */
    609  			{
    610  				error = bus_dmamap_load(dmat, ccb->dmamap_xfer,
    611  				    xs->data, xs->datalen, NULL,
    612 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
    613 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
    614 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
    615 				      BUS_DMA_WRITE));
    616  			}
    617 
    618  			switch (error) {
    619  			case 0:
    620  				break;
    621 
    622 
    623  			case ENOMEM:
    624  			case EAGAIN:
    625  				xs->error = XS_RESOURCE_SHORTAGE;
    626  				goto out_bad;
    627 
    628  			default:
    629  				xs->error = XS_DRIVER_STUFFUP;
    630 				if (error == EFBIG) {
    631 					aprint_error_dev(sc->sc_dev,
    632 					    "adv_scsi_cmd, more than %d"
    633 					    " DMA segments\n",
    634 					    ASC_MAX_SG_LIST);
    635 				} else {
    636 					aprint_error_dev(sc->sc_dev,
    637 					    "adv_scsi_cmd, error %d"
    638 					    " loading DMA map\n", error);
    639 				}
    640 
    641 out_bad:
    642  				adv_free_ccb(sc, ccb);
    643  				scsipi_done(xs);
    644  				return;
    645  			}
    646  			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
    647  			    ccb->dmamap_xfer->dm_mapsize,
    648  			    (flags & XS_CTL_DATA_IN) ?
    649  			     BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    650 
    651  			memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));
    652 
    653  			for (nsegs = 0;
    654  			     nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {
    655  				ccb->sghead.sg_list[nsegs].addr =
    656  				    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
    657  				ccb->sghead.sg_list[nsegs].bytes =
    658  				    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
    659  			}
    660 
    661  			ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
    662  			    ccb->dmamap_xfer->dm_nsegs;
    663 
    664  			ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
    665  			ccb->scsiq.sg_head = &ccb->sghead;
    666  			ccb->scsiq.q1.data_addr = 0;
    667  			ccb->scsiq.q1.data_cnt = 0;
    668  		} else {
    669  			/*
    670  			 * No data xfer, use non S/G values.
    671  			 */
    672  			ccb->scsiq.q1.data_addr = 0;
    673  			ccb->scsiq.q1.data_cnt = 0;
    674  		}
    675 
    676 #ifdef ASC_DEBUG
    677  		printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX\n",
    678  		    periph->periph_target,
    679  		    periph->periph_lun, xs->cmd->opcode,
    680  		    (unsigned long)ccb);
    681 #endif
    682  		s = splbio();
    683  		adv_queue_ccb(sc, ccb);
    684  		splx(s);
    685 
    686  		if ((flags & XS_CTL_POLL) == 0)
    687  			return;
    688 
    689  		/* Not allowed to use interrupts, poll for completion. */
    690  		if (adv_poll(sc, xs, ccb->timeout)) {
    691  			adv_timeout(ccb);
    692  			if (adv_poll(sc, xs, ccb->timeout))
    693  				adv_timeout(ccb);
    694  		}
    695  		return;
    696 
    697  	case ADAPTER_REQ_GROW_RESOURCES:
    698  		/* XXX Not supported. */
    699  		return;
    700 
    701  	case ADAPTER_REQ_SET_XFER_MODE:
    702  	    {
    703  		/*
    704  		 * We can't really set the mode, but we know how to
    705  		 * query what the firmware negotiated.
    706  		 */
    707  		struct scsipi_xfer_mode *xm = arg;
    708  		u_int8_t sdtr_data;
    709  		ASC_SCSI_BIT_ID_TYPE tid_bit;
    710 
    711  		tid_bit = ASC_TIX_TO_TARGET_ID(xm->xm_target);
    712 
    713  		xm->xm_mode = 0;
    714  		xm->xm_period = 0;
    715  		xm->xm_offset = 0;
    716 
    717  		if (sc->init_sdtr & tid_bit) {
    718  			xm->xm_mode |= PERIPH_CAP_SYNC;
    719  			sdtr_data = sc->sdtr_data[xm->xm_target];
    720  			xm->xm_period =
    721  			    sc->sdtr_period_tbl[(sdtr_data >> 4) &
    722  			    (sc->max_sdtr_index - 1)];
    723  			xm->xm_offset = sdtr_data & ASC_SYN_MAX_OFFSET;
    724  		}
    725 
    726  		if (sc->use_tagged_qng & tid_bit)
    727  			xm->xm_mode |= PERIPH_CAP_TQING;
    728 
    729  		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
    730  		return;
    731  	    }
    732  	}
    733 }
    734 
    735 int
    736 adv_intr(void *arg)
    737 {
    738 	ASC_SOFTC      *sc = arg;
    739 
    740 #ifdef ASC_DEBUG
    741 	int int_pend = FALSE;
    742 
    743 	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
    744 		int_pend = TRUE;
    745 		printf("ISR - ");
    746 	}
    747 #endif
    748 	AscISR(sc);
    749 #ifdef ASC_DEBUG
     750 	if (int_pend)
    751 		printf("\n");
    752 #endif
    753 
    754 	return (1);
    755 }
    756 
    757 
    758 /*
    759  * Poll a particular unit, looking for a particular xs
    760  */
    761 static int
    762 adv_poll(ASC_SOFTC *sc, struct scsipi_xfer *xs, int count)
    763 {
    764 
    765 	/* timeouts are in msec, so we loop in 1000 usec cycles */
    766 	while (count) {
    767 		adv_intr(sc);
    768 		if (xs->xs_status & XS_STS_DONE)
    769 			return (0);
    770 		delay(1000);	/* only happens in boot so ok */
    771 		count--;
    772 	}
    773 	return (1);
    774 }
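         /*
          * adv_poll() busy-waits in 1 ms steps, calling adv_intr() by hand,
          * for at most 'count' ms.  It is only used for XS_CTL_POLL transfers,
          * where interrupts may not be usable yet (e.g. during boot).
          */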
    775 
    776 
    777 static void
    778 adv_timeout(void *arg)
    779 {
    780 	ADV_CCB        *ccb = arg;
    781 	struct scsipi_xfer *xs = ccb->xs;
    782 	struct scsipi_periph *periph = xs->xs_periph;
    783 	ASC_SOFTC      *sc =
    784 	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
    785 	int             s;
    786 
    787 	scsipi_printaddr(periph);
    788 	printf("timed out");
    789 
    790 	s = splbio();
    791 
    792 	/*
    793 	 * If it has been through before, then a previous abort has failed,
    794 	 * don't try abort again, reset the bus instead.
    795 	 */
    796 	if (ccb->flags & CCB_ABORT) {
    797 		/* abort timed out */
    798 		printf(" AGAIN. Resetting Bus\n");
     799 		/* Let's try resetting the bus! */
    800 		if (AscResetBus(sc) == ASC_ERROR) {
    801 			ccb->timeout = sc->scsi_reset_wait;
    802 			adv_queue_ccb(sc, ccb);
    803 		}
    804 	} else {
    805 		/* abort the operation that has timed out */
    806 		printf("\n");
    807 		AscAbortCCB(sc, ccb);
    808 		ccb->xs->error = XS_TIMEOUT;
    809 		ccb->timeout = ADV_ABORT_TIMEOUT;
    810 		ccb->flags |= CCB_ABORT;
    811 		adv_queue_ccb(sc, ccb);
    812 	}
    813 
    814 	splx(s);
    815 }
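         /*
          * Recovery is two-stage: the first timeout aborts the CCB and
          * requeues it with CCB_ABORT set and a short ADV_ABORT_TIMEOUT; if
          * that times out as well, the bus is reset, and the CCB is requeued
          * only if AscResetBus() reports an error.
          */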
    816 
    817 
    818 static void
    819 adv_watchdog(void *arg)
    820 {
    821 	ADV_CCB        *ccb = arg;
    822 	struct scsipi_xfer *xs = ccb->xs;
    823 	struct scsipi_periph *periph = xs->xs_periph;
    824 	ASC_SOFTC      *sc =
    825 	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
    826 	int             s;
    827 
    828 	s = splbio();
    829 
    830 	ccb->flags &= ~CCB_WATCHDOG;
    831 	adv_start_ccbs(sc);
    832 
    833 	splx(s);
    834 }
    835 
    836 
    837 /******************************************************************************/
    838 /*                      NARROW boards Interrupt callbacks                     */
    839 /******************************************************************************/
    840 
    841 
    842 /*
    843  * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
    844  *
    845  * Interrupt callback function for the Narrow SCSI Asc Library.
    846  */
    847 static void
    848 adv_narrow_isr_callback(ASC_SOFTC *sc, ASC_QDONE_INFO *qdonep)
    849 {
    850 	bus_dma_tag_t   dmat = sc->sc_dmat;
    851 	ADV_CCB        *ccb;
    852 	struct scsipi_xfer *xs;
    853 	struct scsi_sense_data *s1, *s2;
    854 
    855 
    856 	ccb = adv_ccb_phys_kv(sc, qdonep->d2.ccb_ptr);
    857 	xs = ccb->xs;
    858 
    859 #ifdef ASC_DEBUG
    860 	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
    861 			(unsigned long)ccb,
    862 			xs->xs_periph->periph_target,
    863 			xs->xs_periph->periph_lun, xs->cmd->opcode);
    864 #endif
    865 	callout_stop(&ccb->xs->xs_callout);
    866 
    867 	/*
    868 	 * If we were a data transfer, unload the map that described
    869 	 * the data buffer.
    870 	 */
    871 	if (xs->datalen) {
    872 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
    873 				ccb->dmamap_xfer->dm_mapsize,
    874 			 (xs->xs_control & XS_CTL_DATA_IN) ?
    875 			 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    876 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
    877 	}
    878 	if ((ccb->flags & CCB_ALLOC) == 0) {
    879 		aprint_error_dev(sc->sc_dev, "exiting ccb not allocated!\n");
    880 		Debugger();
    881 		return;
    882 	}
    883 	/*
    884 	 * 'qdonep' contains the command's ending status.
    885 	 */
    886 #ifdef ASC_DEBUG
    887 	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
    888 #endif
    889 	switch (qdonep->d3.done_stat) {
    890 	case ASC_QD_NO_ERROR:
    891 		switch (qdonep->d3.host_stat) {
    892 		case ASC_QHSTA_NO_ERROR:
    893 			xs->error = XS_NOERROR;
    894 			/*
    895 			 * XXX
    896 			 * According to the original Linux driver, xs->resid
    897 			 * should be qdonep->remain_bytes. However, its value
    898 			 * is bogus, which seems like a H/W bug. The best thing
    899 			 * we can do would be to ignore it, assuming that all
    900 			 * data has been successfully transferred...
    901 			 */
    902 			xs->resid = 0;
    903 			break;
    904 
    905 		default:
    906 			/* QHSTA error occurred */
    907 			xs->error = XS_DRIVER_STUFFUP;
    908 			break;
    909 		}
    910 
    911 		/*
    912 	         * If an INQUIRY command completed successfully, then call
    913 	         * the AscInquiryHandling() function to patch bugged boards.
    914 	         */
    915 		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
    916 		    (xs->xs_periph->periph_lun == 0) &&
    917 		    (xs->datalen - qdonep->remain_bytes) >= 8) {
    918 			AscInquiryHandling(sc,
    919 				      xs->xs_periph->periph_target & 0x7,
    920 					   (ASC_SCSI_INQUIRY *) xs->data);
    921 		}
    922 		break;
    923 
    924 	case ASC_QD_WITH_ERROR:
    925 		switch (qdonep->d3.host_stat) {
    926 		case ASC_QHSTA_NO_ERROR:
    927 			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
    928 				s1 = &ccb->scsi_sense;
    929 				s2 = &xs->sense.scsi_sense;
    930 				*s2 = *s1;
    931 				xs->error = XS_SENSE;
    932 			} else {
    933 				xs->error = XS_DRIVER_STUFFUP;
    934 			}
    935 			break;
    936 
    937 		case ASC_QHSTA_M_SEL_TIMEOUT:
    938 			xs->error = XS_SELTIMEOUT;
    939 			break;
    940 
    941 		default:
    942 			/* QHSTA error occurred */
    943 			xs->error = XS_DRIVER_STUFFUP;
    944 			break;
    945 		}
    946 		break;
    947 
    948 	case ASC_QD_ABORTED_BY_HOST:
    949 	default:
    950 		xs->error = XS_DRIVER_STUFFUP;
    951 		break;
    952 	}
    953 
    954 	adv_free_ccb(sc, ccb);
    955 	scsipi_done(xs);
    956 }
    957