/* $NetBSD: isp_sbus.c,v 1.29 2000/08/01 23:55:14 mjacob Exp $ */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>

static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

#ifndef	ISP_1000_RISC_CODE
#define	ISP_1000_RISC_CODE	NULL
#endif

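/*
 * Bus-specific routine vector handed to the machine-independent isp core:
 * register read/write, mailbox DMA allocation, per-command DMA setup and
 * teardown, three hooks left NULL here, the (optional) ISP1000 firmware
 * image, and the initial BIU configuration bits (burst enable).
 */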
static struct ispmdvec mdvec = {
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	NULL,
	NULL,
	NULL,
	ISP_1000_RISC_CODE,
	BIU_BURST_ENABLE
};

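/*
 * SBus-specific softc.  The common ispsoftc must remain the first member:
 * the core hands a struct ispsoftc * back to the routines below, which
 * cast it to struct isp_sbussoftc * to reach the SBus-specific state.
 */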
struct isp_sbussoftc {
	struct ispsoftc	sbus_isp;
	sdparam		sbus_dev;
	bus_space_tag_t	sbus_bustag;
	bus_dma_tag_t	sbus_dmatag;
	bus_space_handle_t sbus_reg;
	int		sbus_node;
	int		sbus_pri;
	struct ispmdvec	sbus_mdvec;
	bus_dmamap_t	*sbus_dmamap;
	bus_dmamap_t	sbus_request_dmamap;
	bus_dmamap_t	sbus_result_dmamap;
	int16_t		sbus_poff[_NREG_BLKS];
};


static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

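/*
 * Match routine: compare the SBus node name against the names used by
 * the various Qlogic and PTI SBus adapters.
 */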
static int
isp_match(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
	int rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
		strcmp("ptisp", sa->sa_name) == 0 ||
		strcmp("SUNW,isp", sa->sa_name) == 0 ||
		strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}


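/*
 * Attach routine: map the chip registers, read the clock-frequency
 * property, tailor the bus-specific method vector, then reset and
 * initialize the common isp core, establish the interrupt and do the
 * generic attach.
 */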
static void
isp_sbus_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
	int freq;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	sbc->sbus_dmatag = sa->sa_dmatag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
				 sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
				 &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000)/1000000;
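		/* e.g. a 40000000 Hz clock: (40000000 + 500000) / 1000000 = 40 */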
#if	0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * XXX: Now figure out the proper burst sizes, etc., to use.
	 */
	sbc->sbus_mdvec.dv_conf1 |= BIU_SBUS_CONF1_FIFO_8;

	/*
	 * Some early versions of the PTI SBus adapter fail when we try
	 * to download firmware to them (via poking), so don't supply
	 * any firmware for those boards.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	bzero(isp->isp_param, sizeof (sdparam));

	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/*
	 * Set up logging levels: configuration, warning and error messages
	 * by default, with additional debug bits for SCSIDEBUG, DEBUG and
	 * DIAGNOSTIC kernels, unless ISP_LOGDEFAULT overrides the lot.
	 */
#ifdef	ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGCONFIG|ISP_LOGWARN|ISP_LOGERR;
#ifdef	SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef	DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#ifdef	DIAGNOSTIC
	isp->isp_dblev |= ISP_LOGINFO;
#endif
#endif
	isp->isp_confopts = self->dv_cfdata->cf_flags;
	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
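	/*
	 * No interrupt handler is established yet; setting no_mbox_ints
	 * presumably makes the core poll for mailbox command completion
	 * rather than wait for a mailbox interrupt.
	 */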
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}
	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    (int(*)__P((void*)))isp_intr, sbc);
	ENABLE_INTS(isp);
	ISP_UNLOCK(isp);

	/*
	 * do generic attach.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
	}
}

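/*
 * Register accessors.  A register offset encodes a block number in its
 * high bits and an offset within that block in its low byte; sbus_poff[]
 * maps each block onto its location within the SBus register window.
 */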
static u_int16_t
isp_sbus_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

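/*
 * Allocate the command handle array, one DMA map per outstanding command,
 * and DMA-safe memory for the request and result queues, recording the
 * queues' DMA addresses for the chip and kernel mappings for the driver.
 */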
static int
isp_sbus_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_tag_t dmatag = sbc->sbus_dmatag;
	bus_dma_segment_t seg;
	int rs, i;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	bzero(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_request_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_request_dmamap,
	    &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}
	isp->isp_rquest_dma = sbc->sbus_request_dmamap->dm_segs[0].ds_addr;

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_rquest,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_request_dmamap);
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_result_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap,
	    &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_result,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}
	isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */

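/*
 * Load the transfer buffer into this command's DMA map, sync it, and fill
 * in the request's data segment.  Commands with CDBs longer than 12 bytes
 * get their data segment in a separate continuation entry appended to the
 * request queue.
 */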
static int
isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispcontreq_t *crq;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
	    NULL, cansleep? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(sbc->sbus_dmatag, dmap, dmap->dm_segs[0].ds_addr,
	    xs->datalen, in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	if (XS_CDBLEN(xs) > 12) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
		if (*iptrp == optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(sbc->sbus_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		rq->req_dataseg[0].ds_count = 0;
		rq->req_dataseg[0].ds_base =  0;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base =  dmap->dm_segs[0].ds_addr;
		ISP_SBUSIFY_ISPHDR(isp, &crq->req_header)
	} else {
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		rq->req_seg_count = 1;
	}

mbxsync:
        ISP_SWIZZLE_REQUEST(isp, rq);
#if	0
	/*
	 * If we ever map cacheable memory, we need to do something like this.
	 */
        bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_request_dmamap, 0,
            sbc->sbus_request_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
#endif
	return (CMD_QUEUED);
}

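/*
 * Post-transfer cleanup: sync the per-command DMA map for the CPU's view
 * of the data and unload it now that the transfer is complete, so the map
 * can be reused by the next command assigned this handle.
 */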
static void
isp_sbus_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: dma map not already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	bus_dmamap_sync(sbc->sbus_dmatag, dmap, dmap->dm_segs[0].ds_addr,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sbc->sbus_dmatag, dmap);
}