/* $NetBSD: isp_sbus.c,v 1.27 2000/07/05 22:10:56 mjacob Exp $ */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/param.h>
#include <machine/vmparam.h>

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>

static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

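/*
 * Note: if ISP_1000_RISC_CODE stays NULL (no ISP1000 RISC firmware image
 * compiled in), the common isp code is expected to skip the firmware
 * download and rely on the firmware already resident on the adapter;
 * ISP_CODE_ORG is the RISC address at which a downloaded image would live.
 */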
#ifndef	ISP_1000_RISC_CODE
#define	ISP_1000_RISC_CODE	NULL
#endif
#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG	0x1000
#endif

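/*
 * Bus-specific vector handed to the machine-independent isp core:
 * register read/write routines, mailbox/queue DMA allocation, per-command
 * DMA setup/teardown, three optional hooks left NULL here, the firmware
 * image and its load origin (the remaining firmware fields are zero), and
 * the BIU burst-enable configuration bits.
 */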
static struct ispmdvec mdvec = {
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	NULL,
	NULL,
	NULL,
	ISP_1000_RISC_CODE, 0, ISP_CODE_ORG, 0,
	BIU_BURST_ENABLE
};

struct isp_sbussoftc {
	struct ispsoftc	sbus_isp;
	sdparam		sbus_dev;
	bus_space_tag_t	sbus_bustag;
	bus_dma_tag_t	sbus_dmatag;
	bus_space_handle_t sbus_reg;
	int		sbus_node;
	int		sbus_pri;
	struct ispmdvec	sbus_mdvec;
	bus_dmamap_t	*sbus_dmamap;
	bus_dmamap_t	sbus_request_dmamap;
	bus_dmamap_t	sbus_result_dmamap;
	int16_t		sbus_poff[_NREG_BLKS];
};


static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

static int
isp_match(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
	int rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
		strcmp("PTI,ptisp", sa->sa_name) == 0 ||
		strcmp("ptisp", sa->sa_name) == 0 ||
		strcmp("SUNW,isp", sa->sa_name) == 0 ||
		strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}


static void
isp_sbus_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
	int i, freq;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;
	ISP_LOCKVAL_DECL;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	sbc->sbus_dmatag = sa->sa_dmatag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
				 sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
				 &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding to the nearest MHz.
		 */
		freq = (freq + 500000)/1000000;
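		/*
		 * E.g. a "clock-frequency" of 25000000 (25 MHz) yields
		 * (25000000 + 500000) / 1000000 == 25.
		 */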
#if	0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * XXX: Now figure out the proper burst sizes, etc., to use here.
	 */
	sbc->sbus_mdvec.dv_conf1 |= BIU_SBUS_CONF1_FIFO_8;

	/*
	 * Some early versions of the PTI SBus adapter would fail when we
	 * tried to download firmware to them (via poking), so we give up
	 * on the download for those boards.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	bzero(isp->isp_param, sizeof (sdparam));

	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
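	/*
	 * isp_sbus_rd_reg() and isp_sbus_wr_reg() below index this table
	 * with the block bits of a register offset; e.g. a mailbox register
	 * access resolves to SBUS_MBOX_REGS_OFF plus the low byte of the
	 * offset.
	 */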

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}

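	/*
	 * By this point the per-command DMA map array (sbus_dmamap) should
	 * already have been allocated by isp_sbus_mbxdma(), which the common
	 * isp code is expected to have invoked during initialization above.
	 */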
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(sbc->sbus_dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			printf("%s: DMA map create error\n",
				self->dv_xname);
			return;
		}
	}

	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, 0,
	    (int(*)__P((void*)))isp_intr, sbc);
	ENABLE_INTS(isp);

	/*
	 * do generic attach.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
	}
	ISP_UNLOCK(isp);
}

static u_int16_t
isp_sbus_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

static int
isp_sbus_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_tag_t dmatag = sbc->sbus_dmatag;
	bus_dma_segment_t seg;
	int rseg;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: cannot alloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		printf("%s: cannot alloc dmamap array\n", isp->isp_name);
		return (1);
	}
	/*
	 * Allocate and map the request queue.
	 */
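	/*
	 * The queues are set up with the usual four-step bus_dma sequence:
	 * create a DMA map, allocate DMA-safe memory, load that memory into
	 * the map (which yields the address the ISP will use), and finally
	 * map it into kernel virtual space for the driver's own access.
	 */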
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
				&sbc->sbus_request_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0,
				&seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_request_dmamap,
				&seg, rseg, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rseg);
		goto dmafail;
	}
	isp->isp_rquest_dma = sbc->sbus_request_dmamap->dm_segs[0].ds_addr;

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rseg, len,
			   (caddr_t *)&isp->isp_rquest,
			   BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_request_dmamap);
		bus_dmamem_free(dmatag, &seg, rseg);
		goto dmafail;
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
				&sbc->sbus_result_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0,
				&seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap,
				&seg, rseg, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rseg);
		goto dmafail;
	}

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rseg, len,
			   (caddr_t *)&isp->isp_result,
			   BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
		bus_dmamem_free(dmatag, &seg, rseg);
		goto dmafail;
	}
	isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */
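/*
 * isp_handle_index() converts that 1-based handle into the 0-based slot
 * used for the per-command DMA map in sbus_dmamap[] (an assumption that is
 * consistent with the array being sized to isp_maxcmds entries).
 */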

static int
isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispcontreq_t *crq;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
	    NULL, cansleep? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(sbc->sbus_dmatag, dmap, dmap->dm_segs[0].ds_addr,
	    xs->datalen, in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
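	/*
	 * The PREREAD/PREWRITE sync here is paired with the POSTREAD/
	 * POSTWRITE sync in isp_sbus_dmateardown() when the command
	 * completes.
	 */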

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	if (XS_CDBLEN(xs) > 12) {
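		/*
		 * For CDBs longer than 12 bytes, the single data segment is
		 * carried in a separate continuation entry rather than in
		 * the request itself; if advancing the request in-pointer
		 * would collide with the out-pointer, the queue is full and
		 * the command is returned for a later retry (CMD_EAGAIN).
		 */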
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(sbc->sbus_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		rq->req_dataseg[0].ds_count = 0;
		rq->req_dataseg[0].ds_base = 0;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		ISP_SWIZZLE_CONTINUATION(isp, crq);
	} else {
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		rq->req_seg_count = 1;
	}

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
#if	0
	/*
	 * If we ever map cacheable memory, we need to do something like this.
	 */
	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_request_dmamap, 0,
	    sbc->sbus_request_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
#endif
	return (CMD_QUEUED);
}

static void
isp_sbus_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: dma map not allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	bus_dmamap_sync(sbc->sbus_dmatag, dmap, dmap->dm_segs[0].ds_addr,
	    xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sbc->sbus_dmatag, dmap);
}
    469