/*	$NetBSD: isp_pci.c,v 1.14.2.1 1997/07/01 17:35:35 bouyer Exp $	*/

/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <vm/vm.h>

#include <dev/ic/ispreg.h>
#include <dev/ic/ispvar.h>
#include <dev/ic/ispmbox.h>
#include <dev/microcode/isp/asm_pci.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));

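/*
 * Bus-specific dispatch vector handed to the machine-independent isp
 * driver: register accessors, DMA routines, the PCI RISC firmware image,
 * BIU configuration bits, and the (fixed, 60MHz) clock rate.
 */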
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	BIU_PCI_CONF1_FIFO_16 | BIU_BURST_ENABLE,
	60	/* MAGIC- all known PCI card implementations are 60MHz */
};

#define	PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

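/*
 * PCI base address registers: BAR0 (0x10) maps the chip in I/O space,
 * BAR1 (0x14) maps it in memory space.
 */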
#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14


#ifdef	__BROKEN_INDIRECT_CONFIG
static int isp_pci_probe __P((struct device *, void *, void *));
#else
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
#endif
static void isp_pci_attach __P((struct device *, struct device *, void *));

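/*
 * PCI-specific softc: wraps the machine-independent ispsoftc and adds the
 * bus space tag/handle, the DMA tag, the request/result queue DMA maps,
 * and one data-transfer DMA map per request queue slot.
 */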
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	bus_dma_tag_t		pci_dmat;
	bus_dmamap_t		pci_rquest_dmap;
	bus_dmamap_t		pci_result_dmap;
	bus_dmamap_t		pci_xfer_dmap[RQUEST_QUEUE_LEN];
	void *			pci_ih;
};

struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

static int
isp_pci_probe(parent, match, aux)
	struct device *parent;
#ifdef	__BROKEN_INDIRECT_CONFIG
	void *match, *aux;
#else
	struct cfdata *match;
	void *aux;
#endif
{
	struct pci_attach_args *pa = aux;

	if (pa->pa_id == PCI_QLOGIC_ISP) {
		return (1);
	} else {
		return (0);
	}
}


static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid;
	int i;

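	/*
	 * Map the chip registers.  Try both the I/O (BAR0) and memory
	 * (BAR1) windows and prefer memory space when it is usable.
	 */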
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_isp.isp_mdvec = &mdvec;
	isp_reset(&pcs->pci_isp);
	if (pcs->pci_isp.isp_state != ISP_RESETSTATE) {
		return;
	}
	isp_init(&pcs->pci_isp);
	if (pcs->pci_isp.isp_state != ISP_INITSTATE) {
		isp_uninit(&pcs->pci_isp);
		return;
	}

	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
			 pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", pcs->pci_isp.isp_name);
		isp_uninit(&pcs->pci_isp);
		return;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<unknown>";
	pcs->pci_ih =
	  pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_intr, &pcs->pci_isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
			pcs->pci_isp.isp_name, intrstr);
		isp_uninit(&pcs->pci_isp);
		return;
	}
	printf("%s: interrupting at %s\n", pcs->pci_isp.isp_name, intrstr);

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    pcs->pci_isp.isp_name);
			isp_uninit(&pcs->pci_isp);
			return;
		}
	}

	/*
	 * Do the generic attach now.
	 */
	isp_attach(&pcs->pci_isp);
	if (pcs->pci_isp.isp_state != ISP_RUNSTATE) {
		isp_uninit(&pcs->pci_isp);
	}
}

#define	PCI_BIU_REGS_OFF		0x00
#define	PCI_MBOX_REGS_OFF		0x70
#define	PCI_SXP_REGS_OFF		0x80
#define	PCI_RISC_REGS_OFF		0x80

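/*
 * The chip's BIU, mailbox, SXP and RISC register blocks sit at fixed
 * offsets within the single PCI register window.  The block bits encoded
 * in the register offset select which base offset to add before doing
 * the bus space access.
 */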
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;
	if ((regoff & BIU_BLOCK) != 0) {
		offset = PCI_BIU_REGS_OFF;
	} else if ((regoff & MBOX_BLOCK) != 0) {
		offset = PCI_MBOX_REGS_OFF;
	} else if ((regoff & SXP_BLOCK) != 0) {
		offset = PCI_SXP_REGS_OFF;
		/*
		 * XXX
		 */
		panic("SXP Registers not accessible yet!");
	} else {
		offset = PCI_RISC_REGS_OFF;
	}
	regoff &= 0xff;
	offset += regoff;
	return bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;
	if ((regoff & BIU_BLOCK) != 0) {
		offset = PCI_BIU_REGS_OFF;
	} else if ((regoff & MBOX_BLOCK) != 0) {
		offset = PCI_MBOX_REGS_OFF;
	} else if ((regoff & SXP_BLOCK) != 0) {
		offset = PCI_SXP_REGS_OFF;
		/*
		 * XXX
		 */
		panic("SXP Registers not accessible yet!");
	} else {
		offset = PCI_RISC_REGS_OFF;
	}
	regoff &= 0xff;
	offset += regoff;
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
}

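/*
 * Allocate DMA-safe memory for the request and result queues, map it into
 * kernel virtual space, and create and load DMA maps so the bus addresses
 * of both queues can be handed to the chip.
 */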
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	return (0);
}

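/*
 * Load a data transfer into the DMA map associated with this request
 * handle and translate the resulting segment list into the request's
 * scatter/gather entries.
 */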
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		rq->req_flags |= REQFLAG_DATA_IN;
		return (0);
	}

	if (rq->req_handle >= RQUEST_QUEUE_LEN) {
		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
		    isp->isp_name, rq->req_handle);
		/* NOTREACHED */
	}

	if (xs->flags & SCSI_DATA_IN) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error)
		return (error);

	segcnt = dmap->dm_nsegs;

	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < ISP_RQDSEG;
	    seg++, rq->req_seg_count++) {
		rq->req_dataseg[rq->req_seg_count].ds_count =
		    dmap->dm_segs[seg].ds_len;
		rq->req_dataseg[rq->req_seg_count].ds_base =
		    dmap->dm_segs[seg].ds_addr;
	}

	if (seg == segcnt)
		goto mapsync;

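	/*
	 * The base request entry only holds ISP_RQDSEG scatter/gather
	 * entries; spill any remaining segments into continuation entries
	 * (RQSTYPE_DATASEG) pulled from the request queue.
	 */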
	do {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest,
		    *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n",
			    isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			return (EFBIG);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

 mapsync:
	bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	return (0);
}

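/*
 * Undo isp_pci_dmasetup once the command has completed: synchronize the
 * DMA map for the CPU and unload it.
 */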
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(pci->pci_dmat, dmap);
}

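/*
 * Bus-specific reset hook from the mdvec; keep the on-board BIOS disabled
 * after the chip has been reset.
 */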
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}