      1 /* $NetBSD: isp_pci.c,v 1.42 1999/09/30 23:04:42 thorpej Exp $ */
      2 /* release_6_5_99 */
      3 /*
      4  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
      5  * Matthew Jacob (mjacob (at) nas.nasa.gov)
      6  */
      7 /*
      8  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
      9  * All rights reserved.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. The name of the author may not be used to endorse or promote products
     20  *    derived from this software without specific prior written permission
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     32  */
     33 
     34 #include <dev/ic/isp_netbsd.h>
     35 #include <dev/microcode/isp/asm_pci.h>
     36 
     37 #include <dev/pci/pcireg.h>
     38 #include <dev/pci/pcivar.h>
     39 #include <dev/pci/pcidevs.h>
     40 
     41 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
     42 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
     43 #ifndef	ISP_DISABLE_1080_SUPPORT
     44 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
     45 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
     46 #endif
     47 static int isp_pci_mbxdma __P((struct ispsoftc *));
     48 static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
     49 	ispreq_t *, u_int8_t *, u_int8_t));
     50 static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
     51 	u_int32_t));
     52 static void isp_pci_reset1 __P((struct ispsoftc *));
     53 static void isp_pci_dumpregs __P((struct ispsoftc *));
     54 static int isp_pci_intr __P((void *));
     55 
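/*
 * Machine-dependent vector tables: per-chip register accessors, DMA
 * setup/teardown entry points, reset and register-dump hooks, the
 * firmware image (with its length, load origin and version), and the
 * default BIU configuration bits for each supported board family.
 */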
     56 #ifndef	ISP_DISABLE_1020_SUPPORT
     57 static struct ispmdvec mdvec = {
     58 	isp_pci_rd_reg,
     59 	isp_pci_wr_reg,
     60 	isp_pci_mbxdma,
     61 	isp_pci_dmasetup,
     62 	isp_pci_dmateardown,
     63 	NULL,
     64 	isp_pci_reset1,
     65 	isp_pci_dumpregs,
     66 	ISP_RISC_CODE,
     67 	ISP_CODE_LENGTH,
     68 	ISP_CODE_ORG,
     69 	ISP_CODE_VERSION,
     70 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
     71 	0
     72 };
     73 #endif
     74 
     75 #ifndef	ISP_DISABLE_1080_SUPPORT
     76 static struct ispmdvec mdvec_1080 = {
     77 	isp_pci_rd_reg_1080,
     78 	isp_pci_wr_reg_1080,
     79 	isp_pci_mbxdma,
     80 	isp_pci_dmasetup,
     81 	isp_pci_dmateardown,
     82 	NULL,
     83 	isp_pci_reset1,
     84 	isp_pci_dumpregs,
     85 	ISP1080_RISC_CODE,
     86 	ISP1080_CODE_LENGTH,
     87 	ISP1080_CODE_ORG,
     88 	ISP1080_CODE_VERSION,
     89 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
     90 	0
     91 };
     92 #endif
     93 
     94 #ifndef	ISP_DISABLE_2100_SUPPORT
     95 static struct ispmdvec mdvec_2100 = {
     96 	isp_pci_rd_reg,
     97 	isp_pci_wr_reg,
     98 	isp_pci_mbxdma,
     99 	isp_pci_dmasetup,
    100 	isp_pci_dmateardown,
    101 	NULL,
    102 	isp_pci_reset1,
    103 	isp_pci_dumpregs,
    104 	ISP2100_RISC_CODE,
    105 	ISP2100_CODE_LENGTH,
    106 	ISP2100_CODE_ORG,
    107 	ISP2100_CODE_VERSION,
    108 	0,				/* Irrelevant to the 2100 */
    109 	0
    110 };
    111 #endif
    112 
    113 #ifndef	ISP_DISABLE_2200_SUPPORT
    114 static struct ispmdvec mdvec_2200 = {
    115 	isp_pci_rd_reg,
    116 	isp_pci_wr_reg,
    117 	isp_pci_mbxdma,
    118 	isp_pci_dmasetup,
    119 	isp_pci_dmateardown,
    120 	NULL,
    121 	isp_pci_reset1,
    122 	isp_pci_dumpregs,
    123 	ISP2200_RISC_CODE,
    124 	ISP2200_CODE_LENGTH,
    125 	ISP2200_CODE_ORG,
    126 	ISP2200_CODE_VERSION,
    127 	0,				/* Irrelevant to the 2200 */
    128 	0
    129 };
    130 #endif
    131 
    132 #ifndef	PCI_VENDOR_QLOGIC
    133 #define	PCI_VENDOR_QLOGIC	0x1077
    134 #endif
    135 
    136 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
    137 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
    138 #endif
    139 
    140 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
    141 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
    142 #endif
    143 
    144 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
    145 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
    146 #endif
    147 
    148 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
    149 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
    150 #endif
    151 
    152 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
    153 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
    154 #endif
    155 
    156 #define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
    157 
    158 #define	PCI_QLOGIC_ISP1080	\
    159 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
    160 
    161 #define	PCI_QLOGIC_ISP1240	\
    162 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
    163 
    164 #define	PCI_QLOGIC_ISP2100	\
    165 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
    166 
    167 #define	PCI_QLOGIC_ISP2200	\
    168 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
    169 
    170 #define IO_MAP_REG	0x10
    171 #define MEM_MAP_REG	0x14
    172 #define	PCIR_ROMADDR	0x30
    173 
    174 #define	PCI_DFLT_LTNCY	0x40
    175 #define	PCI_DFLT_LNSZ	0x10
    176 
    177 
    178 static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
    179 static void isp_pci_attach __P((struct device *, struct device *, void *));
    180 
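/*
 * Per-instance softc: the generic ispsoftc plus the PCI chipset tag
 * and device tag, the bus space tag/handle for the mapped registers,
 * the DMA tag, the DMA maps for the request/result queues and the FC
 * scratch area, per-command transfer DMA maps, and the offsets of
 * each register block within the mapped region.
 */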
    181 struct isp_pcisoftc {
    182 	struct ispsoftc		pci_isp;
    183 	pci_chipset_tag_t	pci_pc;
    184 	pcitag_t		pci_tag;
    185 	bus_space_tag_t		pci_st;
    186 	bus_space_handle_t	pci_sh;
    187 	bus_dma_tag_t		pci_dmat;
    188 	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
    189 	bus_dmamap_t		pci_rquest_dmap;
    190 	bus_dmamap_t		pci_result_dmap;
    191 	bus_dmamap_t		pci_xfer_dmap[MAXISPREQUEST];
    192 	void *			pci_ih;
    193 	int16_t			pci_poff[_NREG_BLKS];
    194 };
    195 
    196 struct cfattach isp_pci_ca = {
    197 	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
    198 };
    199 
    200 static int
    201 isp_pci_probe(parent, match, aux)
     202 	struct device *parent;
     203 	struct cfdata *match;
    204 	void *aux;
    205 {
     206 	struct pci_attach_args *pa = aux;
    207         switch (pa->pa_id) {
    208 #ifndef	ISP_DISABLE_1020_SUPPORT
    209 	case PCI_QLOGIC_ISP:
    210 		return (1);
    211 #endif
    212 #ifndef	ISP_DISABLE_1080_SUPPORT
    213 	case PCI_QLOGIC_ISP1080:
    214 	case PCI_QLOGIC_ISP1240:
    215 		return (1);
    216 #endif
    217 #ifndef	ISP_DISABLE_2100_SUPPORT
    218 	case PCI_QLOGIC_ISP2100:
    219 		return (1);
    220 #endif
    221 #ifndef	ISP_DISABLE_2200_SUPPORT
    222 	case PCI_QLOGIC_ISP2200:
    223 		return (1);
    224 #endif
    225 	default:
    226 		return (0);
    227 	}
    228 }
    229 
    230 
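/*
 * Map the device registers (preferring memory space), set up the PCI
 * command, latency-timer and cache-line-size registers, disable the
 * expansion ROM, establish the interrupt, and then reset, initialize
 * and attach the common ISP code.
 */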
    231 static void
    232 isp_pci_attach(parent, self, aux)
     233 	struct device *parent, *self;
     234 	void *aux;
    235 {
    236 #ifdef	DEBUG
    237 	static char oneshot = 1;
    238 #endif
     239 	static const char *nomem = "%s: no memory for parameter table\n";
    240 	u_int32_t data, linesz = PCI_DFLT_LNSZ;
    241 	struct pci_attach_args *pa = aux;
    242 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
    243 	struct ispsoftc *isp = &pcs->pci_isp;
    244 	bus_space_tag_t st, iot, memt;
    245 	bus_space_handle_t sh, ioh, memh;
    246 	pci_intr_handle_t ih;
    247 	const char *intrstr;
    248 	int ioh_valid, memh_valid, i;
    249 	long foo;
    250 	ISP_LOCKVAL_DECL;
    251 
    252 	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
    253 	    PCI_MAPREG_TYPE_IO, 0,
    254 	    &iot, &ioh, NULL, NULL) == 0);
    255 	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
    256 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
    257 	    &memt, &memh, NULL, NULL) == 0);
    258 
    259 	if (memh_valid) {
    260 		st = memt;
    261 		sh = memh;
    262 	} else if (ioh_valid) {
    263 		st = iot;
    264 		sh = ioh;
    265 	} else {
    266 		printf(": unable to map device registers\n");
    267 		return;
    268 	}
    269 	printf("\n");
    270 
    271 	pcs->pci_st = st;
    272 	pcs->pci_sh = sh;
    273 	pcs->pci_dmat = pa->pa_dmat;
    274 	pcs->pci_pc = pa->pa_pc;
    275 	pcs->pci_tag = pa->pa_tag;
    276 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
    277 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
    278 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
    279 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
    280 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
    281 
    282 #ifndef	ISP_DISABLE_1020_SUPPORT
    283 	if (pa->pa_id == PCI_QLOGIC_ISP) {
    284 		isp->isp_mdvec = &mdvec;
    285 		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
    286 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
    287 		if (isp->isp_param == NULL) {
    288 			printf(nomem, isp->isp_name);
    289 			return;
    290 		}
    291 		bzero(isp->isp_param, sizeof (sdparam));
    292 	}
    293 #endif
    294 #ifndef	ISP_DISABLE_1080_SUPPORT
    295 	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
    296 		isp->isp_mdvec = &mdvec_1080;
    297 		isp->isp_type = ISP_HA_SCSI_1080;
    298 		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
    299 		if (isp->isp_param == NULL) {
    300 			printf(nomem, isp->isp_name);
    301 			return;
    302 		}
    303 		bzero(isp->isp_param, sizeof (sdparam));
    304 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
    305 		    ISP1080_DMA_REGS_OFF;
    306 	}
    307 	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
    308 		isp->isp_mdvec = &mdvec_1080;
    309 		isp->isp_type = ISP_HA_SCSI_12X0;
    310 		isp->isp_param =
    311 		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
    312 		if (isp->isp_param == NULL) {
    313 			printf(nomem, isp->isp_name);
    314 			return;
    315 		}
    316 		bzero(isp->isp_param, 2 * sizeof (sdparam));
    317 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
    318 		    ISP1080_DMA_REGS_OFF;
    319 	}
    320 #endif
    321 #ifndef	ISP_DISABLE_2100_SUPPORT
    322 	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
    323 		isp->isp_mdvec = &mdvec_2100;
    324 		isp->isp_type = ISP_HA_FC_2100;
    325 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
    326 		if (isp->isp_param == NULL) {
    327 			printf(nomem, isp->isp_name);
    328 			return;
    329 		}
    330 		bzero(isp->isp_param, sizeof (fcparam));
    331 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
    332 		    PCI_MBOX_REGS2100_OFF;
    333 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
    334 		if ((data & 0xff) < 3) {
    335 			/*
    336 			 * XXX: Need to get the actual revision
    337 			 * XXX: number of the 2100 FB. At any rate,
     338 			 * XXX: lower the cache line size for early
     339 			 * XXX: revision boards.
    340 			 */
    341 			linesz = 1;
    342 		}
    343 	}
    344 #endif
    345 #ifndef	ISP_DISABLE_2200_SUPPORT
    346 	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
    347 		isp->isp_mdvec = &mdvec_2200;
    348 		isp->isp_type = ISP_HA_FC_2200;
    349 		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
    350 		if (isp->isp_param == NULL) {
    351 			printf(nomem, isp->isp_name);
    352 			return;
    353 		}
    354 		bzero(isp->isp_param, sizeof (fcparam));
    355 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
    356 		    PCI_MBOX_REGS2100_OFF;
    357 		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
    358 	}
    359 #endif
    360 
    361 	/*
     362 	 * Make sure that the command register is set sanely.
    363 	 */
    364 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    365 	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
    366 
    367 	/*
     368 	 * Not so sure about these, but it seems important that they
     369 	 * be enabled.
    370 	 */
    371 	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
    372 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
    373 
    374 	/*
     375 	 * Set the latency timer and cache line size to sane values,
     376 	 * and make sure the expansion ROM is disabled.
    377 	 */
    378 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
    379 	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
    380 	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
    381 	data |= (PCI_DFLT_LTNCY	<< PCI_LATTIMER_SHIFT);
    382 	data |= (linesz << PCI_CACHELINE_SHIFT);
    383 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
    384 
    385 	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
    386 	data &= ~1;
    387 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
    388 
    389 #ifdef DEBUG
    390 	if (oneshot) {
    391 		oneshot = 0;
    392 		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
    393 		    "%d.%d Core Version %d.%d\n",
    394 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
    395 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
    396 	}
    397 #endif
    398 	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
    399 	    pa->pa_intrline, &ih)) {
    400 		printf("%s: couldn't map interrupt\n", isp->isp_name);
    401 		free(isp->isp_param, M_DEVBUF);
    402 		return;
    403 	}
    404 	intrstr = pci_intr_string(pa->pa_pc, ih);
    405 	if (intrstr == NULL)
    406 		intrstr = "<I dunno>";
    407 	pcs->pci_ih =
    408 	  pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
    409 	if (pcs->pci_ih == NULL) {
    410 		printf("%s: couldn't establish interrupt at %s\n",
    411 			isp->isp_name, intrstr);
    412 		free(isp->isp_param, M_DEVBUF);
    413 		return;
    414 	}
    415 	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
    416 
    417 	/*
    418 	 * This isn't very random, but it's the best we can do for
    419 	 * the real edge case of cards that don't have WWNs.
    420 	 */
    421 	foo = (long) isp;
    422 	foo >>= 4;
    423 	foo &= 0x7;
    424 	while (version[foo])
    425 		isp->isp_osinfo.seed += (int) version[foo++];
    426 	isp->isp_osinfo.seed <<= 8;
    427 	isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
    428 
    429 	ISP_LOCK(isp);
    430 	isp_reset(isp);
    431 	if (isp->isp_state != ISP_RESETSTATE) {
    432 		ISP_UNLOCK(isp);
    433 		free(isp->isp_param, M_DEVBUF);
    434 		return;
    435 	}
    436 	isp_init(isp);
    437 	if (isp->isp_state != ISP_INITSTATE) {
    438 		isp_uninit(isp);
    439 		ISP_UNLOCK(isp);
    440 		free(isp->isp_param, M_DEVBUF);
    441 		return;
    442 	}
    443 
    446 	/*
    447 	 * Create the DMA maps for the data transfers.
    448 	 */
    449 	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
    450 		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
    451 		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
    452 		    &pcs->pci_xfer_dmap[i])) {
    453 			printf("%s: can't create dma maps\n",
    454 			    isp->isp_name);
    455 			isp_uninit(isp);
    456 			ISP_UNLOCK(isp);
    457 			return;
    458 		}
    459 	}
    460 	/*
    461 	 * Do Generic attach now.
    462 	 */
    463 	isp_attach(isp);
    464 	if (isp->isp_state != ISP_RUNSTATE) {
    465 		isp_uninit(isp);
    466 		free(isp->isp_param, M_DEVBUF);
    467 	}
    468 	ISP_UNLOCK(isp);
    469 }
    470 
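/*
 * Register accessors: the register block implied by the offset selects
 * an entry in pci_poff[], which gives that block's offset within the
 * mapped region.  SXP registers are only visible while the SXP bit is
 * set in BIU_CONF1, so that bit is toggled around the access; the
 * caller is assumed to have paused the RISC processor.
 */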
    471 static u_int16_t
    472 isp_pci_rd_reg(isp, regoff)
    473 	struct ispsoftc *isp;
    474 	int regoff;
    475 {
    476 	u_int16_t rv;
    477 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    478 	int offset, oldconf = 0;
    479 
    480 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
    481 		/*
    482 		 * We will assume that someone has paused the RISC processor.
    483 		 */
    484 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
    485 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
    486 	}
    487 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
    488 	offset += (regoff & 0xff);
    489 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
    490 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
    491 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
    492 	}
    493 	return (rv);
    494 }
    495 
    496 static void
    497 isp_pci_wr_reg(isp, regoff, val)
    498 	struct ispsoftc *isp;
    499 	int regoff;
    500 	u_int16_t val;
    501 {
    502 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    503 	int offset, oldconf = 0;
    504 
    505 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
    506 		/*
    507 		 * We will assume that someone has paused the RISC processor.
    508 		 */
    509 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
    510 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
    511 	}
    512 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
    513 	offset += (regoff & 0xff);
    514 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
    515 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
    516 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
    517 	}
    518 }
    519 
    520 #ifndef	ISP_DISABLE_1080_SUPPORT
    521 static u_int16_t
    522 isp_pci_rd_reg_1080(isp, regoff)
    523 	struct ispsoftc *isp;
    524 	int regoff;
    525 {
    526 	u_int16_t rv;
    527 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    528 	int offset, oc = 0;
    529 
    530 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
    531 		/*
    532 		 * We will assume that someone has paused the RISC processor.
    533 		 */
    534 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
    535 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
    536 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
    537 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
    538 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
    539 	}
    540 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
    541 	offset += (regoff & 0xff);
    542 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
    543 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
    544 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
    545 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
    546 	}
    547 	return (rv);
    548 }
    549 
    550 static void
    551 isp_pci_wr_reg_1080(isp, regoff, val)
    552 	struct ispsoftc *isp;
    553 	int regoff;
    554 	u_int16_t val;
    555 {
    556 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    557 	int offset, oc = 0;
    558 
    559 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
    560 		/*
    561 		 * We will assume that someone has paused the RISC processor.
    562 		 */
    563 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
    564 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
    565 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
    566 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
    567 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
    568 	}
    569 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
    570 	offset += (regoff & 0xff);
    571 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
    572 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
    573 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
    574 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
    575 	}
    576 }
    577 #endif
    578 
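/*
 * Allocate, map and load DMA-safe memory for the request and result
 * queues (and, for Fibre Channel adapters, the scratch area), and
 * record the bus addresses the RISC firmware will use.
 */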
    579 static int
    580 isp_pci_mbxdma(isp)
    581 	struct ispsoftc *isp;
    582 {
    583 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
    584 	bus_dma_segment_t seg;
    585 	bus_size_t len;
    586 	fcparam *fcp;
    587 	int rseg;
    588 
    589 	/*
    590 	 * Allocate and map the request queue.
    591 	 */
    592 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
    593 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
    594 	      BUS_DMA_NOWAIT) ||
    595 	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
    596 	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
    597 		return (1);
    598 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
    599 	      &pci->pci_rquest_dmap) ||
    600 	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
    601 	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
    602 		return (1);
    603 
    604 	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
    605 
    606 	/*
    607 	 * Allocate and map the result queue.
    608 	 */
    609 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
    610 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
    611 	      BUS_DMA_NOWAIT) ||
    612 	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
    613 	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
    614 		return (1);
    615 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
    616 	      &pci->pci_result_dmap) ||
    617 	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
    618 	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
    619 		return (1);
    620 	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
    621 
    622 	if (IS_SCSI(isp)) {
    623 		return (0);
    624 	}
    625 
    626 	fcp = isp->isp_param;
    627 	len = ISP2100_SCRLEN;
    628 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
    629 		BUS_DMA_NOWAIT) ||
    630 	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
    631 	      (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
    632 		return (1);
    633 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
    634 	      &pci->pci_scratch_dmap) ||
    635 	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
    636 	      (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
    637 		return (1);
    638 	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
    639 	return (0);
    640 }
    641 
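/*
 * Load the per-command DMA map for this transfer and fill in the data
 * segment descriptors of the request entry.  If the transfer needs
 * more segments than fit in one entry, continuation entries
 * (RQSTYPE_DATASEG) are appended to the request queue.  The data and
 * request-queue maps are synced before the command is queued.
 */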
    642 static int
    643 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
    644 	struct ispsoftc *isp;
    645 	struct scsipi_xfer *xs;
    646 	ispreq_t *rq;
    647 	u_int8_t *iptrp;
    648 	u_int8_t optr;
    649 {
    650 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
    651 	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
    652 	ispcontreq_t *crq;
    653 	int segcnt, seg, error, ovseg, seglim, drq;
    654 
    655 	if (xs->datalen == 0) {
    656 		rq->req_seg_count = 1;
    657 		goto mbxsync;
    658 	}
    659 
    660 	if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
    661 		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
    662 		    isp->isp_name, rq->req_handle);
    663 		/* NOTREACHED */
    664 	}
    665 
    666 	if (xs->xs_control & XS_CTL_DATA_IN) {
    667 		drq = REQFLAG_DATA_IN;
    668 	} else {
    669 		drq = REQFLAG_DATA_OUT;
    670 	}
    671 
    672 	if (IS_FC(isp)) {
    673 		seglim = ISP_RQDSEG_T2;
    674 		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
    675 		((ispreqt2_t *)rq)->req_flags |= drq;
    676 	} else {
    677 		seglim = ISP_RQDSEG;
    678 		rq->req_flags |= drq;
    679 	}
    680 	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
    681 	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
    682 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
    683 	if (error) {
    684 		XS_SETERR(xs, HBA_BOTCH);
    685 		return (CMD_COMPLETE);
    686 	}
    687 
    688 	segcnt = dmap->dm_nsegs;
    689 
    690 	for (seg = 0, rq->req_seg_count = 0;
    691 	     seg < segcnt && rq->req_seg_count < seglim;
    692 	     seg++, rq->req_seg_count++) {
    693 		if (IS_FC(isp)) {
    694 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
    695 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
    696 			    dmap->dm_segs[seg].ds_len;
    697 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
    698 			    dmap->dm_segs[seg].ds_addr;
    699 		} else {
    700 			rq->req_dataseg[rq->req_seg_count].ds_count =
    701 			    dmap->dm_segs[seg].ds_len;
    702 			rq->req_dataseg[rq->req_seg_count].ds_base =
    703 			    dmap->dm_segs[seg].ds_addr;
    704 		}
    705 	}
    706 
    707 	if (seg == segcnt)
    708 		goto dmasync;
    709 
    710 	do {
    711 		crq = (ispcontreq_t *)
    712 			ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
    713 		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
    714 		if (*iptrp == optr) {
    715 			printf("%s: Request Queue Overflow++\n",
    716 			       isp->isp_name);
    717 			bus_dmamap_unload(pci->pci_dmat, dmap);
    718 			XS_SETERR(xs, HBA_BOTCH);
    719 			return (CMD_COMPLETE);
    720 		}
    721 		rq->req_header.rqs_entry_count++;
    722 		bzero((void *)crq, sizeof (*crq));
    723 		crq->req_header.rqs_entry_count = 1;
    724 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
    725 
    726 		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
    727 		    rq->req_seg_count++, seg++, ovseg++) {
    728 			crq->req_dataseg[ovseg].ds_count =
    729 			    dmap->dm_segs[seg].ds_len;
    730 			crq->req_dataseg[ovseg].ds_base =
    731 			    dmap->dm_segs[seg].ds_addr;
    732 		}
    733 	} while (seg < segcnt);
    734 
    735 dmasync:
    736 	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
    737 	    (xs->xs_control & XS_CTL_DATA_IN) ?  BUS_DMASYNC_PREREAD :
    738 	    BUS_DMASYNC_PREWRITE);
    739 
    740 mbxsync:
    741 
    742 	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
    743 	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
    744 	return (CMD_QUEUED);
    745 }
    746 
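/*
 * Interrupt wrapper: sync the result queue so completions written by
 * the adapter are visible before handing off to the common isp_intr().
 */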
    747 static int
    748 isp_pci_intr(arg)
    749 	void *arg;
    750 {
    751 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
    752 	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
    753 	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
    754 	return (isp_intr(arg));
    755 }
    756 
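/*
 * Post-command cleanup: sync the data DMA map in the appropriate
 * direction and unload it.
 */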
    757 static void
    758 isp_pci_dmateardown(isp, xs, handle)
    759 	struct ispsoftc *isp;
    760 	struct scsipi_xfer *xs;
    761 	u_int32_t handle;
    762 {
    763 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
    764 	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];
    765 
    766 	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
    767 	    xs->xs_control & XS_CTL_DATA_IN ?
    768 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
    769 	bus_dmamap_unload(pci->pci_dmat, dmap);
    770 }
    771 
    772 static void
    773 isp_pci_reset1(isp)
    774 	struct ispsoftc *isp;
    775 {
    776 	/* Make sure the BIOS is disabled */
    777 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
    778 }
    779 
    780 static void
    781 isp_pci_dumpregs(isp)
    782 	struct ispsoftc *isp;
    783 {
    784 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
    785 	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
    786 	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
    787 }
    788