/* $NetBSD: isp_pci.c,v 1.36 1999/03/17 06:16:42 mjacob Exp $ */
/* release_03_16_99 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998 by Matthew Jacob
 * NASA/Ames Research Center
 * All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_pci.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));

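/*
 * Machine-dependent vector tables.  Each ispmdvec hands the machine
 * independent core the bus-specific register accessors, the DMA
 * setup/teardown routines and, for the boards that need a firmware
 * download, the RISC code image to load.
 */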
#ifndef	ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	0,
	0,
	0,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	0,				/* Irrelevant to the 2100 */
	0
};
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

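/*
 * The ISP exposes its operating registers through two Base Address
 * Registers: an I/O space BAR at configuration offset 0x10 and a
 * 32-bit memory space BAR at 0x14.  The attach routine below prefers
 * the memory mapping when both map successfully.
 */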
#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14


static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

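/*
 * PCI-specific softc.  The machine-independent ispsoftc must be the
 * first member so that the generic code's pointer can be cast back
 * to the bus-specific structure, as the routines below do.
 */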
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	pci_chipset_tag_t	pci_pc;
	pcitag_t		pci_tag;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	bus_dma_tag_t		pci_dmat;
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;
	bus_dmamap_t		pci_result_dmap;
	bus_dmamap_t		pci_xfer_dmap[MAXISPREQUEST];
	void *			pci_ih;
	int16_t			pci_poff[_NREG_BLKS];
};

struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

static int
isp_pci_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	switch (pa->pa_id) {
#ifndef	ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		return (1);
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
#if	0
	case PCI_QLOGIC_ISP1240:	/* 1240 not ready yet */
#endif
		return (1);
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		return (1);
#endif
	default:
		return (0);
	}
}

static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef	DEBUG
	static char oneshot = 1;
#endif
	u_int32_t data;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
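	/*
	 * Record the default offset of each register block within the
	 * mapped BAR; the register accessors index this table by the
	 * block-select bits of a register offset.
	 */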
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

#ifndef	ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate sdparam table\n",
			       isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate sdparam table\n",
			       isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate fcparam table\n",
			       isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
#endif

	/*
	 * Make sure that the command register is set sanely: enable
	 * bus mastering and memory write and invalidate.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not entirely sure about these, but it seems important that
	 * parity and SERR# reporting be enabled as well.
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer and cache line size are set
	 * sanely.
	 */
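	/*
	 * The values written below set the latency timer to 0x40 PCI
	 * clocks and the cache line size field to 0x10; that field is
	 * in units of 32-bit words, so this corresponds to 64 bytes.
	 */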
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (0x40 << PCI_LATTIMER_SHIFT);
	data |= (0x10 << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih =
	    pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}

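/*
 * Register accessors for the ISP1020/1040 and ISP2100.  A register
 * offset encodes a block select plus an offset within that block.
 * SXP registers are only visible while BIU_PCI_CONF1_SXP is set in
 * BIU_CONF1, so that bit is toggled around SXP accesses; the caller
 * is assumed to have paused the RISC processor first.
 */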
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}

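/*
 * The ISP1080 (and eventually the ISP1240) multiplexes both the SXP
 * and DMA register blocks behind BIU_CONF1, using the
 * BIU_PCI1080_CONF1_SXP and BIU_PCI1080_CONF1_DMA bits as bank
 * selects, hence these separate accessors.
 */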
#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
#endif

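/*
 * Allocate DMA-safe memory for the request and result queues (and,
 * for Fibre Channel adapters, the scratch area), map it into kernel
 * virtual space, and record the bus addresses for the machine
 * independent core to program into the adapter.
 */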
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/*
	 * Allocate and map the Fibre Channel scratch area.
	 */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	      (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}

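/*
 * Load the per-command DMA map for a transfer and translate it into
 * data segment descriptors in the request queue entry; segments that
 * do not fit in the request proper spill over into RQSTYPE_DATASEG
 * continuation entries.  The data map and the request queue are
 * pre-synced here; the matching post-read/post-write syncs are done
 * in isp_pci_intr() and isp_pci_dmateardown() below.
 */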
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
		    isp->isp_name, rq->req_handle);
		/* NOTREACHED */
	}

	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	for (seg = 0, rq->req_seg_count = 0;
	     seg < segcnt && rq->req_seg_count < seglim;
	     seg++, rq->req_seg_count++) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	do {
		crq = (ispcontreq_t *)
			ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n",
			    isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}

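/*
 * Interrupt wrapper: make the result queue visible to the CPU before
 * handing off to the machine-independent isp_intr().
 */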
static int
isp_pci_intr(arg)
	void *arg;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;

	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	return (isp_intr(arg));
}

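/*
 * Complete a data transfer: sync the data map back to the CPU and
 * unload it so the map can be reused by a later command.
 */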
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}

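/*
 * Chip-specific post-reset fixup; the machine-independent reset path
 * appears to invoke this after the basic chip reset.  All that is
 * done here is to make sure the on-board BIOS is disabled via the
 * HCCR.
 */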
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;

	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}