/*	$NetBSD: isp_pci.c,v 1.17 1997/09/10 02:16:13 mjacob Exp $	*/

/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <vm/vm.h>

#include <dev/ic/ispreg.h>
#include <dev/ic/ispvar.h>
#include <dev/ic/ispmbox.h>
#include <dev/microcode/isp/asm_pci.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
    ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
    u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));

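/*
 * Machine dependent vectors for the two supported chip families.  These
 * supply the generic ISP core with register accessors, the DMA setup and
 * teardown hooks, reset and register dump routines, the RISC firmware
 * image to download (code, length, load origin, version), and the BIU
 * configuration and clock rate hints used at reset time.
 */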
static struct ispmdvec mdvec = {
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_RISC_CODE,
        ISP_CODE_LENGTH,
        ISP_CODE_ORG,
        ISP_CODE_VERSION,
        BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,
        60      /* MAGIC- all known PCI card implementations are 60MHz */
};

static struct ispmdvec mdvec_2100 = {
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP2100_RISC_CODE,
        ISP2100_CODE_LENGTH,
        ISP2100_CODE_ORG,
        ISP2100_CODE_VERSION,
        BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,
        60      /* MAGIC- all known PCI card implementations are 60MHz */
};

#define PCI_QLOGIC_ISP \
        ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100      0x2100
#endif
#define PCI_QLOGIC_ISP2100 \
        ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

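/*
 * Config space offsets 0x10 and 0x14 are the chip's first two PCI Base
 * Address Registers; the first is mapped as I/O space and the second as
 * 32-bit memory space below.
 */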
#define IO_MAP_REG      0x10
#define MEM_MAP_REG     0x14


#ifdef  __BROKEN_INDIRECT_CONFIG
static int isp_pci_probe __P((struct device *, void *, void *));
#else
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
#endif
static void isp_pci_attach __P((struct device *, struct device *, void *));

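/*
 * Per-instance softc: the generic ispsoftc must come first so the core
 * code and this front end can cast between the two.  The PCI-specific
 * state that follows holds the configuration handle, the register space
 * tag and handle, the DMA tag, and the DMA maps for the request/result
 * queues, the Fibre Channel scratch area, and one map per outstanding
 * request.
 */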
struct isp_pcisoftc {
        struct ispsoftc         pci_isp;
        pci_chipset_tag_t       pci_pc;
        pcitag_t                pci_tag;
        bus_space_tag_t         pci_st;
        bus_space_handle_t      pci_sh;
        bus_dma_tag_t           pci_dmat;
        bus_dmamap_t            pci_scratch_dmap;       /* for fcp only */
        bus_dmamap_t            pci_rquest_dmap;
        bus_dmamap_t            pci_result_dmap;
        bus_dmamap_t            pci_xfer_dmap[MAXISPREQUEST];
        void                    *pci_ih;
};

struct cfattach isp_pci_ca = {
        sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

static int
isp_pci_probe(parent, match, aux)
        struct device *parent;
#ifdef  __BROKEN_INDIRECT_CONFIG
        void *match, *aux;
#else
        struct cfdata *match;
        void *aux;
#endif
{
        struct pci_attach_args *pa = aux;

        if (pa->pa_id == PCI_QLOGIC_ISP ||
            pa->pa_id == PCI_QLOGIC_ISP2100) {
                return (1);
        } else {
                return (0);
        }
}

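/*
 * Attach: map the chip's registers, pick the machine dependent vector and
 * allocate the parameter block for the chip type found, reset and
 * initialize the chip, hook up the interrupt, create the per-command DMA
 * maps, and finally let the generic code attach the SCSI bus.
 */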
static void
isp_pci_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct pci_attach_args *pa = aux;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
        bus_space_tag_t st, iot, memt;
        bus_space_handle_t sh, ioh, memh;
        pci_intr_handle_t ih;
        const char *intrstr;
        int ioh_valid, memh_valid, i;

        ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
            PCI_MAPREG_TYPE_IO, 0,
            &iot, &ioh, NULL, NULL) == 0);
        memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
            PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
            &memt, &memh, NULL, NULL) == 0);

        if (memh_valid) {
                st = memt;
                sh = memh;
        } else if (ioh_valid) {
                st = iot;
                sh = ioh;
        } else {
                printf(": unable to map device registers\n");
                return;
        }
        printf("\n");

        pcs->pci_st = st;
        pcs->pci_sh = sh;
        pcs->pci_dmat = pa->pa_dmat;
        pcs->pci_pc = pa->pa_pc;
        pcs->pci_tag = pa->pa_tag;
        if (pa->pa_id == PCI_QLOGIC_ISP) {
                pcs->pci_isp.isp_mdvec = &mdvec;
                pcs->pci_isp.isp_type = ISP_HA_SCSI_UNKNOWN;
                pcs->pci_isp.isp_param =
                    malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
                if (pcs->pci_isp.isp_param == NULL) {
                        printf("%s: couldn't allocate sdparam table\n",
                            pcs->pci_isp.isp_name);
                        return;
                }
                bzero(pcs->pci_isp.isp_param, sizeof (sdparam));
        } else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
                u_int32_t data;
                pcs->pci_isp.isp_mdvec = &mdvec_2100;
                if (ioh_valid == 0) {
                        printf("%s: warning, ISP2100 cannot use I/O Space"
                            " Mappings\n", pcs->pci_isp.isp_name);
                } else {
                        pcs->pci_st = iot;
                        pcs->pci_sh = ioh;
                }

#if 0
                printf("%s: PCIREGS cmd=%x bhlc=%x\n", pcs->pci_isp.isp_name,
                    pci_conf_read(pa->pa_pc, pa->pa_tag,
                        PCI_COMMAND_STATUS_REG),
                    pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG));
#endif
                pcs->pci_isp.isp_type = ISP_HA_FC_2100;
                pcs->pci_isp.isp_param =
                    malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
                if (pcs->pci_isp.isp_param == NULL) {
                        printf("%s: couldn't allocate fcparam table\n",
                            pcs->pci_isp.isp_name);
                        return;
                }
                bzero(pcs->pci_isp.isp_param, sizeof (fcparam));

                data = pci_conf_read(pa->pa_pc, pa->pa_tag,
                    PCI_COMMAND_STATUS_REG);
                data |= PCI_COMMAND_MASTER_ENABLE |
                    PCI_COMMAND_INVALIDATE_ENABLE;
                pci_conf_write(pa->pa_pc, pa->pa_tag,
                    PCI_COMMAND_STATUS_REG, data);
                /*
                 * Weird: we need to clear the lsb in offset 0x30 (which is
                 * nominally the PCI expansion ROM base address register) to
                 * take the chip out of reset state.
                 */
                data = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x30);
                data &= ~1;
                pci_conf_write(pa->pa_pc, pa->pa_tag, 0x30, data);
#if 0
                /*
                 * XXX: Need to get the actual revision number of the 2100 FB
                 */
                data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
                data &= ~0xffff;
                data |= 0xf801;
                pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
                printf("%s: setting latency to %x and cache line size to %x\n",
                    pcs->pci_isp.isp_name, (data >> 8) & 0xff,
                    data & 0xff);
#endif
        } else {
                return;
        }
        isp_reset(&pcs->pci_isp);
        if (pcs->pci_isp.isp_state != ISP_RESETSTATE) {
                free(pcs->pci_isp.isp_param, M_DEVBUF);
                return;
        }
        isp_init(&pcs->pci_isp);
        if (pcs->pci_isp.isp_state != ISP_INITSTATE) {
                isp_uninit(&pcs->pci_isp);
                free(pcs->pci_isp.isp_param, M_DEVBUF);
                return;
        }

        if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
            pa->pa_intrline, &ih)) {
                printf("%s: couldn't map interrupt\n", pcs->pci_isp.isp_name);
                isp_uninit(&pcs->pci_isp);
                free(pcs->pci_isp.isp_param, M_DEVBUF);
                return;
        }

        intrstr = pci_intr_string(pa->pa_pc, ih);
        if (intrstr == NULL)
                intrstr = "<I dunno>";
        pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
            isp_intr, &pcs->pci_isp);
        if (pcs->pci_ih == NULL) {
                printf("%s: couldn't establish interrupt at %s\n",
                    pcs->pci_isp.isp_name, intrstr);
                isp_uninit(&pcs->pci_isp);
                free(pcs->pci_isp.isp_param, M_DEVBUF);
                return;
        }
        printf("%s: interrupting at %s\n", pcs->pci_isp.isp_name, intrstr);

        /*
         * Create the DMA maps for the data transfers.
         */
        for (i = 0; i < RQUEST_QUEUE_LEN(&pcs->pci_isp); i++) {
                if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
                    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
                    &pcs->pci_xfer_dmap[i])) {
                        printf("%s: can't create dma maps\n",
                            pcs->pci_isp.isp_name);
                        isp_uninit(&pcs->pci_isp);
                        free(pcs->pci_isp.isp_param, M_DEVBUF);
                        return;
                }
        }
        /*
         * Do Generic attach now.
         */
        isp_attach(&pcs->pci_isp);
        if (pcs->pci_isp.isp_state != ISP_RUNSTATE) {
                isp_uninit(&pcs->pci_isp);
                free(pcs->pci_isp.isp_param, M_DEVBUF);
        }
}

#define PCI_BIU_REGS_OFF        BIU_REGS_OFF

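/*
 * Register accessors.  The generic code hands us a register offset whose
 * upper bits select a register block (BIU, mailbox, SXP or RISC) and whose
 * low byte is the offset within that block; we translate this into an
 * offset from the start of the PCI register window.  SXP registers are
 * only visible while the BIU_PCI_CONF1_SXP bit in BIU_CONF1 is clear, so
 * SXP accesses save, clear and restore that bit; the caller is assumed to
 * have paused the RISC processor first.
 */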
static u_int16_t
isp_pci_rd_reg(isp, regoff)
        struct ispsoftc *isp;
        int regoff;
{
        u_int16_t rv;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int offset, oldsxp = 0;

        if ((regoff & BIU_BLOCK) != 0) {
                offset = PCI_BIU_REGS_OFF;
        } else if ((regoff & MBOX_BLOCK) != 0) {
                if (isp->isp_type & ISP_HA_SCSI)
                        offset = PCI_MBOX_REGS_OFF;
                else
                        offset = PCI_MBOX_REGS2100_OFF;
        } else if ((regoff & SXP_BLOCK) != 0) {
                offset = PCI_SXP_REGS_OFF;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
                isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
        } else {
                offset = PCI_RISC_REGS_OFF;
        }
        /*
         * Mask the block bits out of regoff only when forming the offset,
         * so that the SXP restore test below still sees them.
         */
        offset += (regoff & 0xff);
        rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
        if ((regoff & SXP_BLOCK) != 0) {
                isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
        }
        return (rv);
}

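/*
 * isp_pci_wr_reg is the mirror image of isp_pci_rd_reg: identical block
 * decoding and SXP handling, with a 16-bit write instead of a read.
 */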
static void
isp_pci_wr_reg(isp, regoff, val)
        struct ispsoftc *isp;
        int regoff;
        u_int16_t val;
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int offset, oldsxp = 0;

        if ((regoff & BIU_BLOCK) != 0) {
                offset = PCI_BIU_REGS_OFF;
        } else if ((regoff & MBOX_BLOCK) != 0) {
                if (isp->isp_type & ISP_HA_SCSI)
                        offset = PCI_MBOX_REGS_OFF;
                else
                        offset = PCI_MBOX_REGS2100_OFF;
        } else if ((regoff & SXP_BLOCK) != 0) {
                offset = PCI_SXP_REGS_OFF;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
                isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
        } else {
                offset = PCI_RISC_REGS_OFF;
        }
        /* see isp_pci_rd_reg: keep the block bits for the restore test */
        offset += (regoff & 0xff);
        bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
        if ((regoff & SXP_BLOCK) != 0) {
                isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
        }
}

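/*
 * Allocate the DMA resources the chip itself needs: the request and
 * result queues and, for Fibre Channel chips, the scratch area.  Each
 * one goes through the same bus_dma sequence: bus_dmamem_alloc for
 * NBPG-aligned memory, bus_dmamem_map for a kernel virtual address, and
 * bus_dmamap_create/bus_dmamap_load to obtain the single-segment bus
 * address that is handed to the chip.
 */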
static int
isp_pci_mbxdma(isp)
        struct ispsoftc *isp;
{
        struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
        bus_dma_segment_t seg;
        bus_size_t len;
        fcparam *fcp;
        int rseg;

        /*
         * Allocate and map the request queue.
         */
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT) ||
            bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
            (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
                return (1);
        if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &pci->pci_rquest_dmap) ||
            bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
            (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
                return (1);

        isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

        /*
         * Allocate and map the result queue.
         */
        len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT) ||
            bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
            (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
                return (1);
        if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &pci->pci_result_dmap) ||
            bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
            (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
                return (1);
        isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

        if (isp->isp_type & ISP_HA_SCSI) {
                return (0);
        }

        fcp = isp->isp_param;
        len = ISP2100_SCRLEN;
        if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT) ||
            bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
            (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
                return (1);
        if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &pci->pci_scratch_dmap) ||
            bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
            (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
                return (1);
        fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
        return (0);
}

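/*
 * Set up DMA for a command's data transfer.  The transfer's DMA map is
 * loaded, the direction flag is set, and as many segments as fit are
 * placed directly in the request entry (ISP_RQDSEG for SCSI chips,
 * ISP_RQDSEG_T2 for Fibre Channel).  Anything left over spills into
 * RQSTYPE_DATASEG continuation entries taken from the request ring;
 * finally the map is pre-synced for the transfer direction.
 */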
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
        struct ispsoftc *isp;
        struct scsipi_xfer *xs;
        ispreq_t *rq;
        u_int8_t *iptrp;
        u_int8_t optr;
{
        struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
        bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
        ispcontreq_t *crq;
        int segcnt, seg, error, ovseg, seglim, drq;

        if (xs->datalen == 0) {
                rq->req_seg_count = 1;
                return (0);
        }

        if (rq->req_handle > RQUEST_QUEUE_LEN(isp) || rq->req_handle < 1) {
                panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
                    isp->isp_name, rq->req_handle);
                /* NOTREACHED */
        }

        if (xs->flags & SCSI_DATA_IN) {
                drq = REQFLAG_DATA_IN;
        } else {
                drq = REQFLAG_DATA_OUT;
        }

        if (isp->isp_type & ISP_HA_FC) {
                seglim = ISP_RQDSEG_T2;
                ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
                ((ispreqt2_t *)rq)->req_flags |= drq;
        } else {
                seglim = ISP_RQDSEG;
                rq->req_flags |= drq;
        }
        error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
            NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
        if (error)
                return (error);

        segcnt = dmap->dm_nsegs;

        for (seg = 0, rq->req_seg_count = 0;
            seg < segcnt && rq->req_seg_count < seglim;
            seg++, rq->req_seg_count++) {
                if (isp->isp_type & ISP_HA_FC) {
                        ispreqt2_t *rq2 = (ispreqt2_t *)rq;
                        rq2->req_dataseg[rq2->req_seg_count].ds_count =
                            dmap->dm_segs[seg].ds_len;
                        rq2->req_dataseg[rq2->req_seg_count].ds_base =
                            dmap->dm_segs[seg].ds_addr;
                } else {
                        rq->req_dataseg[rq->req_seg_count].ds_count =
                            dmap->dm_segs[seg].ds_len;
                        rq->req_dataseg[rq->req_seg_count].ds_base =
                            dmap->dm_segs[seg].ds_addr;
                }
        }

        if (seg == segcnt)
                goto mapsync;

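        /*
         * The remaining segments go into continuation entries.  Each
         * continuation entry consumes a slot in the request ring, so the
         * in-pointer is advanced here; the wrap arithmetic assumes the
         * request queue length is a power of two.  Running into the
         * out-pointer means the ring is full and the request must fail.
         */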
        do {
                crq = (ispcontreq_t *)
                    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
                *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN(isp) - 1);
                if (*iptrp == optr) {
                        printf("%s: Request Queue Overflow++\n",
                            isp->isp_name);
                        bus_dmamap_unload(pci->pci_dmat, dmap);
                        return (EFBIG);
                }
                rq->req_header.rqs_entry_count++;
                bzero((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

                for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
                    rq->req_seg_count++, seg++, ovseg++) {
                        crq->req_dataseg[ovseg].ds_count =
                            dmap->dm_segs[seg].ds_len;
                        crq->req_dataseg[ovseg].ds_base =
                            dmap->dm_segs[seg].ds_addr;
                }
        } while (seg < segcnt);

mapsync:
        bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
            BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
        return (0);
}

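/*
 * Tear down the DMA mapping for a completed command: sync the map for
 * the direction of the transfer and unload it so it can be reused.
 */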
static void
isp_pci_dmateardown(isp, xs, handle)
        struct ispsoftc *isp;
        struct scsipi_xfer *xs;
        u_int32_t handle;
{
        struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
        bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

        bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(pci->pci_dmat, dmap);
}

static void
isp_pci_reset1(isp)
        struct ispsoftc *isp;
{
        /* Make sure the BIOS is disabled */
        isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
        struct ispsoftc *isp;
{
        struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
        printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
            pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}