/* $NetBSD: isp_pci.c,v 1.27 1998/07/31 02:14:40 thorpej Exp $ */
2 /*
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4 *
5 *---------------------------------------
6 * Copyright (c) 1997, 1998 by Matthew Jacob
7 * NASA/Ames Research Center
8 * All rights reserved.
9 *---------------------------------------
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice immediately at the beginning of the file, without modification,
16 * this list of conditions, and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 */
36
37 #include <dev/ic/isp_netbsd.h>
38 #include <dev/microcode/isp/asm_pci.h>
39
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42 #include <dev/pci/pcidevs.h>
43
44 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
45 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
46 static int isp_pci_mbxdma __P((struct ispsoftc *));
47 static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
48 ispreq_t *, u_int8_t *, u_int8_t));
49 static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
50 u_int32_t));
51
52 static void isp_pci_reset1 __P((struct ispsoftc *));
53 static void isp_pci_dumpregs __P((struct ispsoftc *));
54 static int isp_pci_intr __P((void *));
55
/*
 * Machine-dependent dispatch vector for the parallel SCSI (ISP10x0)
 * boards.  Slot order must match struct ispmdvec (declared via
 * isp_netbsd.h); the slot comments below are inferred from the member
 * names of the functions plugged in.  The NULL slot is an optional
 * hook this bus front-end does not use.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* queue/scratch DMA setup */
	isp_pci_dmasetup,	/* per-command DMA mapping */
	isp_pci_dmateardown,	/* per-command DMA unmapping */
	NULL,			/* optional hook, unused here */
	isp_pci_reset1,		/* post-reset fixup */
	isp_pci_dumpregs,	/* debug register dump */
	ISP_RISC_CODE,		/* firmware image (from asm_pci.h) */
	ISP_CODE_LENGTH,	/* firmware length */
	ISP_CODE_ORG,		/* firmware load origin */
	ISP_CODE_VERSION,	/* firmware version */
	BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,	/* BIU CONF1 setup */
	60	/* MAGIC- all known PCI card implementations are 60MHz */
};
72
/*
 * Machine-dependent dispatch vector for the Fibre Channel (ISP2100)
 * boards.  Identical to mdvec except for the firmware image fields.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* queue/scratch DMA setup */
	isp_pci_dmasetup,	/* per-command DMA mapping */
	isp_pci_dmateardown,	/* per-command DMA unmapping */
	NULL,			/* optional hook, unused here */
	isp_pci_reset1,		/* post-reset fixup */
	isp_pci_dumpregs,	/* debug register dump */
	ISP2100_RISC_CODE,	/* 2100 firmware image */
	ISP2100_CODE_LENGTH,	/* 2100 firmware length */
	ISP2100_CODE_ORG,	/* 2100 firmware load origin */
	ISP2100_CODE_VERSION,	/* 2100 firmware version */
	BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,	/* BIU CONF1 setup */
	60	/* MAGIC- all known PCI card implementations are 60MHz */
};
89
90 #define PCI_QLOGIC_ISP \
91 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
92
93 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
94 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
95 #endif
96 #define PCI_QLOGIC_ISP2100 \
97 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
98
99 #define IO_MAP_REG 0x10
100 #define MEM_MAP_REG 0x14
101
102
103 static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
104 static void isp_pci_attach __P((struct device *, struct device *, void *));
105
/*
 * Per-instance softc for a PCI-attached ISP.  The generic ispsoftc
 * must be the first member: the core code and this file cast freely
 * between struct ispsoftc * and struct isp_pcisoftc *.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* generic softc; MUST be first */
	pci_chipset_tag_t pci_pc;	/* PCI chipset tag */
	pcitag_t pci_tag;		/* PCI device tag */
	bus_space_tag_t pci_st;		/* register space tag */
	bus_space_handle_t pci_sh;	/* register space handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag for all maps below */
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST];	/* per-command data maps */
	void * pci_ih;			/* established interrupt handle */
};

/* Autoconfiguration glue. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
123
124 static int
125 isp_pci_probe(parent, match, aux)
126 struct device *parent;
127 struct cfdata *match;
128 void *aux;
129 {
130 struct pci_attach_args *pa = aux;
131
132 if (pa->pa_id == PCI_QLOGIC_ISP ||
133 pa->pa_id == PCI_QLOGIC_ISP2100) {
134 return (1);
135 } else {
136 return (0);
137 }
138 }
139
140
141 static void
142 isp_pci_attach(parent, self, aux)
143 struct device *parent, *self;
144 void *aux;
145 {
146 #ifdef DEBUG
147 static char oneshot = 1;
148 #endif
149 struct pci_attach_args *pa = aux;
150 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
151 struct ispsoftc *isp = &pcs->pci_isp;
152 bus_space_tag_t st, iot, memt;
153 bus_space_handle_t sh, ioh, memh;
154 pci_intr_handle_t ih;
155 const char *intrstr;
156 int ioh_valid, memh_valid, i;
157 ISP_LOCKVAL_DECL;
158
159 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
160 PCI_MAPREG_TYPE_IO, 0,
161 &iot, &ioh, NULL, NULL) == 0);
162 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
163 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
164 &memt, &memh, NULL, NULL) == 0);
165
166 if (memh_valid) {
167 st = memt;
168 sh = memh;
169 } else if (ioh_valid) {
170 st = iot;
171 sh = ioh;
172 } else {
173 printf(": unable to map device registers\n");
174 return;
175 }
176 printf("\n");
177
178 pcs->pci_st = st;
179 pcs->pci_sh = sh;
180 pcs->pci_dmat = pa->pa_dmat;
181 pcs->pci_pc = pa->pa_pc;
182 pcs->pci_tag = pa->pa_tag;
183 if (pa->pa_id == PCI_QLOGIC_ISP) {
184 isp->isp_mdvec = &mdvec;
185 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
186 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
187 if (isp->isp_param == NULL) {
188 printf("%s: couldn't allocate sdparam table\n",
189 isp->isp_name);
190 return;
191 }
192 bzero(isp->isp_param, sizeof (sdparam));
193 } else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
194 u_int32_t data;
195 isp->isp_mdvec = &mdvec_2100;
196 if (ioh_valid == 0) {
197 printf("%s: warning, ISP2100 cannot use I/O Space"
198 " Mappings\n", isp->isp_name);
199 } else {
200 pcs->pci_st = iot;
201 pcs->pci_sh = ioh;
202 }
203
204 #if 0
205 printf("%s: PCIREGS cmd=%x bhlc=%x\n", isp->isp_name,
206 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG),
207 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG));
208 #endif
209 isp->isp_type = ISP_HA_FC_2100;
210 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
211 if (isp->isp_param == NULL) {
212 printf("%s: couldn't allocate fcparam table\n",
213 isp->isp_name);
214 return;
215 }
216 bzero(isp->isp_param, sizeof (fcparam));
217
218 data = pci_conf_read(pa->pa_pc, pa->pa_tag,
219 PCI_COMMAND_STATUS_REG);
220 data |= PCI_COMMAND_MASTER_ENABLE |
221 PCI_COMMAND_INVALIDATE_ENABLE;
222 pci_conf_write(pa->pa_pc, pa->pa_tag,
223 PCI_COMMAND_STATUS_REG, data);
224 /*
225 * Wierd- we need to clear the lsb in offset 0x30 to take the
226 * chip out of reset state.
227 */
228 data = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x30);
229 data &= ~1;
230 pci_conf_write(pa->pa_pc, pa->pa_tag, 0x30, data);
231 #if 0
232 /*
233 * XXX: Need to get the actual revision number of the 2100 FB
234 */
235 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
236 data &= ~0xffff;
237 data |= 0xf801;
238 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
239 printf("%s: setting latency to %x and cache line size to %x\n",
240 isp->isp_name, (data >> 8) & 0xff,
241 data & 0xff);
242 #endif
243 } else {
244 return;
245 }
246
247 #ifdef DEBUG
248 if (oneshot) {
249 oneshot = 0;
250 printf("***Qlogic ISP Driver, NetBSD (pci) Platform Version "
251 "%d.%d Core Version %d.%d\n",
252 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
253 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
254 }
255 #endif
256
257 ISP_LOCK(isp);
258 isp_reset(isp);
259 if (isp->isp_state != ISP_RESETSTATE) {
260 ISP_UNLOCK(isp);
261 free(isp->isp_param, M_DEVBUF);
262 return;
263 }
264 isp_init(isp);
265 if (isp->isp_state != ISP_INITSTATE) {
266 isp_uninit(isp);
267 ISP_UNLOCK(isp);
268 free(isp->isp_param, M_DEVBUF);
269 return;
270 }
271
272 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
273 pa->pa_intrline, &ih)) {
274 printf("%s: couldn't map interrupt\n", isp->isp_name);
275 isp_uninit(isp);
276 ISP_UNLOCK(isp);
277 free(isp->isp_param, M_DEVBUF);
278 return;
279 }
280
281 intrstr = pci_intr_string(pa->pa_pc, ih);
282 if (intrstr == NULL)
283 intrstr = "<I dunno>";
284 pcs->pci_ih =
285 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
286 if (pcs->pci_ih == NULL) {
287 printf("%s: couldn't establish interrupt at %s\n",
288 isp->isp_name, intrstr);
289 isp_uninit(isp);
290 ISP_UNLOCK(isp);
291 free(isp->isp_param, M_DEVBUF);
292 return;
293 }
294 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
295
296 /*
297 * Create the DMA maps for the data transfers.
298 */
299 for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
300 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
301 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
302 &pcs->pci_xfer_dmap[i])) {
303 printf("%s: can't create dma maps\n",
304 isp->isp_name);
305 isp_uninit(isp);
306 ISP_UNLOCK(isp);
307 return;
308 }
309 }
310 /*
311 * Do Generic attach now.
312 */
313 isp_attach(isp);
314 if (isp->isp_state != ISP_RUNSTATE) {
315 isp_uninit(isp);
316 free(isp->isp_param, M_DEVBUF);
317 }
318 ISP_UNLOCK(isp);
319 }
320
321 #define PCI_BIU_REGS_OFF BIU_REGS_OFF
322
323 static u_int16_t
324 isp_pci_rd_reg(isp, regoff)
325 struct ispsoftc *isp;
326 int regoff;
327 {
328 u_int16_t rv;
329 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
330 int offset, oldsxp = 0;
331
332 if ((regoff & BIU_BLOCK) != 0) {
333 offset = PCI_BIU_REGS_OFF;
334 } else if ((regoff & MBOX_BLOCK) != 0) {
335 if (isp->isp_type & ISP_HA_SCSI)
336 offset = PCI_MBOX_REGS_OFF;
337 else
338 offset = PCI_MBOX_REGS2100_OFF;
339 } else if ((regoff & SXP_BLOCK) != 0) {
340 offset = PCI_SXP_REGS_OFF;
341 /*
342 * We will assume that someone has paused the RISC processor.
343 */
344 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
345 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
346 } else {
347 offset = PCI_RISC_REGS_OFF;
348 }
349 regoff &= 0xff;
350 offset += regoff;
351 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
352 if ((regoff & SXP_BLOCK) != 0) {
353 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
354 }
355 return (rv);
356 }
357
358 static void
359 isp_pci_wr_reg(isp, regoff, val)
360 struct ispsoftc *isp;
361 int regoff;
362 u_int16_t val;
363 {
364 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
365 int offset, oldsxp = 0;
366 if ((regoff & BIU_BLOCK) != 0) {
367 offset = PCI_BIU_REGS_OFF;
368 } else if ((regoff & MBOX_BLOCK) != 0) {
369 if (isp->isp_type & ISP_HA_SCSI)
370 offset = PCI_MBOX_REGS_OFF;
371 else
372 offset = PCI_MBOX_REGS2100_OFF;
373 } else if ((regoff & SXP_BLOCK) != 0) {
374 offset = PCI_SXP_REGS_OFF;
375 /*
376 * We will assume that someone has paused the RISC processor.
377 */
378 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
379 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
380 } else {
381 offset = PCI_RISC_REGS_OFF;
382 }
383 regoff &= 0xff;
384 offset += regoff;
385 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
386 if ((regoff & SXP_BLOCK) != 0) {
387 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
388 }
389 }
390
391 static int
392 isp_pci_mbxdma(isp)
393 struct ispsoftc *isp;
394 {
395 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
396 bus_dma_segment_t seg;
397 bus_size_t len;
398 fcparam *fcp;
399 int rseg;
400
401 /*
402 * Allocate and map the request queue.
403 */
404 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
405 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
406 BUS_DMA_NOWAIT) ||
407 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
408 (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
409 return (1);
410 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
411 &pci->pci_rquest_dmap) ||
412 bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
413 (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
414 return (1);
415
416 isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
417
418 /*
419 * Allocate and map the result queue.
420 */
421 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
422 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
423 BUS_DMA_NOWAIT) ||
424 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
425 (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
426 return (1);
427 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
428 &pci->pci_result_dmap) ||
429 bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
430 (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
431 return (1);
432 isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
433
434 if (isp->isp_type & ISP_HA_SCSI) {
435 return (0);
436 }
437
438 fcp = isp->isp_param;
439 len = ISP2100_SCRLEN;
440 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
441 BUS_DMA_NOWAIT) ||
442 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
443 (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
444 return (1);
445 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
446 &pci->pci_scratch_dmap) ||
447 bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
448 (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
449 return (1);
450 fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
451 return (0);
452 }
453
454 static int
455 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
456 struct ispsoftc *isp;
457 struct scsipi_xfer *xs;
458 ispreq_t *rq;
459 u_int8_t *iptrp;
460 u_int8_t optr;
461 {
462 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
463 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
464 ispcontreq_t *crq;
465 int segcnt, seg, error, ovseg, seglim, drq;
466
467 if (xs->datalen == 0) {
468 rq->req_seg_count = 1;
469 goto mbxsync;
470 }
471
472 if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
473 panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
474 isp->isp_name, rq->req_handle);
475 /* NOTREACHED */
476 }
477
478 if (xs->flags & SCSI_DATA_IN) {
479 drq = REQFLAG_DATA_IN;
480 } else {
481 drq = REQFLAG_DATA_OUT;
482 }
483
484 if (isp->isp_type & ISP_HA_FC) {
485 seglim = ISP_RQDSEG_T2;
486 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
487 ((ispreqt2_t *)rq)->req_flags |= drq;
488 } else {
489 seglim = ISP_RQDSEG;
490 rq->req_flags |= drq;
491 }
492 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
493 NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
494 if (error) {
495 XS_SETERR(xs, HBA_BOTCH);
496 return (error);
497 }
498
499 segcnt = dmap->dm_nsegs;
500
501 for (seg = 0, rq->req_seg_count = 0;
502 seg < segcnt && rq->req_seg_count < seglim;
503 seg++, rq->req_seg_count++) {
504 if (isp->isp_type & ISP_HA_FC) {
505 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
506 rq2->req_dataseg[rq2->req_seg_count].ds_count =
507 dmap->dm_segs[seg].ds_len;
508 rq2->req_dataseg[rq2->req_seg_count].ds_base =
509 dmap->dm_segs[seg].ds_addr;
510 } else {
511 rq->req_dataseg[rq->req_seg_count].ds_count =
512 dmap->dm_segs[seg].ds_len;
513 rq->req_dataseg[rq->req_seg_count].ds_base =
514 dmap->dm_segs[seg].ds_addr;
515 }
516 }
517
518 if (seg == segcnt)
519 goto dmasync;
520
521 do {
522 crq = (ispcontreq_t *)
523 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
524 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
525 if (*iptrp == optr) {
526 printf("%s: Request Queue Overflow++\n",
527 isp->isp_name);
528 bus_dmamap_unload(pci->pci_dmat, dmap);
529 XS_SETERR(xs, HBA_BOTCH);
530 return (EFBIG);
531 }
532 rq->req_header.rqs_entry_count++;
533 bzero((void *)crq, sizeof (*crq));
534 crq->req_header.rqs_entry_count = 1;
535 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
536
537 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
538 rq->req_seg_count++, seg++, ovseg++) {
539 crq->req_dataseg[ovseg].ds_count =
540 dmap->dm_segs[seg].ds_len;
541 crq->req_dataseg[ovseg].ds_base =
542 dmap->dm_segs[seg].ds_addr;
543 }
544 } while (seg < segcnt);
545
546 dmasync:
547 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
548 xs->flags & SCSI_DATA_IN ?
549 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
550
551 mbxsync:
552
553 bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
554 pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
555 return (0);
556 }
557
558 static int
559 isp_pci_intr(arg)
560 void *arg;
561 {
562 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
563 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
564 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
565 return (isp_intr(arg));
566 }
567
/*
 * Undo the DMA work done by isp_pci_dmasetup() once a transfer has
 * completed: sync the data map for the CPU and unload it.
 *
 * NOTE(review): this indexes pci_xfer_dmap[handle] directly, while
 * isp_pci_dmasetup() indexes by (req_handle - 1); presumably the core
 * code passes a zero-based handle here- verify against the caller in
 * the bus-independent code.
 */
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	/* Sync direction mirrors the PREREAD/PREWRITE done at setup. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}
582
/*
 * Bus-specific fixup run after the core has reset the chip:
 * write the "disable BIOS" command to the HCCR register.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
590
/*
 * Debug helper: print the PCI command/status configuration register
 * for this adapter.
 */
static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}
599