/* $NetBSD: isp_pci.c,v 1.30 1998/09/17 23:10:20 mjacob Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998 by Matthew Jacob
 * NASA/Ames Research Center
 * All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
36
37 #include <dev/ic/isp_netbsd.h>
38 #include <dev/microcode/isp/asm_pci.h>
39
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42 #include <dev/pci/pcidevs.h>
43
/* Forward declarations for the bus-specific entry points handed to the
 * platform-independent ISP core via the ispmdvec tables below. */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
54
/*
 * Machine-dependent vector for ISP10x0 (parallel SCSI) boards:
 * register accessors, DMA helpers, and the firmware image to load.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* allocate/map queue DMA memory */
	isp_pci_dmasetup,	/* per-command DMA setup */
	isp_pci_dmateardown,	/* per-command DMA teardown */
	NULL,			/* unused hook slot -- TODO confirm meaning */
	isp_pci_reset1,		/* post-reset fixup (disables BIOS) */
	isp_pci_dumpregs,	/* diagnostic register dump */
	ISP_RISC_CODE,		/* firmware image */
	ISP_CODE_LENGTH,	/* firmware length */
	ISP_CODE_ORG,		/* firmware load origin */
	ISP_CODE_VERSION,	/* firmware version */
	BIU_BURST_ENABLE,	/* default to 8 byte burst */
	0			/* see 2100 vector: slot unused there */
};
71
/*
 * Machine-dependent vector for the ISP2100 (Fibre Channel) board.
 * Same entry points as the SCSI vector; only the firmware differs.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* allocate/map queue DMA memory */
	isp_pci_dmasetup,	/* per-command DMA setup */
	isp_pci_dmateardown,	/* per-command DMA teardown */
	NULL,			/* unused hook slot -- TODO confirm meaning */
	isp_pci_reset1,		/* post-reset fixup (disables BIOS) */
	isp_pci_dumpregs,	/* diagnostic register dump */
	ISP2100_RISC_CODE,	/* firmware image */
	ISP2100_CODE_LENGTH,	/* firmware length */
	ISP2100_CODE_ORG,	/* firmware load origin */
	ISP2100_CODE_VERSION,	/* firmware version */
	BIU_BURST_ENABLE,	/* default to 8 byte burst */
	0			/* Not relevant to the 2100 */
};
88
/*
 * Full 32-bit PCI IDs (product in the high half, vendor in the low
 * half) matched against pa->pa_id in the probe routine.
 */
#define PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

/* Older pcidevs headers may lack the 2100 product ID; supply it. */
#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif
#define PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

/* Base Address Registers: BAR0 is I/O space, BAR1 is memory space. */
#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14
100
101
/* Autoconfiguration glue: match and attach entry points. */
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));
104
/*
 * Per-instance softc: the generic ISP state followed by the PCI bus
 * glue (config-space tags, register mapping, and DMA resources).
 * The generic softc must be first so the core code can cast back.
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* generic ISP softc */
	pci_chipset_tag_t	pci_pc;		/* chipset tag (config space) */
	pcitag_t		pci_tag;	/* config-space device tag */
	bus_space_tag_t		pci_st;		/* register space tag */
	bus_space_handle_t	pci_sh;		/* register space handle */
	bus_dma_tag_t		pci_dmat;	/* DMA tag from attach args */
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t		pci_result_dmap;	/* result queue map */
	bus_dmamap_t		pci_xfer_dmap[MAXISPREQUEST]; /* per-cmd data */
	void *			pci_ih;		/* interrupt handler cookie */
};
118
/* Autoconfiguration attachment: softc size plus match/attach hooks. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
122
123 static int
124 isp_pci_probe(parent, match, aux)
125 struct device *parent;
126 struct cfdata *match;
127 void *aux;
128 {
129 struct pci_attach_args *pa = aux;
130
131 if (pa->pa_id == PCI_QLOGIC_ISP ||
132 pa->pa_id == PCI_QLOGIC_ISP2100) {
133 return (1);
134 } else {
135 return (0);
136 }
137 }
138
139
140 static void
141 isp_pci_attach(parent, self, aux)
142 struct device *parent, *self;
143 void *aux;
144 {
145 #ifdef DEBUG
146 static char oneshot = 1;
147 #endif
148 struct pci_attach_args *pa = aux;
149 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
150 struct ispsoftc *isp = &pcs->pci_isp;
151 bus_space_tag_t st, iot, memt;
152 bus_space_handle_t sh, ioh, memh;
153 pci_intr_handle_t ih;
154 const char *intrstr;
155 int ioh_valid, memh_valid, i;
156 ISP_LOCKVAL_DECL;
157
158 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
159 PCI_MAPREG_TYPE_IO, 0,
160 &iot, &ioh, NULL, NULL) == 0);
161 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
162 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
163 &memt, &memh, NULL, NULL) == 0);
164
165 if (memh_valid) {
166 st = memt;
167 sh = memh;
168 } else if (ioh_valid) {
169 st = iot;
170 sh = ioh;
171 } else {
172 printf(": unable to map device registers\n");
173 return;
174 }
175 printf("\n");
176
177 pcs->pci_st = st;
178 pcs->pci_sh = sh;
179 pcs->pci_dmat = pa->pa_dmat;
180 pcs->pci_pc = pa->pa_pc;
181 pcs->pci_tag = pa->pa_tag;
182 if (pa->pa_id == PCI_QLOGIC_ISP) {
183 isp->isp_mdvec = &mdvec;
184 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
185 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
186 if (isp->isp_param == NULL) {
187 printf("%s: couldn't allocate sdparam table\n",
188 isp->isp_name);
189 return;
190 }
191 bzero(isp->isp_param, sizeof (sdparam));
192 } else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
193 u_int32_t data;
194 isp->isp_mdvec = &mdvec_2100;
195 if (ioh_valid == 0) {
196 printf("%s: warning, ISP2100 cannot use I/O Space"
197 " Mappings\n", isp->isp_name);
198 } else {
199 pcs->pci_st = iot;
200 pcs->pci_sh = ioh;
201 }
202
203 #if 0
204 printf("%s: PCIREGS cmd=%x bhlc=%x\n", isp->isp_name,
205 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG),
206 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG));
207 #endif
208 isp->isp_type = ISP_HA_FC_2100;
209 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
210 if (isp->isp_param == NULL) {
211 printf("%s: couldn't allocate fcparam table\n",
212 isp->isp_name);
213 return;
214 }
215 bzero(isp->isp_param, sizeof (fcparam));
216
217 data = pci_conf_read(pa->pa_pc, pa->pa_tag,
218 PCI_COMMAND_STATUS_REG);
219 data |= PCI_COMMAND_MASTER_ENABLE |
220 PCI_COMMAND_INVALIDATE_ENABLE;
221 pci_conf_write(pa->pa_pc, pa->pa_tag,
222 PCI_COMMAND_STATUS_REG, data);
223 /*
224 * Wierd- we need to clear the lsb in offset 0x30 to take the
225 * chip out of reset state.
226 */
227 data = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x30);
228 data &= ~1;
229 pci_conf_write(pa->pa_pc, pa->pa_tag, 0x30, data);
230 #if 0
231 /*
232 * XXX: Need to get the actual revision number of the 2100 FB
233 */
234 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
235 data &= ~0xffff;
236 data |= 0xf801;
237 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
238 printf("%s: setting latency to %x and cache line size to %x\n",
239 isp->isp_name, (data >> 8) & 0xff,
240 data & 0xff);
241 #endif
242 } else {
243 return;
244 }
245 #ifdef DEBUG
246 if (oneshot) {
247 oneshot = 0;
248 printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
249 "%d.%d Core Version %d.%d\n",
250 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
251 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
252 }
253 #endif
254 ISP_LOCK(isp);
255 isp_reset(isp);
256 if (isp->isp_state != ISP_RESETSTATE) {
257 ISP_UNLOCK(isp);
258 free(isp->isp_param, M_DEVBUF);
259 return;
260 }
261 isp_init(isp);
262 if (isp->isp_state != ISP_INITSTATE) {
263 isp_uninit(isp);
264 ISP_UNLOCK(isp);
265 free(isp->isp_param, M_DEVBUF);
266 return;
267 }
268
269 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
270 pa->pa_intrline, &ih)) {
271 printf("%s: couldn't map interrupt\n", isp->isp_name);
272 isp_uninit(isp);
273 ISP_UNLOCK(isp);
274 free(isp->isp_param, M_DEVBUF);
275 return;
276 }
277
278 intrstr = pci_intr_string(pa->pa_pc, ih);
279 if (intrstr == NULL)
280 intrstr = "<I dunno>";
281 pcs->pci_ih =
282 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
283 if (pcs->pci_ih == NULL) {
284 printf("%s: couldn't establish interrupt at %s\n",
285 isp->isp_name, intrstr);
286 isp_uninit(isp);
287 ISP_UNLOCK(isp);
288 free(isp->isp_param, M_DEVBUF);
289 return;
290 }
291 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
292
293 /*
294 * Create the DMA maps for the data transfers.
295 */
296 for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
297 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
298 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
299 &pcs->pci_xfer_dmap[i])) {
300 printf("%s: can't create dma maps\n",
301 isp->isp_name);
302 isp_uninit(isp);
303 ISP_UNLOCK(isp);
304 return;
305 }
306 }
307 /*
308 * Do Generic attach now.
309 */
310 isp_attach(isp);
311 if (isp->isp_state != ISP_RUNSTATE) {
312 isp_uninit(isp);
313 free(isp->isp_param, M_DEVBUF);
314 }
315 ISP_UNLOCK(isp);
316 }
317
318 #define PCI_BIU_REGS_OFF BIU_REGS_OFF
319
320 static u_int16_t
321 isp_pci_rd_reg(isp, regoff)
322 struct ispsoftc *isp;
323 int regoff;
324 {
325 u_int16_t rv;
326 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
327 int offset, oldsxp = 0;
328
329 if ((regoff & BIU_BLOCK) != 0) {
330 offset = PCI_BIU_REGS_OFF;
331 } else if ((regoff & MBOX_BLOCK) != 0) {
332 if (isp->isp_type & ISP_HA_SCSI)
333 offset = PCI_MBOX_REGS_OFF;
334 else
335 offset = PCI_MBOX_REGS2100_OFF;
336 } else if ((regoff & SXP_BLOCK) != 0) {
337 offset = PCI_SXP_REGS_OFF;
338 /*
339 * We will assume that someone has paused the RISC processor.
340 */
341 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
342 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
343 } else {
344 offset = PCI_RISC_REGS_OFF;
345 }
346 regoff &= 0xff;
347 offset += regoff;
348 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
349 if ((regoff & SXP_BLOCK) != 0) {
350 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
351 }
352 return (rv);
353 }
354
355 static void
356 isp_pci_wr_reg(isp, regoff, val)
357 struct ispsoftc *isp;
358 int regoff;
359 u_int16_t val;
360 {
361 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
362 int offset, oldsxp = 0;
363 if ((regoff & BIU_BLOCK) != 0) {
364 offset = PCI_BIU_REGS_OFF;
365 } else if ((regoff & MBOX_BLOCK) != 0) {
366 if (isp->isp_type & ISP_HA_SCSI)
367 offset = PCI_MBOX_REGS_OFF;
368 else
369 offset = PCI_MBOX_REGS2100_OFF;
370 } else if ((regoff & SXP_BLOCK) != 0) {
371 offset = PCI_SXP_REGS_OFF;
372 /*
373 * We will assume that someone has paused the RISC processor.
374 */
375 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
376 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
377 } else {
378 offset = PCI_RISC_REGS_OFF;
379 }
380 regoff &= 0xff;
381 offset += regoff;
382 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
383 if ((regoff & SXP_BLOCK) != 0) {
384 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
385 }
386 }
387
/*
 * Allocate, map, and load the DMA-shared control memory: the request
 * queue, the result queue, and (Fibre Channel only) the firmware
 * scratch area.  The bus addresses the chip will use are recorded in
 * the generic softc / fcparam.
 *
 * Returns 0 on success, 1 on any failure.
 *
 * NOTE(review): on a partial failure the memory and maps set up by the
 * earlier steps are not released.  The device then fails to attach, so
 * this is a one-time leak -- confirm that is acceptable.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	/* Bus address the chip uses to reach the request queue. */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	/* Bus address the chip uses to reach the result queue. */
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI chips need no scratch area; we are done. */
	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/* Fibre Channel: allocate and map the firmware scratch area. */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	      (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
450
/*
 * Set up DMA for one command: load the data buffer into the command's
 * transfer map, fill the request entry's data-segment descriptors, and
 * spill any remaining segments into continuation entries taken from
 * the request queue.  Finally pre-sync both the data map and the
 * request queue so the chip sees consistent memory.
 *
 * iptrp is the request queue input pointer (advanced in place when
 * continuation entries are consumed); optr is the output pointer,
 * used to detect queue overflow.
 *
 * Returns CMD_QUEUED on success, or CMD_COMPLETE with XS_SETERR set
 * on failure.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	/* No data phase: a single (empty) segment and we're done. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/* Handles are 1-based indices into pci_xfer_dmap. */
	if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
		    isp->isp_name, rq->req_handle);
		/* NOTREACHED */
	}

	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/*
	 * FC uses type-2 request entries, which carry a total byte
	 * count and hold a different number of inline segments.
	 */
	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	/* Fill as many segments as fit inline in the request entry. */
	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	/*
	 * Didn't fit inline: spill the remaining segments into
	 * continuation entries allocated from the request queue.
	 */
	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			/* Queue full: back out the data map load. */
			printf("%s: Request Queue Overflow++\n",
			    isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	/* Make the data buffer visible to the device before queueing. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	/* Flush the request entry (and continuations) to the chip. */
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
554
555 static int
556 isp_pci_intr(arg)
557 void *arg;
558 {
559 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
560 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
561 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
562 return (isp_intr(arg));
563 }
564
/*
 * Undo isp_pci_dmasetup() for a completed command: sync the data map
 * for the CPU (direction matches the original transfer) and unload it.
 *
 * NOTE(review): this indexes pci_xfer_dmap[handle] while dmasetup uses
 * pci_xfer_dmap[req_handle - 1] -- confirm callers pass a zero-based
 * index here, otherwise this is off by one.
 */
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}
579
/*
 * Post-reset hook called by the core after a chip reset.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
587
588 static void
589 isp_pci_dumpregs(isp)
590 struct ispsoftc *isp;
591 {
592 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
593 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
594 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
595 }
596