isp_pci.c revision 1.35 1 /* $NetBSD: isp_pci.c,v 1.35 1999/02/09 00:35:35 mjacob Exp $ */
2 /* release_02_05_99 */
3 /*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 *
6 *---------------------------------------
7 * Copyright (c) 1997, 1998 by Matthew Jacob
8 * NASA/Ames Research Center
9 * All rights reserved.
10 *---------------------------------------
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice immediately at the beginning of the file, without modification,
17 * this list of conditions, and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 */
37
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/microcode/isp/asm_pci.h>
40
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcidevs.h>
44
45 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
46 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
47 static int isp_pci_mbxdma __P((struct ispsoftc *));
48 static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
49 ispreq_t *, u_int8_t *, u_int8_t));
50 static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
51 u_int32_t));
52 static void isp_pci_reset1 __P((struct ispsoftc *));
53 static void isp_pci_dumpregs __P((struct ispsoftc *));
54 static int isp_pci_intr __P((void *));
55
/*
 * Dispatch vector for the parallel-SCSI (ISP10x0) boards: PCI-specific
 * register accessors, DMA hooks, reset/dump hooks, and the firmware
 * image to download.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,				/* no pre-reset hook */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,			/* firmware image */
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU CONF1 settings */
	0
};
72
/*
 * Dispatch vector for the Fibre Channel (ISP2100) boards.  Same hooks
 * as mdvec, but with the 2100 firmware image and no CONF1 fifo setting.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,				/* no pre-reset hook */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,		/* 2100 firmware image */
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	0,	/* Irrelevant to the 2100 */
	0
};
89
/*
 * Composite device IDs, (product << 16) | vendor, in the same layout
 * as pci_attach_args->pa_id tested in isp_pci_probe().
 */
#define PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

/* Older pcidevs headers may lack the 2100 product ID; supply it here. */
#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif
#define PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

/* Config-space offsets of the I/O and memory base address registers. */
#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14
101
102
103 static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
104 static void isp_pci_attach __P((struct device *, struct device *, void *));
105
/*
 * Per-instance software state: the generic ISP softc followed by the
 * PCI-specific bus handles and DMA maps.  pci_isp must remain the
 * first member — the register accessors and DMA hooks cast between
 * struct ispsoftc * and struct isp_pcisoftc *.
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* generic softc; must be first */
	pci_chipset_tag_t	pci_pc;		/* chipset tag (config space) */
	pcitag_t		pci_tag;	/* our config-space tag */
	bus_space_tag_t		pci_st;		/* register space tag */
	bus_space_handle_t	pci_sh;		/* register space handle */
	bus_dma_tag_t		pci_dmat;	/* DMA tag from autoconf */
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t		pci_result_dmap;	/* result queue map */
	bus_dmamap_t		pci_xfer_dmap[MAXISPREQUEST]; /* per-cmd data maps */
	void			*pci_ih;	/* established interrupt handle */
};
119
/* Autoconfiguration glue: softc size plus match and attach entry points. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
123
124 static int
125 isp_pci_probe(parent, match, aux)
126 struct device *parent;
127 struct cfdata *match;
128 void *aux;
129 {
130 struct pci_attach_args *pa = aux;
131
132 if (pa->pa_id == PCI_QLOGIC_ISP ||
133 pa->pa_id == PCI_QLOGIC_ISP2100) {
134 return (1);
135 } else {
136 return (0);
137 }
138 }
139
140
141 static void
142 isp_pci_attach(parent, self, aux)
143 struct device *parent, *self;
144 void *aux;
145 {
146 #ifdef DEBUG
147 static char oneshot = 1;
148 #endif
149 u_int32_t data;
150 struct pci_attach_args *pa = aux;
151 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
152 struct ispsoftc *isp = &pcs->pci_isp;
153 bus_space_tag_t st, iot, memt;
154 bus_space_handle_t sh, ioh, memh;
155 pci_intr_handle_t ih;
156 const char *intrstr;
157 int ioh_valid, memh_valid, i;
158 ISP_LOCKVAL_DECL;
159
160 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
161 PCI_MAPREG_TYPE_IO, 0,
162 &iot, &ioh, NULL, NULL) == 0);
163 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
164 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
165 &memt, &memh, NULL, NULL) == 0);
166
167 if (memh_valid) {
168 st = memt;
169 sh = memh;
170 } else if (ioh_valid) {
171 st = iot;
172 sh = ioh;
173 } else {
174 printf(": unable to map device registers\n");
175 return;
176 }
177 printf("\n");
178
179 pcs->pci_st = st;
180 pcs->pci_sh = sh;
181 pcs->pci_dmat = pa->pa_dmat;
182 pcs->pci_pc = pa->pa_pc;
183 pcs->pci_tag = pa->pa_tag;
184 if (pa->pa_id == PCI_QLOGIC_ISP) {
185 isp->isp_mdvec = &mdvec;
186 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
187 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
188 if (isp->isp_param == NULL) {
189 printf("%s: couldn't allocate sdparam table\n",
190 isp->isp_name);
191 return;
192 }
193 bzero(isp->isp_param, sizeof (sdparam));
194 } else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
195 isp->isp_mdvec = &mdvec_2100;
196 isp->isp_type = ISP_HA_FC_2100;
197 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
198 if (isp->isp_param == NULL) {
199 printf("%s: couldn't allocate fcparam table\n",
200 isp->isp_name);
201 return;
202 }
203 bzero(isp->isp_param, sizeof (fcparam));
204 } else {
205 return;
206 }
207 /*
208 * Make sure that command register set sanely.
209 */
210 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
211 data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
212 /*
213 * Not so sure about these- but I think it's important that they get
214 * enabled......
215 */
216 data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
217 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
218 /*
219 * Make sure that latency timer and cache line size is set sanely.
220 */
221 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
222 data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
223 data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
224 data |= (0x40 << PCI_LATTIMER_SHIFT);
225 data |= (0x10 << PCI_CACHELINE_SHIFT);
226 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
227
228 #ifdef DEBUG
229 if (oneshot) {
230 oneshot = 0;
231 printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
232 "%d.%d Core Version %d.%d\n",
233 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
234 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
235 }
236 #endif
237 ISP_LOCK(isp);
238 isp_reset(isp);
239 if (isp->isp_state != ISP_RESETSTATE) {
240 ISP_UNLOCK(isp);
241 free(isp->isp_param, M_DEVBUF);
242 return;
243 }
244 isp_init(isp);
245 if (isp->isp_state != ISP_INITSTATE) {
246 isp_uninit(isp);
247 ISP_UNLOCK(isp);
248 free(isp->isp_param, M_DEVBUF);
249 return;
250 }
251
252 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
253 pa->pa_intrline, &ih)) {
254 printf("%s: couldn't map interrupt\n", isp->isp_name);
255 isp_uninit(isp);
256 ISP_UNLOCK(isp);
257 free(isp->isp_param, M_DEVBUF);
258 return;
259 }
260
261 intrstr = pci_intr_string(pa->pa_pc, ih);
262 if (intrstr == NULL)
263 intrstr = "<I dunno>";
264 pcs->pci_ih =
265 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
266 if (pcs->pci_ih == NULL) {
267 printf("%s: couldn't establish interrupt at %s\n",
268 isp->isp_name, intrstr);
269 isp_uninit(isp);
270 ISP_UNLOCK(isp);
271 free(isp->isp_param, M_DEVBUF);
272 return;
273 }
274 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
275
276 /*
277 * Create the DMA maps for the data transfers.
278 */
279 for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
280 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
281 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
282 &pcs->pci_xfer_dmap[i])) {
283 printf("%s: can't create dma maps\n",
284 isp->isp_name);
285 isp_uninit(isp);
286 ISP_UNLOCK(isp);
287 return;
288 }
289 }
290 /*
291 * Do Generic attach now.
292 */
293 isp_attach(isp);
294 if (isp->isp_state != ISP_RUNSTATE) {
295 isp_uninit(isp);
296 free(isp->isp_param, M_DEVBUF);
297 }
298 ISP_UNLOCK(isp);
299 }
300
/* PCI-space byte offset of the Bus Interface Unit register block. */
#define PCI_BIU_REGS_OFF	BIU_REGS_OFF
302
303 static u_int16_t
304 isp_pci_rd_reg(isp, regoff)
305 struct ispsoftc *isp;
306 int regoff;
307 {
308 u_int16_t rv;
309 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
310 int offset, oldsxp = 0;
311
312 if ((regoff & BIU_BLOCK) != 0) {
313 offset = PCI_BIU_REGS_OFF;
314 } else if ((regoff & MBOX_BLOCK) != 0) {
315 if (isp->isp_type & ISP_HA_SCSI)
316 offset = PCI_MBOX_REGS_OFF;
317 else
318 offset = PCI_MBOX_REGS2100_OFF;
319 } else if ((regoff & SXP_BLOCK) != 0) {
320 offset = PCI_SXP_REGS_OFF;
321 /*
322 * We will assume that someone has paused the RISC processor.
323 */
324 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
325 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
326 } else {
327 offset = PCI_RISC_REGS_OFF;
328 }
329 regoff &= 0xff;
330 offset += regoff;
331 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
332 if ((regoff & SXP_BLOCK) != 0) {
333 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
334 }
335 return (rv);
336 }
337
338 static void
339 isp_pci_wr_reg(isp, regoff, val)
340 struct ispsoftc *isp;
341 int regoff;
342 u_int16_t val;
343 {
344 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
345 int offset, oldsxp = 0;
346 if ((regoff & BIU_BLOCK) != 0) {
347 offset = PCI_BIU_REGS_OFF;
348 } else if ((regoff & MBOX_BLOCK) != 0) {
349 if (isp->isp_type & ISP_HA_SCSI)
350 offset = PCI_MBOX_REGS_OFF;
351 else
352 offset = PCI_MBOX_REGS2100_OFF;
353 } else if ((regoff & SXP_BLOCK) != 0) {
354 offset = PCI_SXP_REGS_OFF;
355 /*
356 * We will assume that someone has paused the RISC processor.
357 */
358 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
359 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
360 } else {
361 offset = PCI_RISC_REGS_OFF;
362 }
363 regoff &= 0xff;
364 offset += regoff;
365 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
366 if ((regoff & SXP_BLOCK) != 0) {
367 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
368 }
369 }
370
371 static int
372 isp_pci_mbxdma(isp)
373 struct ispsoftc *isp;
374 {
375 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
376 bus_dma_segment_t seg;
377 bus_size_t len;
378 fcparam *fcp;
379 int rseg;
380
381 /*
382 * Allocate and map the request queue.
383 */
384 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
385 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
386 BUS_DMA_NOWAIT) ||
387 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
388 (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
389 return (1);
390 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
391 &pci->pci_rquest_dmap) ||
392 bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
393 (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
394 return (1);
395
396 isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
397
398 /*
399 * Allocate and map the result queue.
400 */
401 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
402 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
403 BUS_DMA_NOWAIT) ||
404 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
405 (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
406 return (1);
407 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
408 &pci->pci_result_dmap) ||
409 bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
410 (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
411 return (1);
412 isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
413
414 if (isp->isp_type & ISP_HA_SCSI) {
415 return (0);
416 }
417
418 fcp = isp->isp_param;
419 len = ISP2100_SCRLEN;
420 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
421 BUS_DMA_NOWAIT) ||
422 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
423 (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
424 return (1);
425 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
426 &pci->pci_scratch_dmap) ||
427 bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
428 (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
429 return (1);
430 fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
431 return (0);
432 }
433
434 static int
435 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
436 struct ispsoftc *isp;
437 struct scsipi_xfer *xs;
438 ispreq_t *rq;
439 u_int8_t *iptrp;
440 u_int8_t optr;
441 {
442 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
443 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
444 ispcontreq_t *crq;
445 int segcnt, seg, error, ovseg, seglim, drq;
446
447 if (xs->datalen == 0) {
448 rq->req_seg_count = 1;
449 goto mbxsync;
450 }
451
452 if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
453 panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
454 isp->isp_name, rq->req_handle);
455 /* NOTREACHED */
456 }
457
458 if (xs->flags & SCSI_DATA_IN) {
459 drq = REQFLAG_DATA_IN;
460 } else {
461 drq = REQFLAG_DATA_OUT;
462 }
463
464 if (isp->isp_type & ISP_HA_FC) {
465 seglim = ISP_RQDSEG_T2;
466 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
467 ((ispreqt2_t *)rq)->req_flags |= drq;
468 } else {
469 seglim = ISP_RQDSEG;
470 rq->req_flags |= drq;
471 }
472 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
473 NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
474 if (error) {
475 XS_SETERR(xs, HBA_BOTCH);
476 return (CMD_COMPLETE);
477 }
478
479 segcnt = dmap->dm_nsegs;
480
481 for (seg = 0, rq->req_seg_count = 0;
482 seg < segcnt && rq->req_seg_count < seglim;
483 seg++, rq->req_seg_count++) {
484 if (isp->isp_type & ISP_HA_FC) {
485 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
486 rq2->req_dataseg[rq2->req_seg_count].ds_count =
487 dmap->dm_segs[seg].ds_len;
488 rq2->req_dataseg[rq2->req_seg_count].ds_base =
489 dmap->dm_segs[seg].ds_addr;
490 } else {
491 rq->req_dataseg[rq->req_seg_count].ds_count =
492 dmap->dm_segs[seg].ds_len;
493 rq->req_dataseg[rq->req_seg_count].ds_base =
494 dmap->dm_segs[seg].ds_addr;
495 }
496 }
497
498 if (seg == segcnt)
499 goto dmasync;
500
501 do {
502 crq = (ispcontreq_t *)
503 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
504 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
505 if (*iptrp == optr) {
506 printf("%s: Request Queue Overflow++\n",
507 isp->isp_name);
508 bus_dmamap_unload(pci->pci_dmat, dmap);
509 XS_SETERR(xs, HBA_BOTCH);
510 return (CMD_COMPLETE);
511 }
512 rq->req_header.rqs_entry_count++;
513 bzero((void *)crq, sizeof (*crq));
514 crq->req_header.rqs_entry_count = 1;
515 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
516
517 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
518 rq->req_seg_count++, seg++, ovseg++) {
519 crq->req_dataseg[ovseg].ds_count =
520 dmap->dm_segs[seg].ds_len;
521 crq->req_dataseg[ovseg].ds_base =
522 dmap->dm_segs[seg].ds_addr;
523 }
524 } while (seg < segcnt);
525
526 dmasync:
527 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
528 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
529 BUS_DMASYNC_PREWRITE);
530
531 mbxsync:
532
533 bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
534 pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
535 return (CMD_QUEUED);
536 }
537
538 static int
539 isp_pci_intr(arg)
540 void *arg;
541 {
542 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
543 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
544 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
545 return (isp_intr(arg));
546 }
547
/*
 * Undo a data transfer's DMA setup after command completion: perform
 * the post-transfer sync, then unload the map.
 *
 * NOTE(review): this indexes pci_xfer_dmap[handle], while
 * isp_pci_dmasetup() indexes pci_xfer_dmap[rq->req_handle - 1].
 * Whether the two agree depends on the handle value the caller passes
 * here — verify against the ISP_DMAFREE caller in the core driver.
 */
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	/* Direction of the sync matches the direction of the transfer. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}
562
/*
 * Post-reset fixup hook: disable the chip's BIOS so it cannot be
 * mapped in after we have taken the board over.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
570
571 static void
572 isp_pci_dumpregs(isp)
573 struct ispsoftc *isp;
574 {
575 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
576 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
577 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
578 }
579