/*	$NetBSD: isp_pci.c,v 1.26 1998/07/31 02:08:16 mjacob Exp $	*/
2 /*
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4 *
5 *---------------------------------------
6 * Copyright (c) 1997, 1998 by Matthew Jacob
7 * NASA/Ames Research Center
8 * All rights reserved.
9 *---------------------------------------
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice immediately at the beginning of the file, without modification,
16 * this list of conditions, and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 */
36
37 #include <dev/ic/isp_netbsd.h>
38 #include <dev/microcode/isp/asm_pci.h>
39
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42 #include <dev/pci/pcidevs.h>
43
/*
 * Forward declarations for the bus-specific entry points that are
 * handed to the machine-independent isp code via struct ispmdvec,
 * plus the PCI interrupt wrapper.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
55
/*
 * Machine-dependent vector for parallel-SCSI (ISP10x0) boards:
 * register accessors, DMA helpers, the firmware image to download
 * and chip configuration parameters.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* no handler supplied for this slot */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,		/* firmware image, length, load origin, */
	ISP_CODE_LENGTH,	/* and version, from asm_pci.h */
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,	/* BIU CONF1 bits */
	60 /* MAGIC- all known PCI card implementations are 60MHz */
};
72
/*
 * Machine-dependent vector for Fibre Channel (ISP2100) boards.
 * Identical to mdvec except for the firmware image and its metadata.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* no handler supplied for this slot */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,	/* 2100 firmware image, length, origin, */
	ISP2100_CODE_LENGTH,	/* and version, from asm_pci.h */
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,	/* BIU CONF1 bits */
	60 /* MAGIC- all known PCI card implementations are 60MHz */
};
89
/*
 * Combined 32-bit PCI IDs (product in the high 16 bits, vendor in the
 * low 16) for the supported Qlogic parts, matching pa->pa_id layout.
 */
#define PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

/* Older pcidevs headers may not know the 2100 yet; supply a fallback. */
#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif
#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

/* PCI base address registers: BAR0 (I/O space) and BAR1 (memory space). */
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
102
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * PCI-specific softc.  The common ispsoftc MUST be the first member so
 * that the register accessors can cast a struct ispsoftc * back to a
 * struct isp_pcisoftc *.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* common isp softc (must be first) */
	pci_chipset_tag_t pci_pc;	/* PCI chipset tag */
	pcitag_t pci_tag;		/* PCI device tag */
	bus_space_tag_t pci_st;		/* register space tag */
	bus_space_handle_t pci_sh;	/* register space handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag for all maps below */
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST]; /* per-command data maps */
	void * pci_ih;			/* established interrupt handle */
};

/* Autoconfiguration glue. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
123
124 static int
125 isp_pci_probe(parent, match, aux)
126 struct device *parent;
127 struct cfdata *match;
128 void *aux;
129 {
130 struct pci_attach_args *pa = aux;
131
132 if (pa->pa_id == PCI_QLOGIC_ISP ||
133 pa->pa_id == PCI_QLOGIC_ISP2100) {
134 return (1);
135 } else {
136 return (0);
137 }
138 }
139
140
/*
 * Attach: map the chip registers, set up the chip-type-specific
 * parameter block, reset and initialize the chip, establish the
 * interrupt, create the per-transfer DMA maps and finally call the
 * machine-independent isp_attach().
 */
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	/* Try to map both BARs; it is not fatal for either one to fail. */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	/* Prefer the memory mapping when both succeeded. */
	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		/* Parallel SCSI (ISP10x0) board. */
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate sdparam table\n",
			    isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	} else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		/* Fibre Channel (ISP2100) board. */
		u_int32_t data;
		isp->isp_mdvec = &mdvec_2100;
		/*
		 * The 2100 is driven through its I/O space mapping here;
		 * if only the memory mapping succeeded we warn and fall
		 * back to it (st/sh already point at the memory mapping).
		 */
		if (ioh_valid == 0) {
			printf("%s: warning, ISP2100 cannot use I/O Space"
			    " Mappings\n", isp->isp_name);
		} else {
			pcs->pci_st = iot;
			pcs->pci_sh = ioh;
		}

#if 0
		printf("%s: PCIREGS cmd=%x bhlc=%x\n", isp->isp_name,
		    pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG),
		    pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG));
#endif
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate fcparam table\n",
			    isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));

		/* Enable bus mastering and memory-write-invalidate. */
		data = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		data |= PCI_COMMAND_MASTER_ENABLE |
		    PCI_COMMAND_INVALIDATE_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG, data);
		/*
		 * Weird- we need to clear the lsb in offset 0x30 to take the
		 * chip out of reset state.
		 */
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x30);
		data &= ~1;
		pci_conf_write(pa->pa_pc, pa->pa_tag, 0x30, data);
#if 0
		/*
		 * XXX: Need to get the actual revision number of the 2100 FB
		 */
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
		data &= ~0xffff;
		data |= 0xf801;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
		printf("%s: setting latency to %x and cache line size to %x\n",
		    isp->isp_name, (data >> 8) & 0xff,
		    data & 0xff);
#endif
	} else {
		/* Should be unreachable: probe only matched the two IDs. */
		return;
	}

	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih =
	    pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	/*
	 * Create the DMA maps for the data transfers.
	 *
	 * NOTE(review): unlike the earlier failure paths, this one does
	 * not free(isp->isp_param, M_DEVBUF), nor does it destroy the
	 * maps created on previous loop iterations or disestablish the
	 * interrupt -- verify whether that was intentional.
	 */
	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}
307
308 #define PCI_BIU_REGS_OFF BIU_REGS_OFF
309
310 static u_int16_t
311 isp_pci_rd_reg(isp, regoff)
312 struct ispsoftc *isp;
313 int regoff;
314 {
315 u_int16_t rv;
316 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
317 int offset, oldsxp = 0;
318
319 if ((regoff & BIU_BLOCK) != 0) {
320 offset = PCI_BIU_REGS_OFF;
321 } else if ((regoff & MBOX_BLOCK) != 0) {
322 if (isp->isp_type & ISP_HA_SCSI)
323 offset = PCI_MBOX_REGS_OFF;
324 else
325 offset = PCI_MBOX_REGS2100_OFF;
326 } else if ((regoff & SXP_BLOCK) != 0) {
327 offset = PCI_SXP_REGS_OFF;
328 /*
329 * We will assume that someone has paused the RISC processor.
330 */
331 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
332 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
333 } else {
334 offset = PCI_RISC_REGS_OFF;
335 }
336 regoff &= 0xff;
337 offset += regoff;
338 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
339 if ((regoff & SXP_BLOCK) != 0) {
340 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
341 }
342 return (rv);
343 }
344
345 static void
346 isp_pci_wr_reg(isp, regoff, val)
347 struct ispsoftc *isp;
348 int regoff;
349 u_int16_t val;
350 {
351 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
352 int offset, oldsxp = 0;
353 if ((regoff & BIU_BLOCK) != 0) {
354 offset = PCI_BIU_REGS_OFF;
355 } else if ((regoff & MBOX_BLOCK) != 0) {
356 if (isp->isp_type & ISP_HA_SCSI)
357 offset = PCI_MBOX_REGS_OFF;
358 else
359 offset = PCI_MBOX_REGS2100_OFF;
360 } else if ((regoff & SXP_BLOCK) != 0) {
361 offset = PCI_SXP_REGS_OFF;
362 /*
363 * We will assume that someone has paused the RISC processor.
364 */
365 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
366 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
367 } else {
368 offset = PCI_RISC_REGS_OFF;
369 }
370 regoff &= 0xff;
371 offset += regoff;
372 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
373 if ((regoff & SXP_BLOCK) != 0) {
374 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
375 }
376 }
377
/*
 * Allocate and DMA-map the request queue, the result queue, and (for
 * Fibre Channel chips only) the mailbox scratch area.  Stores the bus
 * addresses in the softc/fcparam.  Returns 0 on success, 1 on failure.
 *
 * NOTE(review): the failure paths do not release resources acquired by
 * earlier steps (dmamem allocations/mappings, dmamaps), so a partial
 * failure leaks them; attach is abandoned in that case, so the leak is
 * one-time -- verify whether that was intentional.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	    (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	/* Bus address of the request queue, handed to the chip. */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	    (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI chips need no scratch area; we are done. */
	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/*
	 * Fibre Channel: allocate and map the scratch area.
	 */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	    (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
440
441 static int
442 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
443 struct ispsoftc *isp;
444 struct scsipi_xfer *xs;
445 ispreq_t *rq;
446 u_int8_t *iptrp;
447 u_int8_t optr;
448 {
449 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
450 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
451 ispcontreq_t *crq;
452 int segcnt, seg, error, ovseg, seglim, drq;
453
454 if (xs->datalen == 0) {
455 rq->req_seg_count = 1;
456 goto mbxsync;
457 }
458
459 if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
460 panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
461 isp->isp_name, rq->req_handle);
462 /* NOTREACHED */
463 }
464
465 if (xs->flags & SCSI_DATA_IN) {
466 drq = REQFLAG_DATA_IN;
467 } else {
468 drq = REQFLAG_DATA_OUT;
469 }
470
471 if (isp->isp_type & ISP_HA_FC) {
472 seglim = ISP_RQDSEG_T2;
473 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
474 ((ispreqt2_t *)rq)->req_flags |= drq;
475 } else {
476 seglim = ISP_RQDSEG;
477 rq->req_flags |= drq;
478 }
479 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
480 NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
481 if (error) {
482 XS_SETERR(xs, HBA_BOTCH);
483 return (error);
484 }
485
486 segcnt = dmap->dm_nsegs;
487
488 for (seg = 0, rq->req_seg_count = 0;
489 seg < segcnt && rq->req_seg_count < seglim;
490 seg++, rq->req_seg_count++) {
491 if (isp->isp_type & ISP_HA_FC) {
492 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
493 rq2->req_dataseg[rq2->req_seg_count].ds_count =
494 dmap->dm_segs[seg].ds_len;
495 rq2->req_dataseg[rq2->req_seg_count].ds_base =
496 dmap->dm_segs[seg].ds_addr;
497 } else {
498 rq->req_dataseg[rq->req_seg_count].ds_count =
499 dmap->dm_segs[seg].ds_len;
500 rq->req_dataseg[rq->req_seg_count].ds_base =
501 dmap->dm_segs[seg].ds_addr;
502 }
503 }
504
505 if (seg == segcnt)
506 goto dmasync;
507
508 do {
509 crq = (ispcontreq_t *)
510 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
511 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
512 if (*iptrp == optr) {
513 printf("%s: Request Queue Overflow++\n",
514 isp->isp_name);
515 bus_dmamap_unload(pci->pci_dmat, dmap);
516 XS_SETERR(xs, HBA_BOTCH);
517 return (EFBIG);
518 }
519 rq->req_header.rqs_entry_count++;
520 bzero((void *)crq, sizeof (*crq));
521 crq->req_header.rqs_entry_count = 1;
522 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
523
524 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
525 rq->req_seg_count++, seg++, ovseg++) {
526 crq->req_dataseg[ovseg].ds_count =
527 dmap->dm_segs[seg].ds_len;
528 crq->req_dataseg[ovseg].ds_base =
529 dmap->dm_segs[seg].ds_addr;
530 }
531 } while (seg < segcnt);
532
533 dmasync:
534 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
535 xs->flags & SCSI_DATA_IN ?
536 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
537
538 mbxsync:
539
540 bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
541 pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
542 return (0);
543 }
544
545 static int
546 isp_pci_intr(arg)
547 void *arg;
548 {
549 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
550 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
551 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
552 return (isp_intr(arg));
553 }
554
555 static void
556 isp_pci_dmateardown(isp, xs, handle)
557 struct ispsoftc *isp;
558 struct scsipi_xfer *xs;
559 u_int32_t handle;
560 {
561 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
562 bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];
563
564 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
565 xs->flags & SCSI_DATA_IN ?
566 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
567 bus_dmamap_unload(pci->pci_dmat, dmap);
568 }
569
/*
 * Post-reset fixup hook called by the machine-independent reset code.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
577
578 static void
579 isp_pci_dumpregs(isp)
580 struct ispsoftc *isp;
581 {
582 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
583 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
584 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
585 }
586