isp_pci.c revision 1.15 1 /* $NetBSD: isp_pci.c,v 1.15 1997/08/16 00:28:10 mjacob Exp $ */
2
3 /*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 *
6 * Copyright (c) 1997 by Matthew Jacob
7 * NASA AMES Research Center
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice immediately at the beginning of the file, without modification,
15 * this list of conditions, and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/queue.h>
40 #include <sys/device.h>
41 #include <machine/bus.h>
42 #include <machine/intr.h>
43 #include <scsi/scsi_all.h>
44 #include <scsi/scsiconf.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pcidevs.h>
48 #include <vm/vm.h>
49
50 #include <dev/ic/ispreg.h>
51 #include <dev/ic/ispvar.h>
52 #include <dev/ic/ispmbox.h>
53 #include <dev/microcode/isp/asm_pci.h>
54
55 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
56 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
57 static int isp_pci_mbxdma __P((struct ispsoftc *));
58 static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsi_xfer *,
59 ispreq_t *, u_int8_t *, u_int8_t));
60 static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsi_xfer *,
61 u_int32_t));
62
63 static void isp_pci_reset1 __P((struct ispsoftc *));
64 static void isp_pci_dumpregs __P((struct ispsoftc *));
65
/*
 * Machine-dependent vector for the parallel SCSI (ISP10x0) adapters:
 * register access, DMA setup/teardown and reset hooks plus the
 * firmware image this bus front-end loads into the chip.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,			/* register read */
	isp_pci_wr_reg,			/* register write */
	isp_pci_mbxdma,			/* mailbox/queue DMA setup */
	isp_pci_dmasetup,		/* per-command DMA setup */
	isp_pci_dmateardown,		/* per-command DMA teardown */
	NULL,				/* unused hook in this front-end */
	isp_pci_reset1,			/* post-reset fixup */
	isp_pci_dumpregs,		/* debug register dump */
	ISP_RISC_CODE,			/* firmware image */
	ISP_CODE_LENGTH,		/* firmware length (words) */
	ISP_CODE_ORG,			/* firmware load origin */
	ISP_CODE_VERSION,		/* firmware version */
	BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,	/* BIU CONF1 bits */
	60 /* MAGIC- all known PCI card implementations are 60MHz */
};
82
/*
 * Machine-dependent vector for the Fibre Channel (ISP2100) adapters.
 * Identical hooks to mdvec, but with the ISP2100 firmware image.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,			/* register read */
	isp_pci_wr_reg,			/* register write */
	isp_pci_mbxdma,			/* mailbox/queue DMA setup */
	isp_pci_dmasetup,		/* per-command DMA setup */
	isp_pci_dmateardown,		/* per-command DMA teardown */
	NULL,				/* unused hook in this front-end */
	isp_pci_reset1,			/* post-reset fixup */
	isp_pci_dumpregs,		/* debug register dump */
	ISP2100_RISC_CODE,		/* firmware image */
	ISP2100_CODE_LENGTH,		/* firmware length (words) */
	ISP2100_CODE_ORG,		/* firmware load origin */
	ISP2100_CODE_VERSION,		/* firmware version */
	BIU_PCI_CONF1_FIFO_64 | BIU_BURST_ENABLE,	/* BIU CONF1 bits */
	60 /* MAGIC- all known PCI card implementations are 60MHz */
};
99
100 #define PCI_QLOGIC_ISP \
101 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
102
103 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
104 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
105 #endif
106 #define PCI_QLOGIC_ISP2100 \
107 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
108
109 #define IO_MAP_REG 0x10
110 #define MEM_MAP_REG 0x14
111
112
113 #ifdef __BROKEN_INDIRECT_CONFIG
114 static int isp_pci_probe __P((struct device *, void *, void *));
115 #else
116 static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
117 #endif
118 static void isp_pci_attach __P((struct device *, struct device *, void *));
119
/*
 * PCI-specific softc: the common ispsoftc must come first so the MI
 * code's pointer casts (struct ispsoftc * <-> struct isp_pcisoftc *)
 * work.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* common ISP state; MUST be first */
	pci_chipset_tag_t pci_pc;	/* PCI chipset tag */
	pcitag_t pci_tag;		/* PCI device tag */
	bus_space_tag_t pci_st;		/* register space tag */
	bus_space_handle_t pci_sh;	/* register space handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag from autoconfig */
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST];	/* per-command maps */
	void * pci_ih;			/* interrupt handler cookie */
};
133
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
137
138 static int
139 isp_pci_probe(parent, match, aux)
140 struct device *parent;
141 #ifdef __BROKEN_INDIRECT_CONFIG
142 void *match, *aux;
143 #else
144 struct cfdata *match;
145 void *aux;
146 #endif
147 {
148 struct pci_attach_args *pa = aux;
149
150 if (pa->pa_id == PCI_QLOGIC_ISP ||
151 pa->pa_id == PCI_QLOGIC_ISP2100) {
152 return (1);
153 } else {
154 return (0);
155 }
156 }
157
158
159 static void
160 isp_pci_attach(parent, self, aux)
161 struct device *parent, *self;
162 void *aux;
163 {
164 struct pci_attach_args *pa = aux;
165 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
166 bus_space_tag_t st, iot, memt;
167 bus_space_handle_t sh, ioh, memh;
168 pci_intr_handle_t ih;
169 const char *intrstr;
170 int ioh_valid, memh_valid, i;
171
172 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
173 PCI_MAPREG_TYPE_IO, 0,
174 &iot, &ioh, NULL, NULL) == 0);
175 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
176 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
177 &memt, &memh, NULL, NULL) == 0);
178
179 if (memh_valid) {
180 st = memt;
181 sh = memh;
182 } else if (ioh_valid) {
183 st = iot;
184 sh = ioh;
185 } else {
186 printf(": unable to map device registers\n");
187 return;
188 }
189 printf("\n");
190
191 pcs->pci_st = st;
192 pcs->pci_sh = sh;
193 pcs->pci_dmat = pa->pa_dmat;
194 pcs->pci_pc = pa->pa_pc;
195 pcs->pci_tag = pa->pa_tag;
196 if (pa->pa_id == PCI_QLOGIC_ISP) {
197 pcs->pci_isp.isp_mdvec = &mdvec;
198 pcs->pci_isp.isp_type = ISP_HA_SCSI_UNKNOWN;
199 pcs->pci_isp.isp_param =
200 malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
201 if (pcs->pci_isp.isp_param == NULL) {
202 printf("%s: couldn't allocate sdparam table\n",
203 pcs->pci_isp.isp_name);
204 }
205 bzero(pcs->pci_isp.isp_param, sizeof (sdparam));
206 } else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
207 u_int32_t data;
208 pcs->pci_isp.isp_mdvec = &mdvec_2100;
209 if (ioh_valid == 0) {
210 printf("%s: warning, ISP2100 cannot use I/O Space"
211 " Mappings\n", pcs->pci_isp.isp_name);
212 } else {
213 pcs->pci_st = iot;
214 pcs->pci_sh = ioh;
215 }
216
217 #if 0
218 printf("%s: PCIREGS cmd=%x bhlc=%x\n", pcs->pci_isp.isp_name,
219 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG),
220 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG));
221 #endif
222 pcs->pci_isp.isp_type = ISP_HA_FC_2100;
223 pcs->pci_isp.isp_param =
224 malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
225 if (pcs->pci_isp.isp_param == NULL) {
226 printf("%s: couldn't allocate fcparam table\n",
227 pcs->pci_isp.isp_name);
228 }
229 bzero(pcs->pci_isp.isp_param, sizeof (fcparam));
230
231 data = pci_conf_read(pa->pa_pc, pa->pa_tag,
232 PCI_COMMAND_STATUS_REG);
233 data |= PCI_COMMAND_MASTER_ENABLE |
234 PCI_COMMAND_INVALIDATE_ENABLE;
235 pci_conf_write(pa->pa_pc, pa->pa_tag,
236 PCI_COMMAND_STATUS_REG, data);
237 /*
238 * Wierd- we need to clear the lsb in offset 0x30 to take the
239 * chip out of reset state.
240 */
241 data = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x30);
242 data &= ~1;
243 pci_conf_write(pa->pa_pc, pa->pa_tag, 0x30, data);
244 #if 0
245 /*
246 * XXX: Need to get the actual revision number of the 2100 FB
247 */
248 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
249 data &= ~0xffff;
250 data |= 0xf801;
251 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
252 printf("%s: setting latency to %x and cache line size to %x\n",
253 pcs->pci_isp.isp_name, (data >> 8) & 0xff,
254 data & 0xff);
255 #endif
256 } else {
257 return;
258 }
259 isp_reset(&pcs->pci_isp);
260 if (pcs->pci_isp.isp_state != ISP_RESETSTATE) {
261 free(pcs->pci_isp.isp_param, M_DEVBUF);
262 return;
263 }
264 isp_init(&pcs->pci_isp);
265 if (pcs->pci_isp.isp_state != ISP_INITSTATE) {
266 isp_uninit(&pcs->pci_isp);
267 free(pcs->pci_isp.isp_param, M_DEVBUF);
268 return;
269 }
270
271 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
272 pa->pa_intrline, &ih)) {
273 printf("%s: couldn't map interrupt\n", pcs->pci_isp.isp_name);
274 isp_uninit(&pcs->pci_isp);
275 free(pcs->pci_isp.isp_param, M_DEVBUF);
276 return;
277 }
278
279 intrstr = pci_intr_string(pa->pa_pc, ih);
280 if (intrstr == NULL)
281 intrstr = "<I dunno>";
282 pcs->pci_ih =
283 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_intr, &pcs->pci_isp);
284 if (pcs->pci_ih == NULL) {
285 printf("%s: couldn't establish interrupt at %s\n",
286 pcs->pci_isp.isp_name, intrstr);
287 isp_uninit(&pcs->pci_isp);
288 free(pcs->pci_isp.isp_param, M_DEVBUF);
289 return;
290 }
291 printf("%s: interrupting at %s\n", pcs->pci_isp.isp_name, intrstr);
292
293 /*
294 * Create the DMA maps for the data transfers.
295 */
296 for (i = 0; i < RQUEST_QUEUE_LEN(&pcs->pci_isp); i++) {
297 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
298 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
299 &pcs->pci_xfer_dmap[i])) {
300 printf("%s: can't create dma maps\n",
301 pcs->pci_isp.isp_name);
302 isp_uninit(&pcs->pci_isp);
303 return;
304 }
305 }
306 /*
307 * Do Generic attach now.
308 */
309 isp_attach(&pcs->pci_isp);
310 if (pcs->pci_isp.isp_state != ISP_RUNSTATE) {
311 isp_uninit(&pcs->pci_isp);
312 free(pcs->pci_isp.isp_param, M_DEVBUF);
313 }
314 }
315
316 #define PCI_BIU_REGS_OFF BIU_REGS_OFF
317
318 static u_int16_t
319 isp_pci_rd_reg(isp, regoff)
320 struct ispsoftc *isp;
321 int regoff;
322 {
323 u_int16_t rv;
324 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
325 int offset, oldsxp = 0;
326
327 if ((regoff & BIU_BLOCK) != 0) {
328 offset = PCI_BIU_REGS_OFF;
329 } else if ((regoff & MBOX_BLOCK) != 0) {
330 if (isp->isp_type & ISP_HA_SCSI)
331 offset = PCI_MBOX_REGS_OFF;
332 else
333 offset = PCI_MBOX_REGS2100_OFF;
334 } else if ((regoff & SXP_BLOCK) != 0) {
335 offset = PCI_SXP_REGS_OFF;
336 /*
337 * We will assume that someone has paused the RISC processor.
338 */
339 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
340 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
341 } else {
342 offset = PCI_RISC_REGS_OFF;
343 }
344 regoff &= 0xff;
345 offset += regoff;
346 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
347 if ((regoff & SXP_BLOCK) != 0) {
348 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
349 }
350 return (rv);
351 }
352
353 static void
354 isp_pci_wr_reg(isp, regoff, val)
355 struct ispsoftc *isp;
356 int regoff;
357 u_int16_t val;
358 {
359 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
360 int offset, oldsxp = 0;
361 if ((regoff & BIU_BLOCK) != 0) {
362 offset = PCI_BIU_REGS_OFF;
363 } else if ((regoff & MBOX_BLOCK) != 0) {
364 if (isp->isp_type & ISP_HA_SCSI)
365 offset = PCI_MBOX_REGS_OFF;
366 else
367 offset = PCI_MBOX_REGS2100_OFF;
368 } else if ((regoff & SXP_BLOCK) != 0) {
369 offset = PCI_SXP_REGS_OFF;
370 /*
371 * We will assume that someone has paused the RISC processor.
372 */
373 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
374 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
375 } else {
376 offset = PCI_RISC_REGS_OFF;
377 }
378 regoff &= 0xff;
379 offset += regoff;
380 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
381 if ((regoff & SXP_BLOCK) != 0) {
382 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
383 }
384 }
385
/*
 * Allocate, map and load DMA-safe memory for the request and result
 * queues (and, for Fibre Channel chips, the mailbox scratch area),
 * recording the bus addresses in the softc for the chip's use.
 *
 * Returns 0 on success, 1 on failure.
 *
 * NOTE(review): on a failure part-way through, the memory and maps
 * set up by the earlier steps are not released before returning 1 —
 * confirm the caller treats this as fatal for the whole device so
 * the leak is moot.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	    (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	/* Bus address of the request queue, programmed into the chip */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	    (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI chips need no scratch area; done */
	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/*
	 * Fibre Channel only: allocate and map the scratch area.
	 */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	    (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
448
449 static int
450 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
451 struct ispsoftc *isp;
452 struct scsi_xfer *xs;
453 ispreq_t *rq;
454 u_int8_t *iptrp;
455 u_int8_t optr;
456 {
457 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
458 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
459 ispcontreq_t *crq;
460 int segcnt, seg, error, ovseg, seglim;
461
462 if (xs->datalen == 0) {
463 rq->req_seg_count = 1;
464 return (0);
465 }
466
467 if (rq->req_handle > RQUEST_QUEUE_LEN(isp) ||
468 rq->req_handle < 1) {
469 panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
470 isp->isp_name, rq->req_handle);
471 /* NOTREACHED */
472 }
473
474 if (xs->flags & SCSI_DATA_IN) {
475 rq->req_flags |= REQFLAG_DATA_IN;
476 } else {
477 rq->req_flags |= REQFLAG_DATA_OUT;
478 }
479
480 if (isp->isp_type & ISP_HA_FC) {
481 seglim = ISP_RQDSEG_T2;
482 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
483 } else {
484 seglim = ISP_RQDSEG;
485 }
486 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
487 NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
488 if (error)
489 return (error);
490
491 segcnt = dmap->dm_nsegs;
492
493 for (seg = 0, rq->req_seg_count = 0;
494 seg < segcnt && rq->req_seg_count < seglim;
495 seg++, rq->req_seg_count++) {
496 if (isp->isp_type & ISP_HA_FC) {
497 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
498 rq2->req_dataseg[rq2->req_seg_count].ds_count =
499 dmap->dm_segs[seg].ds_len;
500 rq2->req_dataseg[rq2->req_seg_count].ds_base =
501 dmap->dm_segs[seg].ds_addr;
502 } else {
503 rq->req_dataseg[rq->req_seg_count].ds_count =
504 dmap->dm_segs[seg].ds_len;
505 rq->req_dataseg[rq->req_seg_count].ds_base =
506 dmap->dm_segs[seg].ds_addr;
507 }
508 }
509
510 if (seg == segcnt)
511 goto mapsync;
512
513 do {
514 crq = (ispcontreq_t *)
515 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
516 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN(isp) - 1);
517 if (*iptrp == optr) {
518 printf("%s: Request Queue Overflow++\n",
519 isp->isp_name);
520 bus_dmamap_unload(pci->pci_dmat, dmap);
521 return (EFBIG);
522 }
523 rq->req_header.rqs_entry_count++;
524 bzero((void *)crq, sizeof (*crq));
525 crq->req_header.rqs_entry_count = 1;
526 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
527
528 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
529 rq->req_seg_count++, seg++, ovseg++) {
530 crq->req_dataseg[ovseg].ds_count =
531 dmap->dm_segs[seg].ds_len;
532 crq->req_dataseg[ovseg].ds_base =
533 dmap->dm_segs[seg].ds_addr;
534 }
535 } while (seg < segcnt);
536
537 mapsync:
538 bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
539 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
540 return (0);
541 }
542
543 static void
544 isp_pci_dmateardown(isp, xs, handle)
545 struct ispsoftc *isp;
546 struct scsi_xfer *xs;
547 u_int32_t handle;
548 {
549 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
550 bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];
551
552 bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
553 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
554 bus_dmamap_unload(pci->pci_dmat, dmap);
555 }
556
/*
 * Bus-specific post-reset fixup, called via the mdvec after the MI
 * code has reset the chip.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
564
565 static void
566 isp_pci_dumpregs(isp)
567 struct ispsoftc *isp;
568 {
569 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
570 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
571 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
572 }
573