isp_pci.c revision 1.31 1 /*
2 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
3 *
4 *---------------------------------------
5 * Copyright (c) 1997, 1998 by Matthew Jacob
6 * NASA/Ames Research Center
7 * All rights reserved.
8 *---------------------------------------
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice immediately at the beginning of the file, without modification,
15 * this list of conditions, and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35
36 #include <dev/ic/isp_netbsd.h>
37 #include <dev/microcode/isp/asm_pci.h>
38
39 #include <dev/pci/pcireg.h>
40 #include <dev/pci/pcivar.h>
41 #include <dev/pci/pcidevs.h>
42
/*
 * Forward declarations for the bus-specific entry points that are
 * published through the ispmdvec dispatch vectors below.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
53
/*
 * Bus-specific dispatch vector and firmware parameters for the
 * parallel SCSI (ISP10x0) adapters.  The positional initializer
 * must match the field order of struct ispmdvec in the core driver.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* queue DMA setup */
	isp_pci_dmasetup,	/* per-command DMA mapping */
	isp_pci_dmateardown,	/* per-command DMA unmapping */
	NULL,			/* no pre-reset hook for PCI */
	isp_pci_reset1,		/* post-reset hook (disables BIOS) */
	isp_pci_dumpregs,	/* debugging register dump */
	ISP_RISC_CODE,		/* firmware image */
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE, /* default to 8 byte burst */
	0
};
70
/*
 * Dispatch vector and firmware parameters for the ISP2100 fibre
 * channel adapter; same layout rules as mdvec above.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,		/* register read */
	isp_pci_wr_reg,		/* register write */
	isp_pci_mbxdma,		/* queue DMA setup */
	isp_pci_dmasetup,	/* per-command DMA mapping */
	isp_pci_dmateardown,	/* per-command DMA unmapping */
	NULL,			/* no pre-reset hook for PCI */
	isp_pci_reset1,		/* post-reset hook (disables BIOS) */
	isp_pci_dumpregs,	/* debugging register dump */
	ISP2100_RISC_CODE,	/* 2100 firmware image */
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	BIU_BURST_ENABLE, /* default to 8 byte burst */
	0 /* Not relevant to the 2100 */
};
87
/*
 * Composite (product << 16 | vendor) identifiers, matching the
 * layout of pci_attach_args.pa_id.
 */
#define PCI_QLOGIC_ISP \
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

/* Older pcidevs headers may lack the 2100 product ID; supply it here. */
#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif
#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

/* PCI base address registers: BAR at 0x10 is I/O space, 0x14 is memory. */
#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14
99
100
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * PCI-specific softc.  The bus-independent ispsoftc must be the first
 * member so the core code's struct ispsoftc pointer can be cast to and
 * from a struct isp_pcisoftc pointer.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* common softc (must be first) */
	pci_chipset_tag_t pci_pc;	/* PCI chipset tag */
	pcitag_t pci_tag;		/* PCI device tag */
	bus_space_tag_t pci_st;		/* register space tag */
	bus_space_handle_t pci_sh;	/* register space handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag for all maps below */
	bus_dmamap_t pci_scratch_dmap; /* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST]; /* per-command data maps */
	void * pci_ih;			/* established interrupt cookie */
};
117
/* Autoconfiguration glue: softc size and the match/attach entry points. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
121
122 static int
123 isp_pci_probe(parent, match, aux)
124 struct device *parent;
125 struct cfdata *match;
126 void *aux;
127 {
128 struct pci_attach_args *pa = aux;
129
130 if (pa->pa_id == PCI_QLOGIC_ISP ||
131 pa->pa_id == PCI_QLOGIC_ISP2100) {
132 return (1);
133 } else {
134 return (0);
135 }
136 }
137
138
139 static void
140 isp_pci_attach(parent, self, aux)
141 struct device *parent, *self;
142 void *aux;
143 {
144 #ifdef DEBUG
145 static char oneshot = 1;
146 #endif
147 struct pci_attach_args *pa = aux;
148 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
149 struct ispsoftc *isp = &pcs->pci_isp;
150 bus_space_tag_t st, iot, memt;
151 bus_space_handle_t sh, ioh, memh;
152 pci_intr_handle_t ih;
153 const char *intrstr;
154 int ioh_valid, memh_valid, i;
155 ISP_LOCKVAL_DECL;
156
157 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
158 PCI_MAPREG_TYPE_IO, 0,
159 &iot, &ioh, NULL, NULL) == 0);
160 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
161 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
162 &memt, &memh, NULL, NULL) == 0);
163
164 if (memh_valid) {
165 st = memt;
166 sh = memh;
167 } else if (ioh_valid) {
168 st = iot;
169 sh = ioh;
170 } else {
171 printf(": unable to map device registers\n");
172 return;
173 }
174 printf("\n");
175
176 pcs->pci_st = st;
177 pcs->pci_sh = sh;
178 pcs->pci_dmat = pa->pa_dmat;
179 pcs->pci_pc = pa->pa_pc;
180 pcs->pci_tag = pa->pa_tag;
181 if (pa->pa_id == PCI_QLOGIC_ISP) {
182 isp->isp_mdvec = &mdvec;
183 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
184 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
185 if (isp->isp_param == NULL) {
186 printf("%s: couldn't allocate sdparam table\n",
187 isp->isp_name);
188 return;
189 }
190 bzero(isp->isp_param, sizeof (sdparam));
191 } else if (pa->pa_id == PCI_QLOGIC_ISP2100) {
192 u_int32_t data;
193 isp->isp_mdvec = &mdvec_2100;
194 if (ioh_valid == 0) {
195 printf("%s: warning, ISP2100 cannot use I/O Space"
196 " Mappings\n", isp->isp_name);
197 } else {
198 pcs->pci_st = iot;
199 pcs->pci_sh = ioh;
200 }
201
202 isp->isp_type = ISP_HA_FC_2100;
203 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
204 if (isp->isp_param == NULL) {
205 printf("%s: couldn't allocate fcparam table\n",
206 isp->isp_name);
207 return;
208 }
209 bzero(isp->isp_param, sizeof (fcparam));
210
211 data = pci_conf_read(pa->pa_pc, pa->pa_tag,
212 PCI_COMMAND_STATUS_REG);
213 data |= PCI_COMMAND_MASTER_ENABLE |
214 PCI_COMMAND_INVALIDATE_ENABLE;
215 pci_conf_write(pa->pa_pc, pa->pa_tag,
216 PCI_COMMAND_STATUS_REG, data);
217 } else {
218 return;
219 }
220 #ifdef DEBUG
221 if (oneshot) {
222 oneshot = 0;
223 printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
224 "%d.%d Core Version %d.%d\n",
225 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
226 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
227 }
228 #endif
229 ISP_LOCK(isp);
230 isp_reset(isp);
231 if (isp->isp_state != ISP_RESETSTATE) {
232 ISP_UNLOCK(isp);
233 free(isp->isp_param, M_DEVBUF);
234 return;
235 }
236 isp_init(isp);
237 if (isp->isp_state != ISP_INITSTATE) {
238 isp_uninit(isp);
239 ISP_UNLOCK(isp);
240 free(isp->isp_param, M_DEVBUF);
241 return;
242 }
243
244 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
245 pa->pa_intrline, &ih)) {
246 printf("%s: couldn't map interrupt\n", isp->isp_name);
247 isp_uninit(isp);
248 ISP_UNLOCK(isp);
249 free(isp->isp_param, M_DEVBUF);
250 return;
251 }
252
253 intrstr = pci_intr_string(pa->pa_pc, ih);
254 if (intrstr == NULL)
255 intrstr = "<I dunno>";
256 pcs->pci_ih =
257 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
258 if (pcs->pci_ih == NULL) {
259 printf("%s: couldn't establish interrupt at %s\n",
260 isp->isp_name, intrstr);
261 isp_uninit(isp);
262 ISP_UNLOCK(isp);
263 free(isp->isp_param, M_DEVBUF);
264 return;
265 }
266 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
267
268 /*
269 * Create the DMA maps for the data transfers.
270 */
271 for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
272 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
273 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
274 &pcs->pci_xfer_dmap[i])) {
275 printf("%s: can't create dma maps\n",
276 isp->isp_name);
277 isp_uninit(isp);
278 ISP_UNLOCK(isp);
279 return;
280 }
281 }
282 /*
283 * Do Generic attach now.
284 */
285 isp_attach(isp);
286 if (isp->isp_state != ISP_RUNSTATE) {
287 isp_uninit(isp);
288 free(isp->isp_param, M_DEVBUF);
289 }
290 ISP_UNLOCK(isp);
291 }
292
293 #define PCI_BIU_REGS_OFF BIU_REGS_OFF
294
#define PCI_BIU_REGS_OFF BIU_REGS_OFF

/*
 * Read a 16-bit chip register.  The register offset encodes the
 * register block it lives in (BIU, mailbox, SXP or RISC); the block
 * bits select a PCI-specific base offset and are then masked off.
 * For SXP registers the SXP select bit in BIU_CONF1 is saved,
 * cleared around the access and restored afterwards.
 */
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldsxp = 0;

	if ((regoff & BIU_BLOCK) != 0) {
		offset = PCI_BIU_REGS_OFF;
	} else if ((regoff & MBOX_BLOCK) != 0) {
		/* Mailbox registers live elsewhere on FC parts. */
		if (isp->isp_type & ISP_HA_SCSI)
			offset = PCI_MBOX_REGS_OFF;
		else
			offset = PCI_MBOX_REGS2100_OFF;
	} else if ((regoff & SXP_BLOCK) != 0) {
		offset = PCI_SXP_REGS_OFF;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
	} else {
		offset = PCI_RISC_REGS_OFF;
	}
	/* Strip the block-select bits, keep the register offset. */
	regoff &= 0xff;
	offset += regoff;
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & SXP_BLOCK) != 0) {
		/* Restore the original BIU_CONF1 contents. */
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
	}
	return (rv);
}
329
/*
 * Write a 16-bit chip register.  Mirror image of isp_pci_rd_reg:
 * decode the register block from the offset, temporarily clearing
 * the SXP select bit in BIU_CONF1 around SXP block accesses.
 */
static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldsxp = 0;
	if ((regoff & BIU_BLOCK) != 0) {
		offset = PCI_BIU_REGS_OFF;
	} else if ((regoff & MBOX_BLOCK) != 0) {
		/* Mailbox registers live elsewhere on FC parts. */
		if (isp->isp_type & ISP_HA_SCSI)
			offset = PCI_MBOX_REGS_OFF;
		else
			offset = PCI_MBOX_REGS2100_OFF;
	} else if ((regoff & SXP_BLOCK) != 0) {
		offset = PCI_SXP_REGS_OFF;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
	} else {
		offset = PCI_RISC_REGS_OFF;
	}
	/* Strip the block-select bits, keep the register offset. */
	regoff &= 0xff;
	offset += regoff;
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & SXP_BLOCK) != 0) {
		/* Restore the original BIU_CONF1 contents. */
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
	}
}
362
/*
 * Allocate, map and DMA-load the request and result queues and, for
 * fibre channel adapters, the scratch area, recording the resulting
 * bus addresses in the softc/fcparam.
 *
 * Returns 0 on success, nonzero on failure.
 *
 * NOTE(review): on a partial failure the allocations and maps set up
 * so far are not released.  The caller treats a nonzero return as a
 * fatal attach error, but the memory is leaked — confirm whether
 * explicit teardown is wanted here.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;		/* reused for each allocation */
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	    (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	/* Single segment (nsegments == 1), so ds_addr is the queue base. */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	    (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI adapters need no scratch area; we are done. */
	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/*
	 * Fibre channel: allocate and map the scratch area as well.
	 */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	    (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
425
426 static int
427 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
428 struct ispsoftc *isp;
429 struct scsipi_xfer *xs;
430 ispreq_t *rq;
431 u_int8_t *iptrp;
432 u_int8_t optr;
433 {
434 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
435 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
436 ispcontreq_t *crq;
437 int segcnt, seg, error, ovseg, seglim, drq;
438
439 if (xs->datalen == 0) {
440 rq->req_seg_count = 1;
441 goto mbxsync;
442 }
443
444 if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
445 panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
446 isp->isp_name, rq->req_handle);
447 /* NOTREACHED */
448 }
449
450 if (xs->flags & SCSI_DATA_IN) {
451 drq = REQFLAG_DATA_IN;
452 } else {
453 drq = REQFLAG_DATA_OUT;
454 }
455
456 if (isp->isp_type & ISP_HA_FC) {
457 seglim = ISP_RQDSEG_T2;
458 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
459 ((ispreqt2_t *)rq)->req_flags |= drq;
460 } else {
461 seglim = ISP_RQDSEG;
462 rq->req_flags |= drq;
463 }
464 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
465 NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
466 if (error) {
467 XS_SETERR(xs, HBA_BOTCH);
468 return (CMD_COMPLETE);
469 }
470
471 segcnt = dmap->dm_nsegs;
472
473 for (seg = 0, rq->req_seg_count = 0;
474 seg < segcnt && rq->req_seg_count < seglim;
475 seg++, rq->req_seg_count++) {
476 if (isp->isp_type & ISP_HA_FC) {
477 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
478 rq2->req_dataseg[rq2->req_seg_count].ds_count =
479 dmap->dm_segs[seg].ds_len;
480 rq2->req_dataseg[rq2->req_seg_count].ds_base =
481 dmap->dm_segs[seg].ds_addr;
482 } else {
483 rq->req_dataseg[rq->req_seg_count].ds_count =
484 dmap->dm_segs[seg].ds_len;
485 rq->req_dataseg[rq->req_seg_count].ds_base =
486 dmap->dm_segs[seg].ds_addr;
487 }
488 }
489
490 if (seg == segcnt)
491 goto dmasync;
492
493 do {
494 crq = (ispcontreq_t *)
495 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
496 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
497 if (*iptrp == optr) {
498 printf("%s: Request Queue Overflow++\n",
499 isp->isp_name);
500 bus_dmamap_unload(pci->pci_dmat, dmap);
501 XS_SETERR(xs, HBA_BOTCH);
502 return (CMD_COMPLETE);
503 }
504 rq->req_header.rqs_entry_count++;
505 bzero((void *)crq, sizeof (*crq));
506 crq->req_header.rqs_entry_count = 1;
507 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
508
509 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
510 rq->req_seg_count++, seg++, ovseg++) {
511 crq->req_dataseg[ovseg].ds_count =
512 dmap->dm_segs[seg].ds_len;
513 crq->req_dataseg[ovseg].ds_base =
514 dmap->dm_segs[seg].ds_addr;
515 }
516 } while (seg < segcnt);
517
518 dmasync:
519 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
520 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
521 BUS_DMASYNC_PREWRITE);
522
523 mbxsync:
524
525 bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
526 pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
527 return (CMD_QUEUED);
528 }
529
530 static int
531 isp_pci_intr(arg)
532 void *arg;
533 {
534 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
535 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
536 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
537 return (isp_intr(arg));
538 }
539
/*
 * Post-transfer cleanup: sync the command's data map back for the
 * CPU and unload it.
 *
 * NOTE(review): this indexes pci_xfer_dmap[handle], while
 * isp_pci_dmasetup indexes pci_xfer_dmap[req_handle - 1].  If the
 * same handle value reaches both paths this is off by one — verify
 * against the caller in the bus-independent code.
 */
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}
554
/*
 * Bus-specific post-reset hook, called through the mdvec after the
 * chip has been reset.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
562
/*
 * Debugging aid: print the PCI command/status configuration register
 * for this adapter.
 */
static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}
571