1 /*	$NetBSD: isp_pci.c,v 1.107 2009/05/06 10:34:32 cegger Exp $ */
2 /*
3 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
4 * All rights reserved.
5 *
6 * Additional Copyright (C) 2000-2007 by Matthew Jacob
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 /*
32 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
33 */
34
35 /*
36 * 24XX 4Gb material support provided by MetrumRG Associates.
37 * Many thanks are due to them.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: isp_pci.c,v 1.107 2009/05/06 10:34:32 cegger Exp $");
42
43 #include <dev/ic/isp_netbsd.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcidevs.h>
47 #include <uvm/uvm_extern.h>
48 #include <sys/reboot.h>
49
50 static uint32_t isp_pci_rd_reg(struct ispsoftc *, int);
51 static void isp_pci_wr_reg(struct ispsoftc *, int, uint32_t);
52 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
53 static uint32_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
54 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, uint32_t);
55 #endif
56 #if !(defined(ISP_DISABLE_2100_SUPPORT) && \
57 	defined(ISP_DISABLE_2200_SUPPORT) && \
58 	defined(ISP_DISABLE_1020_SUPPORT) && \
59 	defined(ISP_DISABLE_1080_SUPPORT) && \
60 	defined(ISP_DISABLE_12160_SUPPORT))
61 static int
62 isp_pci_rd_isr(struct ispsoftc *, uint32_t *, uint16_t *, uint16_t *);
63 #endif
64 #if !(defined(ISP_DISABLE_2300_SUPPORT) && defined(ISP_DISABLE_2322_SUPPORT))
65 static int
66 isp_pci_rd_isr_2300(struct ispsoftc *, uint32_t *, uint16_t *, uint16_t *);
67 #endif
68 #if !defined(ISP_DISABLE_2400_SUPPORT)
69 static uint32_t isp_pci_rd_reg_2400(struct ispsoftc *, int);
70 static void isp_pci_wr_reg_2400(struct ispsoftc *, int, uint32_t);
71 static int
72 isp_pci_rd_isr_2400(struct ispsoftc *, uint32_t *, uint16_t *, uint16_t *);
73 static int isp2400_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
74 uint32_t *, uint32_t);
75 #endif
76 static int isp_pci_mbxdma(struct ispsoftc *);
77 static int isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
78 uint32_t *, uint32_t);
79 static void isp_pci_dmateardown(struct ispsoftc *, XS_T *, uint32_t);
80 static void isp_pci_reset0(struct ispsoftc *);
81 static void isp_pci_reset1(struct ispsoftc *);
82 static void isp_pci_dumpregs(struct ispsoftc *, const char *);
83 static int isp_pci_intr(void *);
84
85 #if defined(ISP_DISABLE_1020_SUPPORT) || defined(ISP_DISABLE_FW)
86 #define ISP_1040_RISC_CODE NULL
87 #else
88 #define ISP_1040_RISC_CODE (const uint16_t *) isp_1040_risc_code
89 #include <dev/microcode/isp/asm_1040.h>
90 #endif
91
92 #if defined(ISP_DISABLE_1080_SUPPORT) || defined(ISP_DISABLE_FW)
93 #define ISP_1080_RISC_CODE NULL
94 #else
95 #define ISP_1080_RISC_CODE (const uint16_t *) isp_1080_risc_code
96 #include <dev/microcode/isp/asm_1080.h>
97 #endif
98
99 #if defined(ISP_DISABLE_12160_SUPPORT) || defined(ISP_DISABLE_FW)
100 #define ISP_12160_RISC_CODE NULL
101 #else
102 #define ISP_12160_RISC_CODE (const uint16_t *) isp_12160_risc_code
103 #include <dev/microcode/isp/asm_12160.h>
104 #endif
105
106 #if defined(ISP_DISABLE_2100_SUPPORT) || defined(ISP_DISABLE_FW)
107 #define ISP_2100_RISC_CODE NULL
108 #else
109 #define ISP_2100_RISC_CODE (const uint16_t *) isp_2100_risc_code
110 #include <dev/microcode/isp/asm_2100.h>
111 #endif
112
113 #if defined(ISP_DISABLE_2200_SUPPORT) || defined(ISP_DISABLE_FW)
114 #define ISP_2200_RISC_CODE NULL
115 #else
116 #define ISP_2200_RISC_CODE (const uint16_t *) isp_2200_risc_code
117 #include <dev/microcode/isp/asm_2200.h>
118 #endif
119
120 #if defined(ISP_DISABLE_2300_SUPPORT) || defined(ISP_DISABLE_FW)
121 #define ISP_2300_RISC_CODE NULL
122 #else
123 #define ISP_2300_RISC_CODE (const uint16_t *) isp_2300_risc_code
124 #include <dev/microcode/isp/asm_2300.h>
125 #endif
126 #if defined(ISP_DISABLE_2322_SUPPORT) || defined(ISP_DISABLE_FW)
127 #define ISP_2322_RISC_CODE NULL
128 #else
129 #define ISP_2322_RISC_CODE (const uint16_t *) isp_2322_risc_code
130 #include <dev/microcode/isp/asm_2322.h>
131 #endif
132
133 #if defined(ISP_DISABLE_2400_SUPPORT) || defined(ISP_DISABLE_FW)
134 #define ISP_2400_RISC_CODE NULL
135 #else
136 #define ISP_2400_RISC_CODE (const uint32_t *) isp_2400_risc_code
137 #include <dev/microcode/isp/asm_2400.h>
138 #endif
139
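/*
 * Each ispmdvec below is the bus-specific vector handed to the common
 * isp(4) core: how to read the interrupt status, how to access registers,
 * how to set up and tear down DMA, reset hooks, a register dump routine,
 * the firmware image to load (or NULL), and initial BIU configuration
 * bits.  One vector is built for each chip family that is compiled in.
 */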
140 #ifndef ISP_DISABLE_1020_SUPPORT
141 static struct ispmdvec mdvec = {
142 isp_pci_rd_isr,
143 isp_pci_rd_reg,
144 isp_pci_wr_reg,
145 isp_pci_mbxdma,
146 isp_pci_dmasetup,
147 isp_pci_dmateardown,
148 isp_pci_reset0,
149 isp_pci_reset1,
150 isp_pci_dumpregs,
151 ISP_1040_RISC_CODE,
152 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
153 0
154 };
155 #endif
156
157 #ifndef ISP_DISABLE_1080_SUPPORT
158 static struct ispmdvec mdvec_1080 = {
159 isp_pci_rd_isr,
160 isp_pci_rd_reg_1080,
161 isp_pci_wr_reg_1080,
162 isp_pci_mbxdma,
163 isp_pci_dmasetup,
164 isp_pci_dmateardown,
165 isp_pci_reset0,
166 isp_pci_reset1,
167 isp_pci_dumpregs,
168 ISP_1080_RISC_CODE,
169 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
170 0
171 };
172 #endif
173
174 #ifndef ISP_DISABLE_12160_SUPPORT
175 static struct ispmdvec mdvec_12160 = {
176 isp_pci_rd_isr,
177 isp_pci_rd_reg_1080,
178 isp_pci_wr_reg_1080,
179 isp_pci_mbxdma,
180 isp_pci_dmasetup,
181 isp_pci_dmateardown,
182 isp_pci_reset0,
183 isp_pci_reset1,
184 isp_pci_dumpregs,
185 ISP_12160_RISC_CODE,
186 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
187 0
188 };
189 #endif
190
191 #ifndef ISP_DISABLE_2100_SUPPORT
192 static struct ispmdvec mdvec_2100 = {
193 isp_pci_rd_isr,
194 isp_pci_rd_reg,
195 isp_pci_wr_reg,
196 isp_pci_mbxdma,
197 isp_pci_dmasetup,
198 isp_pci_dmateardown,
199 isp_pci_reset0,
200 isp_pci_reset1,
201 isp_pci_dumpregs,
202 ISP_2100_RISC_CODE,
203 0,
204 0
205 };
206 #endif
207
208 #ifndef ISP_DISABLE_2200_SUPPORT
209 static struct ispmdvec mdvec_2200 = {
210 isp_pci_rd_isr,
211 isp_pci_rd_reg,
212 isp_pci_wr_reg,
213 isp_pci_mbxdma,
214 isp_pci_dmasetup,
215 isp_pci_dmateardown,
216 isp_pci_reset0,
217 isp_pci_reset1,
218 isp_pci_dumpregs,
219 ISP_2200_RISC_CODE,
220 0,
221 0
222 };
223 #endif
224
225 #ifndef ISP_DISABLE_2300_SUPPORT
226 static struct ispmdvec mdvec_2300 = {
227 isp_pci_rd_isr_2300,
228 isp_pci_rd_reg,
229 isp_pci_wr_reg,
230 isp_pci_mbxdma,
231 isp_pci_dmasetup,
232 isp_pci_dmateardown,
233 isp_pci_reset0,
234 isp_pci_reset1,
235 isp_pci_dumpregs,
236 ISP_2300_RISC_CODE,
237 0,
238 0
239 };
240 #endif
241
242 #ifndef ISP_DISABLE_2322_SUPPORT
243 static struct ispmdvec mdvec_2322 = {
244 isp_pci_rd_isr_2300,
245 isp_pci_rd_reg,
246 isp_pci_wr_reg,
247 isp_pci_mbxdma,
248 isp_pci_dmasetup,
249 isp_pci_dmateardown,
250 isp_pci_reset0,
251 isp_pci_reset1,
252 isp_pci_dumpregs,
253 ISP_2322_RISC_CODE,
254 0,
255 0
256 };
257 #endif
258
259 #ifndef ISP_DISABLE_2400_SUPPORT
260 static struct ispmdvec mdvec_2400 = {
261 isp_pci_rd_isr_2400,
262 isp_pci_rd_reg_2400,
263 isp_pci_wr_reg_2400,
264 isp_pci_mbxdma,
265 isp2400_pci_dmasetup,
266 isp_pci_dmateardown,
267 isp_pci_reset0,
268 isp_pci_reset1,
269 NULL,
270 ISP_2400_RISC_CODE,
271 0,
272 0
273 };
274 #endif
275
276 #ifndef PCI_VENDOR_QLOGIC
277 #define PCI_VENDOR_QLOGIC 0x1077
278 #endif
279
280 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
281 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
282 #endif
283
284 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
285 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
286 #endif
287
288 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
289 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
290 #endif
291
292 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
293 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
294 #endif
295
296 #ifndef PCI_PRODUCT_QLOGIC_ISP10160
297 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
298 #endif
299
300 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
301 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
302 #endif
303
304 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
305 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
306 #endif
307
308 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
309 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
310 #endif
311
312 #ifndef PCI_PRODUCT_QLOGIC_ISP2300
313 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
314 #endif
315
316 #ifndef PCI_PRODUCT_QLOGIC_ISP2312
317 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
318 #endif
319
320 #ifndef PCI_PRODUCT_QLOGIC_ISP2322
321 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
322 #endif
323
324 #ifndef PCI_PRODUCT_QLOGIC_ISP2422
325 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
326 #endif
327
328 #ifndef PCI_PRODUCT_QLOGIC_ISP2432
329 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
330 #endif
331
332 #ifndef PCI_PRODUCT_QLOGIC_ISP6312
333 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
334 #endif
335
336 #ifndef PCI_PRODUCT_QLOGIC_ISP6322
337 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
338 #endif
339
340
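/*
 * Composite IDs in the pa_id format of struct pci_attach_args: product
 * code in the upper 16 bits, QLogic vendor ID in the lower 16 bits, so
 * that isp_pci_probe() can switch on pa_id directly.
 */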
341 #define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
342
343 #define PCI_QLOGIC_ISP1080 \
344 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
345
346 #define PCI_QLOGIC_ISP10160 \
347 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
348
349 #define PCI_QLOGIC_ISP12160 \
350 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
351
352 #define PCI_QLOGIC_ISP1240 \
353 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
354
355 #define PCI_QLOGIC_ISP1280 \
356 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
357
358 #define PCI_QLOGIC_ISP2100 \
359 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
360
361 #define PCI_QLOGIC_ISP2200 \
362 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
363
364 #define PCI_QLOGIC_ISP2300 \
365 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
366
367 #define PCI_QLOGIC_ISP2312 \
368 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
369
370 #define PCI_QLOGIC_ISP2322 \
371 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
372
373 #define PCI_QLOGIC_ISP2422 \
374 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
375
376 #define PCI_QLOGIC_ISP2432 \
377 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
378
379 #define PCI_QLOGIC_ISP6312 \
380 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
381
382 #define PCI_QLOGIC_ISP6322 \
383 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
384
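/*
 * Register window BARs: 0x10 is the I/O space BAR and 0x14 the memory
 * space BAR on these adapters; 0x30 is the standard expansion ROM BAR,
 * which the attach code disables.  The latency timer and cache line
 * size defaults below are written into PCI_BHLC_REG at attach time.
 */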
385 #define IO_MAP_REG 0x10
386 #define MEM_MAP_REG 0x14
387 #define PCIR_ROMADDR 0x30
388
389 #define PCI_DFLT_LTNCY 0x40
390 #define PCI_DFLT_LNSZ 0x10
391
392 static int isp_pci_probe(device_t, cfdata_t, void *);
393 static void isp_pci_attach(device_t, device_t, void *);
394
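/*
 * Per-device softc: the common ispsoftc must come first so the core code
 * and this file can cast between the two.  The remaining fields carry the
 * PCI bus-space handles, one bus_dmamap_t per outstanding command, the
 * interrupt handle, and the per-register-block offsets consumed by
 * IspVirt2Off() below.
 */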
395 struct isp_pcisoftc {
396 struct ispsoftc pci_isp;
397 pci_chipset_tag_t pci_pc;
398 pcitag_t pci_tag;
399 bus_space_tag_t pci_st;
400 bus_space_handle_t pci_sh;
401 bus_dmamap_t *pci_xfer_dmap;
402 void * pci_ih;
403 int16_t pci_poff[_NREG_BLKS];
404 };
405
406 CFATTACH_DECL(isp_pci, sizeof (struct isp_pcisoftc),
407 isp_pci_probe, isp_pci_attach, NULL, NULL);
408
409 static int
410 isp_pci_probe(device_t parent, cfdata_t match, void *aux)
411 {
412 struct pci_attach_args *pa = aux;
413 switch (pa->pa_id) {
414 #ifndef ISP_DISABLE_1020_SUPPORT
415 case PCI_QLOGIC_ISP:
416 return (1);
417 #endif
418 #ifndef ISP_DISABLE_1080_SUPPORT
419 case PCI_QLOGIC_ISP1080:
420 case PCI_QLOGIC_ISP1240:
421 case PCI_QLOGIC_ISP1280:
422 return (1);
423 #endif
424 #ifndef ISP_DISABLE_12160_SUPPORT
425 case PCI_QLOGIC_ISP10160:
426 case PCI_QLOGIC_ISP12160:
427 return (1);
428 #endif
429 #ifndef ISP_DISABLE_2100_SUPPORT
430 case PCI_QLOGIC_ISP2100:
431 return (1);
432 #endif
433 #ifndef ISP_DISABLE_2200_SUPPORT
434 case PCI_QLOGIC_ISP2200:
435 return (1);
436 #endif
437 #ifndef ISP_DISABLE_2300_SUPPORT
438 case PCI_QLOGIC_ISP2300:
439 case PCI_QLOGIC_ISP2312:
440 case PCI_QLOGIC_ISP6312:
441 #endif
442 #ifndef ISP_DISABLE_2322_SUPPORT
443 case PCI_QLOGIC_ISP2322:
444 case PCI_QLOGIC_ISP6322:
445 return (1);
446 #endif
447 #ifndef ISP_DISABLE_2400_SUPPORT
448 case PCI_QLOGIC_ISP2422:
449 case PCI_QLOGIC_ISP2432:
450 return (1);
451 #endif
452 default:
453 return (0);
454 }
455 }
456
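/*
 * Attach: map the register window (preferring memory space over I/O
 * space), pick the mdvec and parameter size for the matched chip, fix up
 * the PCI command register, latency timer, cache line size and expansion
 * ROM, hook the interrupt, then reset and initialize the chip and hand
 * off to the machine-independent isp_attach().
 */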
457 static void
458 isp_pci_attach(device_t parent, device_t self, void *aux)
459 {
460 static const char nomem[] = "\n%s: no mem for sdparam table\n";
461 uint32_t data, rev, linesz = PCI_DFLT_LNSZ;
462 struct pci_attach_args *pa = aux;
463 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
464 struct ispsoftc *isp = &pcs->pci_isp;
465 bus_space_tag_t st, iot, memt;
466 bus_space_handle_t sh, ioh, memh;
467 pci_intr_handle_t ih;
468 pcireg_t mem_type;
469 const char *dstring;
470 const char *intrstr;
471 int ioh_valid, memh_valid;
472 size_t mamt;
473
474 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
475 PCI_MAPREG_TYPE_IO, 0,
476 &iot, &ioh, NULL, NULL) == 0);
477
478 mem_type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MEM_MAP_REG);
479 if (PCI_MAPREG_TYPE(mem_type) != PCI_MAPREG_TYPE_MEM) {
480 memh_valid = 0;
481 } else if (PCI_MAPREG_MEM_TYPE(mem_type) != PCI_MAPREG_MEM_TYPE_32BIT &&
482 PCI_MAPREG_MEM_TYPE(mem_type) != PCI_MAPREG_MEM_TYPE_64BIT) {
483 memh_valid = 0;
484 } else {
485 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG, mem_type, 0,
486 &memt, &memh, NULL, NULL) == 0);
487 }
488 if (memh_valid) {
489 st = memt;
490 sh = memh;
491 } else if (ioh_valid) {
492 st = iot;
493 sh = ioh;
494 } else {
495 printf(": unable to map device registers\n");
496 return;
497 }
498 dstring = "\n";
499
500 isp->isp_nchan = 1;
501 mamt = 0;
502
503 pcs->pci_st = st;
504 pcs->pci_sh = sh;
505 pcs->pci_pc = pa->pa_pc;
506 pcs->pci_tag = pa->pa_tag;
507 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
508 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
509 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
510 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
511 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
512 rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;
513
514
515 #ifndef ISP_DISABLE_1020_SUPPORT
516 if (pa->pa_id == PCI_QLOGIC_ISP) {
517 dstring = ": QLogic 1020 Fast Wide SCSI HBA\n";
518 isp->isp_mdvec = &mdvec;
519 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
520 mamt = sizeof (sdparam);
521 }
522 #endif
523 #ifndef ISP_DISABLE_1080_SUPPORT
524 if (pa->pa_id == PCI_QLOGIC_ISP1080) {
525 dstring = ": QLogic 1080 Ultra-2 Wide SCSI HBA\n";
526 isp->isp_mdvec = &mdvec_1080;
527 isp->isp_type = ISP_HA_SCSI_1080;
528 mamt = sizeof (sdparam);
529 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
530 ISP1080_DMA_REGS_OFF;
531 }
532 if (pa->pa_id == PCI_QLOGIC_ISP1240) {
533 dstring = ": QLogic Dual Channel Ultra Wide SCSI HBA\n";
534 isp->isp_mdvec = &mdvec_1080;
535 isp->isp_type = ISP_HA_SCSI_1240;
536 isp->isp_nchan++;
537 mamt = sizeof (sdparam) * 2;
538 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
539 ISP1080_DMA_REGS_OFF;
540 }
541 if (pa->pa_id == PCI_QLOGIC_ISP1280) {
542 dstring = ": QLogic Dual Channel Ultra-2 Wide SCSI HBA\n";
543 isp->isp_mdvec = &mdvec_1080;
544 isp->isp_type = ISP_HA_SCSI_1280;
545 isp->isp_nchan++;
546 mamt = sizeof (sdparam) * 2;
547 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
548 ISP1080_DMA_REGS_OFF;
549 }
550 #endif
551 #ifndef ISP_DISABLE_12160_SUPPORT
552 if (pa->pa_id == PCI_QLOGIC_ISP10160) {
553 dstring = ": QLogic Ultra-3 Wide SCSI HBA\n";
554 isp->isp_mdvec = &mdvec_12160;
555 isp->isp_type = ISP_HA_SCSI_10160;
556 mamt = sizeof (sdparam);
557 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
558 ISP1080_DMA_REGS_OFF;
559 }
560 if (pa->pa_id == PCI_QLOGIC_ISP12160) {
561 dstring = ": QLogic Dual Channel Ultra-3 Wide SCSI HBA\n";
562 isp->isp_mdvec = &mdvec_12160;
563 isp->isp_type = ISP_HA_SCSI_12160;
564 isp->isp_nchan++;
565 mamt = sizeof (sdparam) * 2;
566 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
567 ISP1080_DMA_REGS_OFF;
568 }
569 #endif
570 #ifndef ISP_DISABLE_2100_SUPPORT
571 if (pa->pa_id == PCI_QLOGIC_ISP2100) {
572 dstring = ": QLogic FC-AL HBA\n";
573 isp->isp_mdvec = &mdvec_2100;
574 isp->isp_type = ISP_HA_FC_2100;
575 mamt = sizeof (fcparam);
576 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
577 PCI_MBOX_REGS2100_OFF;
578 if (rev < 3) {
579 /*
580 * XXX: Need to get the actual revision
581 * XXX: number of the 2100 FB. At any rate,
582 * XXX: lower cache line size for early revision
583 			 * XXX: boards.
584 */
585 linesz = 1;
586 }
587 }
588 #endif
589 #ifndef ISP_DISABLE_2200_SUPPORT
590 if (pa->pa_id == PCI_QLOGIC_ISP2200) {
591 dstring = ": QLogic FC-AL and Fabric HBA\n";
592 isp->isp_mdvec = &mdvec_2200;
593 isp->isp_type = ISP_HA_FC_2200;
594 mamt = sizeof (fcparam);
595 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
596 PCI_MBOX_REGS2100_OFF;
597 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
598 }
599 #endif
600 #ifndef ISP_DISABLE_2300_SUPPORT
601 if (pa->pa_id == PCI_QLOGIC_ISP2300 ||
602 pa->pa_id == PCI_QLOGIC_ISP2312 ||
603 pa->pa_id == PCI_QLOGIC_ISP6312) {
604 isp->isp_mdvec = &mdvec_2300;
605 		if (pa->pa_id == PCI_QLOGIC_ISP2300) {
606 			dstring = ": QLogic FC-AL and 2Gbps Fabric HBA\n";
607 			isp->isp_type = ISP_HA_FC_2300;
608 		} else {
609 			dstring =
610 			    ": QLogic Dual Port FC-AL and 2Gbps Fabric HBA\n";
611 			isp->isp_type = ISP_HA_FC_2312;
612 			isp->isp_port = pa->pa_function;
613 		}
615 mamt = sizeof (fcparam);
616 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
617 PCI_MBOX_REGS2300_OFF;
618 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
619 }
620 #endif
621 #ifndef ISP_DISABLE_2322_SUPPORT
622 if (pa->pa_id == PCI_QLOGIC_ISP2322 ||
623 pa->pa_id == PCI_QLOGIC_ISP6322) {
624 isp->isp_mdvec = &mdvec_2322;
625 dstring = ": QLogic FC-AL and 2Gbps Fabric PCI-E HBA\n";
626 isp->isp_type = ISP_HA_FC_2322;
627 isp->isp_port = pa->pa_function;
628 mamt = sizeof (fcparam);
629 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
630 PCI_MBOX_REGS2300_OFF;
631 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
632 }
633 #endif
634 #ifndef ISP_DISABLE_2400_SUPPORT
635 if (pa->pa_id == PCI_QLOGIC_ISP2422 ||
636 pa->pa_id == PCI_QLOGIC_ISP2432) {
637 isp->isp_mdvec = &mdvec_2400;
638 if (pa->pa_id == PCI_QLOGIC_ISP2422) {
639 dstring = ": QLogic FC-AL and 4Gbps Fabric PCI-X HBA\n";
640 } else {
641 dstring = ": QLogic FC-AL and 4Gbps Fabric PCI-E HBA\n";
642 }
643 isp->isp_type = ISP_HA_FC_2400;
644 mamt = sizeof (fcparam);
645 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
646 PCI_MBOX_REGS2400_OFF;
647 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
648 }
649 #endif
650 if (mamt == 0) {
651 return;
652 }
653
654 isp->isp_param = malloc(mamt, M_DEVBUF, M_NOWAIT);
655 if (isp->isp_param == NULL) {
656 printf(nomem, device_xname(&isp->isp_osinfo.dev));
657 return;
658 }
659 memset(isp->isp_param, 0, mamt);
660 mamt = sizeof (struct scsipi_channel) * isp->isp_nchan;
661 isp->isp_osinfo.chan = malloc(mamt, M_DEVBUF, M_NOWAIT);
662 if (isp->isp_osinfo.chan == NULL) {
663 free(isp->isp_param, M_DEVBUF);
664 printf(nomem, device_xname(&isp->isp_osinfo.dev));
665 return;
666 }
667 memset(isp->isp_osinfo.chan, 0, mamt);
668 isp->isp_osinfo.adapter.adapt_nchannels = isp->isp_nchan;
669
670 /*
671 * Set up logging levels.
672 */
673 #ifdef ISP_LOGDEFAULT
674 isp->isp_dblev = ISP_LOGDEFAULT;
675 #else
676 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
677 if (bootverbose)
678 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
679 #ifdef SCSIDEBUG
680 isp->isp_dblev |= ISP_LOGDEBUG0|ISP_LOGDEBUG1|ISP_LOGDEBUG2;
681 #endif
682 #endif
683 if (isp->isp_dblev & ISP_LOGCONFIG) {
684 printf("\n");
685 } else {
686 printf(dstring);
687 }
688
689 isp->isp_dmatag = pa->pa_dmat;
690 isp->isp_revision = rev;
691
692 /*
693 	 * Make sure the PCI command register is set sanely.
694 */
695 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
696 data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
697
698 /*
699 * Not so sure about these- but I think it's important that they get
700 * enabled......
701 */
702 data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
703 if (IS_2300(isp)) { /* per QLogic errata */
704 data &= ~PCI_COMMAND_INVALIDATE_ENABLE;
705 }
706 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
707
708 /*
709 	 * Set the latency timer and cache line size to sane values,
710 	 * and make sure the expansion ROM is disabled.
711 */
712 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
713 data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
714 data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
715 data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
716 data |= (linesz << PCI_CACHELINE_SHIFT);
717 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
718
719 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
720 data &= ~1;
721 pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
722
723 if (pci_intr_map(pa, &ih)) {
724 aprint_error_dev(&isp->isp_osinfo.dev, "couldn't map interrupt\n");
725 free(isp->isp_param, M_DEVBUF);
726 free(isp->isp_osinfo.chan, M_DEVBUF);
727 return;
728 }
729 intrstr = pci_intr_string(pa->pa_pc, ih);
730 if (intrstr == NULL)
731 intrstr = "<I dunno>";
732 pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
733 isp_pci_intr, isp);
734 if (pcs->pci_ih == NULL) {
735 aprint_error_dev(&isp->isp_osinfo.dev, "couldn't establish interrupt at %s\n",
736 intrstr);
737 free(isp->isp_param, M_DEVBUF);
738 free(isp->isp_osinfo.chan, M_DEVBUF);
739 return;
740 }
741
742 printf("%s: interrupting at %s\n", device_xname(&isp->isp_osinfo.dev), intrstr);
743
744 isp->isp_confopts = self->dv_cfdata->cf_flags;
745 ISP_LOCK(isp);
746 isp_reset(isp);
747 if (isp->isp_state != ISP_RESETSTATE) {
748 ISP_UNLOCK(isp);
749 free(isp->isp_param, M_DEVBUF);
750 free(isp->isp_osinfo.chan, M_DEVBUF);
751 return;
752 }
753 isp_init(isp);
754 if (isp->isp_state != ISP_INITSTATE) {
755 isp_uninit(isp);
756 ISP_UNLOCK(isp);
757 free(isp->isp_param, M_DEVBUF);
758 free(isp->isp_osinfo.chan, M_DEVBUF);
759 return;
760 }
761 /*
762 * Do platform attach.
763 */
764 ISP_UNLOCK(isp);
765 isp_attach(isp);
766 }
767
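/*
 * The core driver addresses registers with "virtual" offsets that encode
 * a register block in the high bits and an offset within the block in the
 * low byte.  IspVirt2Off() maps such an offset to a bus_space offset using
 * the pci_poff[] table filled in at attach time; for example,
 *
 *	IspVirt2Off(isp, OUTMAILBOX0) ==
 *	    pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] + (OUTMAILBOX0 & 0xff)
 */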
768 #define IspVirt2Off(a, x) \
769 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
770 _BLK_REG_SHFT] + ((x) & 0xff))
771
772 #define BXR2(pcs, off) \
773 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
774 #define BXW2(pcs, off, v) \
775 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
776 #define BXR4(pcs, off) \
777 bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
778 #define BXW4(pcs, off, v) \
779 bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)
780
781
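/*
 * Read a 16-bit register repeatedly (up to 1000 times) until two
 * back-to-back reads agree; used on the ISP2100, whose interrupt status
 * and semaphore registers may return transient values.  Returns nonzero
 * if a stable value could not be obtained.
 */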
782 static int
783 isp_pci_rd_debounced(struct ispsoftc *isp, int off, uint16_t *rp)
784 {
785 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
786 uint16_t val0, val1;
787 int i = 0;
788
789 do {
790 val0 = BXR2(pcs, IspVirt2Off(isp, off));
791 val1 = BXR2(pcs, IspVirt2Off(isp, off));
792 } while (val0 != val1 && ++i < 1000);
793 if (val0 != val1) {
794 return (1);
795 }
796 *rp = val0;
797 return (0);
798 }
799
800 #if !(defined(ISP_DISABLE_2100_SUPPORT) && \
801 	defined(ISP_DISABLE_2200_SUPPORT) && \
802 	defined(ISP_DISABLE_1020_SUPPORT) && \
803 	defined(ISP_DISABLE_1080_SUPPORT) && \
804 	defined(ISP_DISABLE_12160_SUPPORT))
805 static int
806 isp_pci_rd_isr(struct ispsoftc *isp, uint32_t *isrp,
807 uint16_t *semap, uint16_t *mbp)
808 {
809 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
810 uint16_t isr, sema;
811
812 if (IS_2100(isp)) {
813 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
814 return (0);
815 }
816 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
817 return (0);
818 }
819 } else {
820 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
821 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
822 }
823 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
824 isr &= INT_PENDING_MASK(isp);
825 sema &= BIU_SEMA_LOCK;
826 if (isr == 0 && sema == 0) {
827 return (0);
828 }
829 *isrp = isr;
830 if ((*semap = sema) != 0) {
831 if (IS_2100(isp)) {
832 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
833 return (0);
834 }
835 } else {
836 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
837 }
838 }
839 return (1);
840 }
841 #endif
842
843 #if !(defined(ISP_DISABLE_2300_SUPPORT) && defined(ISP_DISABLE_2322_SUPPORT))
844 static int
845 isp_pci_rd_isr_2300(struct ispsoftc *isp, uint32_t *isrp,
846 uint16_t *semap, uint16_t *mbox0p)
847 {
848 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
849 uint32_t r2hisr;
850
851 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
852 *isrp = 0;
853 return (0);
854 }
855 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
856 IspVirt2Off(pcs, BIU_R2HSTSLO));
857 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
858 if ((r2hisr & BIU_R2HST_INTR) == 0) {
859 *isrp = 0;
860 return (0);
861 }
862 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
863 case ISPR2HST_ROM_MBX_OK:
864 case ISPR2HST_ROM_MBX_FAIL:
865 case ISPR2HST_MBX_OK:
866 case ISPR2HST_MBX_FAIL:
867 case ISPR2HST_ASYNC_EVENT:
868 *isrp = r2hisr & 0xffff;
869 *mbox0p = (r2hisr >> 16);
870 *semap = 1;
871 return (1);
872 case ISPR2HST_RIO_16:
873 *isrp = r2hisr & 0xffff;
874 *mbox0p = ASYNC_RIO1;
875 *semap = 1;
876 return (1);
877 case ISPR2HST_FPOST:
878 *isrp = r2hisr & 0xffff;
879 *mbox0p = ASYNC_CMD_CMPLT;
880 *semap = 1;
881 return (1);
882 case ISPR2HST_FPOST_CTIO:
883 *isrp = r2hisr & 0xffff;
884 *mbox0p = ASYNC_CTIO_DONE;
885 *semap = 1;
886 return (1);
887 case ISPR2HST_RSPQ_UPDATE:
888 *isrp = r2hisr & 0xffff;
889 *mbox0p = 0;
890 *semap = 0;
891 return (1);
892 default:
893 return (0);
894 }
895 }
896 #endif
897
898 #ifndef ISP_DISABLE_2400_SUPPORT
899 static int
900 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
901 uint16_t *semap, uint16_t *mbox0p)
902 {
903 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
904 uint32_t r2hisr;
905
906 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
907 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
908 if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
909 *isrp = 0;
910 return (0);
911 }
912 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
913 case ISP2400R2HST_ROM_MBX_OK:
914 case ISP2400R2HST_ROM_MBX_FAIL:
915 case ISP2400R2HST_MBX_OK:
916 case ISP2400R2HST_MBX_FAIL:
917 case ISP2400R2HST_ASYNC_EVENT:
918 *isrp = r2hisr & 0xffff;
919 *mbox0p = (r2hisr >> 16);
920 *semap = 1;
921 return (1);
922 case ISP2400R2HST_RSPQ_UPDATE:
923 case ISP2400R2HST_ATIO_RSPQ_UPDATE:
924 case ISP2400R2HST_ATIO_RQST_UPDATE:
925 *isrp = r2hisr & 0xffff;
926 *mbox0p = 0;
927 *semap = 0;
928 return (1);
929 default:
930 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
931 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
932 return (0);
933 }
934 }
935
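/*
 * On the 24XX all registers live in the BIU block and are memory mapped;
 * SXP, RISC and DMA block accesses from legacy code paths are warned
 * about and rejected.  Mailbox registers are still 16 bits wide,
 * everything else is accessed as a 32-bit quantity, and R2HSTSHI is the
 * upper half of the 32-bit RISC-to-host status register.
 */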
936 static uint32_t
937 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
938 {
939 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
940 uint32_t rv;
941 int block = regoff & _BLK_REG_MASK;
942
943 switch (block) {
944 case BIU_BLOCK:
945 break;
946 case MBOX_BLOCK:
947 return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
948 case SXP_BLOCK:
949 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
950 return (0xffffffff);
951 case RISC_BLOCK:
952 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
953 return (0xffffffff);
954 case DMA_BLOCK:
955 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
956 return (0xffffffff);
957 default:
958 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
959 return (0xffffffff);
960 }
961
962
963 switch (regoff) {
964 case BIU2400_FLASH_ADDR:
965 case BIU2400_FLASH_DATA:
966 case BIU2400_ICR:
967 case BIU2400_ISR:
968 case BIU2400_CSR:
969 case BIU2400_REQINP:
970 case BIU2400_REQOUTP:
971 case BIU2400_RSPINP:
972 case BIU2400_RSPOUTP:
973 case BIU2400_PRI_REQINP:
974 case BIU2400_PRI_REQOUTP:
975 case BIU2400_ATIO_RSPINP:
976 case BIU2400_ATIO_RSPOUTP:
977 case BIU2400_HCCR:
978 case BIU2400_GPIOD:
979 case BIU2400_GPIOE:
980 case BIU2400_HSEMA:
981 rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
982 break;
983 case BIU2400_R2HSTSLO:
984 rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
985 break;
986 case BIU2400_R2HSTSHI:
987 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
988 break;
989 default:
990 isp_prt(isp, ISP_LOGERR,
991 "isp_pci_rd_reg_2400: unknown offset %x", regoff);
992 rv = 0xffffffff;
993 break;
994 }
995 return (rv);
996 }
997
998 static void
999 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1000 {
1001 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1002 int block = regoff & _BLK_REG_MASK;
1003 volatile int junk;
1004
1005 switch (block) {
1006 case BIU_BLOCK:
1007 break;
1008 case MBOX_BLOCK:
1009 BXW2(pcs, IspVirt2Off(pcs, regoff), val);
1010 junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
1011 return;
1012 case SXP_BLOCK:
1013 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
1014 return;
1015 case RISC_BLOCK:
1016 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
1017 return;
1018 case DMA_BLOCK:
1019 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
1020 return;
1021 default:
1022 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
1023 regoff);
1024 break;
1025 }
1026
1027 switch (regoff) {
1028 case BIU2400_FLASH_ADDR:
1029 case BIU2400_FLASH_DATA:
1030 case BIU2400_ICR:
1031 case BIU2400_ISR:
1032 case BIU2400_CSR:
1033 case BIU2400_REQINP:
1034 case BIU2400_REQOUTP:
1035 case BIU2400_RSPINP:
1036 case BIU2400_RSPOUTP:
1037 case BIU2400_PRI_REQINP:
1038 case BIU2400_PRI_REQOUTP:
1039 case BIU2400_ATIO_RSPINP:
1040 case BIU2400_ATIO_RSPOUTP:
1041 case BIU2400_HCCR:
1042 case BIU2400_GPIOD:
1043 case BIU2400_GPIOE:
1044 case BIU2400_HSEMA:
1045 BXW4(pcs, IspVirt2Off(pcs, regoff), val);
1046 junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
1047 break;
1048 default:
1049 isp_prt(isp, ISP_LOGERR,
1050 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
1051 break;
1052 }
1053 }
1054 #endif
1055
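/*
 * Register accessors for the 1020/1040 and 2x00 parts.  SXP registers
 * share an address window with other blocks, so BIU_CONF1 is used to
 * switch the window in (and back out) around the access; the caller is
 * expected to have paused the RISC processor first.
 */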
1056 static uint32_t
1057 isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
1058 {
1059 uint32_t rv;
1060 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1061 int oldconf = 0;
1062
1063 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1064 /*
1065 * We will assume that someone has paused the RISC processor.
1066 */
1067 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1068 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1069 oldconf | BIU_PCI_CONF1_SXP);
1070 }
1071 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1072 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1073 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1074 }
1075 return (rv);
1076 }
1077
1078 static void
1079 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, uint32_t val)
1080 {
1081 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1082 int oldconf = 0;
1083
1084 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1085 /*
1086 * We will assume that someone has paused the RISC processor.
1087 */
1088 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1089 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1090 oldconf | BIU_PCI_CONF1_SXP);
1091 }
1092 BXW2(pcs, IspVirt2Off(isp, regoff), val);
1093 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1094 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1095 }
1096 }
1097
1098 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
1099 static uint32_t
1100 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
1101 {
1102 uint16_t rv, oc = 0;
1103 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1104
1105 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1106 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1107 uint16_t tc;
1108 /*
1109 * We will assume that someone has paused the RISC processor.
1110 */
1111 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1112 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1113 if (regoff & SXP_BANK1_SELECT)
1114 tc |= BIU_PCI1080_CONF1_SXP1;
1115 else
1116 tc |= BIU_PCI1080_CONF1_SXP0;
1117 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1118 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1119 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1120 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1121 oc | BIU_PCI1080_CONF1_DMA);
1122 }
1123 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1124 if (oc) {
1125 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1126 }
1127 return (rv);
1128 }
1129
1130 static void
1131 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, uint32_t val)
1132 {
1133 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1134 int oc = 0;
1135
1136 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1137 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1138 uint16_t tc;
1139 /*
1140 * We will assume that someone has paused the RISC processor.
1141 */
1142 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1143 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1144 if (regoff & SXP_BANK1_SELECT)
1145 tc |= BIU_PCI1080_CONF1_SXP1;
1146 else
1147 tc |= BIU_PCI1080_CONF1_SXP0;
1148 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1149 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1150 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1151 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1152 oc | BIU_PCI1080_CONF1_DMA);
1153 }
1154 BXW2(pcs, IspVirt2Off(isp, regoff), val);
1155 if (oc) {
1156 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1157 }
1158 }
1159 #endif
1160
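/*
 * Allocate the DMA resources the core driver expects: the xflist command
 * lookup array, one bus_dmamap_t per command slot for data transfers, and
 * DMA-safe memory for the request queue, the response queue and (on Fibre
 * Channel chips) the scratch area, each allocated, mapped and loaded so
 * that its bus address can be handed to the chip.
 */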
1161 static int
1162 isp_pci_mbxdma(struct ispsoftc *isp)
1163 {
1164 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1165 bus_dma_tag_t dmat = isp->isp_dmatag;
1166 bus_dma_segment_t sg;
1167 bus_size_t len, dbound;
1168 fcparam *fcp;
1169 int rs, i;
1170
1171 if (isp->isp_rquest_dma) /* been here before? */
1172 return (0);
1173
1174 if (isp->isp_type <= ISP_HA_SCSI_1040B) {
1175 dbound = 1 << 24;
1176 } else {
1177 /*
1178 * For 32-bit PCI DMA, the range is 32 bits or zero :-)
1179 */
1180 dbound = 0;
1181 }
1182 len = isp->isp_maxcmds * sizeof (XS_T *);
1183 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
1184 if (isp->isp_xflist == NULL) {
1185 isp_prt(isp, ISP_LOGERR, "cannot malloc xflist array");
1186 return (1);
1187 }
1188 memset(isp->isp_xflist, 0, len);
1189 len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
1190 pcs->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1191 if (pcs->pci_xfer_dmap == NULL) {
1192 free(isp->isp_xflist, M_DEVBUF);
1193 isp->isp_xflist = NULL;
1194 isp_prt(isp, ISP_LOGERR, "cannot malloc DMA map array");
1195 return (1);
1196 }
1197 for (i = 0; i < isp->isp_maxcmds; i++) {
1198 if (bus_dmamap_create(dmat, MAXPHYS, (MAXPHYS / PAGE_SIZE) + 1,
1199 MAXPHYS, dbound, BUS_DMA_NOWAIT, &pcs->pci_xfer_dmap[i])) {
1200 isp_prt(isp, ISP_LOGERR, "cannot create DMA maps");
1201 break;
1202 }
1203 }
1204 if (i < isp->isp_maxcmds) {
1205 while (--i >= 0) {
1206 bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
1207 }
1208 free(isp->isp_xflist, M_DEVBUF);
1209 free(pcs->pci_xfer_dmap, M_DEVBUF);
1210 isp->isp_xflist = NULL;
1211 pcs->pci_xfer_dmap = NULL;
1212 return (1);
1213 }
1214
1215 /*
1216 * Allocate and map the request queue.
1217 */
1218 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1219 if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs, 0)) {
1220 goto dmafail;
1221 }
1222 if (bus_dmamem_map(isp->isp_dmatag, &sg, rs, len,
1223 (void *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
1224 goto dmafail;
1225 }
1226 if (bus_dmamap_create(dmat, len, 1, len, dbound, BUS_DMA_NOWAIT,
1227 &isp->isp_rqdmap)) {
1228 goto dmafail;
1229 }
1230 if (bus_dmamap_load(dmat, isp->isp_rqdmap, isp->isp_rquest, len, NULL,
1231 BUS_DMA_NOWAIT)) {
1232 goto dmafail;
1233 }
1234 isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;
1235
1236 /*
1237 * Allocate and map the result queue.
1238 */
1239 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1240 if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
1241 BUS_DMA_NOWAIT)) {
1242 goto dmafail;
1243 }
1244 if (bus_dmamem_map(dmat, &sg, rs, len,
1245 (void *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
1246 goto dmafail;
1247 }
1248 if (bus_dmamap_create(dmat, len, 1, len, dbound, BUS_DMA_NOWAIT,
1249 &isp->isp_rsdmap)) {
1250 goto dmafail;
1251 }
1252 if (bus_dmamap_load(dmat, isp->isp_rsdmap, isp->isp_result, len, NULL,
1253 BUS_DMA_NOWAIT)) {
1254 goto dmafail;
1255 }
1256 isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;
1257
1258 if (IS_SCSI(isp)) {
1259 return (0);
1260 }
1261
1262 /*
1263 * Allocate and map an FC scratch area
1264 */
1265 fcp = isp->isp_param;
1266 len = ISP_FC_SCRLEN;
1267 if (bus_dmamem_alloc(dmat, len, sizeof (uint64_t), 0, &sg, 1, &rs,
1268 BUS_DMA_NOWAIT)) {
1269 goto dmafail;
1270 }
1271 if (bus_dmamem_map(dmat, &sg, rs, len,
1272 (void *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
1273 goto dmafail;
1274 }
1275 if (bus_dmamap_create(dmat, len, 1, len, dbound, BUS_DMA_NOWAIT,
1276 &isp->isp_scdmap)) {
1277 goto dmafail;
1278 }
1279 if (bus_dmamap_load(dmat, isp->isp_scdmap, fcp->isp_scratch, len, NULL,
1280 BUS_DMA_NOWAIT)) {
1281 goto dmafail;
1282 }
1283 fcp->isp_scdma = isp->isp_scdmap->dm_segs[0].ds_addr;
1284 return (0);
1285 dmafail:
1286 isp_prt(isp, ISP_LOGERR, "mailbox DMA setup failure");
1287 for (i = 0; i < isp->isp_maxcmds; i++) {
1288 bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
1289 }
1290 free(isp->isp_xflist, M_DEVBUF);
1291 free(pcs->pci_xfer_dmap, M_DEVBUF);
1292 isp->isp_xflist = NULL;
1293 pcs->pci_xfer_dmap = NULL;
1294 return (1);
1295 }
1296
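/*
 * Build the scatter/gather list for a command.  The data buffer is loaded
 * into this command's DMA map, the first few segments are placed in the
 * request queue entry itself (ISP_RQDSEG or ISP_RQDSEG_T2 of them), and
 * any remaining segments spill into RQSTYPE_DATASEG continuation entries.
 * The entry is byte-swapped into the queue via the isp_put_*() routines
 * before the caller advances the request queue in-pointer.
 */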
1297 static int
1298 isp_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, ispreq_t *rq,
1299 uint32_t *nxtip, uint32_t optr)
1300 {
1301 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1302 bus_dmamap_t dmap;
1303 uint32_t starti = isp->isp_reqidx, nxti = *nxtip;
1304 ispreq_t *qep;
1305 int segcnt, seg, error, ovseg, seglim, drq;
1306
1307 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
1308 dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
1309 if (xs->datalen == 0) {
1310 rq->req_seg_count = 1;
1311 goto mbxsync;
1312 }
1313 if (xs->xs_control & XS_CTL_DATA_IN) {
1314 drq = REQFLAG_DATA_IN;
1315 } else {
1316 drq = REQFLAG_DATA_OUT;
1317 }
1318
1319 if (IS_FC(isp)) {
1320 seglim = ISP_RQDSEG_T2;
1321 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
1322 ((ispreqt2_t *)rq)->req_flags |= drq;
1323 } else {
1324 rq->req_flags |= drq;
1325 if (XS_CDBLEN(xs) > 12) {
1326 seglim = 0;
1327 } else {
1328 seglim = ISP_RQDSEG;
1329 }
1330 }
1331 error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
1332 NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
1333 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
1334 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
1335 if (error) {
1336 isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)", error);
1337 XS_SETERR(xs, HBA_BOTCH);
1338 if (error == EAGAIN || error == ENOMEM)
1339 return (CMD_EAGAIN);
1340 else
1341 return (CMD_COMPLETE);
1342 }
1343
1344 segcnt = dmap->dm_nsegs;
1345
1346 isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
1347 xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
1348 "write from", xs->data, segcnt);
1349
1350 for (seg = 0, rq->req_seg_count = 0;
1351 seglim && seg < segcnt && rq->req_seg_count < seglim;
1352 seg++, rq->req_seg_count++) {
1353 if (IS_FC(isp)) {
1354 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1355 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1356 dmap->dm_segs[seg].ds_len;
1357 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1358 dmap->dm_segs[seg].ds_addr;
1359 } else {
1360 rq->req_dataseg[rq->req_seg_count].ds_count =
1361 dmap->dm_segs[seg].ds_len;
1362 rq->req_dataseg[rq->req_seg_count].ds_base =
1363 dmap->dm_segs[seg].ds_addr;
1364 }
1365 isp_prt(isp, ISP_LOGDEBUG2, "seg0.[%d]={0x%lx,%lu}",
1366 rq->req_seg_count, (long) dmap->dm_segs[seg].ds_addr,
1367 (unsigned long) dmap->dm_segs[seg].ds_len);
1368 }
1369
1370 if (seg == segcnt) {
1371 goto dmasync;
1372 }
1373
1374 do {
1375 uint32_t onxti;
1376 ispcontreq_t *crq, *cqe, local;
1377
1378 crq = &local;
1379
1380 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1381 onxti = nxti;
1382 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1383 if (nxti == optr) {
1384 isp_prt(isp, ISP_LOGERR, "Request Queue Overflow++");
1385 bus_dmamap_unload(isp->isp_dmatag, dmap);
1386 XS_SETERR(xs, HBA_BOTCH);
1387 return (CMD_EAGAIN);
1388 }
1389 rq->req_header.rqs_entry_count++;
1390 memset((void *)crq, 0, sizeof (*crq));
1391 crq->req_header.rqs_entry_count = 1;
1392 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1393
1394 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
1395 rq->req_seg_count++, seg++, ovseg++) {
1396 crq->req_dataseg[ovseg].ds_count =
1397 dmap->dm_segs[seg].ds_len;
1398 crq->req_dataseg[ovseg].ds_base =
1399 dmap->dm_segs[seg].ds_addr;
1400 isp_prt(isp, ISP_LOGDEBUG2, "seg%d.[%d]={0x%lx,%lu}",
1401 rq->req_header.rqs_entry_count - 1,
1402 rq->req_seg_count, (long)dmap->dm_segs[seg].ds_addr,
1403 (unsigned long) dmap->dm_segs[seg].ds_len);
1404 }
1405 isp_put_cont_req(isp, crq, cqe);
1406 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1407 } while (seg < segcnt);
1408
1409
1410 dmasync:
1411 bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1412 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
1413 BUS_DMASYNC_PREWRITE);
1414
1415 mbxsync:
1416 switch (rq->req_header.rqs_entry_type) {
1417 case RQSTYPE_REQUEST:
1418 isp_put_request(isp, rq, qep);
1419 break;
1420 case RQSTYPE_CMDONLY:
1421 isp_put_extended_request(isp, (ispextreq_t *)rq,
1422 (ispextreq_t *)qep);
1423 break;
1424 case RQSTYPE_T2RQS:
1425 if (ISP_CAP_2KLOGIN(isp)) {
1426 isp_put_request_t2e(isp,
1427 (ispreqt2e_t *) rq, (ispreqt2e_t *) qep);
1428 } else {
1429 isp_put_request_t2(isp,
1430 (ispreqt2_t *) rq, (ispreqt2_t *) qep);
1431 }
1432 break;
1433 }
1434 *nxtip = nxti;
1435 return (CMD_QUEUED);
1436 }
1437
1438
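/*
 * The 24XX variant of the above: type 7 requests carry a single embedded
 * 64-bit data segment, so any additional segments go into RQSTYPE_A64_CONT
 * continuation entries with DMA_LO32()/DMA_HI32() split addresses.
 */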
1439 #if !defined(ISP_DISABLE_2400_SUPPORT)
1440 static int
1441 isp2400_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs,
1442 ispreq_t *ispreq, uint32_t *nxtip, uint32_t optr)
1443 {
1444 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1445 bus_dmamap_t dmap;
1446 bus_dma_segment_t *dm_segs, *eseg;
1447 uint32_t starti = isp->isp_reqidx, nxti = *nxtip;
1448 ispreqt7_t *rq;
1449 void *qep;
1450 int nseg, datalen, error, seglim;
1451
1452 rq = (ispreqt7_t *) ispreq;
1453 qep = ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
1454 dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
1455 if (xs->datalen == 0) {
1456 rq->req_seg_count = 1;
1457 goto mbxsync;
1458 }
1459
1460 error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
1461 NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
1462 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
1463 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
1464 if (error) {
1465 isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)", error);
1466 XS_SETERR(xs, HBA_BOTCH);
1467 if (error == EAGAIN || error == ENOMEM) {
1468 return (CMD_EAGAIN);
1469 } else {
1470 return (CMD_COMPLETE);
1471 }
1472 }
1473
1474 nseg = dmap->dm_nsegs;
1475 dm_segs = dmap->dm_segs;
1476
1477 isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
1478 xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
1479 "write from", xs->data, nseg);
1480
1481 /*
1482 * We're passed an initial partially filled in entry that
1483 * has most fields filled in except for data transfer
1484 * related values.
1485 *
1486 * Our job is to fill in the initial request queue entry and
1487 * then to start allocating and filling in continuation entries
1488 * until we've covered the entire transfer.
1489 */
1490 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
1491 rq->req_dl = xs->datalen;
1492 datalen = xs->datalen;
1493 if (xs->xs_control & XS_CTL_DATA_IN) {
1494 rq->req_alen_datadir = 0x2;
1495 } else {
1496 rq->req_alen_datadir = 0x1;
1497 }
1498
1499 eseg = dm_segs + nseg;
1500
1501 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
1502 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
1503 rq->req_dataseg.ds_count = dm_segs->ds_len;
1504
1505 datalen -= dm_segs->ds_len;
1506
1507 dm_segs++;
1508 rq->req_seg_count++;
1509
1510 while (datalen > 0 && dm_segs != eseg) {
1511 uint32_t onxti;
1512 ispcontreq64_t local, *crq = &local, *cqe;
1513
1514 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1515 onxti = nxti;
1516 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1517 if (nxti == optr) {
1518 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
1519 return (CMD_EAGAIN);
1520 }
1521 rq->req_header.rqs_entry_count++;
1522 MEMZERO((void *)crq, sizeof (*crq));
1523 crq->req_header.rqs_entry_count = 1;
1524 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1525
1526 seglim = 0;
1527 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
1528 crq->req_dataseg[seglim].ds_base =
1529 DMA_LO32(dm_segs->ds_addr);
1530 crq->req_dataseg[seglim].ds_basehi =
1531 DMA_HI32(dm_segs->ds_addr);
1532 crq->req_dataseg[seglim].ds_count =
1533 dm_segs->ds_len;
1534 			datalen -= dm_segs->ds_len;
1535 			rq->req_seg_count++;
1536 			dm_segs++;
1537 			seglim++;
1538 }
1539 if (isp->isp_dblev & ISP_LOGDEBUG1) {
1540 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
1541 }
1542 isp_put_cont64_req(isp, crq, cqe);
1543 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1544 }
1545 *nxtip = nxti;
1546
1547
1548 bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1549 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
1550 BUS_DMASYNC_PREWRITE);
1551
1552 mbxsync:
1553 isp_put_request_t7(isp, rq, qep);
1554 *nxtip = nxti;
1555 return (CMD_QUEUED);
1556 }
1557 #endif
1558
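/*
 * Interrupt handler: use the chip-specific ISR reader from the mdvec to
 * fetch the interrupt status, semaphore and mailbox 0, count interrupts
 * that were not ours in isp_intbogus, and pass real ones to the common
 * isp_intr().
 */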
1559 static int
1560 isp_pci_intr(void *arg)
1561 {
1562 uint32_t isr;
1563 uint16_t sema, mbox;
1564 struct ispsoftc *isp = arg;
1565
1566 isp->isp_intcnt++;
1567 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1568 isp->isp_intbogus++;
1569 return (0);
1570 } else {
1571 isp->isp_osinfo.onintstack = 1;
1572 isp_intr(isp, isr, sema, mbox);
1573 isp->isp_osinfo.onintstack = 0;
1574 return (1);
1575 }
1576 }
1577
1578 static void
1579 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, uint32_t handle)
1580 {
1581 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1582 bus_dmamap_t dmap = pcs->pci_xfer_dmap[isp_handle_index(handle)];
1583 bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1584 xs->xs_control & XS_CTL_DATA_IN ?
1585 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1586 bus_dmamap_unload(isp->isp_dmatag, dmap);
1587 }
1588
1589 static void
1590 isp_pci_reset0(ispsoftc_t *isp)
1591 {
1592 ISP_DISABLE_INTS(isp);
1593 }
1594
1595 static void
1596 isp_pci_reset1(ispsoftc_t *isp)
1597 {
1598 if (!IS_24XX(isp)) {
1599 /* Make sure the BIOS is disabled */
1600 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1601 }
1602 /* and enable interrupts */
1603 ISP_ENABLE_INTS(isp);
1604 }
1605
1606 static void
1607 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
1608 {
1609 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1610 if (msg)
1611 printf("%s: %s\n", device_xname(&isp->isp_osinfo.dev), msg);
1612 if (IS_SCSI(isp))
1613 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1614 else
1615 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1616 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1617 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1618 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1619
1620
1621 if (IS_SCSI(isp)) {
1622 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1623 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1624 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1625 ISP_READ(isp, CDMA_FIFO_STS));
1626 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1627 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1628 ISP_READ(isp, DDMA_FIFO_STS));
1629 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1630 ISP_READ(isp, SXP_INTERRUPT),
1631 ISP_READ(isp, SXP_GROSS_ERR),
1632 ISP_READ(isp, SXP_PINS_CTRL));
1633 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1634 }
1635 printf(" mbox regs: %x %x %x %x %x\n",
1636 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1637 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1638 ISP_READ(isp, OUTMAILBOX4));
1639 printf(" PCI Status Command/Status=%x\n",
1640 pci_conf_read(pcs->pci_pc, pcs->pci_tag, PCI_COMMAND_STATUS_REG));
1641 }
1642