isp_pci.c revision 1.85 1 /* $NetBSD: isp_pci.c,v 1.85 2002/10/02 16:51:40 thorpej Exp $ */
2 /*
3 * This driver, which is contained in NetBSD in the files:
4 *
5 * sys/dev/ic/isp.c
6 * sys/dev/ic/isp_inline.h
7 * sys/dev/ic/isp_netbsd.c
8 * sys/dev/ic/isp_netbsd.h
9 * sys/dev/ic/isp_target.c
10 * sys/dev/ic/isp_target.h
11 * sys/dev/ic/isp_tpublic.h
12 * sys/dev/ic/ispmbox.h
13 * sys/dev/ic/ispreg.h
14 * sys/dev/ic/ispvar.h
15 * sys/microcode/isp/asm_sbus.h
16 * sys/microcode/isp/asm_1040.h
17 * sys/microcode/isp/asm_1080.h
18 * sys/microcode/isp/asm_12160.h
19 * sys/microcode/isp/asm_2100.h
20 * sys/microcode/isp/asm_2200.h
21 * sys/pci/isp_pci.c
22 * sys/sbus/isp_sbus.c
23 *
24 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
25 * This driver also is shared source with the FreeBSD, OpenBSD, Linux, and
26 * Solaris versions. This tends to be an interesting maintenance problem.
27 *
28 * Please coordinate with Matthew Jacob on changes you wish to make here.
29 */
30 /*
31 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
32 */
33 /*
34 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
35 * All rights reserved.
36 *
37 * Additional Copyright (C) 2000, 2001 by Matthew Jacob
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 #include <sys/cdefs.h>
60 __KERNEL_RCSID(0, "$NetBSD: isp_pci.c,v 1.85 2002/10/02 16:51:40 thorpej Exp $");
61
62 #include <dev/ic/isp_netbsd.h>
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 #include <dev/pci/pcidevs.h>
66 #include <uvm/uvm_extern.h>
67 #include <sys/reboot.h>
68
/*
 * Bus-specific method declarations handed to the common ISP core via
 * the ispmdvec tables below: register access, interrupt status,
 * mailbox/command DMA setup and teardown, reset and register dump.
 */
static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
/* 1080/12160 variants: these chips bank-switch SXP/DMA register pages. */
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
#endif
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
    u_int16_t *, u_int16_t);
static void isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
static int isp_pci_intr(void *);

/*
 * Per-chip-family RISC firmware images.  Any family may be compiled
 * out, in which case the corresponding *_RISC_CODE pointer is NULL
 * (the core then relies on the adapter's on-board firmware).
 */
#if defined(ISP_DISABLE_1020_SUPPORT)
#define	ISP_1040_RISC_CODE	NULL
#else
#define	ISP_1040_RISC_CODE	(u_int16_t *) isp_1040_risc_code
#include <dev/microcode/isp/asm_1040.h>
#endif

#if defined(ISP_DISABLE_1080_SUPPORT)
#define	ISP_1080_RISC_CODE	NULL
#else
#define	ISP_1080_RISC_CODE	(u_int16_t *) isp_1080_risc_code
#include <dev/microcode/isp/asm_1080.h>
#endif

#if defined(ISP_DISABLE_12160_SUPPORT)
#define	ISP_12160_RISC_CODE	NULL
#else
#define	ISP_12160_RISC_CODE	(u_int16_t *) isp_12160_risc_code
#include <dev/microcode/isp/asm_12160.h>
#endif

#if defined(ISP_DISABLE_2100_SUPPORT)
#define	ISP_2100_RISC_CODE	NULL
#else
#define	ISP_2100_RISC_CODE	(u_int16_t *) isp_2100_risc_code
#include <dev/microcode/isp/asm_2100.h>
#endif

#if defined(ISP_DISABLE_2200_SUPPORT)
#define	ISP_2200_RISC_CODE	NULL
#else
#define	ISP_2200_RISC_CODE	(u_int16_t *) isp_2200_risc_code
#include <dev/microcode/isp/asm_2200.h>
#endif

#if defined(ISP_DISABLE_2300_SUPPORT)
#define	ISP_2300_RISC_CODE	NULL
#else
#define	ISP_2300_RISC_CODE	(u_int16_t *) isp_2300_risc_code
#include <dev/microcode/isp/asm_2300.h>
#endif
128
/*
 * Method vector tables, one per supported chip family.  Each names the
 * interrupt-status, register access, DMA and reset entry points, the
 * firmware image to download, and (SCSI chips only) default BIU
 * configuration flags.  Field order must match struct ispmdvec; the
 * FC tables omit trailing fields, which C aggregate initialization
 * zero-fills.
 */
#ifndef ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* slot unused by the PCI front-end */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
#endif

#ifndef ISP_DISABLE_1080_SUPPORT
/* 1080/1240/1280: banked register access via the *_1080 accessors. */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
#endif

#ifndef ISP_DISABLE_12160_SUPPORT
/* 12160: same banked accessors as the 1080, different firmware. */
static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_12160_RISC_CODE,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
#endif

#ifndef ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE
};
#endif

#ifndef ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE
};
#endif

#ifndef ISP_DISABLE_2300_SUPPORT
/* 2300/2312: 32-bit RISC-to-host status register needs its own ISR read. */
static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2300_RISC_CODE
};
#endif
221
/*
 * Fallback vendor/product IDs for systems whose <dev/pci/pcidevs.h>
 * predates some of these parts, followed by composite match values in
 * the (product << 16 | vendor) layout that pa->pa_id uses.
 */
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

/* BAR offsets for the I/O and memory register windows, and the ROM BAR. */
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
#define	PCIR_ROMADDR	0x30

/* Defaults written into the latency-timer and cache-line-size fields. */
#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
295
static int isp_pci_probe(struct device *, struct cfdata *, void *);
static void isp_pci_attach(struct device *, struct device *, void *);

/*
 * PCI-specific softc: wraps the common ispsoftc (which must come
 * first, since the two are freely cast to one another) and carries the
 * PCI handles, mapped register window, per-command DMA maps, and the
 * per-register-block offsets consumed by IspVirt2Off().
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* common core; must be first */
	pci_chipset_tag_t	pci_pc;		/* chipset tag */
	pcitag_t		pci_tag;	/* our config-space tag */
	bus_space_tag_t		pci_st;		/* register window tag */
	bus_space_handle_t	pci_sh;		/* register window handle */
	bus_dmamap_t		*pci_xfer_dmap;	/* one map per command slot */
	void *			pci_ih;		/* established interrupt handle */
	int16_t			pci_poff[_NREG_BLKS]; /* register block offsets */
};

CFATTACH_DECL(isp_pci, sizeof (struct isp_pcisoftc),
    isp_pci_probe, isp_pci_attach, NULL, NULL);

#ifdef	DEBUG
/* Printed once at first attach when DEBUG is configured. */
const char vstring[] =
    "Qlogic ISP Driver, NetBSD (pci) Platform Version %d.%d Core Version %d.%d";
#endif
317
318 static int
319 isp_pci_probe(struct device *parent, struct cfdata *match, void *aux)
320 {
321 struct pci_attach_args *pa = aux;
322 switch (pa->pa_id) {
323 #ifndef ISP_DISABLE_1020_SUPPORT
324 case PCI_QLOGIC_ISP:
325 return (1);
326 #endif
327 #ifndef ISP_DISABLE_1080_SUPPORT
328 case PCI_QLOGIC_ISP1080:
329 case PCI_QLOGIC_ISP1240:
330 case PCI_QLOGIC_ISP1280:
331 return (1);
332 #endif
333 #ifndef ISP_DISABLE_12160_SUPPORT
334 case PCI_QLOGIC_ISP12160:
335 return (1);
336 #endif
337 #ifndef ISP_DISABLE_2100_SUPPORT
338 case PCI_QLOGIC_ISP2100:
339 return (1);
340 #endif
341 #ifndef ISP_DISABLE_2200_SUPPORT
342 case PCI_QLOGIC_ISP2200:
343 return (1);
344 #endif
345 #ifndef ISP_DISABLE_2300_SUPPORT
346 case PCI_QLOGIC_ISP2300:
347 case PCI_QLOGIC_ISP2312:
348 return (1);
349 #endif
350 default:
351 return (0);
352 }
353 }
354
355
356 static void
357 isp_pci_attach(struct device *parent, struct device *self, void *aux)
358 {
359 #ifdef DEBUG
360 static char oneshot = 1;
361 #endif
362 static const char nomem[] = "\n%s: no mem for sdparam table\n";
363 u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
364 struct pci_attach_args *pa = aux;
365 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
366 struct ispsoftc *isp = &pcs->pci_isp;
367 bus_space_tag_t st, iot, memt;
368 bus_space_handle_t sh, ioh, memh;
369 pci_intr_handle_t ih;
370 char *dstring;
371 const char *intrstr;
372 int ioh_valid, memh_valid;
373
374 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
375 PCI_MAPREG_TYPE_IO, 0,
376 &iot, &ioh, NULL, NULL) == 0);
377 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
378 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
379 &memt, &memh, NULL, NULL) == 0);
380 if (memh_valid) {
381 st = memt;
382 sh = memh;
383 } else if (ioh_valid) {
384 st = iot;
385 sh = ioh;
386 } else {
387 printf(": unable to map device registers\n");
388 return;
389 }
390 dstring = "\n";
391
392 pcs->pci_st = st;
393 pcs->pci_sh = sh;
394 pcs->pci_pc = pa->pa_pc;
395 pcs->pci_tag = pa->pa_tag;
396 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
397 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
398 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
399 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
400 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
401 rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;
402
403 #ifndef ISP_DISABLE_1020_SUPPORT
404 if (pa->pa_id == PCI_QLOGIC_ISP) {
405 dstring = ": QLogic 1020 Ultra Wide SCSI HBA\n";
406 isp->isp_mdvec = &mdvec;
407 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
408 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
409 if (isp->isp_param == NULL) {
410 printf(nomem, isp->isp_name);
411 return;
412 }
413 memset(isp->isp_param, 0, sizeof (sdparam));
414 }
415 #endif
416 #ifndef ISP_DISABLE_1080_SUPPORT
417 if (pa->pa_id == PCI_QLOGIC_ISP1080) {
418 dstring = ": QLogic 1080 Ultra-2 Wide SCSI HBA\n";
419 isp->isp_mdvec = &mdvec_1080;
420 isp->isp_type = ISP_HA_SCSI_1080;
421 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
422 if (isp->isp_param == NULL) {
423 printf(nomem, isp->isp_name);
424 return;
425 }
426 memset(isp->isp_param, 0, sizeof (sdparam));
427 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
428 ISP1080_DMA_REGS_OFF;
429 }
430 if (pa->pa_id == PCI_QLOGIC_ISP1240) {
431 dstring = ": QLogic Dual Channel Ultra Wide SCSI HBA\n";
432 isp->isp_mdvec = &mdvec_1080;
433 isp->isp_type = ISP_HA_SCSI_1240;
434 isp->isp_param =
435 malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
436 if (isp->isp_param == NULL) {
437 printf(nomem, isp->isp_name);
438 return;
439 }
440 memset(isp->isp_param, 0, 2 * sizeof (sdparam));
441 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
442 ISP1080_DMA_REGS_OFF;
443 }
444 if (pa->pa_id == PCI_QLOGIC_ISP1280) {
445 dstring = ": QLogic Dual Channel Ultra-2 Wide SCSI HBA\n";
446 isp->isp_mdvec = &mdvec_1080;
447 isp->isp_type = ISP_HA_SCSI_1280;
448 isp->isp_param =
449 malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
450 if (isp->isp_param == NULL) {
451 printf(nomem, isp->isp_name);
452 return;
453 }
454 memset(isp->isp_param, 0, 2 * sizeof (sdparam));
455 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
456 ISP1080_DMA_REGS_OFF;
457 }
458 #endif
459 #ifndef ISP_DISABLE_12160_SUPPORT
460 if (pa->pa_id == PCI_QLOGIC_ISP12160) {
461 dstring = ": QLogic Dual Channel Ultra-3 Wide SCSI HBA\n";
462 isp->isp_mdvec = &mdvec_12160;
463 isp->isp_type = ISP_HA_SCSI_12160;
464 isp->isp_param =
465 malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
466 if (isp->isp_param == NULL) {
467 printf(nomem, isp->isp_name);
468 return;
469 }
470 memset(isp->isp_param, 0, 2 * sizeof (sdparam));
471 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
472 ISP1080_DMA_REGS_OFF;
473 }
474 #endif
475 #ifndef ISP_DISABLE_2100_SUPPORT
476 if (pa->pa_id == PCI_QLOGIC_ISP2100) {
477 dstring = ": QLogic FC-AL HBA\n";
478 isp->isp_mdvec = &mdvec_2100;
479 isp->isp_type = ISP_HA_FC_2100;
480 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
481 if (isp->isp_param == NULL) {
482 printf(nomem, isp->isp_name);
483 return;
484 }
485 memset(isp->isp_param, 0, sizeof (fcparam));
486 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
487 PCI_MBOX_REGS2100_OFF;
488 if (rev < 3) {
489 /*
490 * XXX: Need to get the actual revision
491 * XXX: number of the 2100 FB. At any rate,
492 * XXX: lower cache line size for early revision
493 * XXX; boards.
494 */
495 linesz = 1;
496 }
497 }
498 #endif
499 #ifndef ISP_DISABLE_2200_SUPPORT
500 if (pa->pa_id == PCI_QLOGIC_ISP2200) {
501 dstring = ": QLogic FC-AL and Fabric HBA\n";
502 isp->isp_mdvec = &mdvec_2200;
503 isp->isp_type = ISP_HA_FC_2200;
504 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
505 if (isp->isp_param == NULL) {
506 printf(nomem, isp->isp_name);
507 return;
508 }
509 memset(isp->isp_param, 0, sizeof (fcparam));
510 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
511 PCI_MBOX_REGS2100_OFF;
512 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
513 }
514 #endif
515 #ifndef ISP_DISABLE_2300_SUPPORT
516 if (pa->pa_id == PCI_QLOGIC_ISP2300 ||
517 pa->pa_id == PCI_QLOGIC_ISP2312) {
518 isp->isp_mdvec = &mdvec_2300;
519 if (pa->pa_id == PCI_QLOGIC_ISP2300) {
520 dstring = ": QLogic FC-AL and 2Gbps Fabric HBA\n";
521 isp->isp_type = ISP_HA_FC_2300;
522 } else {
523 dstring =
524 ": QLogic Dual Port FC-AL and 2Gbps Fabric HBA\n";
525 isp->isp_type = ISP_HA_FC_2312;
526 isp->isp_port = pa->pa_function;
527 }
528 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
529 if (isp->isp_param == NULL) {
530 printf(nomem, isp->isp_name);
531 return;
532 }
533 memset(isp->isp_param, 0, sizeof (fcparam));
534 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
535 PCI_MBOX_REGS2300_OFF;
536 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
537 }
538 #endif
539 /*
540 * Set up logging levels.
541 */
542 #ifdef ISP_LOGDEFAULT
543 isp->isp_dblev = ISP_LOGDEFAULT;
544 #else
545 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
546 if (bootverbose)
547 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
548 #ifdef SCSIDEBUG
549 isp->isp_dblev |= ISP_LOGDEBUG0|ISP_LOGDEBUG1|ISP_LOGDEBUG2;
550 #endif
551 #endif
552 if (isp->isp_dblev & ISP_LOGCONFIG) {
553 printf("\n");
554 } else {
555 printf(dstring);
556 }
557
558 #ifdef DEBUG
559 if (oneshot) {
560 oneshot = 0;
561 isp_prt(isp, ISP_LOGCONFIG, vstring,
562 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
563 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
564 }
565 #endif
566
567 isp->isp_dmatag = pa->pa_dmat;
568 isp->isp_revision = rev;
569
570 /*
571 * Make sure that command register set sanely.
572 */
573 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
574 data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
575
576 /*
577 * Not so sure about these- but I think it's important that they get
578 * enabled......
579 */
580 data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
581 if (IS_2300(isp)) { /* per QLogic errata */
582 data &= ~PCI_COMMAND_INVALIDATE_ENABLE;
583 }
584 if (IS_23XX(isp)) {
585 isp->isp_touched = 1;
586 }
587 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
588
589 /*
590 * Make sure that the latency timer, cache line size,
591 * and ROM is disabled.
592 */
593 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
594 data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
595 data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
596 data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
597 data |= (linesz << PCI_CACHELINE_SHIFT);
598 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
599
600 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
601 data &= ~1;
602 pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
603
604 if (pci_intr_map(pa, &ih)) {
605 printf("%s: couldn't map interrupt\n", isp->isp_name);
606 free(isp->isp_param, M_DEVBUF);
607 return;
608 }
609 intrstr = pci_intr_string(pa->pa_pc, ih);
610 if (intrstr == NULL)
611 intrstr = "<I dunno>";
612 pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
613 isp_pci_intr, isp);
614 if (pcs->pci_ih == NULL) {
615 printf("%s: couldn't establish interrupt at %s\n",
616 isp->isp_name, intrstr);
617 free(isp->isp_param, M_DEVBUF);
618 return;
619 }
620
621 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
622
623 if (IS_FC(isp)) {
624 DEFAULT_NODEWWN(isp) = 0x400000007F000002;
625 DEFAULT_PORTWWN(isp) = 0x400000007F000002;
626 }
627
628 isp->isp_confopts = self->dv_cfdata->cf_flags;
629 isp->isp_role = ISP_DEFAULT_ROLES;
630 ISP_LOCK(isp);
631 isp->isp_osinfo.no_mbox_ints = 1;
632 isp_reset(isp);
633 if (isp->isp_state != ISP_RESETSTATE) {
634 ISP_UNLOCK(isp);
635 free(isp->isp_param, M_DEVBUF);
636 return;
637 }
638 ENABLE_INTS(isp);
639 isp_init(isp);
640 if (isp->isp_state != ISP_INITSTATE) {
641 isp_uninit(isp);
642 ISP_UNLOCK(isp);
643 free(isp->isp_param, M_DEVBUF);
644 return;
645 }
646 /*
647 * Do platform attach.
648 */
649 ISP_UNLOCK(isp);
650 isp_attach(isp);
651 if (isp->isp_state != ISP_RUNSTATE) {
652 ISP_LOCK(isp);
653 isp_uninit(isp);
654 free(isp->isp_param, M_DEVBUF);
655 ISP_UNLOCK(isp);
656 }
657 }
658
/*
 * Translate a "virtual" register offset (block tag in the high bits,
 * offset within the block in the low byte) into an offset into the
 * mapped PCI register window, using the per-block offsets recorded in
 * pci_poff[] at attach time.
 */
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

/* 16-bit read/write through the mapped register window. */
#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
667
668
/*
 * Debounced register read for the ISP2100: read the register twice
 * until two consecutive reads agree, giving up after 1000 attempts.
 * Returns 0 with the stable value in *rp, or nonzero on failure.
 */
static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		/* never stabilized */
		return (1);
	}
	*rp = val0;
	return (0);
}
686
/*
 * Read interrupt status for pre-2300 chips.  Returns nonzero when an
 * interrupt is pending, storing the masked ISR value, the semaphore
 * state, and — when the semaphore is held — outgoing mailbox 0.
 * ISP2100 registers are read via the debounce helper; its failure is
 * treated as "no interrupt".
 */
static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		/* nothing pending */
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		/* semaphore held: mailbox 0 carries the event code */
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
723
724 #ifndef ISP_DISABLE_2300_SUPPORT
725 static int
726 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
727 u_int16_t *semap, u_int16_t *mbox0p)
728 {
729 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
730 u_int32_t r2hisr;
731
732 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
733 *isrp = 0;
734 return (0);
735 }
736 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
737 IspVirt2Off(pcs, BIU_R2HSTSLO));
738 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
739 if ((r2hisr & BIU_R2HST_INTR) == 0) {
740 *isrp = 0;
741 return (0);
742 }
743 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
744 case ISPR2HST_ROM_MBX_OK:
745 case ISPR2HST_ROM_MBX_FAIL:
746 case ISPR2HST_MBX_OK:
747 case ISPR2HST_MBX_FAIL:
748 case ISPR2HST_ASYNC_EVENT:
749 *isrp = r2hisr & 0xffff;
750 *mbox0p = (r2hisr >> 16);
751 *semap = 1;
752 return (1);
753 case ISPR2HST_RIO_16:
754 *isrp = r2hisr & 0xffff;
755 *mbox0p = ASYNC_RIO1;
756 *semap = 1;
757 return (1);
758 case ISPR2HST_FPOST:
759 *isrp = r2hisr & 0xffff;
760 *mbox0p = ASYNC_CMD_CMPLT;
761 *semap = 1;
762 return (1);
763 case ISPR2HST_FPOST_CTIO:
764 *isrp = r2hisr & 0xffff;
765 *mbox0p = ASYNC_CTIO_DONE;
766 *semap = 1;
767 return (1);
768 case ISPR2HST_RSPQ_UPDATE:
769 *isrp = r2hisr & 0xffff;
770 *mbox0p = 0;
771 *semap = 0;
772 return (1);
773 default:
774 return (0);
775 }
776 }
777 #endif
778
/*
 * Read a chip register.  SXP-block registers are only visible while
 * the BIU_PCI_CONF1_SXP bit is set in BIU_CONF1, so page the block in
 * around the access and restore the prior CONF1 value afterward.
 * Callers accessing the SXP block are assumed to have paused the RISC.
 */
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/* restore the previous register page */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}
800
/*
 * Write a chip register; mirror image of isp_pci_rd_reg(), with the
 * same SXP-block paging through BIU_CONF1.
 */
static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/* restore the previous register page */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
820
821 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
/*
 * Register read for 1080/1240/1280/12160, which bank-switch between
 * two SXP pages and a DMA page via BIU_CONF1: select the wanted bank,
 * do the access, then restore the saved CONF1 value.
 */
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	/*
	 * NOTE(review): 'oc' doubles as the "CONF1 was modified" flag, so
	 * if CONF1 happened to read back as 0 the restore below is skipped
	 * even though the bank bits were changed — matches the write-side
	 * twin below; confirm against upstream before altering.
	 */
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}
852
/*
 * Register write for 1080/1240/1280/12160; mirror image of
 * isp_pci_rd_reg_1080(), with the same BIU_CONF1 bank switching.
 */
static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	/* see NOTE(review) in the read-side twin about oc == 0 */
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}
882 #endif
883
/*
 * Allocate the DMA resources the driver core needs: the transfer
 * bookkeeping array, one DMA map per command slot, the request and
 * result queues, and — for Fibre Channel chips — the scratch area.
 * Idempotent: returns immediately if already set up.
 * Returns 0 on success, 1 on failure.
 */
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dma_tag_t dmat = isp->isp_dmatag;
	bus_dma_segment_t sg;
	bus_size_t len;
	fcparam *fcp;
	int rs, i;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	/* Per-command pointer array used to map handles back to commands. */
	len = isp->isp_maxcmds * sizeof (XS_T *);
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot malloc xflist array");
		return (1);
	}
	memset(isp->isp_xflist, 0, len);
	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
	pcs->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->pci_xfer_dmap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot malloc dma map array");
		return (1);
	}
	/* One transfer map per command slot, sized for a MAXPHYS transfer. */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(dmat, MAXPHYS, (MAXPHYS / PAGE_SIZE) + 1,
		    MAXPHYS, 0, BUS_DMA_NOWAIT, &pcs->pci_xfer_dmap[i])) {
			isp_prt(isp, ISP_LOGERR, "cannot create dma maps");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		/* unwind the maps created so far */
		while (--i >= 0) {
			bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(pcs->pci_xfer_dmap, M_DEVBUF);
		isp->isp_xflist = NULL;
		pcs->pci_xfer_dmap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(isp->isp_dmatag, &sg, rs, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}

	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rqdmap) || bus_dmamap_load(dmat, isp->isp_rqdmap,
	    (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&isp->isp_result,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_rsdmap) || bus_dmamap_load(isp->isp_dmatag,
	    isp->isp_rsdmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

	if (IS_SCSI(isp)) {
		/* parallel SCSI chips need no scratch area */
		return (0);
	}

	/* FC chips: mailbox scratch area for firmware data exchange. */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
	    BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&fcp->isp_scratch,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		goto dmafail;
	}
	if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &isp->isp_scdmap) || bus_dmamap_load(dmat,
	    isp->isp_scdmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT)) {
		goto dmafail;
	}
	fcp->isp_scdma = isp->isp_scdmap->dm_segs[0].ds_addr;
	return (0);
dmafail:
	isp_prt(isp, ISP_LOGERR, "mailbox dma setup failure");
	/*
	 * NOTE(review): this path releases the per-command maps and the
	 * xflist only; queue memory/maps already allocated earlier in this
	 * call are not unwound on a later-stage failure — confirm whether
	 * that leak is acceptable (attach aborts after this anyway).
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
	}
	free(isp->isp_xflist, M_DEVBUF);
	free(pcs->pci_xfer_dmap, M_DEVBUF);
	isp->isp_xflist = NULL;
	pcs->pci_xfer_dmap = NULL;
	return (1);
}
998
999 static int
1000 isp_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, ispreq_t *rq,
1001 u_int16_t *nxtip, u_int16_t optr)
1002 {
1003 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1004 bus_dmamap_t dmap;
1005 u_int16_t starti = isp->isp_reqidx, nxti = *nxtip;
1006 ispreq_t *qep;
1007 int segcnt, seg, error, ovseg, seglim, drq;
1008
1009 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
1010 dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
1011 if (xs->datalen == 0) {
1012 rq->req_seg_count = 1;
1013 goto mbxsync;
1014 }
1015 if (xs->xs_control & XS_CTL_DATA_IN) {
1016 drq = REQFLAG_DATA_IN;
1017 } else {
1018 drq = REQFLAG_DATA_OUT;
1019 }
1020
1021 if (IS_FC(isp)) {
1022 seglim = ISP_RQDSEG_T2;
1023 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
1024 ((ispreqt2_t *)rq)->req_flags |= drq;
1025 } else {
1026 rq->req_flags |= drq;
1027 if (XS_CDBLEN(xs) > 12) {
1028 seglim = 0;
1029 } else {
1030 seglim = ISP_RQDSEG;
1031 }
1032 }
1033 error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
1034 NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
1035 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
1036 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
1037 if (error) {
1038 isp_prt(isp, ISP_LOGWARN, "unable to load dma (%d)", error);
1039 XS_SETERR(xs, HBA_BOTCH);
1040 if (error == EAGAIN || error == ENOMEM)
1041 return (CMD_EAGAIN);
1042 else
1043 return (CMD_COMPLETE);
1044 }
1045
1046 segcnt = dmap->dm_nsegs;
1047
1048 isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
1049 xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
1050 "write from", xs->data, segcnt);
1051
1052 for (seg = 0, rq->req_seg_count = 0;
1053 seglim && seg < segcnt && rq->req_seg_count < seglim;
1054 seg++, rq->req_seg_count++) {
1055 if (IS_FC(isp)) {
1056 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1057 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1058 dmap->dm_segs[seg].ds_len;
1059 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1060 dmap->dm_segs[seg].ds_addr;
1061 } else {
1062 rq->req_dataseg[rq->req_seg_count].ds_count =
1063 dmap->dm_segs[seg].ds_len;
1064 rq->req_dataseg[rq->req_seg_count].ds_base =
1065 dmap->dm_segs[seg].ds_addr;
1066 }
1067 isp_prt(isp, ISP_LOGDEBUG2, "seg0.[%d]={0x%lx,%lu}",
1068 rq->req_seg_count, (long) dmap->dm_segs[seg].ds_addr,
1069 (unsigned long) dmap->dm_segs[seg].ds_len);
1070 }
1071
1072 if (seg == segcnt) {
1073 goto dmasync;
1074 }
1075
1076 do {
1077 u_int16_t onxti;
1078 ispcontreq_t *crq, *cqe, local;
1079
1080 crq = &local;
1081
1082 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1083 onxti = nxti;
1084 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
1085 if (nxti == optr) {
1086 isp_prt(isp, /* ISP_LOGDEBUG0 */ ISP_LOGERR, "Request Queue Overflow++");
1087 bus_dmamap_unload(isp->isp_dmatag, dmap);
1088 XS_SETERR(xs, HBA_BOTCH);
1089 return (CMD_EAGAIN);
1090 }
1091 rq->req_header.rqs_entry_count++;
1092 memset((void *)crq, 0, sizeof (*crq));
1093 crq->req_header.rqs_entry_count = 1;
1094 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1095
1096 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
1097 rq->req_seg_count++, seg++, ovseg++) {
1098 crq->req_dataseg[ovseg].ds_count =
1099 dmap->dm_segs[seg].ds_len;
1100 crq->req_dataseg[ovseg].ds_base =
1101 dmap->dm_segs[seg].ds_addr;
1102 isp_prt(isp, ISP_LOGDEBUG2, "seg%d.[%d]={0x%lx,%lu}",
1103 rq->req_header.rqs_entry_count - 1,
1104 rq->req_seg_count, (long)dmap->dm_segs[seg].ds_addr,
1105 (unsigned long) dmap->dm_segs[seg].ds_len);
1106 }
1107 isp_put_cont_req(isp, crq, cqe);
1108 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
1109 } while (seg < segcnt);
1110
1111
1112 dmasync:
1113 bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1114 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
1115 BUS_DMASYNC_PREWRITE);
1116
1117 mbxsync:
1118 switch (rq->req_header.rqs_entry_type) {
1119 case RQSTYPE_REQUEST:
1120 isp_put_request(isp, rq, qep);
1121 break;
1122 case RQSTYPE_CMDONLY:
1123 isp_put_extended_request(isp, (ispextreq_t *)rq,
1124 (ispextreq_t *)qep);
1125 break;
1126 case RQSTYPE_T2RQS:
1127 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
1128 break;
1129 }
1130 *nxtip = nxti;
1131 return (CMD_QUEUED);
1132 }
1133
1134 static int
1135 isp_pci_intr(void *arg)
1136 {
1137 u_int16_t isr, sema, mbox;
1138 struct ispsoftc *isp = arg;
1139
1140 isp->isp_intcnt++;
1141 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1142 isp->isp_intbogus++;
1143 return (0);
1144 } else {
1145 isp->isp_osinfo.onintstack = 1;
1146 isp_intr(isp, isr, sema, mbox);
1147 isp->isp_osinfo.onintstack = 0;
1148 return (1);
1149 }
1150 }
1151
1152 static void
1153 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
1154 {
1155 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1156 bus_dmamap_t dmap = pcs->pci_xfer_dmap[isp_handle_index(handle)];
1157 bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
1158 xs->xs_control & XS_CTL_DATA_IN ?
1159 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1160 bus_dmamap_unload(isp->isp_dmatag, dmap);
1161 }
1162
1163 static void
1164 isp_pci_reset1(struct ispsoftc *isp)
1165 {
1166 /* Make sure the BIOS is disabled */
1167 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1168 if (isp->isp_osinfo.no_mbox_ints == 0) {
1169 ENABLE_INTS(isp);
1170 }
1171
1172 }
1173
/*
 * Dump the chip's bus-interface, DMA, SXP and mailbox registers to the
 * console for debugging, prefixed by an optional caller-supplied message.
 * NOTE(review): register reads here may have device side effects, so the
 * read order is deliberately left exactly as written.
 */
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", isp->isp_name, msg);
	/* Bus-interface configuration/status: register name differs by family. */
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		/*
		 * Pause the RISC processor while reading the DMA and SXP
		 * registers, then release it afterwards.
		 */
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
			ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
			ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
			ISP_READ(isp, SXP_INTERRUPT),
			ISP_READ(isp, SXP_GROSS_ERR),
			ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	/* Outgoing mailbox registers 0-4. */
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	/* PCI configuration-space command/status word. */
	printf("    PCI Status Command/Status=%x\n",
	    pci_conf_read(pcs->pci_pc, pcs->pci_tag, PCI_COMMAND_STATUS_REG));
}
1210