/* $NetBSD: isp_pci.c,v 1.82 2002/08/12 21:33:41 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *      sys/dev/ic/isp.c
 *      sys/dev/ic/isp_inline.h
 *      sys/dev/ic/isp_netbsd.c
 *      sys/dev/ic/isp_netbsd.h
 *      sys/dev/ic/isp_target.c
 *      sys/dev/ic/isp_target.h
 *      sys/dev/ic/isp_tpublic.h
 *      sys/dev/ic/ispmbox.h
 *      sys/dev/ic/ispreg.h
 *      sys/dev/ic/ispvar.h
 *      sys/microcode/isp/asm_sbus.h
 *      sys/microcode/isp/asm_1040.h
 *      sys/microcode/isp/asm_1080.h
 *      sys/microcode/isp/asm_12160.h
 *      sys/microcode/isp/asm_2100.h
 *      sys/microcode/isp/asm_2200.h
 *      sys/pci/isp_pci.c
 *      sys/sbus/isp_sbus.c
 *
 * is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with the FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isp_pci.c,v 1.82 2002/08/12 21:33:41 mjacob Exp $");

#include <dev/ic/isp_netbsd.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <uvm/uvm_extern.h>
#include <sys/reboot.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
#endif
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
    u_int16_t *, u_int16_t);
static void isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
static int isp_pci_intr(void *);

#if defined(ISP_DISABLE_1020_SUPPORT)
#define ISP_1040_RISC_CODE      NULL
#else
#define ISP_1040_RISC_CODE      (u_int16_t *) isp_1040_risc_code
#include <dev/microcode/isp/asm_1040.h>
#endif

#if defined(ISP_DISABLE_1080_SUPPORT)
#define ISP_1080_RISC_CODE      NULL
#else
#define ISP_1080_RISC_CODE      (u_int16_t *) isp_1080_risc_code
#include <dev/microcode/isp/asm_1080.h>
#endif

#if defined(ISP_DISABLE_12160_SUPPORT)
#define ISP_12160_RISC_CODE     NULL
#else
#define ISP_12160_RISC_CODE     (u_int16_t *) isp_12160_risc_code
#include <dev/microcode/isp/asm_12160.h>
#endif

#if defined(ISP_DISABLE_2100_SUPPORT)
#define ISP_2100_RISC_CODE      NULL
#else
#define ISP_2100_RISC_CODE      (u_int16_t *) isp_2100_risc_code
#include <dev/microcode/isp/asm_2100.h>
#endif

#if defined(ISP_DISABLE_2200_SUPPORT)
#define ISP_2200_RISC_CODE      NULL
#else
#define ISP_2200_RISC_CODE      (u_int16_t *) isp_2200_risc_code
#include <dev/microcode/isp/asm_2200.h>
#endif

#if defined(ISP_DISABLE_2300_SUPPORT)
#define ISP_2300_RISC_CODE      NULL
#else
#define ISP_2300_RISC_CODE      (u_int16_t *) isp_2300_risc_code
#include <dev/microcode/isp/asm_2300.h>
#endif

#ifndef ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_1040_RISC_CODE,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
#endif

#ifndef ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg_1080,
        isp_pci_wr_reg_1080,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_1080_RISC_CODE,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
#endif

#ifndef ISP_DISABLE_12160_SUPPORT
static struct ispmdvec mdvec_12160 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg_1080,
        isp_pci_wr_reg_1080,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_12160_RISC_CODE,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
#endif

#ifndef ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_2100_RISC_CODE
};
#endif

#ifndef ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_2200_RISC_CODE
};
#endif

#ifndef ISP_DISABLE_2300_SUPPORT
static struct ispmdvec mdvec_2300 = {
        isp_pci_rd_isr_2300,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        ISP_2300_RISC_CODE
};
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC               0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020      0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080      0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240      0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280      0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160     0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100      0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200      0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300      0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312      0x2312
#endif

#define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080      \
        ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240      \
        ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280      \
        ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160     \
        ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100      \
        ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200      \
        ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300      \
        ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312      \
        ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define IO_MAP_REG      0x10
#define MEM_MAP_REG     0x14
#define PCIR_ROMADDR    0x30

#define PCI_DFLT_LTNCY  0x40
#define PCI_DFLT_LNSZ   0x10


static int isp_pci_probe(struct device *, struct cfdata *, void *);
static void isp_pci_attach(struct device *, struct device *, void *);

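/*
 * Per-instance softc.  The generic ispsoftc must come first so that the
 * driver core and these routines can cast freely between the two.  The
 * remaining fields hold the PCI configuration handles, the bus-space
 * mapping of the register window, the per-command DMA maps allocated in
 * isp_pci_mbxdma(), the interrupt handle, and the per-block register
 * offsets consumed by IspVirt2Off() below.
 */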
struct isp_pcisoftc {
        struct ispsoftc         pci_isp;
        pci_chipset_tag_t       pci_pc;
        pcitag_t                pci_tag;
        bus_space_tag_t         pci_st;
        bus_space_handle_t      pci_sh;
        bus_dmamap_t            *pci_xfer_dmap;
        void                    *pci_ih;
        int16_t                 pci_poff[_NREG_BLKS];
};

struct cfattach isp_pci_ca = {
        sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

#ifdef DEBUG
const char vstring[] =
    "Qlogic ISP Driver, NetBSD (pci) Platform Version %d.%d Core Version %d.%d";
#endif

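/*
 * Match any QLogic ISP PCI function whose vendor/product pair we know,
 * subject to the ISP_DISABLE_*_SUPPORT configuration options.
 */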
static int
isp_pci_probe(struct device *parent, struct cfdata *match, void *aux)
{
        struct pci_attach_args *pa = aux;
        switch (pa->pa_id) {
#ifndef ISP_DISABLE_1020_SUPPORT
        case PCI_QLOGIC_ISP:
                return (1);
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
        case PCI_QLOGIC_ISP1080:
        case PCI_QLOGIC_ISP1240:
        case PCI_QLOGIC_ISP1280:
                return (1);
#endif
#ifndef ISP_DISABLE_12160_SUPPORT
        case PCI_QLOGIC_ISP12160:
                return (1);
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
        case PCI_QLOGIC_ISP2100:
                return (1);
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
        case PCI_QLOGIC_ISP2200:
                return (1);
#endif
#ifndef ISP_DISABLE_2300_SUPPORT
        case PCI_QLOGIC_ISP2300:
        case PCI_QLOGIC_ISP2312:
                return (1);
#endif
        default:
                return (0);
        }
}

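/*
 * Attach: map the register window (preferring memory space over I/O
 * space), pick the per-chip mdvec and allocate the sdparam/fcparam
 * area (two copies for dual channel boards), fix up the PCI command,
 * latency, cache line and ROM registers, hook up the interrupt, and
 * then hand off to the common code via isp_reset(), isp_init() and
 * isp_attach().
 */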
static void
isp_pci_attach(struct device *parent, struct device *self, void *aux)
{
#ifdef DEBUG
        static char oneshot = 1;
#endif
        static const char nomem[] = "\n%s: no mem for sdparam table\n";
        u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
        struct pci_attach_args *pa = aux;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
        struct ispsoftc *isp = &pcs->pci_isp;
        bus_space_tag_t st, iot, memt;
        bus_space_handle_t sh, ioh, memh;
        pci_intr_handle_t ih;
        char *dstring;
        const char *intrstr;
        int ioh_valid, memh_valid;

        ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
            PCI_MAPREG_TYPE_IO, 0,
            &iot, &ioh, NULL, NULL) == 0);
        memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
            PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
            &memt, &memh, NULL, NULL) == 0);
        if (memh_valid) {
                st = memt;
                sh = memh;
        } else if (ioh_valid) {
                st = iot;
                sh = ioh;
        } else {
                printf(": unable to map device registers\n");
                return;
        }
        dstring = "\n";

        pcs->pci_st = st;
        pcs->pci_sh = sh;
        pcs->pci_pc = pa->pa_pc;
        pcs->pci_tag = pa->pa_tag;
        pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
        pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
        pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
        rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;

#ifndef ISP_DISABLE_1020_SUPPORT
        if (pa->pa_id == PCI_QLOGIC_ISP) {
                dstring = ": QLogic 1020 Ultra Wide SCSI HBA\n";
                isp->isp_mdvec = &mdvec;
                isp->isp_type = ISP_HA_SCSI_UNKNOWN;
                isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, sizeof (sdparam));
        }
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
        if (pa->pa_id == PCI_QLOGIC_ISP1080) {
                dstring = ": QLogic 1080 Ultra-2 Wide SCSI HBA\n";
                isp->isp_mdvec = &mdvec_1080;
                isp->isp_type = ISP_HA_SCSI_1080;
                isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, sizeof (sdparam));
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pa->pa_id == PCI_QLOGIC_ISP1240) {
                dstring = ": QLogic Dual Channel Ultra Wide SCSI HBA\n";
                isp->isp_mdvec = &mdvec_1080;
                isp->isp_type = ISP_HA_SCSI_1240;
                isp->isp_param =
                    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, 2 * sizeof (sdparam));
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pa->pa_id == PCI_QLOGIC_ISP1280) {
                dstring = ": QLogic Dual Channel Ultra-2 Wide SCSI HBA\n";
                isp->isp_mdvec = &mdvec_1080;
                isp->isp_type = ISP_HA_SCSI_1280;
                isp->isp_param =
                    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, 2 * sizeof (sdparam));
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
#endif
#ifndef ISP_DISABLE_12160_SUPPORT
        if (pa->pa_id == PCI_QLOGIC_ISP12160) {
                dstring = ": QLogic Dual Channel Ultra-3 Wide SCSI HBA\n";
                isp->isp_mdvec = &mdvec_12160;
                isp->isp_type = ISP_HA_SCSI_12160;
                isp->isp_param =
                    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, 2 * sizeof (sdparam));
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
        if (pa->pa_id == PCI_QLOGIC_ISP2100) {
                dstring = ": QLogic FC-AL HBA\n";
                isp->isp_mdvec = &mdvec_2100;
                isp->isp_type = ISP_HA_FC_2100;
                isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, sizeof (fcparam));
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2100_OFF;
                if (rev < 3) {
                        /*
                         * XXX: Need to get the actual revision
                         * XXX: number of the 2100 FB. At any rate,
                         * XXX: lower the cache line size for early
                         * XXX: revision boards.
                         */
                        linesz = 1;
                }
        }
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
        if (pa->pa_id == PCI_QLOGIC_ISP2200) {
                dstring = ": QLogic FC-AL and Fabric HBA\n";
                isp->isp_mdvec = &mdvec_2200;
                isp->isp_type = ISP_HA_FC_2200;
                isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, sizeof (fcparam));
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2100_OFF;
                data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
        }
#endif
#ifndef ISP_DISABLE_2300_SUPPORT
        if (pa->pa_id == PCI_QLOGIC_ISP2300 ||
            pa->pa_id == PCI_QLOGIC_ISP2312) {
                isp->isp_mdvec = &mdvec_2300;
                if (pa->pa_id == PCI_QLOGIC_ISP2300) {
                        dstring = ": QLogic FC-AL and 2Gbps Fabric HBA\n";
                        isp->isp_type = ISP_HA_FC_2300;
                } else {
                        dstring =
                            ": QLogic Dual Port FC-AL and 2Gbps Fabric HBA\n";
                        isp->isp_type = ISP_HA_FC_2312;
                        isp->isp_port = pa->pa_function;
                }
                isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
                if (isp->isp_param == NULL) {
                        printf(nomem, isp->isp_name);
                        return;
                }
                memset(isp->isp_param, 0, sizeof (fcparam));
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2300_OFF;
                data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
        }
#endif
        /*
         * Set up logging levels.
         */
#ifdef ISP_LOGDEFAULT
        isp->isp_dblev = ISP_LOGDEFAULT;
#else
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
        if (bootverbose)
                isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
#ifdef SCSIDEBUG
        isp->isp_dblev |= ISP_LOGDEBUG0|ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#endif
        if (isp->isp_dblev & ISP_LOGCONFIG) {
                printf("\n");
        } else {
                printf(dstring);
        }

#ifdef DEBUG
        if (oneshot) {
                oneshot = 0;
                isp_prt(isp, ISP_LOGCONFIG, vstring,
                    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
                    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
        }
#endif

        isp->isp_dmatag = pa->pa_dmat;
        isp->isp_revision = rev;

        /*
         * Make sure that the command register is set up sanely (bus
         * mastering and memory-write-invalidate enabled).
         */
        data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
        data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

        /*
         * Not so sure about these, but I think it's important that they
         * get enabled.
         */
        data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
        if (IS_2300(isp)) {     /* per QLogic errata */
                data &= ~PCI_COMMAND_INVALIDATE_ENABLE;
        }
        if (IS_23XX(isp)) {
                isp->isp_touched = 1;
        }
        pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

        /*
         * Make sure that the latency timer and cache line size are set
         * sensibly, and that the expansion ROM is disabled.
         */
        data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
        data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
        data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
        data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
        data |= (linesz << PCI_CACHELINE_SHIFT);
        pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

        data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
        data &= ~1;
        pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

        if (pci_intr_map(pa, &ih)) {
                printf("%s: couldn't map interrupt\n", isp->isp_name);
                free(isp->isp_param, M_DEVBUF);
                return;
        }
        intrstr = pci_intr_string(pa->pa_pc, ih);
        if (intrstr == NULL)
                intrstr = "<I dunno>";
        pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
            isp_pci_intr, isp);
        if (pcs->pci_ih == NULL) {
                printf("%s: couldn't establish interrupt at %s\n",
                    isp->isp_name, intrstr);
                free(isp->isp_param, M_DEVBUF);
                return;
        }

        printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

        if (IS_FC(isp)) {
                DEFAULT_NODEWWN(isp) = 0x400000007F000002;
                DEFAULT_PORTWWN(isp) = 0x400000007F000002;
        }

        isp->isp_confopts = self->dv_cfdata->cf_flags;
        isp->isp_role = ISP_DEFAULT_ROLES;
        ISP_LOCK(isp);
        isp->isp_osinfo.no_mbox_ints = 1;
        isp_reset(isp);
        if (isp->isp_state != ISP_RESETSTATE) {
                ISP_UNLOCK(isp);
                free(isp->isp_param, M_DEVBUF);
                return;
        }
        ENABLE_INTS(isp);
        isp_init(isp);
        if (isp->isp_state != ISP_INITSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                free(isp->isp_param, M_DEVBUF);
                return;
        }
        /*
         * Do platform attach.
         */
        ISP_UNLOCK(isp);
        isp_attach(isp);
        if (isp->isp_state != ISP_RUNSTATE) {
                ISP_LOCK(isp);
                isp_uninit(isp);
                free(isp->isp_param, M_DEVBUF);
                ISP_UNLOCK(isp);
        }
}

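/*
 * IspVirt2Off translates a "virtual" register handle (block selector in
 * the upper bits, register offset in the low byte) into an offset within
 * the mapped PCI register window, using the per-block offsets stored in
 * pci_poff[] at attach time.  BXR2/BXW2 are 16-bit bus_space accessors
 * for that window.
 */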
#define IspVirt2Off(a, x) \
        (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
        _BLK_REG_SHFT] + ((x) & 0xff))

#define BXR2(pcs, off) \
        bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v) \
        bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)

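/*
 * Read a 16-bit register repeatedly until two successive reads agree (or
 * we give up).  Returns zero and the stable value on success, nonzero on
 * failure.  Only used on the ISP2100, whose reads apparently can return
 * transiently inconsistent values.
 */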
static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int16_t val0, val1;
        int i = 0;

        do {
                val0 = BXR2(pcs, IspVirt2Off(isp, off));
                val1 = BXR2(pcs, IspVirt2Off(isp, off));
        } while (val0 != val1 && ++i < 1000);
        if (val0 != val1) {
                return (1);
        }
        *rp = val0;
        return (0);
}

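/*
 * Read and decode the interrupt status for SCSI and pre-2300 FC chips.
 * Returns nonzero if an interrupt is pending, filling in the masked ISR,
 * the semaphore register, and, when the semaphore is set, outgoing
 * mailbox 0.
 */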
static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int16_t isr, sema;

        if (IS_2100(isp)) {
                if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
                        return (0);
                }
                if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
                        return (0);
                }
        } else {
                isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
                sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
        }
        isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
        isr &= INT_PENDING_MASK(isp);
        sema &= BIU_SEMA_LOCK;
        if (isr == 0 && sema == 0) {
                return (0);
        }
        *isrp = isr;
        if ((*semap = sema) != 0) {
                if (IS_2100(isp)) {
                        if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
                                return (0);
                        }
                } else {
                        *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
                }
        }
        return (1);
}

#ifndef ISP_DISABLE_2300_SUPPORT
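/*
 * 2300/2312 variant: these chips report status through the 32-bit
 * RISC-to-host status register, which encodes the interrupt reason in
 * the low half and mailbox 0 in the high half of a single read.
 */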
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int32_t r2hisr;

        if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
                *isrp = 0;
                return (0);
        }
        r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
            IspVirt2Off(pcs, BIU_R2HSTSLO));
        isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
        if ((r2hisr & BIU_R2HST_INTR) == 0) {
                *isrp = 0;
                return (0);
        }
        switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
        case ISPR2HST_ROM_MBX_OK:
        case ISPR2HST_ROM_MBX_FAIL:
        case ISPR2HST_MBX_OK:
        case ISPR2HST_MBX_FAIL:
        case ISPR2HST_ASYNC_EVENT:
                *isrp = r2hisr & 0xffff;
                *mbox0p = (r2hisr >> 16);
                *semap = 1;
                return (1);
        case ISPR2HST_RIO_16:
                *isrp = r2hisr & 0xffff;
                *mbox0p = ASYNC_RIO1;
                *semap = 1;
                return (1);
        case ISPR2HST_FPOST:
                *isrp = r2hisr & 0xffff;
                *mbox0p = ASYNC_CMD_CMPLT;
                *semap = 1;
                return (1);
        case ISPR2HST_FPOST_CTIO:
                *isrp = r2hisr & 0xffff;
                *mbox0p = ASYNC_CTIO_DONE;
                *semap = 1;
                return (1);
        case ISPR2HST_RSPQ_UPDATE:
                *isrp = r2hisr & 0xffff;
                *mbox0p = 0;
                *semap = 0;
                return (1);
        default:
                return (0);
        }
}
#endif

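/*
 * Register accessors for the 1020/1040 and 2x00 chips.  SXP registers
 * are only visible while BIU_PCI_CONF1_SXP is set in BIU_CONF1, so that
 * bit is toggled around the access; the caller is assumed to have paused
 * the RISC processor first.
 */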
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
        u_int16_t rv;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oldconf = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oldconf | BIU_PCI_CONF1_SXP);
        }
        rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        }
        return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oldconf = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oldconf | BIU_PCI_CONF1_SXP);
        }
        BXW2(pcs, IspVirt2Off(isp, regoff), val);
        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        }
}

#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
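/*
 * 1080/1280/12160 variant: these chips bank both SXP channels and the
 * DMA registers behind BIU_CONF1, so the appropriate SXP0/SXP1 or DMA
 * select bit is set for the duration of the access and the previous
 * value is restored afterwards.
 */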
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
        u_int16_t rv, oc = 0;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
            (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
                u_int16_t tc;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                tc = oc & ~BIU_PCI1080_CONF1_DMA;
                if (regoff & SXP_BANK1_SELECT)
                        tc |= BIU_PCI1080_CONF1_SXP1;
                else
                        tc |= BIU_PCI1080_CONF1_SXP0;
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oc | BIU_PCI1080_CONF1_DMA);
        }
        rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        if (oc) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        }
        return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oc = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
            (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
                u_int16_t tc;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                tc = oc & ~BIU_PCI1080_CONF1_DMA;
                if (regoff & SXP_BANK1_SELECT)
                        tc |= BIU_PCI1080_CONF1_SXP1;
                else
                        tc |= BIU_PCI1080_CONF1_SXP0;
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oc | BIU_PCI1080_CONF1_DMA);
        }
        BXW2(pcs, IspVirt2Off(isp, regoff), val);
        if (oc) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        }
}
#endif

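/*
 * One-time DMA setup: allocate the xflist command lookup array and a
 * DMA map per outstanding command, then allocate, map and load the
 * request and result queues and, for Fibre Channel chips, the fcparam
 * scratch area.
 */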
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        bus_dma_tag_t dmat = isp->isp_dmatag;
        bus_dma_segment_t sg;
        bus_size_t len;
        fcparam *fcp;
        int rs, i;

        if (isp->isp_rquest_dma)        /* been here before? */
                return (0);

        len = isp->isp_maxcmds * sizeof (XS_T *);
        isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
        if (isp->isp_xflist == NULL) {
                isp_prt(isp, ISP_LOGERR, "cannot malloc xflist array");
                return (1);
        }
        memset(isp->isp_xflist, 0, len);
        len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
        pcs->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
        if (pcs->pci_xfer_dmap == NULL) {
                free(isp->isp_xflist, M_DEVBUF);
                isp->isp_xflist = NULL;
                isp_prt(isp, ISP_LOGERR, "cannot malloc dma map array");
                return (1);
        }
        for (i = 0; i < isp->isp_maxcmds; i++) {
                if (bus_dmamap_create(dmat, MAXPHYS, (MAXPHYS / PAGE_SIZE) + 1,
                    MAXPHYS, 0, BUS_DMA_NOWAIT, &pcs->pci_xfer_dmap[i])) {
                        isp_prt(isp, ISP_LOGERR, "cannot create dma maps");
                        break;
                }
        }
        if (i < isp->isp_maxcmds) {
                while (--i >= 0) {
                        bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
                }
                free(isp->isp_xflist, M_DEVBUF);
                free(pcs->pci_xfer_dmap, M_DEVBUF);
                isp->isp_xflist = NULL;
                pcs->pci_xfer_dmap = NULL;
                return (1);
        }

        /*
         * Allocate and map the request queue.
         */
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
            BUS_DMA_NOWAIT) ||
            bus_dmamem_map(isp->isp_dmatag, &sg, rs, len,
            (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }

        if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &isp->isp_rqdmap) || bus_dmamap_load(dmat, isp->isp_rqdmap,
            (caddr_t)isp->isp_rquest, len, NULL,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;

        /*
         * Allocate and map the result queue.
         */
        len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
            BUS_DMA_NOWAIT) ||
            bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&isp->isp_result,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }
        if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &isp->isp_rsdmap) || bus_dmamap_load(isp->isp_dmatag,
            isp->isp_rsdmap, (caddr_t)isp->isp_result, len, NULL,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;

        if (IS_SCSI(isp)) {
                return (0);
        }

        fcp = isp->isp_param;
        len = ISP2100_SCRLEN;
        if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
            BUS_DMA_NOWAIT) ||
            bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&fcp->isp_scratch,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
                goto dmafail;
        }
        if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
            &isp->isp_scdmap) || bus_dmamap_load(dmat,
            isp->isp_scdmap, (caddr_t)fcp->isp_scratch, len, NULL,
            BUS_DMA_NOWAIT)) {
                goto dmafail;
        }
        fcp->isp_scdma = isp->isp_scdmap->dm_segs[0].ds_addr;
        return (0);
dmafail:
        isp_prt(isp, ISP_LOGERR, "mailbox dma setup failure");
        for (i = 0; i < isp->isp_maxcmds; i++) {
                bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
        }
        free(isp->isp_xflist, M_DEVBUF);
        free(pcs->pci_xfer_dmap, M_DEVBUF);
        isp->isp_xflist = NULL;
        pcs->pci_xfer_dmap = NULL;
        return (1);
}

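/*
 * Load a command's data buffer into its DMA map and translate the
 * resulting segments into dataseg entries in the request queue entry,
 * spilling into continuation entries when the command needs more
 * segments than the base entry can hold.  The map is synced for the
 * transfer direction and the entry is copied into the queue via the
 * isp_put_*() routines before returning CMD_QUEUED.
 */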
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        bus_dmamap_t dmap;
        u_int16_t starti = isp->isp_reqidx, nxti = *nxtip;
        ispreq_t *qep;
        int segcnt, seg, error, ovseg, seglim, drq;

        qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
        dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
        if (xs->datalen == 0) {
                rq->req_seg_count = 1;
                goto mbxsync;
        }
        if (xs->xs_control & XS_CTL_DATA_IN) {
                drq = REQFLAG_DATA_IN;
        } else {
                drq = REQFLAG_DATA_OUT;
        }

        if (IS_FC(isp)) {
                seglim = ISP_RQDSEG_T2;
                ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
                ((ispreqt2_t *)rq)->req_flags |= drq;
        } else {
                rq->req_flags |= drq;
                if (XS_CDBLEN(xs) > 12) {
                        seglim = 0;
                } else {
                        seglim = ISP_RQDSEG;
                }
        }
        error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
            NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
            BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
            ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
        if (error) {
                isp_prt(isp, ISP_LOGWARN, "unable to load dma (%d)", error);
                XS_SETERR(xs, HBA_BOTCH);
                if (error == EAGAIN || error == ENOMEM)
                        return (CMD_EAGAIN);
                else
                        return (CMD_COMPLETE);
        }

        segcnt = dmap->dm_nsegs;

        isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
            xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
            "write from", xs->data, segcnt);

        for (seg = 0, rq->req_seg_count = 0;
            seglim && seg < segcnt && rq->req_seg_count < seglim;
            seg++, rq->req_seg_count++) {
                if (IS_FC(isp)) {
                        ispreqt2_t *rq2 = (ispreqt2_t *)rq;
                        rq2->req_dataseg[rq2->req_seg_count].ds_count =
                            dmap->dm_segs[seg].ds_len;
                        rq2->req_dataseg[rq2->req_seg_count].ds_base =
                            dmap->dm_segs[seg].ds_addr;
                } else {
                        rq->req_dataseg[rq->req_seg_count].ds_count =
                            dmap->dm_segs[seg].ds_len;
                        rq->req_dataseg[rq->req_seg_count].ds_base =
                            dmap->dm_segs[seg].ds_addr;
                }
                isp_prt(isp, ISP_LOGDEBUG2, "seg0.[%d]={0x%lx,%lu}",
                    rq->req_seg_count, (long) dmap->dm_segs[seg].ds_addr,
                    (unsigned long) dmap->dm_segs[seg].ds_len);
        }

        if (seg == segcnt) {
                goto dmasync;
        }

        do {
                u_int16_t onxti;
                ispcontreq_t *crq, *cqe, local;

                crq = &local;

                cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                onxti = nxti;
                nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
                if (nxti == optr) {
                        isp_prt(isp, /* ISP_LOGDEBUG0 */ ISP_LOGERR,
                            "Request Queue Overflow++");
                        bus_dmamap_unload(isp->isp_dmatag, dmap);
                        XS_SETERR(xs, HBA_BOTCH);
                        return (CMD_EAGAIN);
                }
                rq->req_header.rqs_entry_count++;
                memset((void *)crq, 0, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

                for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
                    rq->req_seg_count++, seg++, ovseg++) {
                        crq->req_dataseg[ovseg].ds_count =
                            dmap->dm_segs[seg].ds_len;
                        crq->req_dataseg[ovseg].ds_base =
                            dmap->dm_segs[seg].ds_addr;
                        isp_prt(isp, ISP_LOGDEBUG2, "seg%d.[%d]={0x%lx,%lu}",
                            rq->req_header.rqs_entry_count - 1,
                            rq->req_seg_count, (long)dmap->dm_segs[seg].ds_addr,
                            (unsigned long) dmap->dm_segs[seg].ds_len);
                }
                isp_put_cont_req(isp, crq, cqe);
                MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
        } while (seg < segcnt);

dmasync:
        bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
            (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
            BUS_DMASYNC_PREWRITE);

mbxsync:
        switch (rq->req_header.rqs_entry_type) {
        case RQSTYPE_REQUEST:
                isp_put_request(isp, rq, qep);
                break;
        case RQSTYPE_CMDONLY:
                isp_put_extended_request(isp, (ispextreq_t *)rq,
                    (ispextreq_t *)qep);
                break;
        case RQSTYPE_T2RQS:
                isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
                break;
        }
        *nxtip = nxti;
        return (CMD_QUEUED);
}

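/*
 * PCI interrupt handler: ask the chip (via the mdvec isr reader) whether
 * it really interrupted; if so, dispatch to the common isp_intr() with
 * the decoded ISR, semaphore and mailbox values.
 */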
static int
isp_pci_intr(void *arg)
{
        u_int16_t isr, sema, mbox;
        struct ispsoftc *isp = arg;

        isp->isp_intcnt++;
        if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
                isp->isp_intbogus++;
                return (0);
        } else {
                isp->isp_osinfo.onintstack = 1;
                isp_intr(isp, isr, sema, mbox);
                isp->isp_osinfo.onintstack = 0;
                return (1);
        }
}

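/*
 * Post-command cleanup: sync the data DMA map for the direction of the
 * completed transfer and unload it so the map can be reused.
 */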
static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        bus_dmamap_t dmap = pcs->pci_xfer_dmap[isp_handle_index(handle)];
        bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
            xs->xs_control & XS_CTL_DATA_IN ?
            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(isp->isp_dmatag, dmap);
}

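/*
 * Chip-specific reset fixup: keep the chip's on-board BIOS disabled and
 * re-enable interrupts if mailbox interrupts are in use (no_mbox_ints
 * is clear).
 */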
static void
isp_pci_reset1(struct ispsoftc *isp)
{
        /* Make sure the BIOS is disabled */
        isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
        if (isp->isp_osinfo.no_mbox_ints == 0) {
                ENABLE_INTS(isp);
        }
}

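/*
 * Debug aid: dump the BIU, DMA, SXP and mailbox registers along with
 * the PCI command/status word.  For SCSI chips the RISC processor is
 * paused around the DMA/SXP reads.
 */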
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        if (msg)
                printf("%s: %s\n", isp->isp_name, msg);
        if (IS_SCSI(isp))
                printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
        else
                printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
        printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
            ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
        printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

        if (IS_SCSI(isp)) {
                ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
                printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
                    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
                    ISP_READ(isp, CDMA_FIFO_STS));
                printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
                    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
                    ISP_READ(isp, DDMA_FIFO_STS));
                printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
                    ISP_READ(isp, SXP_INTERRUPT),
                    ISP_READ(isp, SXP_GROSS_ERR),
                    ISP_READ(isp, SXP_PINS_CTRL));
                ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
        }
        printf(" mbox regs: %x %x %x %x %x\n",
            ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
            ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
            ISP_READ(isp, OUTMAILBOX4));
        printf(" PCI Status Command/Status=%x\n",
            pci_conf_read(pcs->pci_pc, pcs->pci_tag, PCI_COMMAND_STATUS_REG));
}