/* $NetBSD: isp_pci.c,v 1.52 2000/07/05 22:12:23 mjacob Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * Matthew Jacob (mjacob (at) nas.nasa.gov)
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));

#ifndef ISP_CODE_ORG
#define ISP_CODE_ORG 0x1000
#endif

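/*
 * Pull in the RISC firmware image for each chip family unless support
 * for that family has been configured out; a disabled family gets a
 * NULL firmware pointer instead.
 */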
#if defined(ISP_DISABLE_1020_SUPPORT)
#define ISP_1040_RISC_CODE NULL
#else
#define ISP_1040_RISC_CODE isp_1040_risc_code
#include <dev/microcode/isp/asm_1040.h>
#endif

#if defined(ISP_DISABLE_1080_SUPPORT)
#define ISP_1080_RISC_CODE NULL
#else
#define ISP_1080_RISC_CODE isp_1080_risc_code
#include <dev/microcode/isp/asm_1080.h>
#endif

#if defined(ISP_DISABLE_12160_SUPPORT)
#define ISP_12160_RISC_CODE NULL
#else
#define ISP_12160_RISC_CODE isp_12160_risc_code
#include <dev/microcode/isp/asm_12160.h>
#endif

#if defined(ISP_DISABLE_2100_SUPPORT)
#define ISP_2100_RISC_CODE NULL
#else
#define ISP_2100_RISC_CODE isp_2100_risc_code
#include <dev/microcode/isp/asm_2100.h>
#endif

#if defined(ISP_DISABLE_2200_SUPPORT)
#define ISP_2200_RISC_CODE NULL
#else
#define ISP_2200_RISC_CODE isp_2200_risc_code
#include <dev/microcode/isp/asm_2200.h>
#endif

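/*
 * Machine dependent vectors handed to the core driver: per-family
 * register accessors, DMA routines, firmware image and default BIU
 * configuration.  The parallel SCSI parts enable bursting and the
 * 64-byte FIFO threshold; the Fibre Channel parts leave the BIU
 * configuration word at zero.
 */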
#ifndef ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_12160_SUPPORT
static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_12160_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

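/*
 * Composite IDs in the same layout as pa->pa_id: product code in the
 * upper 16 bits, vendor code in the lower 16, so the probe and attach
 * routines can compare against the attach arguments directly.
 */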
#define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14
#define PCIR_ROMADDR	0x30

#define PCI_DFLT_LTNCY	0x40
#define PCI_DFLT_LNSZ	0x10


static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

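/*
 * PCI-specific softc.  The common ispsoftc must come first so the core
 * code and this file can cast between the two.  The remaining members
 * carry the bus space tag/handle, the DMA tag and maps, the interrupt
 * cookie and the per-block register offsets used by the accessors below.
 */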
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	pci_chipset_tag_t	pci_pc;
	pcitag_t		pci_tag;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	bus_dma_tag_t		pci_dmat;
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;
	bus_dmamap_t		pci_result_dmap;
	bus_dmamap_t		*pci_xfer_dmap;
	void			*pci_ih;
	int16_t			pci_poff[_NREG_BLKS];
};

struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

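/*
 * Match any QLogic ISP PCI ID whose support has not been configured out.
 */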
static int
isp_pci_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	switch (pa->pa_id) {
#ifndef ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		return (1);
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
	case PCI_QLOGIC_ISP1240:
	case PCI_QLOGIC_ISP1280:
		return (1);
#endif
#ifndef ISP_DISABLE_12160_SUPPORT
	case PCI_QLOGIC_ISP12160:
		return (1);
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		return (1);
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	case PCI_QLOGIC_ISP2200:
		return (1);
#endif
	default:
		return (0);
	}
}


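/*
 * Attach: map the register space (preferring memory space), record the
 * per-chip register block offsets, allocate the sd/fc parameter area,
 * sanitize the PCI command, latency timer, cache line size and ROM
 * settings, hook up the interrupt, then reset and initialize the chip,
 * create the per-command DMA maps and hand off to the bus-independent
 * attach code.
 */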
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef DEBUG
	static char oneshot = 1;
#endif
	static char *nomem = "%s: no mem for parameter table\n";
	u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;

#ifndef ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1280) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_12160_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP12160) {
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (rev < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower the cache line size for early
			 * XXX: revision boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif
	isp->isp_revision = rev;

	/*
	 * Make sure the command register is set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these, but I think it's important that they
	 * get enabled.
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure the latency timer and cache line size are set sanely,
	 * and that the expansion ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		long foo;
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	isp->isp_confopts = self->dv_cfdata->cf_flags;
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}

	ENABLE_INTS(isp);

	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}

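/*
 * Register accessors for the 1020/1040 and the FC chips.  A register
 * offset encodes its block in the upper bits; pci_poff[] maps that
 * block onto an offset within the PCI register window.  Access to the
 * SXP block is bracketed by setting and restoring BIU_PCI_CONF1_SXP in
 * BIU_CONF1 (with a short delay), and assumes the caller has already
 * paused the RISC processor.
 */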
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
}

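/*
 * The 1080/1240/1280 and 12160 multiplex the SXP and DMA register banks
 * through BIU_CONF1 as well, so these variants select the appropriate
 * bank (SXP0/SXP1 or DMA) before the access and restore the saved
 * BIU_CONF1 contents afterwards.
 */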
#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/*
	 * Restoring only when oc is nonzero is okay because BIU_CONF1
	 * never reads back as zero.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	u_int16_t oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/*
	 * Restoring only when oc is nonzero is okay because BIU_CONF1
	 * never reads back as zero.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
}
#endif

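/*
 * Allocate the DMA resources used for mailbox-driven work: the software
 * xflist and per-command DMA map arrays, the request and result queues,
 * and (for Fibre Channel) the scratch area.  The bus addresses of the
 * queues are recorded for the chip; this is a no-op if it has already
 * been done.
 */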
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
	pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->pci_xfer_dmap == NULL) {
		printf("%s: cannot malloc dma map array\n", isp->isp_name);
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);

	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	if (IS_SCSI(isp)) {
		return (0);
	}

	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}

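/*
 * Load the data buffer for a command into its DMA map and translate the
 * segments into the request entry.  Segments that do not fit in the
 * base entry spill into RQSTYPE_DATASEG continuation entries on the
 * request queue, advancing the input queue pointer.  The buffer is then
 * synced for the transfer direction and the request entry swizzled and
 * synced before it is handed to the chip.
 */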
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap;
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	dmap = pci->pci_xfer_dmap[isp_handle_index(rq->req_handle)];

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		rq->req_flags |= drq;
		if (XS_CDBLEN(xs) > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	do {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}

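/*
 * Interrupt handler wrapper: make the result queue visible to the CPU
 * before letting the common code walk it.
 */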
static int
isp_pci_intr(arg)
	void *arg;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	return (isp_intr(arg));
}

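/*
 * Undo isp_pci_dmasetup for a completed command: sync the buffer for
 * the direction of the transfer and unload its DMA map.
 */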
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[isp_handle_index(handle)];
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->xs_control & XS_CTL_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}

static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}