/* $NetBSD: isp_pci.c,v 1.47 1999/12/04 02:54:54 mjacob Exp $ */
2 /*
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4 * Matthew Jacob (mjacob (at) nas.nasa.gov)
5 */
6 /*
7 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <dev/ic/isp_netbsd.h>
34 #include <dev/microcode/isp/asm_pci.h>
35
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/pcidevs.h>
39
/*
 * Forward declarations for the PCI bus glue: register accessors,
 * mailbox/command DMA plumbing, reset/dump hooks, and the interrupt
 * wrapper handed to pci_intr_establish().
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
/* 1080/1240 variants: SXP and DMA banks are selected via BIU_CONF1. */
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));

/*
 * Firmware load origin and per-family RISC code images.  If a family's
 * image was not compiled in (asm_pci.h did not define it), fall back to
 * NULL, in which case the core driver runs the board's resident f/w.
 */
#ifndef ISP_CODE_ORG
#define ISP_CODE_ORG 0x1000
#endif
#ifndef ISP_1040_RISC_CODE
#define ISP_1040_RISC_CODE NULL
#endif
#ifndef ISP_1080_RISC_CODE
#define ISP_1080_RISC_CODE NULL
#endif
#ifndef ISP_2100_RISC_CODE
#define ISP_2100_RISC_CODE NULL
#endif
#ifndef ISP_2200_RISC_CODE
#define ISP_2200_RISC_CODE NULL
#endif
70
/*
 * Machine-dependent vector tables, one per supported board family.
 * Entries (in struct ispmdvec order): register read/write, mailbox DMA
 * setup, data DMA setup/teardown, an optional hook (NULL here), reset
 * and register-dump routines, then the firmware image pointer and
 * several configuration words (load origin, BIU_CONF1 bits for the
 * parallel-SCSI parts; exact field semantics per isp_ispvar.h — the
 * trailing zeros are presumably "use defaults").
 */
#ifndef ISP_DISABLE_1020_SUPPORT
/* ISP1020/1040 parallel SCSI */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_1080_SUPPORT
/* ISP1080/1240 — note the bank-switched register accessors. */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_2100_SUPPORT
/* ISP2100 Fibre Channel */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef ISP_DISABLE_2200_SUPPORT
/* ISP2200 Fibre Channel */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif
146
/*
 * Fallback vendor/product IDs in case the system pcidevs headers do
 * not supply them.
 */
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

/*
 * Composite (product << 16) | vendor values, laid out to compare
 * directly against pa->pa_id in the probe routine.
 */
#define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

/* Config-space offsets: I/O BAR, memory BAR, expansion ROM BAR. */
#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14
#define PCIR_ROMADDR 0x30

/* Default latency timer and cache line size programmed at attach. */
#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10
192
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * Per-instance PCI softc.  The common-core softc must come first so
 * that a struct ispsoftc * can be cast back to isp_pcisoftc *, as the
 * register accessors below do.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* common core state; MUST be first */
	pci_chipset_tag_t pci_pc;	/* chipset tag for config access */
	pcitag_t pci_tag;		/* our config-space tag */
	bus_space_tag_t pci_st;		/* register window tag */
	bus_space_handle_t pci_sh;	/* register window handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag from autoconfig */
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t *pci_xfer_dmap;	/* per-command data maps */
	void * pci_ih;			/* interrupt cookie */
	int16_t pci_poff[_NREG_BLKS];	/* register block offsets */
};

struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
214
215 static int
216 isp_pci_probe(parent, match, aux)
217 struct device *parent;
218 struct cfdata *match;
219 void *aux;
220 {
221 struct pci_attach_args *pa = aux;
222 switch (pa->pa_id) {
223 #ifndef ISP_DISABLE_1020_SUPPORT
224 case PCI_QLOGIC_ISP:
225 return (1);
226 #endif
227 #ifndef ISP_DISABLE_1080_SUPPORT
228 case PCI_QLOGIC_ISP1080:
229 case PCI_QLOGIC_ISP1240:
230 return (1);
231 #endif
232 #ifndef ISP_DISABLE_2100_SUPPORT
233 case PCI_QLOGIC_ISP2100:
234 return (1);
235 #endif
236 #ifndef ISP_DISABLE_2200_SUPPORT
237 case PCI_QLOGIC_ISP2200:
238 return (1);
239 #endif
240 default:
241 return (0);
242 }
243 }
244
245
/*
 * Attach: map the device registers, size/allocate per-family parameter
 * storage, sanitize PCI config-space settings, hook the interrupt, and
 * hand off to the common-core isp_reset/isp_init/isp_attach sequence.
 */
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef DEBUG
	static char oneshot = 1;
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	/* Try both BARs; memory space is preferred when both map. */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	/* Default register-block offsets; variants override some below. */
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;

#ifndef ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	/* The 1240 is dual channel: two sdparam structures. */
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_12X0;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (rev < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		/* NOTE(review): this read's result is unused here. */
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif
	isp->isp_revision = rev;

	/*
	 * Make sure that command register set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these- but I think it's important that they get
	 * enabled......
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer, cache line size,
	 * and ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	/* Clear the expansion ROM enable bit. */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		long foo;
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 * pci_xfer_dmap itself is allocated by isp_pci_mbxdma, invoked
	 * via the mdvec during the init sequence above — presumably
	 * guaranteed by the core driver; verify against isp_init.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			/*
			 * NOTE(review): maps created on earlier iterations
			 * and isp_param are not released here.
			 */
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}
486
487 static u_int16_t
488 isp_pci_rd_reg(isp, regoff)
489 struct ispsoftc *isp;
490 int regoff;
491 {
492 u_int16_t rv;
493 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
494 int offset, oldconf = 0;
495
496 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
497 /*
498 * We will assume that someone has paused the RISC processor.
499 */
500 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
501 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
502 }
503 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
504 offset += (regoff & 0xff);
505 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
506 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
507 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
508 }
509 return (rv);
510 }
511
512 static void
513 isp_pci_wr_reg(isp, regoff, val)
514 struct ispsoftc *isp;
515 int regoff;
516 u_int16_t val;
517 {
518 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
519 int offset, oldconf = 0;
520
521 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
522 /*
523 * We will assume that someone has paused the RISC processor.
524 */
525 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
526 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
527 }
528 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
529 offset += (regoff & 0xff);
530 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
531 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
532 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
533 }
534 }
535
536 #ifndef ISP_DISABLE_1080_SUPPORT
537 static u_int16_t
538 isp_pci_rd_reg_1080(isp, regoff)
539 struct ispsoftc *isp;
540 int regoff;
541 {
542 u_int16_t rv;
543 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
544 int offset, oc = 0;
545
546 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
547 /*
548 * We will assume that someone has paused the RISC processor.
549 */
550 oc = isp_pci_rd_reg(isp, BIU_CONF1);
551 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
552 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
553 oc = isp_pci_rd_reg(isp, BIU_CONF1);
554 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
555 }
556 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
557 offset += (regoff & 0xff);
558 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
559 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
560 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
561 isp_pci_wr_reg(isp, BIU_CONF1, oc);
562 }
563 return (rv);
564 }
565
566 static void
567 isp_pci_wr_reg_1080(isp, regoff, val)
568 struct ispsoftc *isp;
569 int regoff;
570 u_int16_t val;
571 {
572 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
573 int offset, oc = 0;
574
575 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
576 /*
577 * We will assume that someone has paused the RISC processor.
578 */
579 oc = isp_pci_rd_reg(isp, BIU_CONF1);
580 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
581 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
582 oc = isp_pci_rd_reg(isp, BIU_CONF1);
583 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
584 }
585 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
586 offset += (regoff & 0xff);
587 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
588 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
589 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
590 isp_pci_wr_reg(isp, BIU_CONF1, oc);
591 }
592 }
593 #endif
594
595 static int
596 isp_pci_mbxdma(isp)
597 struct ispsoftc *isp;
598 {
599 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
600 bus_dma_segment_t seg;
601 bus_size_t len;
602 fcparam *fcp;
603 int rseg;
604
605 if (isp->isp_rquest_dma) /* been here before? */
606 return (0);
607
608 len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
609 isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
610 if (isp->isp_xflist == NULL) {
611 printf("%s: cannot malloc xflist array\n", isp->isp_name);
612 return (1);
613 }
614 bzero(isp->isp_xflist, len);
615 len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
616 pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
617 if (pci->pci_xfer_dmap == NULL) {
618 printf("%s: cannot malloc xflist array\n", isp->isp_name);
619 return (1);
620 }
621
622 /*
623 * Allocate and map the request queue.
624 */
625 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
626 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
627 BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
628 (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
629 return (1);
630 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
631 &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
632 pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
633 BUS_DMA_NOWAIT))
634 return (1);
635
636 isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
637
638 /*
639 * Allocate and map the result queue.
640 */
641 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
642 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
643 BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
644 (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
645 return (1);
646 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
647 &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
648 pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
649 BUS_DMA_NOWAIT))
650 return (1);
651 isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
652
653 if (IS_SCSI(isp)) {
654 return (0);
655 }
656
657 fcp = isp->isp_param;
658 len = ISP2100_SCRLEN;
659 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
660 BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
661 (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
662 return (1);
663 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
664 &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
665 pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
666 BUS_DMA_NOWAIT))
667 return (1);
668 fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
669 return (0);
670 }
671
/*
 * Map a command's data buffer for DMA and fill in the request entry's
 * scatter/gather segments, spilling into continuation entries on the
 * request queue when the command needs more segments than fit in the
 * base entry.  Returns CMD_QUEUED on success or CMD_COMPLETE (with the
 * xfer error set) on failure.  *iptrp is advanced past any
 * continuation entries consumed.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	/* No data phase: one (empty) segment, nothing to map. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/* FC and SCSI request entries differ in layout and segment limit. */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	/* Fill as many segments as fit in the base request entry. */
	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	/*
	 * Spill the remaining segments into continuation entries,
	 * watching out for request queue wrap into the consumer index.
	 */
	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

 dmasync:
	/* Flush the data buffer in the direction the device will use it. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

 mbxsync:
	/* Byte-swizzle the entry if needed, then push the request queue. */
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
769
770 static int
771 isp_pci_intr(arg)
772 void *arg;
773 {
774 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
775 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
776 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
777 return (isp_intr(arg));
778 }
779
780 static void
781 isp_pci_dmateardown(isp, xs, handle)
782 struct ispsoftc *isp;
783 struct scsipi_xfer *xs;
784 u_int32_t handle;
785 {
786 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
787 bus_dmamap_t dmap;
788 assert(handle != 0 && handle <= isp->isp_maxcmds);
789 dmap = pci->pci_xfer_dmap[handle-1];
790 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
791 xs->xs_control & XS_CTL_DATA_IN ?
792 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
793 bus_dmamap_unload(pci->pci_dmat, dmap);
794 }
795
/*
 * Post-reset hook called from the common core: write the "disable
 * BIOS" command to the host command/control register.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
803
804 static void
805 isp_pci_dumpregs(isp)
806 struct ispsoftc *isp;
807 {
808 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
809 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
810 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
811 }
812