/* $NetBSD: isp_pci.c,v 1.43 1999/10/14 02:14:35 mjacob Exp $ */
/* release_6_5_99 */
3 /*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 * Matthew Jacob (mjacob (at) nas.nasa.gov)
6 */
7 /*
8 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <dev/ic/isp_netbsd.h>
35 #include <dev/microcode/isp/asm_pci.h>
36
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40
/*
 * Forward declarations for the bus-specific methods wired into the
 * ispmdvec dispatch tables below and into autoconfiguration.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
/* 1080/1240 boards page their SXP and DMA register blocks differently. */
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
55
#ifndef ISP_DISABLE_1020_SUPPORT
/*
 * Machine-dependent vector for ISP1020/1040-class SCSI boards:
 * register accessors, DMA glue, reset/dump hooks, and the firmware
 * image to download.  The NULL slot is an optional hook this bus
 * front-end does not use.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,		/* firmware image */
	ISP_CODE_LENGTH,	/* firmware length */
	ISP_CODE_ORG,		/* firmware load origin */
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU_CONF1 settings */
	0
};
#endif
74
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * Machine-dependent vector for ISP1080/1240 boards.  Identical to the
 * 1020 vector except for the paged register accessors and firmware.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP1080_RISC_CODE,	/* firmware image */
	ISP1080_CODE_LENGTH,	/* firmware length */
	ISP1080_CODE_ORG,	/* firmware load origin */
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU_CONF1 settings */
	0
};
#endif
93
#ifndef ISP_DISABLE_2100_SUPPORT
/*
 * Machine-dependent vector for the ISP2100 Fibre Channel adapter.
 * No BIU_CONF1 burst/FIFO bits are set for this chip.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,	/* firmware image */
	ISP2100_CODE_LENGTH,	/* firmware length */
	ISP2100_CODE_ORG,	/* firmware load origin */
	0,
	0,
	0
};
#endif
112
#ifndef ISP_DISABLE_2200_SUPPORT
/*
 * Machine-dependent vector for the ISP2200 Fibre Channel adapter.
 * Same shape as the 2100 vector, different firmware.
 */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2200_RISC_CODE,	/* firmware image */
	ISP2200_CODE_LENGTH,	/* firmware length */
	ISP2200_CODE_ORG,	/* firmware load origin */
	0,
	0,
	0
};
#endif
131
/*
 * Fallback definitions for systems whose <dev/pci/pcidevs.h> does not
 * yet know these QLogic IDs.
 */
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

/*
 * Combined (product << 16 | vendor) IDs, in the same form as
 * pa->pa_id so probe can compare directly.
 */
#define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

/* PCI config-space offsets: I/O BAR, memory BAR, expansion ROM. */
#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14
#define PCIR_ROMADDR 0x30

/* Defaults written into PCI_BHLC_REG: latency timer, cache line size. */
#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10

176
177
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * PCI-specific softc.  The generic ispsoftc must come first so the
 * core driver and this front-end can cast between the two.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* generic softc (must be first) */
	pci_chipset_tag_t pci_pc;	/* PCI chipset handle */
	pcitag_t pci_tag;		/* config-space tag for this device */
	bus_space_tag_t pci_st;		/* register space tag */
	bus_space_handle_t pci_sh;	/* register space handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag from the PCI bus */
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST];	/* per-command maps */
	void * pci_ih;			/* established interrupt cookie */
	int16_t pci_poff[_NREG_BLKS];	/* per-block register offsets */
};

/* autoconfiguration glue */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
199
200 static int
201 isp_pci_probe(parent, match, aux)
202 struct device *parent;
203 struct cfdata *match;
204 void *aux;
205 {
206 struct pci_attach_args *pa = aux;
207 switch (pa->pa_id) {
208 #ifndef ISP_DISABLE_1020_SUPPORT
209 case PCI_QLOGIC_ISP:
210 return (1);
211 #endif
212 #ifndef ISP_DISABLE_1080_SUPPORT
213 case PCI_QLOGIC_ISP1080:
214 case PCI_QLOGIC_ISP1240:
215 return (1);
216 #endif
217 #ifndef ISP_DISABLE_2100_SUPPORT
218 case PCI_QLOGIC_ISP2100:
219 return (1);
220 #endif
221 #ifndef ISP_DISABLE_2200_SUPPORT
222 case PCI_QLOGIC_ISP2200:
223 return (1);
224 #endif
225 default:
226 return (0);
227 }
228 }
229
230
231 static void
232 isp_pci_attach(parent, self, aux)
233 struct device *parent, *self;
234 void *aux;
235 {
236 #ifdef DEBUG
237 static char oneshot = 1;
238 #endif
239 static char *nomem = "%s: no mem for sdparam table\n";
240 u_int32_t data, linesz = PCI_DFLT_LNSZ;
241 struct pci_attach_args *pa = aux;
242 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
243 struct ispsoftc *isp = &pcs->pci_isp;
244 bus_space_tag_t st, iot, memt;
245 bus_space_handle_t sh, ioh, memh;
246 pci_intr_handle_t ih;
247 const char *intrstr;
248 int ioh_valid, memh_valid, i;
249 long foo;
250 ISP_LOCKVAL_DECL;
251
252 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
253 PCI_MAPREG_TYPE_IO, 0,
254 &iot, &ioh, NULL, NULL) == 0);
255 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
256 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
257 &memt, &memh, NULL, NULL) == 0);
258
259 if (memh_valid) {
260 st = memt;
261 sh = memh;
262 } else if (ioh_valid) {
263 st = iot;
264 sh = ioh;
265 } else {
266 printf(": unable to map device registers\n");
267 return;
268 }
269 printf("\n");
270
271 pcs->pci_st = st;
272 pcs->pci_sh = sh;
273 pcs->pci_dmat = pa->pa_dmat;
274 pcs->pci_pc = pa->pa_pc;
275 pcs->pci_tag = pa->pa_tag;
276 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
277 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
278 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
279 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
280 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
281
282 #ifndef ISP_DISABLE_1020_SUPPORT
283 if (pa->pa_id == PCI_QLOGIC_ISP) {
284 isp->isp_mdvec = &mdvec;
285 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
286 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
287 if (isp->isp_param == NULL) {
288 printf(nomem, isp->isp_name);
289 return;
290 }
291 bzero(isp->isp_param, sizeof (sdparam));
292 }
293 #endif
294 #ifndef ISP_DISABLE_1080_SUPPORT
295 if (pa->pa_id == PCI_QLOGIC_ISP1080) {
296 isp->isp_mdvec = &mdvec_1080;
297 isp->isp_type = ISP_HA_SCSI_1080;
298 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
299 if (isp->isp_param == NULL) {
300 printf(nomem, isp->isp_name);
301 return;
302 }
303 bzero(isp->isp_param, sizeof (sdparam));
304 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
305 ISP1080_DMA_REGS_OFF;
306 }
307 if (pa->pa_id == PCI_QLOGIC_ISP1240) {
308 isp->isp_mdvec = &mdvec_1080;
309 isp->isp_type = ISP_HA_SCSI_12X0;
310 isp->isp_param =
311 malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
312 if (isp->isp_param == NULL) {
313 printf(nomem, isp->isp_name);
314 return;
315 }
316 bzero(isp->isp_param, 2 * sizeof (sdparam));
317 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
318 ISP1080_DMA_REGS_OFF;
319 }
320 #endif
321 #ifndef ISP_DISABLE_2100_SUPPORT
322 if (pa->pa_id == PCI_QLOGIC_ISP2100) {
323 isp->isp_mdvec = &mdvec_2100;
324 isp->isp_type = ISP_HA_FC_2100;
325 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
326 if (isp->isp_param == NULL) {
327 printf(nomem, isp->isp_name);
328 return;
329 }
330 bzero(isp->isp_param, sizeof (fcparam));
331 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
332 PCI_MBOX_REGS2100_OFF;
333 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
334 if ((data & 0xff) < 3) {
335 /*
336 * XXX: Need to get the actual revision
337 * XXX: number of the 2100 FB. At any rate,
338 * XXX: lower cache line size for early revision
339 * XXX; boards.
340 */
341 linesz = 1;
342 }
343 }
344 #endif
345 #ifndef ISP_DISABLE_2200_SUPPORT
346 if (pa->pa_id == PCI_QLOGIC_ISP2200) {
347 isp->isp_mdvec = &mdvec_2200;
348 isp->isp_type = ISP_HA_FC_2200;
349 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
350 if (isp->isp_param == NULL) {
351 printf(nomem, isp->isp_name);
352 return;
353 }
354 bzero(isp->isp_param, sizeof (fcparam));
355 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
356 PCI_MBOX_REGS2100_OFF;
357 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
358 }
359 #endif
360
361 /*
362 * Make sure that command register set sanely.
363 */
364 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
365 data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
366
367 /*
368 * Not so sure about these- but I think it's important that they get
369 * enabled......
370 */
371 data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
372 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
373
374 /*
375 * Make sure that the latency timer, cache line size,
376 * and ROM is disabled.
377 */
378 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
379 data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
380 data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
381 data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
382 data |= (linesz << PCI_CACHELINE_SHIFT);
383 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
384
385 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
386 data &= ~1;
387 pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
388
389 #ifdef DEBUG
390 if (oneshot) {
391 oneshot = 0;
392 printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
393 "%d.%d Core Version %d.%d\n",
394 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
395 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
396 }
397 #endif
398 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
399 pa->pa_intrline, &ih)) {
400 printf("%s: couldn't map interrupt\n", isp->isp_name);
401 free(isp->isp_param, M_DEVBUF);
402 return;
403 }
404 intrstr = pci_intr_string(pa->pa_pc, ih);
405 if (intrstr == NULL)
406 intrstr = "<I dunno>";
407 pcs->pci_ih =
408 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
409 if (pcs->pci_ih == NULL) {
410 printf("%s: couldn't establish interrupt at %s\n",
411 isp->isp_name, intrstr);
412 free(isp->isp_param, M_DEVBUF);
413 return;
414 }
415 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
416
417 if (IS_FC(isp)) {
418 /*
419 * This isn't very random, but it's the best we can do for
420 * the real edge case of cards that don't have WWNs.
421 */
422 foo = (long) isp;
423 foo >>= 4;
424 foo &= 0x7;
425 while (version[foo])
426 isp->isp_osinfo.seed += (int) version[foo++];
427 isp->isp_osinfo.seed <<= 8;
428 isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
429 }
430
431 ISP_LOCK(isp);
432 isp_reset(isp);
433 if (isp->isp_state != ISP_RESETSTATE) {
434 ISP_UNLOCK(isp);
435 free(isp->isp_param, M_DEVBUF);
436 return;
437 }
438 isp_init(isp);
439 if (isp->isp_state != ISP_INITSTATE) {
440 isp_uninit(isp);
441 ISP_UNLOCK(isp);
442 free(isp->isp_param, M_DEVBUF);
443 return;
444 }
445
446
447
448 /*
449 * Create the DMA maps for the data transfers.
450 */
451 for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
452 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
453 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
454 &pcs->pci_xfer_dmap[i])) {
455 printf("%s: can't create dma maps\n",
456 isp->isp_name);
457 isp_uninit(isp);
458 ISP_UNLOCK(isp);
459 return;
460 }
461 }
462 /*
463 * Do Generic attach now.
464 */
465 isp_attach(isp);
466 if (isp->isp_state != ISP_RUNSTATE) {
467 isp_uninit(isp);
468 free(isp->isp_param, M_DEVBUF);
469 }
470 ISP_UNLOCK(isp);
471 }
472
/*
 * Read a 16-bit chip register.  "regoff" encodes both the register
 * block (see _BLK_REG_MASK) and the offset within that block; the
 * block is translated through pci_poff[] into a PCI address offset.
 * SXP registers share address space with other blocks, so access to
 * them is gated by temporarily setting BIU_PCI_CONF1_SXP in BIU_CONF1.
 */
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/* restore the previous BIU_CONF1 paging state */
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}
497
/*
 * Write a 16-bit chip register.  Same block/offset encoding and
 * SXP-paging dance as isp_pci_rd_reg().
 */
static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/* restore the previous BIU_CONF1 paging state */
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}
521
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * 1080/1240 variant of the register read: on these chips both the
 * SXP and the DMA register blocks are paged in via their own bits in
 * BIU_CONF1 (BIU_PCI1080_CONF1_SXP / _DMA).
 */
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/* restore the previous BIU_CONF1 paging state */
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}
551
/*
 * 1080/1240 variant of the register write; see isp_pci_rd_reg_1080()
 * for the SXP/DMA block paging rules.
 */
static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/* restore the previous BIU_CONF1 paging state */
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
#endif
579 #endif
580
581 static int
582 isp_pci_mbxdma(isp)
583 struct ispsoftc *isp;
584 {
585 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
586 bus_dma_segment_t seg;
587 bus_size_t len;
588 fcparam *fcp;
589 int rseg;
590
591 if (isp->isp_rquest_dma) /* been here before? */
592 return (0);
593
594 isp->isp_xflist = (ISP_SCSI_XFER_T **)
595 malloc(isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T),
596 M_DEVBUF, M_WAITOK);
597
598 if (isp->isp_xflist == NULL) {
599 printf("%s: cannot malloc xflist array\n", isp->isp_name);
600 return (1);
601 }
602 bzero(isp->isp_xflist, isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T));
603
604 /*
605 * Allocate and map the request queue.
606 */
607 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
608 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
609 BUS_DMA_NOWAIT) ||
610 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
611 (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
612 return (1);
613 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
614 &pci->pci_rquest_dmap) ||
615 bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
616 (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
617 return (1);
618
619 isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
620
621 /*
622 * Allocate and map the result queue.
623 */
624 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
625 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
626 BUS_DMA_NOWAIT) ||
627 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
628 (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
629 return (1);
630 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
631 &pci->pci_result_dmap) ||
632 bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
633 (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
634 return (1);
635 isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
636
637 if (IS_SCSI(isp)) {
638 return (0);
639 }
640
641 fcp = isp->isp_param;
642 len = ISP2100_SCRLEN;
643 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
644 BUS_DMA_NOWAIT) ||
645 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
646 (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
647 return (1);
648 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
649 &pci->pci_scratch_dmap) ||
650 bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
651 (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
652 return (1);
653 fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
654 return (0);
655 }
656
/*
 * Translate a scsipi_xfer's data buffer into the scatter/gather list
 * of an ISP request queue entry, spilling extra segments into
 * continuation entries on the request queue.
 *
 * rq:    the request queue entry being built (cast to ispreqt2_t for
 *        Fibre Channel boards).
 * iptrp: in/out request queue "in" index; advanced for each
 *        continuation entry consumed.
 * optr:  current "out" index, used to detect request queue overflow.
 *
 * Returns CMD_QUEUED on success, or CMD_COMPLETE with XS_SETERR set
 * on failure (DMA load error or queue overflow).
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	/* No data movement: single (unused) segment, just sync the queue. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/*
	 * FC type-2 requests carry a total byte count and have a
	 * different number of in-entry segment slots than SCSI requests.
	 */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	/* Fill as many segments as fit in the request entry itself. */
	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	/*
	 * Leftover segments: chain continuation entries onto the
	 * request queue, ISP_CDSEG segments per entry.
	 */
	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n",
			    isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	/* Make the data buffer visible to the device before queueing. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	/* Byte-swap as needed and flush the request queue entry itself. */
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
755
756 static int
757 isp_pci_intr(arg)
758 void *arg;
759 {
760 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
761 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
762 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
763 return (isp_intr(arg));
764 }
765
766 static void
767 isp_pci_dmateardown(isp, xs, handle)
768 struct ispsoftc *isp;
769 struct scsipi_xfer *xs;
770 u_int32_t handle;
771 {
772 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
773 bus_dmamap_t dmap;
774 assert(handle != 0 && handle <= isp->isp_maxcmds);
775 dmap = pci->pci_xfer_dmap[handle-1];
776 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
777 xs->xs_control & XS_CTL_DATA_IN ?
778 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
779 bus_dmamap_unload(pci->pci_dmat, dmap);
780 }
781
/*
 * Bus-specific post-reset fixup, called by the core after a chip
 * reset.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
789
790 static void
791 isp_pci_dumpregs(isp)
792 struct ispsoftc *isp;
793 {
794 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
795 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
796 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
797 }
798