/* $NetBSD: isp_pci.c,v 1.48 1999/12/16 05:35:46 mjacob Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * Matthew Jacob (mjacob (at) nas.nasa.gov)
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_pci.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));

#ifndef ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif
#ifndef ISP_1040_RISC_CODE
#define	ISP_1040_RISC_CODE	NULL
#endif
#ifndef ISP_1080_RISC_CODE
#define	ISP_1080_RISC_CODE	NULL
#endif
#ifndef ISP_2100_RISC_CODE
#define	ISP_2100_RISC_CODE	NULL
#endif
#ifndef ISP_2200_RISC_CODE
#define	ISP_2200_RISC_CODE	NULL
#endif

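/*
 * Machine dependent vector tables, one per supported chip family. Each
 * table supplies the register accessors, mailbox/command DMA routines,
 * an optional firmware image and its load origin, and the default
 * BIU configuration bits that the common isp core uses for that family.
 */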
#ifndef ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

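/*
 * pa->pa_id holds the PCI ID register: vendor in the low 16 bits and
 * product in the high 16 bits. The PCI_QLOGIC_ISP* values below are
 * composed to match that layout directly.
 */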
#ifndef PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
#define	PCIR_ROMADDR	0x30

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10


static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

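/*
 * PCI bus specific softc. The common ispsoftc must come first so the
 * core code and this file can cast between the two. The remaining
 * fields record the bus space and DMA handles, the per-command transfer
 * DMA maps, and the offset of each register block within the mapped
 * register window (pci_poff).
 */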
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	pci_chipset_tag_t	pci_pc;
	pcitag_t		pci_tag;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	bus_dma_tag_t		pci_dmat;
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;
	bus_dmamap_t		pci_result_dmap;
	bus_dmamap_t		*pci_xfer_dmap;
	void			*pci_ih;
	int16_t			pci_poff[_NREG_BLKS];
};

struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};

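/*
 * Match on the Qlogic vendor/product IDs for whichever chip families
 * have not been compiled out.
 */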
static int
isp_pci_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;
	switch (pa->pa_id) {
#ifndef ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		return (1);
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
	case PCI_QLOGIC_ISP1240:
	case PCI_QLOGIC_ISP1280:
		return (1);
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		return (1);
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	case PCI_QLOGIC_ISP2200:
		return (1);
#endif
	default:
		return (0);
	}
}


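/*
 * Attach: map the device registers (memory space preferred, I/O space
 * as a fallback), pick the mdvec and parameter block for the chip that
 * was found, fix up the PCI command, latency and cache line registers,
 * hook up the interrupt, and then hand off to the common
 * isp_reset/isp_init/isp_attach sequence.
 */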
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef DEBUG
	static char oneshot = 1;
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;

#ifndef ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1280) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (rev < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower the cache line size for early
			 * XXX: revision boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif
	isp->isp_revision = rev;

	/*
	 * Make sure that the command register is set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these, but I think it's important that
	 * they get enabled.
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Set the latency timer and cache line size, and make sure
	 * that the expansion ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
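	/*
	 * Map and establish the interrupt; isp_pci_intr syncs the result
	 * queue before handing control to the common isp_intr().
	 */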
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		long foo;
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}

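/*
 * Register accessors for the 1020/1040 and 2100/2200. The register
 * offset encodes a block selector plus an offset within that block;
 * pci_poff[] maps each block onto the PCI register window. SXP
 * registers are only visible while BIU_PCI_CONF1_SXP is set in
 * BIU_CONF1, so that bit is toggled around the access and the old
 * value restored (the caller is assumed to have paused the RISC
 * processor).
 */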
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
		delay(250);
	}
}

#ifndef ISP_DISABLE_1080_SUPPORT
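/*
 * Register accessors for the 1080/1240/1280. These chips additionally
 * page the DMA register block through BIU_CONF1, and the dual-bus
 * parts select between the two SXP banks based on SXP_BANK1_SELECT.
 * The original BIU_CONF1 value is restored once the access is done.
 */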
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/*
	 * Restore the original value. Testing oc works because
	 * BIU_CONF1 is always nonzero.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	u_int16_t oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (IS_1280(isp)) {
			if (regoff & SXP_BANK1_SELECT)
				tc |= BIU_PCI1080_CONF1_SXP0;
			else
				tc |= BIU_PCI1080_CONF1_SXP1;
		} else {
			tc |= BIU_PCI1080_CONF1_SXP0;
		}
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
		delay(250);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
		delay(250);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/*
	 * Restore the original value. Testing oc works because
	 * BIU_CONF1 is always nonzero.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
		delay(250);
	}
}
#endif

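/*
 * Allocate the DMA-able request and result queues (and, for Fibre
 * Channel, the scratch area), load them so the chip can address them,
 * and set up the per-command handle and DMA map arrays.
 */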
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
	pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->pci_xfer_dmap == NULL) {
		printf("%s: cannot malloc dma map array\n", isp->isp_name);
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);

	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	if (IS_SCSI(isp)) {
		return (0);
	}

	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}

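/*
 * Load the data for a command into its transfer DMA map and translate
 * the map's segments into request queue data segments. Segments that
 * do not fit in the request entry itself spill into RQSTYPE_DATASEG
 * continuation entries. Returns CMD_QUEUED on success or CMD_COMPLETE
 * (with the transfer error set) on failure.
 */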
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}

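/*
 * Interrupt handler wrapper: sync the result queue for reading so the
 * core code sees current completions, then call the common isp_intr().
 */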
static int
isp_pci_intr(arg)
	void *arg;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
	bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
	    pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	return (isp_intr(arg));
}

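/*
 * Undo isp_pci_dmasetup for a completed command: sync the transfer
 * map in the appropriate direction and unload it.
 */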
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap;
	assert(handle != 0 && handle <= isp->isp_maxcmds);
	dmap = pci->pci_xfer_dmap[handle-1];
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->xs_control & XS_CTL_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}

static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
}