isp_pci.c revision 1.44 1 /* $NetBSD: isp_pci.c,v 1.44 1999/10/17 01:22:08 mjacob Exp $ */
2 /*
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4 * Matthew Jacob (mjacob (at) nas.nasa.gov)
5 */
6 /*
7 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <dev/ic/isp_netbsd.h>
34 #include <dev/microcode/isp/asm_pci.h>
35
36 #include <dev/pci/pcireg.h>
37 #include <dev/pci/pcivar.h>
38 #include <dev/pci/pcidevs.h>
39
40 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
41 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
42 #ifndef ISP_DISABLE_1080_SUPPORT
43 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
44 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
45 #endif
46 static int isp_pci_mbxdma __P((struct ispsoftc *));
47 static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
48 ispreq_t *, u_int8_t *, u_int8_t));
49 static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
50 u_int32_t));
51 static void isp_pci_reset1 __P((struct ispsoftc *));
52 static void isp_pci_dumpregs __P((struct ispsoftc *));
53 static int isp_pci_intr __P((void *));
54
#ifndef ISP_DISABLE_1020_SUPPORT
/*
 * Bus/board method vector for ISP1020/1040 SCSI adapters: the plain PCI
 * register accessors paired with the 1020 firmware image from asm_pci.h.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	0,
	/* enable PCI bursts and the 64-byte FIFO threshold in BIU CONF1 */
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif
73
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * Method vector for ISP1080/1240 adapters; these need the 1080-specific
 * register accessors (SXP/DMA bank selection via BIU CONF1) and their own
 * firmware image.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP1080_RISC_CODE,
	ISP1080_CODE_LENGTH,
	ISP1080_CODE_ORG,
	0,
	/* enable PCI bursts and the 64-byte FIFO threshold in BIU CONF1 */
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif
92
#ifndef ISP_DISABLE_2100_SUPPORT
/*
 * Method vector for the ISP2100 Fibre Channel adapter. Plain PCI register
 * accessors; no CONF1 fifo/burst bits are set here.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	0,
	0,
	0
};
#endif
111
#ifndef ISP_DISABLE_2200_SUPPORT
/*
 * Method vector for the ISP2200 Fibre Channel adapter; identical to the
 * 2100 vector except for the firmware image.
 */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2200_RISC_CODE,
	ISP2200_CODE_LENGTH,
	ISP2200_CODE_ORG,
	0,
	0,
	0
};
#endif
130
/*
 * Fallback PCI vendor/product IDs, for kernels whose <dev/pci/pcidevs.h>
 * does not yet define them.
 */
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

/*
 * Composite (product << 16 | vendor) device IDs, in the same layout as
 * pa->pa_id so probe/attach can compare directly.
 */
#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

/* Config-space offsets: I/O BAR, memory BAR, and expansion ROM base. */
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
#define	PCIR_ROMADDR	0x30

/* Default PCI latency timer and cache line size programmed at attach. */
#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
175
176
177 static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
178 static void isp_pci_attach __P((struct device *, struct device *, void *));
179
/*
 * Per-instance softc: the generic ispsoftc followed by PCI-specific state.
 * The core driver passes around struct ispsoftc pointers that this front
 * end casts back to struct isp_pcisoftc, so pci_isp must stay first.
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* generic state; must be first */
	pci_chipset_tag_t	pci_pc;		/* chipset tag for config space */
	pcitag_t		pci_tag;	/* our config-space tag */
	bus_space_tag_t		pci_st;		/* register space tag */
	bus_space_handle_t	pci_sh;		/* mapped register handle */
	bus_dma_tag_t		pci_dmat;	/* DMA tag from attach args */
	bus_dmamap_t		pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t		pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t		pci_result_dmap;	/* result queue map */
	bus_dmamap_t		pci_xfer_dmap[MAXISPREQUEST]; /* per-cmd maps */
	void *			pci_ih;		/* interrupt handle */
	int16_t			pci_poff[_NREG_BLKS];	/* register block offsets */
};

/* Autoconfiguration glue. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
198
199 static int
200 isp_pci_probe(parent, match, aux)
201 struct device *parent;
202 struct cfdata *match;
203 void *aux;
204 {
205 struct pci_attach_args *pa = aux;
206 switch (pa->pa_id) {
207 #ifndef ISP_DISABLE_1020_SUPPORT
208 case PCI_QLOGIC_ISP:
209 return (1);
210 #endif
211 #ifndef ISP_DISABLE_1080_SUPPORT
212 case PCI_QLOGIC_ISP1080:
213 case PCI_QLOGIC_ISP1240:
214 return (1);
215 #endif
216 #ifndef ISP_DISABLE_2100_SUPPORT
217 case PCI_QLOGIC_ISP2100:
218 return (1);
219 #endif
220 #ifndef ISP_DISABLE_2200_SUPPORT
221 case PCI_QLOGIC_ISP2200:
222 return (1);
223 #endif
224 default:
225 return (0);
226 }
227 }
228
229
/*
 * Attach: map registers, select the board-specific method vector and
 * parameter block, sanitize PCI config registers, hook the interrupt,
 * reset/init the chip, allocate per-command DMA maps, and finally hand
 * off to the bus-independent isp_attach().
 */
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef DEBUG
	static char oneshot = 1;	/* print the version banner only once */
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	long foo;
	ISP_LOCKVAL_DECL;

	/* Try both BARs; prefer memory space if it mapped. */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	/* Default register block offsets; chip-specific cases adjust below. */
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

#ifndef ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_12X0;
		/* dual-channel board: two sdparam blocks, one per bus */
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_2100 + 0 || pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
		if ((data & 0xff) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif

	/*
	 * Make sure that command register set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these- but I think it's important that they get
	 * enabled......
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer, cache line size,
	 * and ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	/* clear the ROM enable bit */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}



	/*
	 * Create the DMA maps for the data transfers.
	 *
	 * NOTE(review): on failure here isp_param is not freed (unlike the
	 * earlier error paths) and maps created on prior iterations are not
	 * destroyed - confirm whether this leak is acceptable for attach
	 * failure.
	 */
	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}
471
472 static u_int16_t
473 isp_pci_rd_reg(isp, regoff)
474 struct ispsoftc *isp;
475 int regoff;
476 {
477 u_int16_t rv;
478 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
479 int offset, oldconf = 0;
480
481 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
482 /*
483 * We will assume that someone has paused the RISC processor.
484 */
485 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
486 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
487 }
488 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
489 offset += (regoff & 0xff);
490 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
491 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
492 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
493 }
494 return (rv);
495 }
496
497 static void
498 isp_pci_wr_reg(isp, regoff, val)
499 struct ispsoftc *isp;
500 int regoff;
501 u_int16_t val;
502 {
503 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
504 int offset, oldconf = 0;
505
506 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
507 /*
508 * We will assume that someone has paused the RISC processor.
509 */
510 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
511 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
512 }
513 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
514 offset += (regoff & 0xff);
515 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
516 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
517 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
518 }
519 }
520
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * 1080/1240 variant of the register reader: on these chips both the SXP
 * and the DMA register blocks are banked behind select bits in BIU CONF1,
 * so either kind of access saves CONF1, sets the appropriate select bit,
 * and restores CONF1 afterwards.
 */
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* select the DMA register bank */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/* restore the saved CONF1 value if we banked above */
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

/*
 * 1080/1240 variant of the register writer; same CONF1 banking protocol
 * as isp_pci_rd_reg_1080() above.
 */
static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* select the DMA register bank */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/* restore the saved CONF1 value if we banked above */
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
#endif
579
/*
 * Allocate DMA-able memory for the request and result queues (and, on
 * Fibre Channel chips, the scratch area), map it, and record the bus
 * addresses in the generic softc. Returns 0 on success, 1 on failure.
 *
 * NOTE(review): the failure paths below leak whatever was allocated
 * before the failing step (isp_xflist, dmamem segments, created maps) -
 * confirm whether a failed attach is expected to hold these forever.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	/* per-command xfer pointer table, indexed by handle - 1 */
	isp->isp_xflist = (ISP_SCSI_XFER_T **)
	    malloc(isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T),
	    M_DEVBUF, M_WAITOK);

	if (isp->isp_xflist == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T));

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);

	/* single segment, so the queue's bus address is the first segment */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI chips need no scratch area; done. */
	if (IS_SCSI(isp)) {
		return (0);
	}

	/*
	 * Fibre Channel: allocate and map the scratch area as well.
	 */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
652
653 static int
654 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
655 struct ispsoftc *isp;
656 struct scsipi_xfer *xs;
657 ispreq_t *rq;
658 u_int8_t *iptrp;
659 u_int8_t optr;
660 {
661 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
662 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
663 ispcontreq_t *crq;
664 int segcnt, seg, error, ovseg, seglim, drq;
665
666 if (xs->datalen == 0) {
667 rq->req_seg_count = 1;
668 goto mbxsync;
669 }
670 assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
671 if (xs->xs_control & XS_CTL_DATA_IN) {
672 drq = REQFLAG_DATA_IN;
673 } else {
674 drq = REQFLAG_DATA_OUT;
675 }
676
677 if (IS_FC(isp)) {
678 seglim = ISP_RQDSEG_T2;
679 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
680 ((ispreqt2_t *)rq)->req_flags |= drq;
681 } else {
682 seglim = ISP_RQDSEG;
683 rq->req_flags |= drq;
684 }
685 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
686 NULL, xs->xs_control & XS_CTL_NOSLEEP ?
687 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
688 if (error) {
689 XS_SETERR(xs, HBA_BOTCH);
690 return (CMD_COMPLETE);
691 }
692
693 segcnt = dmap->dm_nsegs;
694
695 for (seg = 0, rq->req_seg_count = 0;
696 seg < segcnt && rq->req_seg_count < seglim;
697 seg++, rq->req_seg_count++) {
698 if (IS_FC(isp)) {
699 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
700 rq2->req_dataseg[rq2->req_seg_count].ds_count =
701 dmap->dm_segs[seg].ds_len;
702 rq2->req_dataseg[rq2->req_seg_count].ds_base =
703 dmap->dm_segs[seg].ds_addr;
704 } else {
705 rq->req_dataseg[rq->req_seg_count].ds_count =
706 dmap->dm_segs[seg].ds_len;
707 rq->req_dataseg[rq->req_seg_count].ds_base =
708 dmap->dm_segs[seg].ds_addr;
709 }
710 }
711
712 if (seg == segcnt)
713 goto dmasync;
714
715 do {
716 crq = (ispcontreq_t *)
717 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
718 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
719 if (*iptrp == optr) {
720 printf("%s: Request Queue Overflow++\n", isp->isp_name);
721 bus_dmamap_unload(pci->pci_dmat, dmap);
722 XS_SETERR(xs, HBA_BOTCH);
723 return (CMD_COMPLETE);
724 }
725 rq->req_header.rqs_entry_count++;
726 bzero((void *)crq, sizeof (*crq));
727 crq->req_header.rqs_entry_count = 1;
728 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
729
730 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
731 rq->req_seg_count++, seg++, ovseg++) {
732 crq->req_dataseg[ovseg].ds_count =
733 dmap->dm_segs[seg].ds_len;
734 crq->req_dataseg[ovseg].ds_base =
735 dmap->dm_segs[seg].ds_addr;
736 }
737 } while (seg < segcnt);
738
739 dmasync:
740 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
741 (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
742 BUS_DMASYNC_PREWRITE);
743
744 mbxsync:
745 ISP_SWIZZLE_REQUEST(isp, rq);
746 bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
747 pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
748 return (CMD_QUEUED);
749 }
750
751 static int
752 isp_pci_intr(arg)
753 void *arg;
754 {
755 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
756 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
757 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
758 return (isp_intr(arg));
759 }
760
761 static void
762 isp_pci_dmateardown(isp, xs, handle)
763 struct ispsoftc *isp;
764 struct scsipi_xfer *xs;
765 u_int32_t handle;
766 {
767 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
768 bus_dmamap_t dmap;
769 assert(handle != 0 && handle <= isp->isp_maxcmds);
770 dmap = pci->pci_xfer_dmap[handle-1];
771 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
772 xs->xs_control & XS_CTL_DATA_IN ?
773 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
774 bus_dmamap_unload(pci->pci_dmat, dmap);
775 }
776
777 static void
778 isp_pci_reset1(isp)
779 struct ispsoftc *isp;
780 {
781 /* Make sure the BIOS is disabled */
782 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
783 }
784
785 static void
786 isp_pci_dumpregs(isp)
787 struct ispsoftc *isp;
788 {
789 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
790 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
791 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
792 }
793