isp_pci.c revision 1.46 1 /* $NetBSD: isp_pci.c,v 1.46 1999/10/28 16:11:19 mjacob Exp $ */
2 /*
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4 * Matthew Jacob (mjacob (at) nas.nasa.gov)
5 */
6 /*
7 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <dev/ic/isp_netbsd.h>
34
35 #include <dev/pci/pcireg.h>
36 #include <dev/pci/pcivar.h>
37 #include <dev/pci/pcidevs.h>
38
/*
 * Bus-dependent routines handed to the machine-independent ISP core
 * through the ispmdvec vectors defined below.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
/* 1080/1240 variants: these parts bank SXP and DMA registers via CONF1 */
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
53
#ifndef ISP_DISABLE_1020_SUPPORT
/*
 * Bus vector for ISP1020/1040-class SCSI adapters.  The function slots
 * (in struct ispmdvec order) are: register read/write, mailbox DMA
 * setup, data DMA setup/teardown, an unused slot, post-reset hook and
 * register dump.  The zero fields are unused here (presumably firmware
 * image pointers/lengths — confirm against struct ispmdvec); the
 * next-to-last member carries default BIU CONF1 bits for this family.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	0,
	0,
	0,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif
72
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * Bus vector for ISP1080/1240 SCSI adapters.  Identical to mdvec except
 * that it uses the 1080-specific register accessors, which know how to
 * bank the SXP and DMA register sets through CONF1.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	0,
	0,
	0,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif
91
#ifndef ISP_DISABLE_2100_SUPPORT
/*
 * Bus vector for ISP2100 Fibre Channel adapters.  Uses the plain
 * register accessors; no default BIU CONF1 bits are set for this part.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	0,
	0,
	0,
	0,
	0,
	0
};
#endif
110
#ifndef ISP_DISABLE_2200_SUPPORT
/*
 * Bus vector for ISP2200 Fibre Channel adapters; same shape as the
 * 2100 vector.
 */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	0,
	0,
	0,
	0,
	0,
	0
};
#endif
129
/*
 * Fallback PCI ID definitions for systems whose <dev/pci/pcidevs.h>
 * predates these QLogic entries.
 */
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

/*
 * Composite (product << 16 | vendor) IDs, matching the layout of
 * pa->pa_id compared against in the probe routine.
 */
#define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

/* PCI config-space offsets: I/O BAR, memory BAR, expansion ROM base */
#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14
#define PCIR_ROMADDR 0x30

/* Values programmed into the latency timer and cache line size fields */
#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10
174
175
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * Per-instance PCI softc.  The machine-independent ispsoftc must be
 * first so the MI code and this file can cast between the two.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* MI softc; must be first member */
	pci_chipset_tag_t pci_pc;	/* PCI chipset tag */
	pcitag_t pci_tag;		/* PCI device tag */
	bus_space_tag_t pci_st;		/* register space tag (mem or I/O) */
	bus_space_handle_t pci_sh;	/* register space handle */
	bus_dma_tag_t pci_dmat;		/* DMA tag from autoconfiguration */
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t pci_result_dmap;	/* result queue map */
	bus_dmamap_t *pci_xfer_dmap;	/* per-command data maps */
	void * pci_ih;			/* established interrupt handle */
	int16_t pci_poff[_NREG_BLKS];	/* per-block register offsets */
};

/* autoconfiguration glue */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
197
198 static int
199 isp_pci_probe(parent, match, aux)
200 struct device *parent;
201 struct cfdata *match;
202 void *aux;
203 {
204 struct pci_attach_args *pa = aux;
205 switch (pa->pa_id) {
206 #ifndef ISP_DISABLE_1020_SUPPORT
207 case PCI_QLOGIC_ISP:
208 return (1);
209 #endif
210 #ifndef ISP_DISABLE_1080_SUPPORT
211 case PCI_QLOGIC_ISP1080:
212 case PCI_QLOGIC_ISP1240:
213 return (1);
214 #endif
215 #ifndef ISP_DISABLE_2100_SUPPORT
216 case PCI_QLOGIC_ISP2100:
217 return (1);
218 #endif
219 #ifndef ISP_DISABLE_2200_SUPPORT
220 case PCI_QLOGIC_ISP2200:
221 return (1);
222 #endif
223 default:
224 return (0);
225 }
226 }
227
228
/*
 * Attach a QLogic ISP PCI adapter: map its registers, select the
 * chip-specific vector and register block offsets, allocate parameter
 * storage, sanitize PCI configuration space, hook up the interrupt,
 * and hand the device to the machine-independent ISP core.
 */
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef DEBUG
	static char oneshot = 1;	/* print driver banner only once */
#endif
	static char *nomem = "%s: no mem for sdparam table\n";
	u_int32_t data, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	long foo;
	ISP_LOCKVAL_DECL;

	/* Try both BARs; prefer memory space, fall back to I/O space. */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	/*
	 * Record bus accessors and the default register block offsets;
	 * the chip-specific branches below override some offsets.
	 */
	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

#ifndef ISP_DISABLE_1020_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		/* the 1080 family keeps its DMA registers elsewhere */
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pa->pa_id == PCI_QLOGIC_ISP1240) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_12X0;
		/* two sdparam sets — presumably one per channel; confirm */
		isp->isp_param =
		    malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, 2 * sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
		if ((data & 0xff) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef ISP_DISABLE_2200_SUPPORT
	if (pa->pa_id == PCI_QLOGIC_ISP2200) {
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf(nomem, isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		/* NOTE(review): this read's result is never used here */
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	}
#endif

	/*
	 * Make sure that command register set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these- but I think it's important that they get
	 * enabled......
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer, cache line size,
	 * and ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	/* disable the expansion ROM by clearing its enable bit */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
	    isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	if (IS_FC(isp)) {
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 * Fold characters of the kernel version string, starting
		 * at an offset derived from the softc address, into a seed.
		 */
		foo = (long) isp;
		foo >>= 4;
		foo &= 0x7;
		while (version[foo])
			isp->isp_osinfo.seed += (int) version[foo++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (isp->isp_osinfo._dev.dv_unit + 1);
	}

	/*
	 * NOTE(review): from here on, the failure paths free isp_param
	 * but never disestablish the interrupt or destroy DMA maps
	 * already created — a failed attach leaks those resources.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 * (pci_xfer_dmap itself is allocated by isp_pci_mbxdma, which
	 * runs via the mdvec during the reset/init sequence above.)
	 */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}
470
471 static u_int16_t
472 isp_pci_rd_reg(isp, regoff)
473 struct ispsoftc *isp;
474 int regoff;
475 {
476 u_int16_t rv;
477 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
478 int offset, oldconf = 0;
479
480 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
481 /*
482 * We will assume that someone has paused the RISC processor.
483 */
484 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
485 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
486 }
487 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
488 offset += (regoff & 0xff);
489 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
490 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
491 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
492 }
493 return (rv);
494 }
495
496 static void
497 isp_pci_wr_reg(isp, regoff, val)
498 struct ispsoftc *isp;
499 int regoff;
500 u_int16_t val;
501 {
502 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
503 int offset, oldconf = 0;
504
505 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
506 /*
507 * We will assume that someone has paused the RISC processor.
508 */
509 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
510 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
511 }
512 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
513 offset += (regoff & 0xff);
514 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
515 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
516 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
517 }
518 }
519
520 #ifndef ISP_DISABLE_1080_SUPPORT
521 static u_int16_t
522 isp_pci_rd_reg_1080(isp, regoff)
523 struct ispsoftc *isp;
524 int regoff;
525 {
526 u_int16_t rv;
527 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
528 int offset, oc = 0;
529
530 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
531 /*
532 * We will assume that someone has paused the RISC processor.
533 */
534 oc = isp_pci_rd_reg(isp, BIU_CONF1);
535 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
536 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
537 oc = isp_pci_rd_reg(isp, BIU_CONF1);
538 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
539 }
540 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
541 offset += (regoff & 0xff);
542 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
543 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
544 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
545 isp_pci_wr_reg(isp, BIU_CONF1, oc);
546 }
547 return (rv);
548 }
549
550 static void
551 isp_pci_wr_reg_1080(isp, regoff, val)
552 struct ispsoftc *isp;
553 int regoff;
554 u_int16_t val;
555 {
556 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
557 int offset, oc = 0;
558
559 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
560 /*
561 * We will assume that someone has paused the RISC processor.
562 */
563 oc = isp_pci_rd_reg(isp, BIU_CONF1);
564 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
565 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
566 oc = isp_pci_rd_reg(isp, BIU_CONF1);
567 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
568 }
569 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
570 offset += (regoff & 0xff);
571 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
572 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
573 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
574 isp_pci_wr_reg(isp, BIU_CONF1, oc);
575 }
576 }
577 #endif
578
/*
 * One-time allocation and DMA mapping of the request and result queues,
 * the per-command bookkeeping arrays, and (for Fibre Channel chips) the
 * scratch area.  Returns 0 on success, 1 on failure.
 *
 * NOTE(review): a mid-sequence failure leaks everything allocated by
 * earlier steps (no bus_dmamem_free/unmap or dmamap-destroy unwinding).
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	if (isp->isp_rquest_dma)	/* been here before? */
		return (0);

	/*
	 * Array of active-command pointers indexed by request handle.
	 * NOTE(review): len uses sizeof (ISP_SCSI_XFER_T) although an
	 * array of pointers is allocated — over-allocates, but is safe.
	 * Also, M_WAITOK malloc does not fail, so these NULL checks are
	 * belt-and-braces.
	 */
	len = isp->isp_maxcmds * sizeof (ISP_SCSI_XFER_T);
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	/* per-command data DMA maps; created later, during attach */
	len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
	pci->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->pci_xfer_dmap == NULL) {
		printf("%s: cannot malloc xflist array\n", isp->isp_name);
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_rquest_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_rquest_dmap, (caddr_t)isp->isp_rquest, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);

	/* bus address the chip uses to reach the request queue */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_result_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_result_dmap, (caddr_t)isp->isp_result, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI chips need no scratch area; done */
	if (IS_SCSI(isp)) {
		return (0);
	}

	/*
	 * Fibre Channel: allocate and map the scratch area too.
	 */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) || bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	    (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &pci->pci_scratch_dmap) || bus_dmamap_load(pci->pci_dmat,
	    pci->pci_scratch_dmap, (caddr_t)fcp->isp_scratch, len, NULL,
	    BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
655
/*
 * Load a command's data buffer into its DMA map and describe the
 * resulting segments in the request queue entry, spilling overflow
 * segments into continuation entries appended behind it.  *iptrp is
 * the queue in-pointer (advanced for each continuation entry) and
 * optr the out-pointer, used to detect queue overflow.
 *
 * Returns CMD_QUEUED on success; on failure sets HBA_BOTCH on the
 * xfer and returns CMD_COMPLETE.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	/* no data to move: report a single segment and skip DMA load */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
	if (xs->xs_control & XS_CTL_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/* FC requests use the type-2 layout with its own segment limit */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->xs_control & XS_CTL_NOSLEEP ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	/* fill as many segments as fit in the request entry itself */
	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	/* spill remaining segments into continuation entries */
	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			/* in-pointer would catch the out-pointer: full */
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	/* sync the data buffer for device access before queueing */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:
	/* byte-swizzle the request for the chip and flush the queue */
	ISP_SWIZZLE_REQUEST(isp, rq);
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
753
754 static int
755 isp_pci_intr(arg)
756 void *arg;
757 {
758 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
759 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
760 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
761 return (isp_intr(arg));
762 }
763
764 static void
765 isp_pci_dmateardown(isp, xs, handle)
766 struct ispsoftc *isp;
767 struct scsipi_xfer *xs;
768 u_int32_t handle;
769 {
770 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
771 bus_dmamap_t dmap;
772 assert(handle != 0 && handle <= isp->isp_maxcmds);
773 dmap = pci->pci_xfer_dmap[handle-1];
774 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
775 xs->xs_control & XS_CTL_DATA_IN ?
776 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
777 bus_dmamap_unload(pci->pci_dmat, dmap);
778 }
779
/*
 * Chip-specific post-reset fixup: write the "disable BIOS" command to
 * the host command/control register.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
787
788 static void
789 isp_pci_dumpregs(isp)
790 struct ispsoftc *isp;
791 {
792 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
793 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
794 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
795 }
796