/* $NetBSD: isp_pci.c,v 1.40 1999/05/12 18:59:23 mjacob Exp $ */
/* release_5_11_99 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998 by Matthew Jacob
 * NASA/Ames Research Center
 * All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
37
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/microcode/isp/asm_pci.h>
40
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcidevs.h>
44
45 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
46 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
47 #ifndef ISP_DISABLE_1080_SUPPORT
48 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
49 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
50 #endif
51 static int isp_pci_mbxdma __P((struct ispsoftc *));
52 static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
53 ispreq_t *, u_int8_t *, u_int8_t));
54 static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
55 u_int32_t));
56 static void isp_pci_reset1 __P((struct ispsoftc *));
57 static void isp_pci_dumpregs __P((struct ispsoftc *));
58 static int isp_pci_intr __P((void *));
59
60 #ifndef ISP_DISABLE_1020_SUPPORT
61 static struct ispmdvec mdvec = {
62 isp_pci_rd_reg,
63 isp_pci_wr_reg,
64 isp_pci_mbxdma,
65 isp_pci_dmasetup,
66 isp_pci_dmateardown,
67 NULL,
68 isp_pci_reset1,
69 isp_pci_dumpregs,
70 ISP_RISC_CODE,
71 ISP_CODE_LENGTH,
72 ISP_CODE_ORG,
73 ISP_CODE_VERSION,
74 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
75 0
76 };
77 #endif
78
79 #ifndef ISP_DISABLE_1080_SUPPORT
80 static struct ispmdvec mdvec_1080 = {
81 isp_pci_rd_reg_1080,
82 isp_pci_wr_reg_1080,
83 isp_pci_mbxdma,
84 isp_pci_dmasetup,
85 isp_pci_dmateardown,
86 NULL,
87 isp_pci_reset1,
88 isp_pci_dumpregs,
89 ISP1080_RISC_CODE,
90 ISP1080_CODE_LENGTH,
91 ISP1080_CODE_ORG,
92 ISP1080_CODE_VERSION,
93 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
94 0
95 };
96 #endif
97
98 #ifndef ISP_DISABLE_2100_SUPPORT
99 static struct ispmdvec mdvec_2100 = {
100 isp_pci_rd_reg,
101 isp_pci_wr_reg,
102 isp_pci_mbxdma,
103 isp_pci_dmasetup,
104 isp_pci_dmateardown,
105 NULL,
106 isp_pci_reset1,
107 isp_pci_dumpregs,
108 ISP2100_RISC_CODE,
109 ISP2100_CODE_LENGTH,
110 ISP2100_CODE_ORG,
111 ISP2100_CODE_VERSION,
112 0, /* Irrelevant to the 2100 */
113 0
114 };
115 #endif
116
117 #ifndef PCI_VENDOR_QLOGIC
118 #define PCI_VENDOR_QLOGIC 0x1077
119 #endif
120
121 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
122 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
123 #endif
124
125 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
126 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
127 #endif
128
129 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
130 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
131 #endif
132
133 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
134 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
135 #endif
136
137 #define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
138
139 #define PCI_QLOGIC_ISP1080 \
140 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
141
142 #define PCI_QLOGIC_ISP1240 \
143 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
144
145 #define PCI_QLOGIC_ISP2100 \
146 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
147
148 #define IO_MAP_REG 0x10
149 #define MEM_MAP_REG 0x14
150 #define PCIR_ROMADDR 0x30
151
152 #define PCI_DFLT_LTNCY 0x40
153 #define PCI_DFLT_LNSZ 0x10
154
155
156 static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
157 static void isp_pci_attach __P((struct device *, struct device *, void *));
158
159 struct isp_pcisoftc {
160 struct ispsoftc pci_isp;
161 pci_chipset_tag_t pci_pc;
162 pcitag_t pci_tag;
163 bus_space_tag_t pci_st;
164 bus_space_handle_t pci_sh;
165 bus_dma_tag_t pci_dmat;
166 bus_dmamap_t pci_scratch_dmap; /* for fcp only */
167 bus_dmamap_t pci_rquest_dmap;
168 bus_dmamap_t pci_result_dmap;
169 bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST];
170 void * pci_ih;
171 int16_t pci_poff[_NREG_BLKS];
172 };
173
174 struct cfattach isp_pci_ca = {
175 sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
176 };
177
178 static int
179 isp_pci_probe(parent, match, aux)
180 struct device *parent;
181 struct cfdata *match;
182 void *aux;
183 {
184 struct pci_attach_args *pa = aux;
185 switch (pa->pa_id) {
186 #ifndef ISP_DISABLE_1020_SUPPORT
187 case PCI_QLOGIC_ISP:
188 return (1);
189 #endif
190 #ifndef ISP_DISABLE_1080_SUPPORT
191 case PCI_QLOGIC_ISP1080:
192 case PCI_QLOGIC_ISP1240:
193 return (1);
194 #endif
195 #ifndef ISP_DISABLE_2100_SUPPORT
196 case PCI_QLOGIC_ISP2100:
197 return (1);
198 #endif
199 default:
200 return (0);
201 }
202 }
203
204
205 static void
206 isp_pci_attach(parent, self, aux)
207 struct device *parent, *self;
208 void *aux;
209 {
210 #ifdef DEBUG
211 static char oneshot = 1;
212 #endif
213 static char *nomem = "%s: no mem for sdparam table\n";
214 u_int32_t data, linesz = PCI_DFLT_LNSZ;
215 struct pci_attach_args *pa = aux;
216 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
217 struct ispsoftc *isp = &pcs->pci_isp;
218 bus_space_tag_t st, iot, memt;
219 bus_space_handle_t sh, ioh, memh;
220 pci_intr_handle_t ih;
221 const char *intrstr;
222 int ioh_valid, memh_valid, i;
223 ISP_LOCKVAL_DECL;
224
225 ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
226 PCI_MAPREG_TYPE_IO, 0,
227 &iot, &ioh, NULL, NULL) == 0);
228 memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
229 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
230 &memt, &memh, NULL, NULL) == 0);
231
232 if (memh_valid) {
233 st = memt;
234 sh = memh;
235 } else if (ioh_valid) {
236 st = iot;
237 sh = ioh;
238 } else {
239 printf(": unable to map device registers\n");
240 return;
241 }
242 printf("\n");
243
244 pcs->pci_st = st;
245 pcs->pci_sh = sh;
246 pcs->pci_dmat = pa->pa_dmat;
247 pcs->pci_pc = pa->pa_pc;
248 pcs->pci_tag = pa->pa_tag;
249 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
250 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
251 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
252 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
253 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
254
255 #ifndef ISP_DISABLE_1020_SUPPORT
256 if (pa->pa_id == PCI_QLOGIC_ISP) {
257 isp->isp_mdvec = &mdvec;
258 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
259 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
260 if (isp->isp_param == NULL) {
261 printf(nomem, isp->isp_name);
262 return;
263 }
264 bzero(isp->isp_param, sizeof (sdparam));
265 }
266 #endif
267 #ifndef ISP_DISABLE_1080_SUPPORT
268 if (pa->pa_id == PCI_QLOGIC_ISP1080) {
269 isp->isp_mdvec = &mdvec_1080;
270 isp->isp_type = ISP_HA_SCSI_1080;
271 isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
272 if (isp->isp_param == NULL) {
273 printf(nomem, isp->isp_name);
274 return;
275 }
276 bzero(isp->isp_param, sizeof (sdparam));
277 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
278 ISP1080_DMA_REGS_OFF;
279 }
280 if (pa->pa_id == PCI_QLOGIC_ISP1240) {
281 isp->isp_mdvec = &mdvec_1080;
282 isp->isp_type = ISP_HA_SCSI_12X0;
283 isp->isp_param =
284 malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
285 if (isp->isp_param == NULL) {
286 printf(nomem, isp->isp_name);
287 return;
288 }
289 bzero(isp->isp_param, 2 * sizeof (sdparam));
290 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
291 ISP1080_DMA_REGS_OFF;
292 }
293 #endif
294 #ifndef ISP_DISABLE_2100_SUPPORT
295 if (pa->pa_id == PCI_QLOGIC_ISP2100) {
296 isp->isp_mdvec = &mdvec_2100;
297 isp->isp_type = ISP_HA_FC_2100;
298 isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
299 if (isp->isp_param == NULL) {
300 printf(nomem, isp->isp_name);
301 return;
302 }
303 bzero(isp->isp_param, sizeof (fcparam));
304 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
305 PCI_MBOX_REGS2100_OFF;
306 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
307 if ((data & 0xff) < 3) {
308 /*
309 * XXX: Need to get the actual revision
310 * XXX: number of the 2100 FB. At any rate,
311 * XXX: lower cache line size for early revision
312 * XXX; boards.
313 */
314 linesz = 1;
315 }
316 }
317 #endif
318
319 /*
320 * Make sure that command register set sanely.
321 */
322 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
323 data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
324
325 /*
326 * Not so sure about these- but I think it's important that they get
327 * enabled......
328 */
329 data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
330 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
331
332 /*
333 * Make sure that the latency timer, cache line size,
334 * and ROM is disabled.
335 */
336 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
337 data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
338 data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
339 data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
340 data |= (linesz << PCI_CACHELINE_SHIFT);
341 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
342
343 data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
344 data &= ~1;
345 pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
346
347 #ifdef DEBUG
348 if (oneshot) {
349 oneshot = 0;
350 printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
351 "%d.%d Core Version %d.%d\n",
352 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
353 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
354 }
355 #endif
356 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
357 pa->pa_intrline, &ih)) {
358 printf("%s: couldn't map interrupt\n", isp->isp_name);
359 free(isp->isp_param, M_DEVBUF);
360 return;
361 }
362 intrstr = pci_intr_string(pa->pa_pc, ih);
363 if (intrstr == NULL)
364 intrstr = "<I dunno>";
365 pcs->pci_ih =
366 pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
367 if (pcs->pci_ih == NULL) {
368 printf("%s: couldn't establish interrupt at %s\n",
369 isp->isp_name, intrstr);
370 free(isp->isp_param, M_DEVBUF);
371 return;
372 }
373 printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
374
375 ISP_LOCK(isp);
376 isp_reset(isp);
377 if (isp->isp_state != ISP_RESETSTATE) {
378 ISP_UNLOCK(isp);
379 free(isp->isp_param, M_DEVBUF);
380 return;
381 }
382 isp_init(isp);
383 if (isp->isp_state != ISP_INITSTATE) {
384 isp_uninit(isp);
385 ISP_UNLOCK(isp);
386 free(isp->isp_param, M_DEVBUF);
387 return;
388 }
389
390
391
392 /*
393 * Create the DMA maps for the data transfers.
394 */
395 for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
396 if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
397 (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
398 &pcs->pci_xfer_dmap[i])) {
399 printf("%s: can't create dma maps\n",
400 isp->isp_name);
401 isp_uninit(isp);
402 ISP_UNLOCK(isp);
403 return;
404 }
405 }
406 /*
407 * Do Generic attach now.
408 */
409 isp_attach(isp);
410 if (isp->isp_state != ISP_RUNSTATE) {
411 isp_uninit(isp);
412 free(isp->isp_param, M_DEVBUF);
413 }
414 ISP_UNLOCK(isp);
415 }
416
417 static u_int16_t
418 isp_pci_rd_reg(isp, regoff)
419 struct ispsoftc *isp;
420 int regoff;
421 {
422 u_int16_t rv;
423 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
424 int offset, oldconf = 0;
425
426 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
427 /*
428 * We will assume that someone has paused the RISC processor.
429 */
430 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
431 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
432 }
433 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
434 offset += (regoff & 0xff);
435 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
436 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
437 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
438 }
439 return (rv);
440 }
441
442 static void
443 isp_pci_wr_reg(isp, regoff, val)
444 struct ispsoftc *isp;
445 int regoff;
446 u_int16_t val;
447 {
448 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
449 int offset, oldconf = 0;
450
451 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
452 /*
453 * We will assume that someone has paused the RISC processor.
454 */
455 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
456 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
457 }
458 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
459 offset += (regoff & 0xff);
460 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
461 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
462 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
463 }
464 }
465
466 #ifndef ISP_DISABLE_1080_SUPPORT
467 static u_int16_t
468 isp_pci_rd_reg_1080(isp, regoff)
469 struct ispsoftc *isp;
470 int regoff;
471 {
472 u_int16_t rv;
473 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
474 int offset, oc = 0;
475
476 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
477 /*
478 * We will assume that someone has paused the RISC processor.
479 */
480 oc = isp_pci_rd_reg(isp, BIU_CONF1);
481 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
482 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
483 oc = isp_pci_rd_reg(isp, BIU_CONF1);
484 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
485 }
486 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
487 offset += (regoff & 0xff);
488 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
489 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
490 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
491 isp_pci_wr_reg(isp, BIU_CONF1, oc);
492 }
493 return (rv);
494 }
495
496 static void
497 isp_pci_wr_reg_1080(isp, regoff, val)
498 struct ispsoftc *isp;
499 int regoff;
500 u_int16_t val;
501 {
502 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
503 int offset, oc = 0;
504
505 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
506 /*
507 * We will assume that someone has paused the RISC processor.
508 */
509 oc = isp_pci_rd_reg(isp, BIU_CONF1);
510 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
511 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
512 oc = isp_pci_rd_reg(isp, BIU_CONF1);
513 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
514 }
515 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
516 offset += (regoff & 0xff);
517 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
518 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
519 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
520 isp_pci_wr_reg(isp, BIU_CONF1, oc);
521 }
522 }
523 #endif
524
525 static int
526 isp_pci_mbxdma(isp)
527 struct ispsoftc *isp;
528 {
529 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
530 bus_dma_segment_t seg;
531 bus_size_t len;
532 fcparam *fcp;
533 int rseg;
534
535 /*
536 * Allocate and map the request queue.
537 */
538 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
539 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
540 BUS_DMA_NOWAIT) ||
541 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
542 (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
543 return (1);
544 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
545 &pci->pci_rquest_dmap) ||
546 bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
547 (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
548 return (1);
549
550 isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
551
552 /*
553 * Allocate and map the result queue.
554 */
555 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
556 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
557 BUS_DMA_NOWAIT) ||
558 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
559 (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
560 return (1);
561 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
562 &pci->pci_result_dmap) ||
563 bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
564 (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
565 return (1);
566 isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
567
568 if (isp->isp_type & ISP_HA_SCSI) {
569 return (0);
570 }
571
572 fcp = isp->isp_param;
573 len = ISP2100_SCRLEN;
574 if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
575 BUS_DMA_NOWAIT) ||
576 bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
577 (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
578 return (1);
579 if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
580 &pci->pci_scratch_dmap) ||
581 bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
582 (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
583 return (1);
584 fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
585 return (0);
586 }
587
588 static int
589 isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
590 struct ispsoftc *isp;
591 struct scsipi_xfer *xs;
592 ispreq_t *rq;
593 u_int8_t *iptrp;
594 u_int8_t optr;
595 {
596 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
597 bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
598 ispcontreq_t *crq;
599 int segcnt, seg, error, ovseg, seglim, drq;
600
601 if (xs->datalen == 0) {
602 rq->req_seg_count = 1;
603 goto mbxsync;
604 }
605
606 if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
607 panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
608 isp->isp_name, rq->req_handle);
609 /* NOTREACHED */
610 }
611
612 if (xs->flags & SCSI_DATA_IN) {
613 drq = REQFLAG_DATA_IN;
614 } else {
615 drq = REQFLAG_DATA_OUT;
616 }
617
618 if (isp->isp_type & ISP_HA_FC) {
619 seglim = ISP_RQDSEG_T2;
620 ((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
621 ((ispreqt2_t *)rq)->req_flags |= drq;
622 } else {
623 seglim = ISP_RQDSEG;
624 rq->req_flags |= drq;
625 }
626 error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
627 NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
628 if (error) {
629 XS_SETERR(xs, HBA_BOTCH);
630 return (CMD_COMPLETE);
631 }
632
633 segcnt = dmap->dm_nsegs;
634
635 for (seg = 0, rq->req_seg_count = 0;
636 seg < segcnt && rq->req_seg_count < seglim;
637 seg++, rq->req_seg_count++) {
638 if (isp->isp_type & ISP_HA_FC) {
639 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
640 rq2->req_dataseg[rq2->req_seg_count].ds_count =
641 dmap->dm_segs[seg].ds_len;
642 rq2->req_dataseg[rq2->req_seg_count].ds_base =
643 dmap->dm_segs[seg].ds_addr;
644 } else {
645 rq->req_dataseg[rq->req_seg_count].ds_count =
646 dmap->dm_segs[seg].ds_len;
647 rq->req_dataseg[rq->req_seg_count].ds_base =
648 dmap->dm_segs[seg].ds_addr;
649 }
650 }
651
652 if (seg == segcnt)
653 goto dmasync;
654
655 do {
656 crq = (ispcontreq_t *)
657 ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
658 *iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
659 if (*iptrp == optr) {
660 printf("%s: Request Queue Overflow++\n",
661 isp->isp_name);
662 bus_dmamap_unload(pci->pci_dmat, dmap);
663 XS_SETERR(xs, HBA_BOTCH);
664 return (CMD_COMPLETE);
665 }
666 rq->req_header.rqs_entry_count++;
667 bzero((void *)crq, sizeof (*crq));
668 crq->req_header.rqs_entry_count = 1;
669 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
670
671 for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
672 rq->req_seg_count++, seg++, ovseg++) {
673 crq->req_dataseg[ovseg].ds_count =
674 dmap->dm_segs[seg].ds_len;
675 crq->req_dataseg[ovseg].ds_base =
676 dmap->dm_segs[seg].ds_addr;
677 }
678 } while (seg < segcnt);
679
680 dmasync:
681 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
682 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
683 BUS_DMASYNC_PREWRITE);
684
685 mbxsync:
686
687 bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
688 pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
689 return (CMD_QUEUED);
690 }
691
692 static int
693 isp_pci_intr(arg)
694 void *arg;
695 {
696 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
697 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
698 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
699 return (isp_intr(arg));
700 }
701
702 static void
703 isp_pci_dmateardown(isp, xs, handle)
704 struct ispsoftc *isp;
705 struct scsipi_xfer *xs;
706 u_int32_t handle;
707 {
708 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
709 bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];
710
711 bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
712 xs->flags & SCSI_DATA_IN ?
713 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
714 bus_dmamap_unload(pci->pci_dmat, dmap);
715 }
716
717 static void
718 isp_pci_reset1(isp)
719 struct ispsoftc *isp;
720 {
721 /* Make sure the BIOS is disabled */
722 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
723 }
724
725 static void
726 isp_pci_dumpregs(isp)
727 struct ispsoftc *isp;
728 {
729 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
730 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
731 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
732 }
733