/* $NetBSD: isp_pci.c,v 1.39 1999/04/04 01:14:58 mjacob Exp $ */
/* release_4_3_99 */
3 /*
4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5 *
6 *---------------------------------------
7 * Copyright (c) 1997, 1998 by Matthew Jacob
8 * NASA/Ames Research Center
9 * All rights reserved.
10 *---------------------------------------
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice immediately at the beginning of the file, without modification,
17 * this list of conditions, and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 */
37
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/microcode/isp/asm_pci.h>
40
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <dev/pci/pcidevs.h>
44
/*
 * Forward declarations for the bus-specific entry points that are hung
 * off the ispmdvec tables below and handed to the generic isp core.
 * __P() keeps the prototypes palatable to pre-ANSI compilers.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
/* ISP1080 variants: extra BIU CONF1 gating for the DMA register bank */
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));
static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));
static int isp_pci_intr __P((void *));
59
#ifndef ISP_DISABLE_1020_SUPPORT
/*
 * Machine-dependent vector for ISP1020/1040-class SCSI adapters:
 * register accessors, DMA helpers, and the firmware image (from
 * asm_pci.h) that the generic code downloads to the board.
 * Positional initializer -- field order must match struct ispmdvec.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* slot unused for this board type */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,		/* firmware image, length, origin, version */
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU CONF1 settings */
	0
};
#endif
78
#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * Machine-dependent vector for the ISP1080.  Same DMA helpers as the
 * 1020, but the 1080-specific register accessors are used because the
 * DMA register bank is gated behind a BIU CONF1 select bit.
 */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* slot unused for this board type */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP1080_RISC_CODE,	/* 1080 firmware image */
	ISP1080_CODE_LENGTH,
	ISP1080_CODE_ORG,
	ISP1080_CODE_VERSION,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,	/* BIU CONF1 settings */
	0
};
#endif
97
#ifndef ISP_DISABLE_2100_SUPPORT
/*
 * Machine-dependent vector for the ISP2100 Fibre Channel adapter.
 * Uses the plain register accessors; the CONF1 fifo/burst word is
 * irrelevant on this chip and left zero.
 */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* slot unused for this board type */
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,	/* 2100 firmware image */
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	0,			/* Irrelevant to the 2100 */
	0
};
#endif
116
/*
 * PCI vendor/product IDs for the supported Qlogic boards.  pcidevs.h
 * normally supplies these; the fallbacks keep this file building
 * against older trees that lack the entries.
 */
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

/* (product << 16) | vendor -- the layout compared against pa->pa_id below */
#define PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

/* PCI config-space offsets: BAR0 (I/O), BAR1 (memory), expansion ROM */
#define IO_MAP_REG	0x10
#define MEM_MAP_REG	0x14
#define PCIR_ROMADDR	0x30

/* Defaults programmed into the latency timer / cache line size fields */
#define PCI_DFLT_LTNCY	0x40
#define PCI_DFLT_LNSZ	0x10
154
155
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * Per-instance softc.  The generic ispsoftc must be the first member:
 * the code below casts freely between struct ispsoftc * and
 * struct isp_pcisoftc *.
 */
struct isp_pcisoftc {
	struct ispsoftc pci_isp;	/* generic isp state; MUST be first */
	pci_chipset_tag_t pci_pc;	/* chipset tag for config access */
	pcitag_t pci_tag;		/* our config-space tag */
	bus_space_tag_t pci_st;		/* register window tag */
	bus_space_handle_t pci_sh;	/* register window handle */
	bus_dma_tag_t pci_dmat;
	bus_dmamap_t pci_scratch_dmap;	/* for fcp only */
	bus_dmamap_t pci_rquest_dmap;	/* request queue */
	bus_dmamap_t pci_result_dmap;	/* result queue */
	bus_dmamap_t pci_xfer_dmap[MAXISPREQUEST]; /* one map per command */
	void * pci_ih;			/* established interrupt handle */
	int16_t pci_poff[_NREG_BLKS];	/* per-block register offsets */
};

/* autoconfiguration glue */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
177
178 static int
179 isp_pci_probe(parent, match, aux)
180 struct device *parent;
181 struct cfdata *match;
182 void *aux;
183 {
184 struct pci_attach_args *pa = aux;
185 switch (pa->pa_id) {
186 #ifndef ISP_DISABLE_1020_SUPPORT
187 case PCI_QLOGIC_ISP:
188 return (1);
189 #endif
190 #ifndef ISP_DISABLE_1080_SUPPORT
191 case PCI_QLOGIC_ISP1080:
192 #if 0
193 case PCI_QLOGIC_ISP1240: /* 1240 not ready yet */
194 return (1);
195 #endif
196 #endif
197 #ifndef ISP_DISABLE_2100_SUPPORT
198 case PCI_QLOGIC_ISP2100:
199 return (1);
200 #endif
201 default:
202 return (0);
203 }
204 }
205
206
/*
 * Attach: map the register window, fix up PCI config state, hook the
 * interrupt, then reset/init the chip and hand off to the generic
 * isp_attach().  Order matters throughout -- left byte-identical,
 * comments only.
 */
static void
isp_pci_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#ifdef DEBUG
	static char oneshot = 1;	/* print the banner only once */
#endif
	u_int32_t data, linesz = PCI_DFLT_LNSZ;
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	struct ispsoftc *isp = &pcs->pci_isp;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid, i;
	ISP_LOCKVAL_DECL;

	/*
	 * Try both the I/O (BAR0) and memory (BAR1) windows; prefer
	 * memory space if it mapped.
	 * NOTE(review): if both map, the unused window is never
	 * unmapped, and nothing is unmapped on later failure paths.
	 */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	/*
	 * Stash bus handles and the default register-block offsets;
	 * chip-specific cases below override some of these.
	 */
	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_pc = pa->pa_pc;
	pcs->pci_tag = pa->pa_tag;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

#ifndef ISP_DISABLE_1020_SUPPORT
	/* ISP1020/1040: generic SCSI, default register layout. */
	if (pa->pa_id == PCI_QLOGIC_ISP) {
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate sdparam table\n",
			       isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
	}
#endif
#ifndef ISP_DISABLE_1080_SUPPORT
	/* ISP1080: different DMA register block offset. */
	if (pa->pa_id == PCI_QLOGIC_ISP1080) {
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate sdparam table\n",
			       isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (sdparam));
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef ISP_DISABLE_2100_SUPPORT
	/* ISP2100: Fibre Channel; fcparam instead of sdparam. */
	if (pa->pa_id == PCI_QLOGIC_ISP2100) {
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
		if (isp->isp_param == NULL) {
			printf("%s: couldn't allocate fcparam table\n",
			       isp->isp_name);
			return;
		}
		bzero(isp->isp_param, sizeof (fcparam));
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
		if ((data & 0xff) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX; boards.
			 */
			linesz = 1;
		}
	}
#endif

	/*
	 * Make sure that command register set sanely.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;

	/*
	 * Not so sure about these- but I think it's important that they get
	 * enabled......
	 */
	data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);

	/*
	 * Make sure that the latency timer, cache line size,
	 * and ROM is disabled.
	 */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
	data |= (linesz << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);

	/* Clear the expansion ROM enable bit. */
	data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
	data &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);

#ifdef DEBUG
	if (oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (pci) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	/* Map and establish the interrupt at IPL_BIO. */
	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih =
	    pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_pci_intr, isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
		    isp->isp_name, intrstr);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	printf("%s: interrupting at %s\n", isp->isp_name, intrstr);

	/* Reset and initialize the chip under the driver lock. */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		free(isp->isp_param, M_DEVBUF);
		return;
	}

	/*
	 * Create the DMA maps for the data transfers.
	 */
	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			/*
			 * NOTE(review): maps created on earlier loop
			 * iterations are not destroyed here.
			 */
			printf("%s: can't create dma maps\n",
			    isp->isp_name);
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			return;
		}
	}
	/*
	 * Do Generic attach now.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		free(isp->isp_param, M_DEVBUF);
	}
	ISP_UNLOCK(isp);
}
407
408 static u_int16_t
409 isp_pci_rd_reg(isp, regoff)
410 struct ispsoftc *isp;
411 int regoff;
412 {
413 u_int16_t rv;
414 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
415 int offset, oldconf = 0;
416
417 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
418 /*
419 * We will assume that someone has paused the RISC processor.
420 */
421 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
422 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
423 }
424 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
425 offset += (regoff & 0xff);
426 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
427 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
428 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
429 }
430 return (rv);
431 }
432
433 static void
434 isp_pci_wr_reg(isp, regoff, val)
435 struct ispsoftc *isp;
436 int regoff;
437 u_int16_t val;
438 {
439 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
440 int offset, oldconf = 0;
441
442 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
443 /*
444 * We will assume that someone has paused the RISC processor.
445 */
446 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
447 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
448 }
449 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
450 offset += (regoff & 0xff);
451 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
452 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
453 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
454 }
455 }
456
457 #ifndef ISP_DISABLE_1080_SUPPORT
458 static u_int16_t
459 isp_pci_rd_reg_1080(isp, regoff)
460 struct ispsoftc *isp;
461 int regoff;
462 {
463 u_int16_t rv;
464 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
465 int offset, oc = 0;
466
467 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
468 /*
469 * We will assume that someone has paused the RISC processor.
470 */
471 oc = isp_pci_rd_reg(isp, BIU_CONF1);
472 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
473 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
474 oc = isp_pci_rd_reg(isp, BIU_CONF1);
475 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
476 }
477 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
478 offset += (regoff & 0xff);
479 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
480 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
481 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
482 isp_pci_wr_reg(isp, BIU_CONF1, oc);
483 }
484 return (rv);
485 }
486
487 static void
488 isp_pci_wr_reg_1080(isp, regoff, val)
489 struct ispsoftc *isp;
490 int regoff;
491 u_int16_t val;
492 {
493 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
494 int offset, oc = 0;
495
496 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
497 /*
498 * We will assume that someone has paused the RISC processor.
499 */
500 oc = isp_pci_rd_reg(isp, BIU_CONF1);
501 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
502 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
503 oc = isp_pci_rd_reg(isp, BIU_CONF1);
504 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
505 }
506 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
507 offset += (regoff & 0xff);
508 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
509 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
510 ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
511 isp_pci_wr_reg(isp, BIU_CONF1, oc);
512 }
513 }
514 #endif
515
/*
 * Allocate, map, and load the DMA areas shared with the chip: the
 * request queue, the result queue, and (FC boards only) the scratch
 * area.  Returns 0 on success, 1 on failure.
 *
 * NOTE(review): on a mid-sequence failure, memory/maps already
 * allocated by earlier steps are not released before returning 1;
 * the caller treats this as a fatal attach error, so they leak.
 */
static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dma_segment_t seg;
	bus_size_t len;
	fcparam *fcp;
	int rseg;

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_rquest_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
		return (1);

	/* Bus address the chip will be told to fetch requests from. */
	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_result_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;

	/* SCSI boards need no scratch area; done. */
	if (isp->isp_type & ISP_HA_SCSI) {
		return (0);
	}

	/* Fibre Channel: allocate the fcparam scratch area as well. */
	fcp = isp->isp_param;
	len = ISP2100_SCRLEN;
	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
	      BUS_DMA_NOWAIT) ||
	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
	      (caddr_t *)&fcp->isp_scratch, BUS_DMA_NOWAIT|BUS_DMA_COHERENT))
		return (1);
	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
	      &pci->pci_scratch_dmap) ||
	    bus_dmamap_load(pci->pci_dmat, pci->pci_scratch_dmap,
	      (caddr_t)fcp->isp_scratch, len, NULL, BUS_DMA_NOWAIT))
		return (1);
	fcp->isp_scdma = pci->pci_scratch_dmap->dm_segs[0].ds_addr;
	return (0);
}
578
/*
 * Map a command's data transfer for DMA and fill the scatter/gather
 * segments into the request queue entry, spilling into continuation
 * entries when the command needs more segments than one entry holds.
 * Returns CMD_QUEUED on success or CMD_COMPLETE (with HBA_BOTCH set)
 * on failure.  iptrp/optr are the request queue in/out pointers; the
 * in pointer is advanced for each continuation entry consumed.
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	/* per-command map; req_handle is 1-based, the array 0-based */
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle - 1];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg, seglim, drq;

	/* No data phase: nothing to map, but the chip wants count 1. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
		    isp->isp_name, rq->req_handle);
		/* NOTREACHED */
	}

	/* Direction flag for the request entry. */
	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	/*
	 * FC requests use the type-2 entry layout, which has a
	 * different segment limit and carries a total byte count.
	 */
	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	segcnt = dmap->dm_nsegs;

	/* Copy as many segments as fit into the primary request entry. */
	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < seglim;
	    seg++, rq->req_seg_count++) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dmap->dm_segs[seg].ds_len;
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	}

	if (seg == segcnt)
		goto dmasync;

	/*
	 * Leftover segments go into continuation entries, each taking
	 * one more request queue slot (and bumping rqs_entry_count).
	 */
	do {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			/* Queue full: undo the mapping and fail. */
			printf("%s: Request Queue Overflow++\n",
			    isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_COMPLETE);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

dmasync:
	/* Flush/invalidate the data buffer ahead of the device access. */
	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

mbxsync:

	/* Make the request queue entries visible to the chip. */
	bus_dmamap_sync(pci->pci_dmat, pci->pci_rquest_dmap, 0,
	    pci->pci_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	return (CMD_QUEUED);
}
682
683 static int
684 isp_pci_intr(arg)
685 void *arg;
686 {
687 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)arg;
688 bus_dmamap_sync(pci->pci_dmat, pci->pci_result_dmap, 0,
689 pci->pci_result_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
690 return (isp_intr(arg));
691 }
692
/*
 * Tear down the DMA mapping for a completed transfer: sync the buffer
 * for CPU access in the transfer's direction, then unload the map.
 *
 * NOTE(review): dmap is indexed with `handle' directly, while
 * isp_pci_dmasetup() indexes with req_handle - 1.  This presumes the
 * caller (in the generic isp code, not visible here) already passes a
 * zero-based handle -- confirm against ISP_DMAFREE usage in isp.c.
 */
static void
isp_pci_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];

	bus_dmamap_sync(pci->pci_dmat, dmap, 0, dmap->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(pci->pci_dmat, dmap);
}
707
708 static void
709 isp_pci_reset1(isp)
710 struct ispsoftc *isp;
711 {
712 /* Make sure the BIOS is disabled */
713 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
714 }
715
716 static void
717 isp_pci_dumpregs(isp)
718 struct ispsoftc *isp;
719 {
720 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
721 printf("%s: PCI Status Command/Status=%x\n", pci->pci_isp.isp_name,
722 pci_conf_read(pci->pci_pc, pci->pci_tag, PCI_COMMAND_STATUS_REG));
723 }
724