/* $NetBSD: isp_sbus.c,v 1.30 2000/08/14 07:00:08 mjacob Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 * sys/dev/ic/isp.c
 * sys/dev/ic/isp_inline.h
 * sys/dev/ic/isp_netbsd.c
 * sys/dev/ic/isp_netbsd.h
 * sys/dev/ic/isp_target.c
 * sys/dev/ic/isp_target.h
 * sys/dev/ic/isp_tpublic.h
 * sys/dev/ic/ispmbox.h
 * sys/dev/ic/ispreg.h
 * sys/dev/ic/ispvar.h
 * sys/dev/microcode/isp/asm_sbus.h
 * sys/dev/microcode/isp/asm_1040.h
 * sys/dev/microcode/isp/asm_1080.h
 * sys/dev/microcode/isp/asm_12160.h
 * sys/dev/microcode/isp/asm_2100.h
 * sys/dev/microcode/isp/asm_2200.h
 * sys/dev/pci/isp_pci.c
 * sys/dev/sbus/isp_sbus.c
 *
 * Is being actively maintained by Matthew Jacob (mjacob (at) netbsd.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
 * Solaris versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997 by Matthew Jacob
 * NASA AMES Research Center
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/ic/isp_netbsd.h>
#include <dev/microcode/isp/asm_sbus.h>
#include <dev/sbus/sbusvar.h>

static int isp_sbus_intr __P((void *));
static u_int16_t isp_sbus_rd_reg __P((struct ispsoftc *, int));
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
	u_int32_t));

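/*
 * If the ISP1000 firmware image from asm_sbus.h was not compiled in, fall
 * back to a NULL firmware pointer; the core driver then runs with whatever
 * firmware is already resident on the board instead of downloading one.
 */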
#ifndef ISP_1000_RISC_CODE
#define ISP_1000_RISC_CODE NULL
#endif

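/*
 * Bus-specific vector handed to the machine-independent core: register
 * accessors, mailbox/queue DMA allocation, and per-command DMA setup and
 * teardown.  The NULL entries are optional hooks this bus front-end does
 * not need; the final two members supply the ISP1000 firmware image and
 * the default BIU configuration (burst enable).
 */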
static struct ispmdvec mdvec = {
	isp_sbus_rd_reg,
	isp_sbus_wr_reg,
	isp_sbus_mbxdma,
	isp_sbus_dmasetup,
	isp_sbus_dmateardown,
	NULL,
	NULL,
	NULL,
	ISP_1000_RISC_CODE,
	BIU_BURST_ENABLE
};

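/*
 * Per-instance softc.  The machine-independent ispsoftc comes first so the
 * core can be handed a pointer to it and the bus front-end can cast back
 * to the full structure.  The rest holds the SBus space and DMA tags, the
 * register handle, per-command DMA maps, the request/result queue maps,
 * and the register block offset table used by the register accessors.
 */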
struct isp_sbussoftc {
	struct ispsoftc		sbus_isp;
	sdparam			sbus_dev;
	bus_space_tag_t		sbus_bustag;
	bus_dma_tag_t		sbus_dmatag;
	bus_space_handle_t	sbus_reg;
	int			sbus_node;
	int			sbus_pri;
	struct ispmdvec		sbus_mdvec;
	bus_dmamap_t		*sbus_dmamap;
	bus_dmamap_t		sbus_request_dmamap;
	bus_dmamap_t		sbus_result_dmamap;
	int16_t			sbus_poff[_NREG_BLKS];
};

static int isp_match __P((struct device *, struct cfdata *, void *));
static void isp_sbus_attach __P((struct device *, struct device *, void *));
struct cfattach isp_sbus_ca = {
	sizeof (struct isp_sbussoftc), isp_match, isp_sbus_attach
};

static int
isp_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	int rv;
#ifdef DEBUG
	static int oneshot = 1;
#endif
	struct sbus_attach_args *sa = aux;

	rv = (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0 ||
	    strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0 ||
	    strcmp("SUNW,isp", sa->sa_name) == 0 ||
	    strcmp("QLGC,isp", sa->sa_name) == 0);
#ifdef DEBUG
	if (rv && oneshot) {
		oneshot = 0;
		printf("Qlogic ISP Driver, NetBSD (sbus) Platform Version "
		    "%d.%d Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
#endif
	return (rv);
}

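/*
 * Attach: map the chip registers (preferring any PROM-supplied virtual
 * address), pick up the interrupt priority and the clock-frequency
 * property, disable firmware download for the early PTI boards that
 * cannot take it, then reset and initialize the chip, hook up the
 * interrupt, and hand off to the machine-independent attach code.
 */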
static void
isp_sbus_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	int freq;
	struct sbus_attach_args *sa = aux;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) self;
	struct ispsoftc *isp = &sbc->sbus_isp;

	printf(" for %s\n", sa->sa_name);

	sbc->sbus_bustag = sa->sa_bustag;
	sbc->sbus_dmatag = sa->sa_dmatag;
	if (sa->sa_nintr != 0)
		sbc->sbus_pri = sa->sa_pri;
	sbc->sbus_mdvec = mdvec;

	if (sa->sa_npromvaddrs != 0) {
		sbc->sbus_reg = (bus_space_handle_t)sa->sa_promvaddrs[0];
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, BUS_SPACE_MAP_LINEAR, 0,
		    &sbc->sbus_reg) != 0) {
			printf("%s: cannot map registers\n", self->dv_xname);
			return;
		}
	}
	sbc->sbus_node = sa->sa_node;

	freq = getpropint(sa->sa_node, "clock-frequency", 0);
	if (freq) {
		/*
		 * Convert from Hz to MHz, rounding up.
		 */
		freq = (freq + 500000)/1000000;
#if 0
		printf("%s: %d MHz\n", self->dv_xname, freq);
#endif
	}
	sbc->sbus_mdvec.dv_clock = freq;

	/*
	 * XXX: Now figure out what the proper burst sizes, etc., to use.
	 */
	sbc->sbus_mdvec.dv_conf1 |= BIU_SBUS_CONF1_FIFO_8;

	/*
	 * Some early versions of the PTI SBus adapter
	 * would fail in trying to download (via poking)
	 * FW. We give up on them.
	 */
	if (strcmp("PTI,ptisp", sa->sa_name) == 0 ||
	    strcmp("ptisp", sa->sa_name) == 0) {
		sbc->sbus_mdvec.dv_ispfw = NULL;
	}

	isp->isp_mdvec = &sbc->sbus_mdvec;
	isp->isp_bustype = ISP_BT_SBUS;
	isp->isp_type = ISP_HA_SCSI_UNKNOWN;
	isp->isp_param = &sbc->sbus_dev;
	bzero(isp->isp_param, sizeof (sdparam));

	sbc->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	sbc->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF;
	sbc->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF;
	sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
	sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	/*
	 * Set up logging levels.
	 */
#ifdef ISP_LOGDEFAULT
	isp->isp_dblev = ISP_LOGDEFAULT;
#else
	isp->isp_dblev = ISP_LOGCONFIG|ISP_LOGWARN|ISP_LOGERR;
#ifdef SCSIDEBUG
	isp->isp_dblev |= ISP_LOGDEBUG1|ISP_LOGDEBUG2;
#endif
#ifdef DEBUG
	isp->isp_dblev |= ISP_LOGDEBUG0;
#endif
#ifdef DIAGNOSTIC
	isp->isp_dblev |= ISP_LOGINFO;
#endif
#endif
	isp->isp_confopts = self->dv_cfdata->cf_flags;
	/*
	 * There's no tool on sparc to set NVRAM for ISPs, so ignore it.
	 */
	isp->isp_confopts |= ISP_CFG_NONVRAM;
	ISP_LOCK(isp);
	isp->isp_osinfo.no_mbox_ints = 1;
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		return;
	}
	/* Establish interrupt channel */
	bus_intr_establish(sbc->sbus_bustag, sbc->sbus_pri, IPL_BIO, 0,
	    isp_sbus_intr, sbc);
	ENABLE_INTS(isp);
	ISP_UNLOCK(isp);

	/*
	 * do generic attach.
	 */
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
	}
}

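/*
 * Interrupt handler.  Sync the result queue so the CPU sees what the chip
 * has DMA'd in, mark that we are running in interrupt context for the
 * benefit of the core's mailbox handling, and let the machine-independent
 * isp_intr() do the real work.
 */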
static int
isp_sbus_intr(arg)
	void *arg;
{
	int rv;
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *)arg;
	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_result_dmamap, 0,
	    sbc->sbus_result_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	sbc->sbus_isp.isp_osinfo.onintstack = 1;
	rv = isp_intr(arg);
	sbc->sbus_isp.isp_osinfo.onintstack = 0;
	return (rv);
}

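/*
 * Register accessors.  The machine-independent code encodes a register as
 * (block | offset): translate the block through the sbus_poff[] table set
 * up at attach time, add the in-block offset, and do a 16-bit bus_space
 * access.
 */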
static u_int16_t
isp_sbus_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	return (bus_space_read_2(sbc->sbus_bustag, sbc->sbus_reg, offset));
}

static void
isp_sbus_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	int offset = sbc->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(sbc->sbus_bustag, sbc->sbus_reg, offset, val);
}

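/*
 * Allocate the DMA resources the core needs before it can talk to the
 * chip: an array mapping command handles back to transfer structures, one
 * DMA map per outstanding command, and DMA-safe memory for the request
 * and result queues.  Each queue is allocated as a single segment, loaded
 * so the chip can address it, and mapped into kernel virtual space.
 */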
static int
isp_sbus_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dma_tag_t dmatag = sbc->sbus_dmatag;
	bus_dma_segment_t seg;
	int rs, i;
	size_t n;
	bus_size_t len;

	if (isp->isp_rquest_dma)
		return (0);

	n = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(n, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	bzero(isp->isp_xflist, n);
	n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
	if (sbc->sbus_dmamap == NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
		isp_prt(isp, ISP_LOGERR, "cannot alloc dmamap array");
		return (1);
	}
	for (i = 0; i < isp->isp_maxcmds; i++) {
		/* Allocate a DMA handle */
		if (bus_dmamap_create(dmatag, MAXPHYS, 1, MAXPHYS, 0,
		    BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
			isp_prt(isp, ISP_LOGERR, "cmd DMA maps create error");
			break;
		}
	}
	if (i < isp->isp_maxcmds) {
		while (--i >= 0) {
			bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
		}
		free(isp->isp_xflist, M_DEVBUF);
		free(sbc->sbus_dmamap, M_DEVBUF);
		isp->isp_xflist = NULL;
		sbc->sbus_dmamap = NULL;
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_request_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_request_dmamap,
	    &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}
	isp->isp_rquest_dma = sbc->sbus_request_dmamap->dm_segs[0].ds_addr;

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_rquest,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_request_dmamap);
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	/* Allocate DMA map */
	if (bus_dmamap_create(dmatag, len, 1, len, 0, BUS_DMA_NOWAIT,
	    &sbc->sbus_result_dmamap) != 0) {
		goto dmafail;
	}

	/* Allocate DMA buffer */
	if (bus_dmamem_alloc(dmatag, len, 0, 0, &seg, 1, &rs, BUS_DMA_NOWAIT)) {
		goto dmafail;
	}

	/* Load the buffer */
	if (bus_dmamap_load_raw(dmatag, sbc->sbus_result_dmamap,
	    &seg, rs, len, BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}

	/* Map DMA buffer in CPU addressable space */
	if (bus_dmamem_map(dmatag, &seg, rs, len, (caddr_t *)&isp->isp_result,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		bus_dmamap_unload(dmatag, sbc->sbus_result_dmamap);
		bus_dmamem_free(dmatag, &seg, rs);
		goto dmafail;
	}
	isp->isp_result_dma = sbc->sbus_result_dmamap->dm_segs[0].ds_addr;

	return (0);

dmafail:
	for (i = 0; i < isp->isp_maxcmds; i++) {
		bus_dmamap_destroy(dmatag, sbc->sbus_dmamap[i]);
	}
	free(sbc->sbus_dmamap, M_DEVBUF);
	free(isp->isp_xflist, M_DEVBUF);
	isp->isp_xflist = NULL;
	sbc->sbus_dmamap = NULL;
	return (1);
}

/*
 * Map a DMA request.
 * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
 */

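/*
 * Load the transfer's data buffer into the command's DMA map and presync
 * it for the direction of the transfer.  The single data segment is
 * placed in the request entry itself or, for commands with CDBs longer
 * than 12 bytes, in a following continuation entry.  The request is then
 * byte-swizzled for the chip before being handed back for queueing.
 */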
static int
isp_sbus_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	ispreq_t *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;
	ispcontreq_t *crq;
	int cansleep = (xs->xs_control & XS_CTL_NOSLEEP) == 0;
	int in = (xs->xs_control & XS_CTL_DATA_IN) != 0;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	dmap = sbc->sbus_dmamap[isp_handle_index(rq->req_handle)];
	if (dmap->dm_nsegs != 0) {
		panic("%s: dma map already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	if (bus_dmamap_load(sbc->sbus_dmatag, dmap, xs->data, xs->datalen,
	    NULL, cansleep? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
		XS_SETERR(xs, HBA_BOTCH);
		return (CMD_COMPLETE);
	}

	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
	    in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	if (in) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	if (XS_CDBLEN(xs) > 12) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN(isp));
		if (*iptrp == optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			bus_dmamap_unload(sbc->sbus_dmatag, dmap);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_seg_count = 2;
		rq->req_dataseg[0].ds_count = 0;
		rq->req_dataseg[0].ds_base = 0;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		crq->req_dataseg[0].ds_count = xs->datalen;
		crq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		ISP_SBUSIFY_ISPHDR(isp, &crq->req_header);
	} else {
		rq->req_dataseg[0].ds_count = xs->datalen;
		rq->req_dataseg[0].ds_base = dmap->dm_segs[0].ds_addr;
		rq->req_seg_count = 1;
	}

mbxsync:
	ISP_SWIZZLE_REQUEST(isp, rq);
#if 0
	/*
	 * If we ever map cacheable memory, we need to do something like this.
	 */
	bus_dmamap_sync(sbc->sbus_dmatag, sbc->sbus_request_dmamap, 0,
	    sbc->sbus_request_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
#endif
	return (CMD_QUEUED);
}

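/*
 * Command completion: sync the data buffer for the CPU in the direction
 * the transfer ran and release the per-command DMA map so it can be
 * reused for the next command with this handle.
 */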
static void
isp_sbus_dmateardown(isp, xs, handle)
	struct ispsoftc *isp;
	struct scsipi_xfer *xs;
	u_int32_t handle;
{
	struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
	bus_dmamap_t dmap;

	dmap = sbc->sbus_dmamap[isp_handle_index(handle)];

	if (dmap->dm_nsegs == 0) {
		panic("%s: dma map not already allocated\n", isp->isp_name);
		/* NOTREACHED */
	}
	bus_dmamap_sync(sbc->sbus_dmatag, dmap, 0, xs->datalen,
	    (xs->xs_control & XS_CTL_DATA_IN)?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sbc->sbus_dmatag, dmap);
}