sw.c revision 1.10 1 /* $NetBSD: sw.c,v 1.10 2002/10/02 16:02:16 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains only the machine-dependent parts of the
41 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
42 * The machine-independent parts are in ncr5380sbc.c
43 *
44 * Supported hardware includes:
45 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
46 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
47 *
48 * The VME variant has a bit to enable or disable the DMA engine,
49 * but that bit also gates the interrupt line from the NCR5380!
50 * Therefore, in order to get any interrupt from the 5380, (i.e.
51 * for reselect) one must clear the DMA engine transfer count and
52 * then enable DMA. This has the further complication that you
53 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
54 * we have to turn DMA back off before we even look at the 5380.
55 *
56 * What wonderfully whacky hardware this is!
57 *
58 * David Jones wrote the initial version of this module for NetBSD/sun3,
59 * which included support for the VME adapter only. (no reselection).
60 *
61 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
62 * both the VME and OBIO code to support disconnect/reselect.
63 * (Required figuring out the hardware "features" noted above.)
64 *
65 * The autoconfiguration boilerplate came from Adam Glass.
66 *
67 * Jason R. Thorpe ported the autoconfiguration and VME portions to
68 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
69 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
70 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
71 * and Chris Torek for bits of insight needed along the way. Thanks to
72 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing. Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
75 */
76
77 /*
78 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
79 * works, but interrupts (and, thus, reselection) don't. I don't know
80 * why, and I don't have a machine to test this on further.
81 *
82 * DMA, DMA completion interrupts, and reselection work fine on my
83 * 4/260 with modern SCSI-II disks attached. I've had reports of
84 * reselection failing on Sun Shoebox-type configurations where
85 * there are multiple non-SCSI devices behind Emulex or Adaptec
86 * bridges. These devices pre-date the SCSI-I spec, and might not
 * behave the way the 5380 code expects. For this reason, only
88 * DMA is enabled by default in this driver.
89 *
90 * Jason R. Thorpe <thorpej (at) NetBSD.ORG>
91 * December 8, 1995
92 */
93
94 #include "opt_ddb.h"
95
96 #include <sys/types.h>
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/kernel.h>
100 #include <sys/malloc.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/buf.h>
104
105 #include <machine/bus.h>
106 #include <machine/intr.h>
107 #include <machine/autoconf.h>
108
109 #include <dev/scsipi/scsi_all.h>
110 #include <dev/scsipi/scsipi_all.h>
111 #include <dev/scsipi/scsipi_debug.h>
112 #include <dev/scsipi/scsiconf.h>
113
114 #ifndef DDB
115 #define Debugger()
116 #endif
117
118 #ifndef DEBUG
119 #define DEBUG XXX
120 #endif
121
122 #define COUNT_SW_LEFTOVERS XXX /* See sw DMA completion code */
123
124 #include <dev/ic/ncr5380reg.h>
125 #include <dev/ic/ncr5380var.h>
126
127 #include <sparc/dev/swreg.h>
128
129 /*
130 * Transfers smaller than this are done using PIO
131 * (on assumption they're not worth DMA overhead)
132 */
133 #define MIN_DMA_LEN 128
134
135 /*
 * Transfers larger than 65535 bytes need to be split-up.
137 * (Some of the FIFO logic has only 16 bits counters.)
138 * Make the size an integer multiple of the page size
139 * to avoid buf/cluster remap problems. (paranoid?)
140 */
141 #define MAX_DMA_LEN 0xE000
142
143 #ifdef DEBUG
144 int sw_debug = 0;
145 #endif
146
147 /*
148 * This structure is used to keep track of mapped DMA requests.
149 */
struct sw_dma_handle {
	int dh_flags;		/* SIDH_* state bits, below */
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char * dh_addr;	/* KVA of start of buffer */
	int dh_maplen;		/* Original data length */
	long dh_startingpa;	/* PA of buffer; for "sw" */
	bus_dmamap_t dh_dmamap;	/* bus_dma(9) map backing this handle */
#define dh_dvma dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};
160
161 /*
162 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
164 */
struct sw_softc {
	/*
	 * MI softc MUST stay the first member: the MD callbacks
	 * receive a "struct ncr5380_softc *" and cast it back to
	 * "struct sw_softc *" (and vice versa).
	 */
	struct ncr5380_softc ncr_sc;
	bus_space_tag_t sc_bustag;		/* bus tags */
	bus_dma_tag_t sc_dmatag;

	struct sw_dma_handle *sc_dma;	/* SCI_OPENINGS handles, alloc'd at attach */
	int sc_xlen;		/* length of current DMA segment. */
	int sc_options;		/* options for this instance. */
};
174
175 /*
176 * Options. By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled. You may enable additional features with
 * the `flags' directive in your kernel's configuration file.
179 *
180 * Alternatively, you can patch your kernel with DDB or some other
181 * mechanism. The sc_options member of the softc is OR'd with
182 * the value in sw_options.
183 *
 * On the "sw", interrupts (and thus reselection) don't work, so they're
185 * disabled by default. DMA is still a little dangerous, too.
186 *
187 * Note, there's a separate sw_options to make life easier.
188 */
189 #define SW_ENABLE_DMA 0x01 /* Use DMA (maybe polled) */
190 #define SW_DMA_INTR 0x02 /* DMA completion interrupts */
191 #define SW_DO_RESELECT 0x04 /* Allow disconnect/reselect */
192 #define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
193 #define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
194 int sw_options = SW_ENABLE_DMA;
195
196 static int sw_match __P((struct device *, struct cfdata *, void *));
197 static void sw_attach __P((struct device *, struct device *, void *));
198 static int sw_intr __P((void *));
199 static void sw_reset_adapter __P((struct ncr5380_softc *));
200 static void sw_minphys __P((struct buf *));
201
202 void sw_dma_alloc __P((struct ncr5380_softc *));
203 void sw_dma_free __P((struct ncr5380_softc *));
204 void sw_dma_poll __P((struct ncr5380_softc *));
205
206 void sw_dma_setup __P((struct ncr5380_softc *));
207 void sw_dma_start __P((struct ncr5380_softc *));
208 void sw_dma_eop __P((struct ncr5380_softc *));
209 void sw_dma_stop __P((struct ncr5380_softc *));
210
211 void sw_intr_on __P((struct ncr5380_softc *));
212 void sw_intr_off __P((struct ncr5380_softc *));
213
214 /* Shorthand bus space access */
215 #define SWREG_READ(sc, index) \
216 bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
217 #define SWREG_WRITE(sc, index, v) \
218 bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
219
220
221 /* The Sun "SCSI Weird" 4/100 obio controller. */
222 CFATTACH_DECL(sw, sizeof(struct sw_softc),
223 sw_match, sw_attach, NULL, NULL);
224
225 static int
226 sw_match(parent, cf, aux)
227 struct device *parent;
228 struct cfdata *cf;
229 void *aux;
230 {
231 union obio_attach_args *uoba = aux;
232 struct obio4_attach_args *oba;
233
234 /* Nothing but a Sun 4/100 is going to have these devices. */
235 if (cpuinfo.cpu_type != CPUTYP_4_100)
236 return (0);
237
238 if (uoba->uoba_isobio4 == 0)
239 return (0);
240
241 /* Make sure there is something there... */
242 oba = &uoba->uoba_oba4;
243 return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
244 1, /* probe size */
245 1, /* offset */
246 0, /* flags */
247 NULL, NULL));
248 }
249
/*
 * Attach the "sw" controller: map its registers, establish the
 * interrupt, fill in the fields and MD function pointers used by
 * the MI ncr5380 driver, allocate the per-opening DMA handles,
 * reset the board, and finally attach the MI chip driver.
 */
static void
sw_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sw_softc *sc = (struct sw_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
			  SWREG_BANK_SZ,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	/* Start from the global defaults; cf_flags may override below. */
	sc->sc_options = sw_options;

	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	/*
	 * NOTE(review): sc_dma_eop points at sw_dma_stop, not sw_dma_eop.
	 * This looks intentional (sw_dma_eop is a no-op), but confirm.
	 */
	ncr_sc->sc_dma_eop = sw_dma_stop;
	ncr_sc->sc_dma_stop = sw_dma_stop;
	ncr_sc->sc_intr_on = sw_intr_on;
	ncr_sc->sc_intr_off = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * Default interrupt priority always is 3. At least, that's
	 * what my board seems to be at. --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO, 0,
				 sw_intr, sc);

	printf(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags. Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free = sw_dma_free;
	ncr_sc->sc_dma_poll = sw_dma_poll;

	/*
	 * With reselect disabled, forbid disconnect for every target;
	 * without DMA completion interrupts, force polled DMA.
	 */
	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles, one per opening.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: dma handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
				sc->sc_dmatag,	/* tag */
				MAXPHYS,	/* size */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	/* Report any non-default options in human-readable form. */
	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}
383
384 static void
385 sw_minphys(struct buf *bp)
386 {
387 if (bp->b_bcount > MAX_DMA_LEN) {
388 #ifdef DEBUG
389 if (sw_debug) {
390 printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
391 Debugger();
392 }
393 #endif
394 bp->b_bcount = MAX_DMA_LEN;
395 }
396 minphys(bp);
397 }
398
399 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
400 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
401
402 static int
403 sw_intr(void *arg)
404 {
405 struct sw_softc *sc = arg;
406 struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
407 int dma_error, claimed;
408 u_short csr;
409
410 claimed = 0;
411 dma_error = 0;
412
413 /* SBC interrupt? DMA interrupt? */
414 csr = SWREG_READ(ncr_sc, SWREG_CSR);
415
416 NCR_TRACE("sw_intr: csr=0x%x\n", csr);
417
418 if (csr & SW_CSR_DMA_CONFLICT) {
419 dma_error |= SW_CSR_DMA_CONFLICT;
420 printf("sw_intr: DMA conflict\n");
421 }
422 if (csr & SW_CSR_DMA_BUS_ERR) {
423 dma_error |= SW_CSR_DMA_BUS_ERR;
424 printf("sw_intr: DMA bus error\n");
425 }
426 if (dma_error) {
427 if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
428 sc->ncr_sc.sc_state |= NCR_ABORTING;
429 /* Make sure we will call the main isr. */
430 csr |= SW_CSR_DMA_IP;
431 }
432
433 if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
434 claimed = ncr5380_intr(&sc->ncr_sc);
435 #ifdef DEBUG
436 if (!claimed) {
437 printf("sw_intr: spurious from SBC\n");
438 if (sw_debug & 4) {
439 Debugger(); /* XXX */
440 }
441 }
442 #endif
443 }
444
445 return (claimed);
446 }
447
448
/*
 * Hard-reset the "sw" board: pulse the (active-low) SCSI reset,
 * zero the DMA engine, then re-enable interrupts and clear any
 * pending 5380 interrupt.  The write/delay ordering here follows
 * the hardware's requirements -- do not reorder.
 */
static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_reset_adapter\n");
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	/* Park the DMA engine before enabling interrupts. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	SCI_CLR_INTR(ncr_sc);
}
473
474
475 /*****************************************************************
476 * Common functions for DMA
477 ****************************************************************/
478
479 /*
480 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
481 * for DMA transfer. On the Sun4, this means mapping the buffer
482 * into DVMA space.
483 */
/*
 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
 * for DMA transfer. On the Sun4, this means mapping the buffer
 * into DVMA space.
 *
 * If anything prevents DMA (option disabled, misaligned buffer,
 * map failure), we simply return without setting sr_dma_hand and
 * the MI code falls back to PIO.
 */
void
sw_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct sw_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("sw_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("sw_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("sw_dma_alloc: xlen=0x%x", xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("sw: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space. If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
			    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	/*
	 * NOTE(review): the third argument of bus_dmamap_sync() is an
	 * offset within the map, but the KVA is passed here -- verify
	 * against bus_dma(9); later revisions appear to pass 0.
	 */
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
			(dh->dh_flags & SIDH_OUT)
				? BUS_DMASYNC_PREWRITE
				: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}
561
562
/*
 * Release the DMA handle attached to the current request:
 * post-sync and unload the bus_dma map, mark the handle free,
 * and clear sr_dma_hand.  Must not be called mid-transfer.
 */
void
sw_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("sw_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("sw_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/*
		 * Give back the DVMA space.
		 * NOTE(review): dh_dvma (a DVMA address) is passed as the
		 * bus_dmamap_sync() offset argument -- verify against
		 * bus_dma(9), which expects an offset within the map.
		 */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
				dh->dh_dvma, dh->dh_maplen,
				(dh->dh_flags & SIDH_OUT)
					? BUS_DMASYNC_POSTWRITE
					: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
591
592
593 /*
594 * Poll (spin-wait) for DMA completion.
595 * Called right after xx_dma_start(), and
596 * xx_dma_stop() will be called next.
597 * Same for either VME or OBIO.
598 */
599 void
600 sw_dma_poll(ncr_sc)
601 struct ncr5380_softc *ncr_sc;
602 {
603 struct sci_req *sr = ncr_sc->sc_current;
604 int tmo, csr_mask, csr;
605
606 /* Make sure DMA started successfully. */
607 if (ncr_sc->sc_state & NCR_ABORTING)
608 return;
609
610 csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
611 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
612
613 tmo = 50000; /* X100 = 5 sec. */
614 for (;;) {
615 csr = SWREG_READ(ncr_sc, SWREG_CSR);
616 if (csr & csr_mask)
617 break;
618 if (--tmo <= 0) {
619 printf("%s: DMA timeout (while polling)\n",
620 ncr_sc->sc_dev.dv_xname);
621 /* Indicate timeout as MI code would. */
622 sr->sr_flags |= SR_OVERDUE;
623 break;
624 }
625 delay(100);
626 }
627
628 #ifdef DEBUG
629 if (sw_debug) {
630 printf("sw_dma_poll: done, csr=0x%x\n", csr);
631 }
632 #endif
633 }
634
635
636 /*
637 * This is called when the bus is going idle,
638 * so we want to enable the SBC interrupts.
639 * That is controlled by the DMA enable!
640 * Who would have guessed!
641 * What a NASTY trick!
642 *
643 * XXX THIS MIGHT NOT WORK RIGHT!
644 */
645 void
646 sw_intr_on(ncr_sc)
647 struct ncr5380_softc *ncr_sc;
648 {
649 u_int32_t csr;
650
651 sw_dma_setup(ncr_sc);
652 csr = SWREG_READ(ncr_sc, SWREG_CSR);
653 csr |= SW_CSR_DMA_EN; /* XXX - this bit is for vme only?! */
654 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
655 }
656
657 /*
658 * This is called when the bus is idle and we are
659 * about to start playing with the SBC chip.
660 *
661 * XXX THIS MIGHT NOT WORK RIGHT!
662 */
663 void
664 sw_intr_off(ncr_sc)
665 struct ncr5380_softc *ncr_sc;
666 {
667 u_int32_t csr;
668
669 csr = SWREG_READ(ncr_sc, SWREG_CSR);
670 csr &= ~SW_CSR_DMA_EN;
671 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
672 }
673
674
675 /*
676 * This function is called during the COMMAND or MSG_IN phase
677 * that precedes a DATA_IN or DATA_OUT phase, in case we need
678 * to setup the DMA engine before the bus enters a DATA phase.
679 *
680 * On the OBIO version we just clear the DMA count and address
681 * here (to make sure it stays idle) and do the real setup
682 * later, in dma_start.
683 */
684 void
685 sw_dma_setup(ncr_sc)
686 struct ncr5380_softc *ncr_sc;
687 {
688 u_int32_t csr;
689
690 /* No FIFO to reset on "sw". */
691
692 /* Set direction (assume recv here) */
693 csr = SWREG_READ(ncr_sc, SWREG_CSR);
694 csr &= ~SW_CSR_SEND;
695 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
696
697 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
698 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
699 }
700
701
/*
 * Program the "sw" DMA engine and put the 5380 into DMA mode for
 * the current request.  Handles the controller's longword-alignment
 * requirement by moving up to 2 leading bytes via PIO first.
 * The register write ordering here is order-critical.
 */
void
sw_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	u_int32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("sw_dma_start: bad dva=0x%lx", dva);

	/* Transfer length is forced even (hardware requirement). */
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that the mapped length (dh_maplen) may be
	 * less than sc_datalen.
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword aligned transfers.  We
	 * detect a shortword aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes.  These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("sw_dma_start: adjusted up %d bytes\n", adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		/* Push the unaligned leading bytes via PIO. */
		if (adj) {
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		/* Pull the unaligned leading bytes via PIO. */
		if (adj) {
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}
815
816
/*
 * End-of-process hook.  Intentionally empty on the "sw":
 * the DMA engine is halted before sci_csr is examined,
 * so there is nothing left to do here.
 */
void
sw_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}
824
#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define COUNT_SW_LEFTOVERS
#endif
#ifdef COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur. Read these with DDB from time
 * to time.  Each counter records how many times sw_dma_stop() had to
 * clean up that many "left-over" bytes after a DMA read.
 */
int sw_3_leftover = 0;
int sw_2_leftover = 0;
int sw_1_leftover = 0;
int sw_0_leftover = 0;
#endif
838
/*
 * Halt the DMA engine, figure out how much data actually moved,
 * clean up any "left-over" bytes after a read, and put the 5380
 * back into PIO mode.  Called from the MI code after a transfer
 * completes, errors, or times out.
 */
void
sw_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	u_int32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("sw_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over.  Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)) {
#else
	if (csr & (SW_CSR_DMA_CONFLICT)) {
#endif
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we've stored
	 * the starting PA of the transfer in the DMA handle,
	 * and subtract it from the ending PA left in the dma_addr
	 * register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("sw_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean-up
	 * "Left-over bytes" (yuck!)  The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this no matter what.  Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		u_int32_t bpr;

		/* The byte-pack register holds the residual bytes. */
		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

		/* The low two address bits say how many are left over. */
		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

out:
	/* Park the DMA engine again. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif
}
974