sw.c revision 1.7 1 /* $NetBSD: sw.c,v 1.7 2002/09/27 15:36:46 provos Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains only the machine-dependent parts of the
41 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
42 * The machine-independent parts are in ncr5380sbc.c
43 *
44 * Supported hardware includes:
45 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
46 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
47 *
48 * The VME variant has a bit to enable or disable the DMA engine,
49 * but that bit also gates the interrupt line from the NCR5380!
50 * Therefore, in order to get any interrupt from the 5380, (i.e.
51 * for reselect) one must clear the DMA engine transfer count and
52 * then enable DMA. This has the further complication that you
53 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
54 * we have to turn DMA back off before we even look at the 5380.
55 *
56 * What wonderfully whacky hardware this is!
57 *
58 * David Jones wrote the initial version of this module for NetBSD/sun3,
59 * which included support for the VME adapter only. (no reselection).
60 *
61 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
62 * both the VME and OBIO code to support disconnect/reselect.
63 * (Required figuring out the hardware "features" noted above.)
64 *
65 * The autoconfiguration boilerplate came from Adam Glass.
66 *
67 * Jason R. Thorpe ported the autoconfiguration and VME portions to
68 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
69 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
70 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
71 * and Chris Torek for bits of insight needed along the way. Thanks to
72 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
73 * for the sake of testing. Andrew Gillham helped work out the bugs
74 * in the 4/100 DMA code.
75 */
76
77 /*
78 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
79 * works, but interrupts (and, thus, reselection) don't. I don't know
80 * why, and I don't have a machine to test this on further.
81 *
82 * DMA, DMA completion interrupts, and reselection work fine on my
83 * 4/260 with modern SCSI-II disks attached. I've had reports of
84 * reselection failing on Sun Shoebox-type configurations where
85 * there are multiple non-SCSI devices behind Emulex or Adaptec
86 * bridges. These devices pre-date the SCSI-I spec, and might not
87 * behave the way the 5380 code expects. For this reason, only
88 * DMA is enabled by default in this driver.
89 *
90 * Jason R. Thorpe <thorpej (at) NetBSD.ORG>
91 * December 8, 1995
92 */
93
94 #include "opt_ddb.h"
95
96 #include <sys/types.h>
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/kernel.h>
100 #include <sys/malloc.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/buf.h>
104
105 #include <machine/bus.h>
106 #include <machine/intr.h>
107 #include <machine/autoconf.h>
108
109 #include <dev/scsipi/scsi_all.h>
110 #include <dev/scsipi/scsipi_all.h>
111 #include <dev/scsipi/scsipi_debug.h>
112 #include <dev/scsipi/scsiconf.h>
113
114 #ifndef DDB
115 #define Debugger()
116 #endif
117
118 #ifndef DEBUG
119 #define DEBUG XXX
120 #endif
121
122 #define COUNT_SW_LEFTOVERS XXX /* See sw DMA completion code */
123
124 #include <dev/ic/ncr5380reg.h>
125 #include <dev/ic/ncr5380var.h>
126
127 #include <sparc/dev/swreg.h>
128
129 /*
130 * Transfers smaller than this are done using PIO
131 * (on assumption they're not worth DMA overhead)
132 */
133 #define MIN_DMA_LEN 128
134
135 /*
136 * Transfers larger than 65535 bytes need to be split-up.
137 * (Some of the FIFO logic has only 16 bits counters.)
138 * Make the size an integer multiple of the page size
139 * to avoid buf/cluster remap problems. (paranoid?)
140 */
141 #define MAX_DMA_LEN 0xE000
142
143 #ifdef DEBUG
144 int sw_debug = 0;
145 #endif
146
/*
 * This structure is used to keep track of mapped DMA requests.
 * One exists per concurrent transfer; sw_attach() allocates an
 * array of SCI_OPENINGS of these in sc_dma.
 */
struct sw_dma_handle {
	int		dh_flags;	/* SIDH_* state bits below */
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Original data length */
	long		dh_startingpa;	/* PA of buffer; for "sw" */
	bus_dmamap_t	dh_dmamap;	/* bus_dma map backing this handle */
#define dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};
160
161 /*
162 * The first structure member has to be the ncr5380_softc
163 * so we can just cast to go back and forth between them.
164 */
struct sw_softc {
	struct ncr5380_softc	ncr_sc;		/* MI 5380 state; MUST be first */
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;	/* DMA tag from obio attach args */

	struct sw_dma_handle *sc_dma;	/* array of SCI_OPENINGS handles */
	int	sc_xlen;	/* length of current DMA segment. */
	int	sc_options;	/* options for this instance. */
};
174
175 /*
176 * Options. By default, DMA is enabled and DMA completion interrupts
177 * and reselect are disabled. You may enable additional features with
178 * the `flags' directive in your kernel's configuration file.
179 *
180 * Alternatively, you can patch your kernel with DDB or some other
181 * mechanism. The sc_options member of the softc is OR'd with
182 * the value in sw_options.
183 *
184 * On the "sw", interrupts (and thus reselection) don't work, so they're
185 * disabled by default. DMA is still a little dangerous, too.
186 *
187 * Note, there's a separate sw_options to make life easier.
188 */
189 #define SW_ENABLE_DMA 0x01 /* Use DMA (maybe polled) */
190 #define SW_DMA_INTR 0x02 /* DMA completion interrupts */
191 #define SW_DO_RESELECT 0x04 /* Allow disconnect/reselect */
192 #define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
193 #define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
194 int sw_options = SW_ENABLE_DMA;
195
196 static int sw_match __P((struct device *, struct cfdata *, void *));
197 static void sw_attach __P((struct device *, struct device *, void *));
198 static int sw_intr __P((void *));
199 static void sw_reset_adapter __P((struct ncr5380_softc *));
200 static void sw_minphys __P((struct buf *));
201
202 void sw_dma_alloc __P((struct ncr5380_softc *));
203 void sw_dma_free __P((struct ncr5380_softc *));
204 void sw_dma_poll __P((struct ncr5380_softc *));
205
206 void sw_dma_setup __P((struct ncr5380_softc *));
207 void sw_dma_start __P((struct ncr5380_softc *));
208 void sw_dma_eop __P((struct ncr5380_softc *));
209 void sw_dma_stop __P((struct ncr5380_softc *));
210
211 void sw_intr_on __P((struct ncr5380_softc *));
212 void sw_intr_off __P((struct ncr5380_softc *));
213
214 /* Shorthand bus space access */
215 #define SWREG_READ(sc, index) \
216 bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
217 #define SWREG_WRITE(sc, index, v) \
218 bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
219
220
/* The Sun "SCSI Weird" 4/100 obio controller. */
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach sw_ca = {
	sizeof(struct sw_softc), sw_match, sw_attach
};
225
226 static int
227 sw_match(parent, cf, aux)
228 struct device *parent;
229 struct cfdata *cf;
230 void *aux;
231 {
232 union obio_attach_args *uoba = aux;
233 struct obio4_attach_args *oba;
234
235 /* Nothing but a Sun 4/100 is going to have these devices. */
236 if (cpuinfo.cpu_type != CPUTYP_4_100)
237 return (0);
238
239 if (uoba->uoba_isobio4 == 0)
240 return (0);
241
242 /* Make sure there is something there... */
243 oba = &uoba->uoba_oba4;
244 return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
245 1, /* probe size */
246 1, /* offset */
247 0, /* flags */
248 NULL, NULL));
249 }
250
/*
 * Attach routine: map the controller registers, establish the
 * interrupt, fill in the MI ncr5380_softc (register offsets, MD
 * entry points, option flags), allocate per-opening DMA handles,
 * reset the board, and hand off to the MI ncr5380 attachment.
 */
static void
sw_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sw_softc *sc = (struct sw_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
	    SWREG_BANK_SZ,
	    BUS_SPACE_MAP_LINEAR,
	    &bh) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	/* Start from the compiled-in defaults; may be overridden below. */
	sc->sc_options = sw_options;

	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	/*
	 * NOTE(review): sc_dma_eop is pointed at sw_dma_stop rather than
	 * the empty sw_dma_eop stub defined below -- presumably intentional
	 * (stop the engine on EOP), but confirm against the MI ncr5380sbc
	 * callback expectations.
	 */
	ncr_sc->sc_dma_eop = sw_dma_stop;
	ncr_sc->sc_dma_stop = sw_dma_stop;
	ncr_sc->sc_intr_on = sw_intr_on;
	ncr_sc->sc_intr_off = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * Default interrupt priority always is 3.  At least, that's
	 * what my board seems to be at.  --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO, 0,
	    sw_intr, sc);

	printf(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free = sw_dma_free;
	ncr_sc->sc_dma_poll = sw_dma_poll;

	ncr_sc->sc_flags = 0;
	/* Without reselect support, forbid disconnect on every target. */
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	/* Without DMA completion interrupts, poll for DMA completion. */
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: dma handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
		    sc->sc_dmatag,	/* tag */
		    MAXPHYS,		/* size */
		    1,			/* nsegments */
		    MAXPHYS,		/* maxsegsz */
		    0,			/* boundary */
		    BUS_DMA_NOWAIT,
		    &sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	/* Report any non-default option bits. */
	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;	/* our SCSI ID on the bus */
	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}
384
385 static void
386 sw_minphys(struct buf *bp)
387 {
388 if (bp->b_bcount > MAX_DMA_LEN) {
389 #ifdef DEBUG
390 if (sw_debug) {
391 printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
392 Debugger();
393 }
394 #endif
395 bp->b_bcount = MAX_DMA_LEN;
396 }
397 minphys(bp);
398 }
399
400 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
401 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
402
403 static int
404 sw_intr(void *arg)
405 {
406 struct sw_softc *sc = arg;
407 struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
408 int dma_error, claimed;
409 u_short csr;
410
411 claimed = 0;
412 dma_error = 0;
413
414 /* SBC interrupt? DMA interrupt? */
415 csr = SWREG_READ(ncr_sc, SWREG_CSR);
416
417 NCR_TRACE("sw_intr: csr=0x%x\n", csr);
418
419 if (csr & SW_CSR_DMA_CONFLICT) {
420 dma_error |= SW_CSR_DMA_CONFLICT;
421 printf("sw_intr: DMA conflict\n");
422 }
423 if (csr & SW_CSR_DMA_BUS_ERR) {
424 dma_error |= SW_CSR_DMA_BUS_ERR;
425 printf("sw_intr: DMA bus error\n");
426 }
427 if (dma_error) {
428 if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
429 sc->ncr_sc.sc_state |= NCR_ABORTING;
430 /* Make sure we will call the main isr. */
431 csr |= SW_CSR_DMA_IP;
432 }
433
434 if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
435 claimed = ncr5380_intr(&sc->ncr_sc);
436 #ifdef DEBUG
437 if (!claimed) {
438 printf("sw_intr: spurious from SBC\n");
439 if (sw_debug & 4) {
440 Debugger(); /* XXX */
441 }
442 }
443 #endif
444 }
445
446 return (claimed);
447 }
448
449
/*
 * Hard-reset the "sw" board: assert the (active-low) reset,
 * deassert it, clear the DMA engine address/count registers,
 * re-enable interrupts, and clear any pending 5380 interrupt.
 */
static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_reset_adapter\n");
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 * Writing 0 asserts reset; SW_CSR_SCSI_RES deasserts it.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	/* Park the DMA engine. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	/* Clear any interrupt left pending in the 5380. */
	SCI_CLR_INTR(ncr_sc);
}
474
475
476 /*****************************************************************
477 * Common functions for DMA
478 ****************************************************************/
479
/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun4, this means mapping the buffer
 * into DVMA space.
 *
 * On failure (misaligned buffer, DMA disabled, no DVMA space)
 * this simply returns without setting sr->sr_dma_hand, which
 * leaves the transfer to be done with PIO.
 */
void
sw_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct sw_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("sw_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("sw_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("sw_dma_alloc: xlen=0x%x", xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("sw: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
	    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	/*
	 * NOTE(review): bus_dmamap_sync(9) documents a map-relative
	 * offset as the third argument, but the KVA `addr' is passed
	 * here (sw_dma_free() passes dh_dvma instead) -- confirm what
	 * this port's bus_dma implementation actually expects.
	 */
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
	    (dh->dh_flags & SIDH_OUT)
		? BUS_DMASYNC_PREWRITE
		: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}
562
563
/*
 * Release the DMA handle attached to the current request:
 * sync and unload the DVMA mapping and mark the handle free.
 * Must not be called while DMA is still in progress.
 */
void
sw_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("sw_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("sw_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
		    dh->dh_dvma, dh->dh_maplen,
		    (dh->dh_flags & SIDH_OUT)
			? BUS_DMASYNC_POSTWRITE
			: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
592
593
594 /*
595 * Poll (spin-wait) for DMA completion.
596 * Called right after xx_dma_start(), and
597 * xx_dma_stop() will be called next.
598 * Same for either VME or OBIO.
599 */
600 void
601 sw_dma_poll(ncr_sc)
602 struct ncr5380_softc *ncr_sc;
603 {
604 struct sci_req *sr = ncr_sc->sc_current;
605 int tmo, csr_mask, csr;
606
607 /* Make sure DMA started successfully. */
608 if (ncr_sc->sc_state & NCR_ABORTING)
609 return;
610
611 csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
612 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
613
614 tmo = 50000; /* X100 = 5 sec. */
615 for (;;) {
616 csr = SWREG_READ(ncr_sc, SWREG_CSR);
617 if (csr & csr_mask)
618 break;
619 if (--tmo <= 0) {
620 printf("%s: DMA timeout (while polling)\n",
621 ncr_sc->sc_dev.dv_xname);
622 /* Indicate timeout as MI code would. */
623 sr->sr_flags |= SR_OVERDUE;
624 break;
625 }
626 delay(100);
627 }
628
629 #ifdef DEBUG
630 if (sw_debug) {
631 printf("sw_dma_poll: done, csr=0x%x\n", csr);
632 }
633 #endif
634 }
635
636
637 /*
638 * This is called when the bus is going idle,
639 * so we want to enable the SBC interrupts.
640 * That is controlled by the DMA enable!
641 * Who would have guessed!
642 * What a NASTY trick!
643 *
644 * XXX THIS MIGHT NOT WORK RIGHT!
645 */
646 void
647 sw_intr_on(ncr_sc)
648 struct ncr5380_softc *ncr_sc;
649 {
650 u_int32_t csr;
651
652 sw_dma_setup(ncr_sc);
653 csr = SWREG_READ(ncr_sc, SWREG_CSR);
654 csr |= SW_CSR_DMA_EN; /* XXX - this bit is for vme only?! */
655 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
656 }
657
658 /*
659 * This is called when the bus is idle and we are
660 * about to start playing with the SBC chip.
661 *
662 * XXX THIS MIGHT NOT WORK RIGHT!
663 */
664 void
665 sw_intr_off(ncr_sc)
666 struct ncr5380_softc *ncr_sc;
667 {
668 u_int32_t csr;
669
670 csr = SWREG_READ(ncr_sc, SWREG_CSR);
671 csr &= ~SW_CSR_DMA_EN;
672 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
673 }
674
675
676 /*
677 * This function is called during the COMMAND or MSG_IN phase
678 * that precedes a DATA_IN or DATA_OUT phase, in case we need
679 * to setup the DMA engine before the bus enters a DATA phase.
680 *
681 * On the OBIO version we just clear the DMA count and address
682 * here (to make sure it stays idle) and do the real setup
683 * later, in dma_start.
684 */
685 void
686 sw_dma_setup(ncr_sc)
687 struct ncr5380_softc *ncr_sc;
688 {
689 u_int32_t csr;
690
691 /* No FIFO to reset on "sw". */
692
693 /* Set direction (assume recv here) */
694 csr = SWREG_READ(ncr_sc, SWREG_CSR);
695 csr &= ~SW_CSR_SEND;
696 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
697
698 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
699 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
700 }
701
702
/*
 * Start the DMA transfer for the current request: program the
 * "sw" engine's direction, address and count, work around the
 * longword-alignment requirement with a 2-byte PIO pre-transfer,
 * then put the 5380 into DMA mode and kick it off.
 */
void
sw_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	u_int32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("sw_dma_start: bad dva=0x%lx", dva);

	/* Round the transfer length down to an even byte count. */
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword aligned transfers.  We
	 * detect a shortword aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes.  These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("sw_dma_start: adjusted up %d bytes\n", adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		/* PIO out the 2 leading bytes of a shortword-aligned buffer. */
		if (adj) {
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		/* PIO in the 2 leading bytes of a shortword-aligned buffer. */
		if (adj) {
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}
816
817
/*
 * End-of-process hook.  Intentionally empty: the DMA engine has
 * already been stopped before sci_csr is examined.
 */
void
sw_dma_eop(struct ncr5380_softc *ncr_sc)
{
}
825
#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define COUNT_SW_LEFTOVERS
#endif
#ifdef COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur.  Read these with DDB from time
 * to time.  Incremented in sw_dma_stop() according to how many bytes
 * of the last longword were recovered from the byte-pack register.
 */
int sw_3_leftover = 0;	/* 3 left-over bytes recovered */
int sw_2_leftover = 0;	/* 2 left-over bytes recovered */
int sw_1_leftover = 0;	/* 1 left-over byte recovered */
int sw_0_leftover = 0;	/* transfer ended longword-aligned */
#endif
839
/*
 * Stop a DMA transfer: halt the engine, compute how many bytes
 * actually moved, advance the MI data pointers, recover any
 * "left-over" bytes after a read from the byte-pack register,
 * and put the 5380 back into PIO mode.
 */
void
sw_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	u_int32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("sw_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over.  Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)) {
#else
	if (csr & (SW_CSR_DMA_CONFLICT)) {
#endif
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we've stored
	 * the starting PA of the transfer in the DMA handle,
	 * and subtract it from the ending PA left in the dma_addr
	 * register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("sw_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean-up
	 * "Left-over bytes" (yuck!)  The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this no matter what.  Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		u_int32_t bpr;

		/* Partial longword is held in the byte-pack register. */
		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

		/* (dva & 3) = count of bytes stuck in the pack register. */
		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

out:
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif
}
975