sw.c revision 1.4 1 /* $NetBSD: sw.c,v 1.4 2001/04/25 17:53:22 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains only the machine-dependent parts of the
41 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
42 * The machine-independent parts are in ncr5380sbc.c
43 *
44 * Supported hardware includes:
45 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
46 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
47 *
48 * The VME variant has a bit to enable or disable the DMA engine,
49 * but that bit also gates the interrupt line from the NCR5380!
50 * Therefore, in order to get any interrupt from the 5380, (i.e.
51 * for reselect) one must clear the DMA engine transfer count and
52 * then enable DMA. This has the further complication that you
53 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
54 * we have to turn DMA back off before we even look at the 5380.
55 *
56 * What wonderfully whacky hardware this is!
57 *
58 * David Jones wrote the initial version of this module for NetBSD/sun3,
59 * which included support for the VME adapter only. (no reselection).
60 *
61 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
62 * both the VME and OBIO code to support disconnect/reselect.
63 * (Required figuring out the hardware "features" noted above.)
64 *
65 * The autoconfiguration boilerplate came from Adam Glass.
66 *
67 * Jason R. Thorpe ported the autoconfiguration and VME portions to
68 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
69 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
70 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
71 * and Chris Torek for bits of insight needed along the way. Thanks to
72 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
73 * for the sake of testing. Andrew Gillham helped work out the bugs
74 * in the 4/100 DMA code.
75 */
76
77 /*
78 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
79 * works, but interrupts (and, thus, reselection) don't. I don't know
80 * why, and I don't have a machine to test this on further.
81 *
82 * DMA, DMA completion interrupts, and reselection work fine on my
83 * 4/260 with modern SCSI-II disks attached. I've had reports of
84 * reselection failing on Sun Shoebox-type configurations where
85 * there are multiple non-SCSI devices behind Emulex or Adaptec
86 * bridges. These devices pre-date the SCSI-I spec, and might not
87 * behave the way the 5380 code expects. For this reason, only
88 * DMA is enabled by default in this driver.
89 *
90 * Jason R. Thorpe <thorpej (at) NetBSD.ORG>
91 * December 8, 1995
92 */
93
94 #include "opt_ddb.h"
95
96 #include <sys/types.h>
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/kernel.h>
100 #include <sys/malloc.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/buf.h>
104
105 #include <machine/bus.h>
106 #include <machine/intr.h>
107 #include <machine/autoconf.h>
108
109 #include <dev/scsipi/scsi_all.h>
110 #include <dev/scsipi/scsipi_all.h>
111 #include <dev/scsipi/scsipi_debug.h>
112 #include <dev/scsipi/scsiconf.h>
113
114 #ifndef DDB
115 #define Debugger()
116 #endif
117
118 #ifndef DEBUG
119 #define DEBUG XXX
120 #endif
121
122 #define COUNT_SW_LEFTOVERS XXX /* See sw DMA completion code */
123
124 #include <dev/ic/ncr5380reg.h>
125 #include <dev/ic/ncr5380var.h>
126
127 #include <sparc/dev/swreg.h>
128
129 /*
130 * Transfers smaller than this are done using PIO
131 * (on assumption they're not worth DMA overhead)
132 */
133 #define MIN_DMA_LEN 128
134
135 /*
136 * Transfers larger than 65535 bytes need to be split-up.
137 * (Some of the FIFO logic has only 16 bits counters.)
138 * Make the size an integer multiple of the page size
139 * to avoid buf/cluster remap problems. (paranoid?)
140 */
141 #define MAX_DMA_LEN 0xE000
142
143 #ifdef DEBUG
144 int sw_debug = 0;
145 #endif
146
/*
 * This structure is used to keep track of mapped DMA requests.
 * One handle exists per possible outstanding command (SCI_OPENINGS,
 * allocated in sw_attach); sw_dma_alloc() claims a free one and
 * sw_dma_free() releases it.
 */
struct sw_dma_handle {
	int		dh_flags;	/* SIDH_* state bits below */
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Original data length */
	long		dh_startingpa;	/* PA of buffer; for "sw" (set in
					 * sw_dma_start, used by sw_dma_stop
					 * to compute bytes transferred) */
	bus_dmamap_t	dh_dmamap;	/* bus_dma(9) map for this buffer */
#define	dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};
160
/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct sw_softc {
	struct ncr5380_softc	ncr_sc;		/* MI 5380 state; MUST be first */
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;

	struct sw_dma_handle	*sc_dma;	/* SCI_OPENINGS handles (malloc'd) */
	int	sc_xlen;	/* length of current DMA segment. */
	int	sc_options;	/* options for this instance. */
};
174
175 /*
176 * Options. By default, DMA is enabled and DMA completion interrupts
177 * and reselect are disabled. You may enable additional features
178 * via the `flags' directive in your kernel's configuration file.
179 *
180 * Alternatively, you can patch your kernel with DDB or some other
181 * mechanism. The sc_options member of the softc is OR'd with
182 * the value in sw_options.
183 *
184 * On the "sw", interrupts (and thus reselection) don't work, so they're
185 * disabled by default. DMA is still a little dangerous, too.
186 *
187 * Note, there's a separate sw_options to make life easier.
188 */
189 #define SW_ENABLE_DMA 0x01 /* Use DMA (maybe polled) */
190 #define SW_DMA_INTR 0x02 /* DMA completion interrupts */
191 #define SW_DO_RESELECT 0x04 /* Allow disconnect/reselect */
192 #define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
193 #define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
194 int sw_options = SW_ENABLE_DMA;
195
196 static int sw_match __P((struct device *, struct cfdata *, void *));
197 static void sw_attach __P((struct device *, struct device *, void *));
198 static int sw_intr __P((void *));
199 static void sw_reset_adapter __P((struct ncr5380_softc *));
200 static void sw_minphys __P((struct buf *));
201
202 void sw_dma_alloc __P((struct ncr5380_softc *));
203 void sw_dma_free __P((struct ncr5380_softc *));
204 void sw_dma_poll __P((struct ncr5380_softc *));
205
206 void sw_dma_setup __P((struct ncr5380_softc *));
207 void sw_dma_start __P((struct ncr5380_softc *));
208 void sw_dma_eop __P((struct ncr5380_softc *));
209 void sw_dma_stop __P((struct ncr5380_softc *));
210
211 void sw_intr_on __P((struct ncr5380_softc *));
212 void sw_intr_off __P((struct ncr5380_softc *));
213
214 /* Shorthand bus space access */
215 #define SWREG_READ(sc, index) \
216 bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
217 #define SWREG_WRITE(sc, index, v) \
218 bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
219
220
/* The Sun "SCSI Weird" 4/100 obio controller. */
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach sw_ca = {
	sizeof(struct sw_softc), sw_match, sw_attach
};
225
226 static int
227 sw_match(parent, cf, aux)
228 struct device *parent;
229 struct cfdata *cf;
230 void *aux;
231 {
232 union obio_attach_args *uoba = aux;
233 struct obio4_attach_args *oba;
234
235 /* Nothing but a Sun 4/100 is going to have these devices. */
236 if (cpuinfo.cpu_type != CPUTYP_4_100)
237 return (0);
238
239 if (uoba->uoba_isobio4 == 0)
240 return (0);
241
242 /* Make sure there is something there... */
243 oba = &uoba->uoba_oba4;
244 return (bus_space_probe(oba->oba_bustag, 0, oba->oba_paddr,
245 1, /* probe size */
246 1, /* offset */
247 0, /* flags */
248 NULL, NULL));
249 }
250
/*
 * Attach the "sw" controller: map its registers, hook up the interrupt,
 * wire the MD DMA entry points into the MI ncr5380sbc core, allocate
 * one DMA handle per opening, reset the board and attach the 5380 MI
 * driver.
 */
static void
sw_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sw_softc *sc = (struct sw_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (obio_bus_map(oba->oba_bustag, oba->oba_paddr,
			 0,
			 SWREG_BANK_SZ,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	/* Start from the compile-time/patchable defaults. */
	sc->sc_options = sw_options;

	/*
	 * MD DMA hooks used by the MI core.
	 * NOTE(review): sc_dma_eop is pointed at sw_dma_stop rather than
	 * at the empty sw_dma_eop stub below.  This looks deliberate
	 * (sw_dma_eop's comment says EOP handling is not needed because
	 * DMA is stopped before sci_csr is examined), but confirm against
	 * ncr5380sbc.c before changing it.
	 */
	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	ncr_sc->sc_dma_eop = sw_dma_stop;
	ncr_sc->sc_dma_stop = sw_dma_stop;
	ncr_sc->sc_intr_on = sw_intr_on;
	ncr_sc->sc_intr_off = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * Default interrupt priority always is 3.  At least, that's
	 * what my board seems to be at.  --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO, 0,
				 sw_intr, sc);

	printf(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets (byte-wide, consecutive). */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free = sw_dma_free;
	ncr_sc->sc_dma_poll = sw_dma_poll;

	ncr_sc->sc_flags = 0;
	/* Without reselection support, forbid disconnects from all targets. */
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	/* Without DMA completion interrupts, poll for DMA completion. */
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles, one per possible outstanding command.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: dma handle malloc failed\n");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
				sc->sc_dmatag,	/* tag */
				MAXPHYS,	/* size */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	/* Report any non-default options. */
	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;		/* host adapter SCSI ID */
	ncr_sc->sc_adapter.adapt_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}
385
386 static void
387 sw_minphys(struct buf *bp)
388 {
389 if (bp->b_bcount > MAX_DMA_LEN) {
390 #ifdef DEBUG
391 if (sw_debug) {
392 printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
393 Debugger();
394 }
395 #endif
396 bp->b_bcount = MAX_DMA_LEN;
397 }
398 minphys(bp);
399 }
400
401 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
402 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
403
404 static int
405 sw_intr(void *arg)
406 {
407 struct sw_softc *sc = arg;
408 struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
409 int dma_error, claimed;
410 u_short csr;
411
412 claimed = 0;
413 dma_error = 0;
414
415 /* SBC interrupt? DMA interrupt? */
416 csr = SWREG_READ(ncr_sc, SWREG_CSR);
417
418 NCR_TRACE("sw_intr: csr=0x%x\n", csr);
419
420 if (csr & SW_CSR_DMA_CONFLICT) {
421 dma_error |= SW_CSR_DMA_CONFLICT;
422 printf("sw_intr: DMA conflict\n");
423 }
424 if (csr & SW_CSR_DMA_BUS_ERR) {
425 dma_error |= SW_CSR_DMA_BUS_ERR;
426 printf("sw_intr: DMA bus error\n");
427 }
428 if (dma_error) {
429 if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
430 sc->ncr_sc.sc_state |= NCR_ABORTING;
431 /* Make sure we will call the main isr. */
432 csr |= SW_CSR_DMA_IP;
433 }
434
435 if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
436 claimed = ncr5380_intr(&sc->ncr_sc);
437 #ifdef DEBUG
438 if (!claimed) {
439 printf("sw_intr: spurious from SBC\n");
440 if (sw_debug & 4) {
441 Debugger(); /* XXX */
442 }
443 }
444 #endif
445 }
446
447 return (claimed);
448 }
449
450
/*
 * Hard-reset the "sw" board: pulse the SCSI reset line, clear the DMA
 * engine's address/count registers, then re-enable board interrupts.
 * The exact write/delay ordering matters.
 */
static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_reset_adapter\n");
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 * Drive everything low (asserts reset), wait, then release
	 * SCSI reset.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	/* Leave the DMA engine idle: zero its address and count. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	/* Reset released, board interrupts enabled. */
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	/* Clear any interrupt pending in the 5380 itself. */
	SCI_CLR_INTR(ncr_sc);
}
475
476
477 /*****************************************************************
478 * Common functions for DMA
479 ****************************************************************/
480
481 /*
482 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
483 * for DMA transfer. On the Sun4, this means mapping the buffer
484 * into DVMA space.
485 */
486 void
487 sw_dma_alloc(ncr_sc)
488 struct ncr5380_softc *ncr_sc;
489 {
490 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
491 struct sci_req *sr = ncr_sc->sc_current;
492 struct scsipi_xfer *xs = sr->sr_xs;
493 struct sw_dma_handle *dh;
494 int i, xlen;
495 u_long addr;
496
497 #ifdef DIAGNOSTIC
498 if (sr->sr_dma_hand != NULL)
499 panic("sw_dma_alloc: already have DMA handle");
500 #endif
501
502 #if 1 /* XXX - Temporary */
503 /* XXX - In case we think DMA is completely broken... */
504 if ((sc->sc_options & SW_ENABLE_DMA) == 0)
505 return;
506 #endif
507
508 addr = (u_long) ncr_sc->sc_dataptr;
509 xlen = ncr_sc->sc_datalen;
510
511 /* If the DMA start addr is misaligned then do PIO */
512 if ((addr & 1) || (xlen & 1)) {
513 printf("sw_dma_alloc: misaligned.\n");
514 return;
515 }
516
517 /* Make sure our caller checked sc_min_dma_len. */
518 if (xlen < MIN_DMA_LEN)
519 panic("sw_dma_alloc: xlen=0x%x\n", xlen);
520
521 /* Find free DMA handle. Guaranteed to find one since we have
522 as many DMA handles as the driver has processes. */
523 for (i = 0; i < SCI_OPENINGS; i++) {
524 if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
525 goto found;
526 }
527 panic("sw: no free DMA handles.");
528
529 found:
530 dh = &sc->sc_dma[i];
531 dh->dh_flags = SIDH_BUSY;
532 dh->dh_addr = (u_char *)addr;
533 dh->dh_maplen = xlen;
534
535 /* Copy the "write" flag for convenience. */
536 if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
537 dh->dh_flags |= SIDH_OUT;
538
539 /*
540 * Double-map the buffer into DVMA space. If we can't re-map
541 * the buffer, we print a warning and fall back to PIO mode.
542 *
543 * NOTE: it is not safe to sleep here!
544 */
545 if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
546 (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
547 /* Can't remap segment */
548 printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
549 addr, dh->dh_maplen);
550 dh->dh_flags = 0;
551 return;
552 }
553 bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
554 (dh->dh_flags & SIDH_OUT)
555 ? BUS_DMASYNC_PREWRITE
556 : BUS_DMASYNC_PREREAD);
557
558 /* success */
559 sr->sr_dma_hand = dh;
560
561 return;
562 }
563
564
565 void
566 sw_dma_free(ncr_sc)
567 struct ncr5380_softc *ncr_sc;
568 {
569 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
570 struct sci_req *sr = ncr_sc->sc_current;
571 struct sw_dma_handle *dh = sr->sr_dma_hand;
572
573 #ifdef DIAGNOSTIC
574 if (dh == NULL)
575 panic("sw_dma_free: no DMA handle");
576 #endif
577
578 if (ncr_sc->sc_state & NCR_DOINGDMA)
579 panic("sw_dma_free: free while in progress");
580
581 if (dh->dh_flags & SIDH_BUSY) {
582 /* Give back the DVMA space. */
583 bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
584 dh->dh_dvma, dh->dh_maplen,
585 (dh->dh_flags & SIDH_OUT)
586 ? BUS_DMASYNC_POSTWRITE
587 : BUS_DMASYNC_POSTREAD);
588 bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
589 dh->dh_flags = 0;
590 }
591 sr->sr_dma_hand = NULL;
592 }
593
594
595 /*
596 * Poll (spin-wait) for DMA completion.
597 * Called right after xx_dma_start(), and
598 * xx_dma_stop() will be called next.
599 * Same for either VME or OBIO.
600 */
601 void
602 sw_dma_poll(ncr_sc)
603 struct ncr5380_softc *ncr_sc;
604 {
605 struct sci_req *sr = ncr_sc->sc_current;
606 int tmo, csr_mask, csr;
607
608 /* Make sure DMA started successfully. */
609 if (ncr_sc->sc_state & NCR_ABORTING)
610 return;
611
612 csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
613 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
614
615 tmo = 50000; /* X100 = 5 sec. */
616 for (;;) {
617 csr = SWREG_READ(ncr_sc, SWREG_CSR);
618 if (csr & csr_mask)
619 break;
620 if (--tmo <= 0) {
621 printf("%s: DMA timeout (while polling)\n",
622 ncr_sc->sc_dev.dv_xname);
623 /* Indicate timeout as MI code would. */
624 sr->sr_flags |= SR_OVERDUE;
625 break;
626 }
627 delay(100);
628 }
629
630 #ifdef DEBUG
631 if (sw_debug) {
632 printf("sw_dma_poll: done, csr=0x%x\n", csr);
633 }
634 #endif
635 }
636
637
638 /*
639 * This is called when the bus is going idle,
640 * so we want to enable the SBC interrupts.
641 * That is controlled by the DMA enable!
642 * Who would have guessed!
643 * What a NASTY trick!
644 *
645 * XXX THIS MIGHT NOT WORK RIGHT!
646 */
void
sw_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/* Zero the DMA address/count first so enabling DMA stays idle. */
	sw_dma_setup(ncr_sc);
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr |= SW_CSR_DMA_EN;	/* XXX - this bit is for vme only?! */
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}
658
659 /*
660 * This is called when the bus is idle and we are
661 * about to start playing with the SBC chip.
662 *
663 * XXX THIS MIGHT NOT WORK RIGHT!
664 */
void
sw_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/* Clear DMA enable; see the note above about touching the 5380. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}
675
676
677 /*
678 * This function is called during the COMMAND or MSG_IN phase
679 * that precedes a DATA_IN or DATA_OUT phase, in case we need
680 * to setup the DMA engine before the bus enters a DATA phase.
681 *
682 * On the OBIO version we just clear the DMA count and address
683 * here (to make sure it stays idle) and do the real setup
684 * later, in dma_start.
685 */
void
sw_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/* No FIFO to reset on "sw". */

	/* Set direction (assume recv here; sw_dma_start fixes it up) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_SEND;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/* Keep the DMA engine idle until sw_dma_start programs it. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
}
702
703
/*
 * Program the "sw" DMA engine and the 5380 for the current data phase,
 * then start the transfer.  The ordering here (direction bit, address/
 * count, PIO of unaligned leading bytes, phase ack, 5380 DMA mode,
 * and only then DMA enable) is required by the hardware.
 */
void
sw_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	u_int32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("sw_dma_start: bad dva=0x%lx", dva);

	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;		/* force an even transfer length */
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword aligned transfers.  We
	 * detect a shortword aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes.  These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("sw_dma_start: adjusted up %d bytes\n", adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		if (adj) {
			/* PIO out the two leading unaligned bytes. */
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		if (adj) {
			/* PIO in the two leading unaligned bytes. */
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}
817
818
/*
 * End-of-process hook.  Empty stub: sw_attach points sc_dma_eop at
 * sw_dma_stop instead of at this function.
 */
void
sw_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}
826
#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define COUNT_SW_LEFTOVERS
#endif
#ifdef COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur.  Read these with DDB from time
 * to time.  Indexed by (ending DMA address & 3); see sw_dma_stop().
 */
int sw_3_leftover = 0;	/* reads ending 3 bytes past a longword */
int sw_2_leftover = 0;	/* reads ending 2 bytes past a longword */
int sw_1_leftover = 0;	/* reads ending 1 byte past a longword */
int sw_0_leftover = 0;	/* reads ending longword-aligned (no fixup) */
#endif
840
/*
 * Halt the DMA engine, figure out how much data actually moved, advance
 * the MI data pointers, and recover any "left-over" bytes the hardware
 * latched on a read.  Also puts the 5380 back into PIO mode.
 */
void
sw_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	u_int32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("sw_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over.  Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)) {
#else
	if (csr & (SW_CSR_DMA_CONFLICT)) {
#endif
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we've stored
	 * the starting PA of the transfer in the DMA handle,
	 * and subtract it from the ending PA left in the dma_addr
	 * register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("sw_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean-up
	 * "Left-over bytes" (yuck!)  The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this no matter what.  Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		u_int32_t bpr;

		/* Byte-pack register holds the partially-filled longword. */
		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

out:
	/* Leave the DMA engine idle. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif
}
976