sw.c revision 1.3 1 /* $NetBSD: sw.c,v 1.3 2000/07/09 20:57:47 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains only the machine-dependent parts of the
41 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
42 * The machine-independent parts are in ncr5380sbc.c
43 *
44 * Supported hardware includes:
45 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
46 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
47 *
48 * The VME variant has a bit to enable or disable the DMA engine,
49 * but that bit also gates the interrupt line from the NCR5380!
50 * Therefore, in order to get any interrupt from the 5380, (i.e.
51 * for reselect) one must clear the DMA engine transfer count and
52 * then enable DMA. This has the further complication that you
53 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
54 * we have to turn DMA back off before we even look at the 5380.
55 *
56 * What wonderfully whacky hardware this is!
57 *
58 * David Jones wrote the initial version of this module for NetBSD/sun3,
59 * which included support for the VME adapter only. (no reselection).
60 *
61 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
62 * both the VME and OBIO code to support disconnect/reselect.
63 * (Required figuring out the hardware "features" noted above.)
64 *
65 * The autoconfiguration boilerplate came from Adam Glass.
66 *
67 * Jason R. Thorpe ported the autoconfiguration and VME portions to
68 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
69 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
70 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
71 * and Chris Torek for bits of insight needed along the way. Thanks to
72 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
73 * for the sake of testing. Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
75 */
76
77 /*
78 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
79 * works, but interrupts (and, thus, reselection) don't. I don't know
80 * why, and I don't have a machine to test this on further.
81 *
82 * DMA, DMA completion interrupts, and reselection work fine on my
83 * 4/260 with modern SCSI-II disks attached. I've had reports of
84 * reselection failing on Sun Shoebox-type configurations where
85 * there are multiple non-SCSI devices behind Emulex or Adaptec
86 * bridges. These devices pre-date the SCSI-I spec, and might not
 * behave the way the 5380 code expects.  For this reason, only
88 * DMA is enabled by default in this driver.
89 *
90 * Jason R. Thorpe <thorpej (at) NetBSD.ORG>
91 * December 8, 1995
92 */
93
94 #include "opt_ddb.h"
95
96 #include <sys/types.h>
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/kernel.h>
100 #include <sys/malloc.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/buf.h>
104
105 #include <machine/bus.h>
106 #include <machine/intr.h>
107 #include <machine/autoconf.h>
108
109 #include <dev/scsipi/scsi_all.h>
110 #include <dev/scsipi/scsipi_all.h>
111 #include <dev/scsipi/scsipi_debug.h>
112 #include <dev/scsipi/scsiconf.h>
113
114 #ifndef DDB
115 #define Debugger()
116 #endif
117
118 #ifndef DEBUG
119 #define DEBUG XXX
120 #endif
121
122 #define COUNT_SW_LEFTOVERS XXX /* See sw DMA completion code */
123
124 #include <dev/ic/ncr5380reg.h>
125 #include <dev/ic/ncr5380var.h>
126
127 #include <sparc/dev/swreg.h>
128
129 /*
130 * Transfers smaller than this are done using PIO
131 * (on assumption they're not worth DMA overhead)
132 */
133 #define MIN_DMA_LEN 128
134
135 /*
 * Transfers larger than 65535 bytes need to be split up.
137 * (Some of the FIFO logic has only 16 bits counters.)
138 * Make the size an integer multiple of the page size
139 * to avoid buf/cluster remap problems. (paranoid?)
140 */
141 #define MAX_DMA_LEN 0xE000
142
143 #ifdef DEBUG
144 int sw_debug = 0;
145 static int sw_link_flags = 0 /* | SDEV_DB2 */ ;
146 #endif
147
148 /*
149 * This structure is used to keep track of mapped DMA requests.
150 */
/*
 * Per-request DMA state: one handle is assigned to each in-progress
 * SCSI command that uses DMA (sw_dma_alloc/sw_dma_free).
 */
struct sw_dma_handle {
	int		dh_flags;	/* SIDH_* state bits below */
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Original data length */
	long		dh_startingpa;	/* PA of buffer; for "sw" */
	bus_dmamap_t	dh_dmamap;	/* bus_dma(9) map for the buffer */
#define	dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};
161
162 /*
163 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
165 */
struct sw_softc {
	/*
	 * MI 5380 state.  MUST remain the first member so that a
	 * (struct sw_softc *) and a (struct ncr5380_softc *) can be
	 * cast back and forth (the MD hooks rely on this).
	 */
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;

	struct sw_dma_handle	*sc_dma;	/* SCI_OPENINGS handles (sw_attach) */
	int			sc_xlen;	/* length of current DMA segment. */
	int			sc_options;	/* options for this instance. */
};
175
176 /*
177 * Options. By default, DMA is enabled and DMA completion interrupts
178 * and reselect are disabled. You may enable additional features
 * with the `flags' directive in your kernel's configuration file.
180 *
181 * Alternatively, you can patch your kernel with DDB or some other
182 * mechanism. The sc_options member of the softc is OR'd with
183 * the value in sw_options.
184 *
 * On the "sw", interrupts (and thus reselection) don't work, so they're
186 * disabled by default. DMA is still a little dangerous, too.
187 *
188 * Note, there's a separate sw_options to make life easier.
189 */
190 #define SW_ENABLE_DMA 0x01 /* Use DMA (maybe polled) */
191 #define SW_DMA_INTR 0x02 /* DMA completion interrupts */
192 #define SW_DO_RESELECT 0x04 /* Allow disconnect/reselect */
193 #define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
194 #define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
195 int sw_options = SW_ENABLE_DMA;
196
197 static int sw_match __P((struct device *, struct cfdata *, void *));
198 static void sw_attach __P((struct device *, struct device *, void *));
199 static int sw_intr __P((void *));
200 static void sw_reset_adapter __P((struct ncr5380_softc *));
201 static void sw_minphys __P((struct buf *));
202
203 void sw_dma_alloc __P((struct ncr5380_softc *));
204 void sw_dma_free __P((struct ncr5380_softc *));
205 void sw_dma_poll __P((struct ncr5380_softc *));
206
207 void sw_dma_setup __P((struct ncr5380_softc *));
208 void sw_dma_start __P((struct ncr5380_softc *));
209 void sw_dma_eop __P((struct ncr5380_softc *));
210 void sw_dma_stop __P((struct ncr5380_softc *));
211
212 void sw_intr_on __P((struct ncr5380_softc *));
213 void sw_intr_off __P((struct ncr5380_softc *));
214
215 /* Shorthand bus space access */
216 #define SWREG_READ(sc, index) \
217 bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
218 #define SWREG_WRITE(sc, index, v) \
219 bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
220
221
222 /* The Sun "SCSI Weird" 4/100 obio controller. */
/* Autoconfiguration glue for the Sun "SCSI Weird" 4/100 obio controller. */
struct cfattach sw_ca = {
	sizeof(struct sw_softc), sw_match, sw_attach
};
226
227 static int
228 sw_match(parent, cf, aux)
229 struct device *parent;
230 struct cfdata *cf;
231 void *aux;
232 {
233 union obio_attach_args *uoba = aux;
234 struct obio4_attach_args *oba;
235
236 /* Nothing but a Sun 4/100 is going to have these devices. */
237 if (cpuinfo.cpu_type != CPUTYP_4_100)
238 return (0);
239
240 if (uoba->uoba_isobio4 == 0)
241 return (0);
242
243 /* Make sure there is something there... */
244 oba = &uoba->uoba_oba4;
245 return (bus_space_probe(oba->oba_bustag, 0, oba->oba_paddr,
246 1, /* probe size */
247 1, /* offset */
248 0, /* flags */
249 NULL, NULL));
250 }
251
/*
 * Attach function: map the controller registers, hook up the
 * interrupt, fill in the MI ncr5380 softc fields and MD function
 * pointers, allocate DMA handles, reset the board, and finally
 * hand off to the MI ncr5380_attach().
 */
static void
sw_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sw_softc *sc = (struct sw_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (obio_bus_map(oba->oba_bustag, oba->oba_paddr,
			 0,
			 SWREG_BANK_SZ,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	/* Start from the (DDB-patchable) global defaults. */
	sc->sc_options = sw_options;

	/* MD hooks the MI 5380 code calls around DMA transfers. */
	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	/*
	 * NOTE(review): sc_dma_eop is wired to sw_dma_stop rather than
	 * sw_dma_eop (which is an empty stub).  Presumably intentional,
	 * so that an end-of-process interrupt halts the DMA engine --
	 * confirm before "fixing".
	 */
	ncr_sc->sc_dma_eop = sw_dma_stop;
	ncr_sc->sc_dma_stop = sw_dma_stop;
	ncr_sc->sc_intr_on = sw_intr_on;
	ncr_sc->sc_intr_off = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * Default interrupt priority always is 3.  At least, that's
	 * what my board seems to be at.  --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO, 0,
				 sw_intr, sc);

	printf(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets (byte-wide, consecutive). */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free = sw_dma_free;
	ncr_sc->sc_dma_poll = sw_dma_poll;

	ncr_sc->sc_flags = 0;
	/* Without reselect support, forbid disconnects for all targets. */
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	/* Without DMA completion interrupts, poll for DMA completion. */
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles: one per opening, so a free handle is
	 * always available (see sw_dma_alloc).
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: dma handle malloc failed\n");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
			sc->sc_dmatag,	/* tag */
			MAXPHYS,	/* size */
			1,		/* nsegments */
			MAXPHYS,	/* maxsegsz */
			0,		/* boundary */
			BUS_DMA_NOWAIT,
			&sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	/* Report any non-default options. */
	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}
#ifdef DEBUG
	ncr_sc->sc_link.flags |= sw_link_flags;
#endif

	ncr_sc->sc_link.scsipi_scsi.adapter_target = 7;
	ncr_sc->sc_adapter.scsipi_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}
389
390 static void
391 sw_minphys(struct buf *bp)
392 {
393 if (bp->b_bcount > MAX_DMA_LEN) {
394 #ifdef DEBUG
395 if (sw_debug) {
396 printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
397 Debugger();
398 }
399 #endif
400 bp->b_bcount = MAX_DMA_LEN;
401 }
402 return (minphys(bp));
403 }
404
405 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
406 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
407
408 static int
409 sw_intr(void *arg)
410 {
411 struct sw_softc *sc = arg;
412 struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
413 int dma_error, claimed;
414 u_short csr;
415
416 claimed = 0;
417 dma_error = 0;
418
419 /* SBC interrupt? DMA interrupt? */
420 csr = SWREG_READ(ncr_sc, SWREG_CSR);
421
422 NCR_TRACE("sw_intr: csr=0x%x\n", csr);
423
424 if (csr & SW_CSR_DMA_CONFLICT) {
425 dma_error |= SW_CSR_DMA_CONFLICT;
426 printf("sw_intr: DMA conflict\n");
427 }
428 if (csr & SW_CSR_DMA_BUS_ERR) {
429 dma_error |= SW_CSR_DMA_BUS_ERR;
430 printf("sw_intr: DMA bus error\n");
431 }
432 if (dma_error) {
433 if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
434 sc->ncr_sc.sc_state |= NCR_ABORTING;
435 /* Make sure we will call the main isr. */
436 csr |= SW_CSR_DMA_IP;
437 }
438
439 if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
440 claimed = ncr5380_intr(&sc->ncr_sc);
441 #ifdef DEBUG
442 if (!claimed) {
443 printf("sw_intr: spurious from SBC\n");
444 if (sw_debug & 4) {
445 Debugger(); /* XXX */
446 }
447 }
448 #endif
449 }
450
451 return (claimed);
452 }
453
454
/*
 * Reset the "sw" board: pulse the (active-low) SCSI reset line,
 * clear the DMA engine's address/count registers, then re-enable
 * interrupts and clear any pending 5380 interrupt.
 */
static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_reset_adapter\n");
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 * Writing 0 asserts reset; SW_CSR_SCSI_RES releases it.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	/* Park the DMA engine. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	/* Clear any interrupt the 5380 may still have pending. */
	SCI_CLR_INTR(ncr_sc);
}
479
480
481 /*****************************************************************
482 * Common functions for DMA
483 ****************************************************************/
484
485 /*
486 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
487 * for DMA transfer. On the Sun4, this means mapping the buffer
488 * into DVMA space.
489 */
void
sw_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct sw_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("sw_dma_alloc: already have DMA handle");
#endif

#if 1 /* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/*
	 * If the DMA start addr is misaligned then do PIO.
	 * (Returning without setting sr_dma_hand means PIO.)
	 */
	if ((addr & 1) || (xlen & 1)) {
		printf("sw_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("sw_dma_alloc: xlen=0x%x\n", xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("sw: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
			    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	/*
	 * NOTE(review): `addr' (the KVA) is passed as the
	 * bus_dmamap_sync() "offset" argument; bus_dma(9) expects an
	 * offset within the mapping (normally 0 here) -- confirm
	 * against the sparc bus_dma implementation.
	 */
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
			(dh->dh_flags & SIDH_OUT)
				? BUS_DMASYNC_PREWRITE
				: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}
567
568
/*
 * Release the DMA handle allocated by sw_dma_alloc(): sync and
 * unload the DVMA mapping and mark the handle free again.
 */
void
sw_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("sw_dma_free: no DMA handle");
#endif

	/* Tearing down the mapping under an active transfer is fatal. */
	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("sw_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/*
		 * Give back the DVMA space.
		 * NOTE(review): dh_dvma (a DVMA address) is passed as the
		 * bus_dmamap_sync() "offset" argument; bus_dma(9) expects
		 * an offset within the mapping (normally 0) -- confirm
		 * against the sparc bus_dma implementation.
		 */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
				dh->dh_dvma, dh->dh_maplen,
				(dh->dh_flags & SIDH_OUT)
					? BUS_DMASYNC_POSTWRITE
					: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
597
598
599 /*
600 * Poll (spin-wait) for DMA completion.
601 * Called right after xx_dma_start(), and
602 * xx_dma_stop() will be called next.
603 * Same for either VME or OBIO.
604 */
605 void
606 sw_dma_poll(ncr_sc)
607 struct ncr5380_softc *ncr_sc;
608 {
609 struct sci_req *sr = ncr_sc->sc_current;
610 int tmo, csr_mask, csr;
611
612 /* Make sure DMA started successfully. */
613 if (ncr_sc->sc_state & NCR_ABORTING)
614 return;
615
616 csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
617 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
618
619 tmo = 50000; /* X100 = 5 sec. */
620 for (;;) {
621 csr = SWREG_READ(ncr_sc, SWREG_CSR);
622 if (csr & csr_mask)
623 break;
624 if (--tmo <= 0) {
625 printf("%s: DMA timeout (while polling)\n",
626 ncr_sc->sc_dev.dv_xname);
627 /* Indicate timeout as MI code would. */
628 sr->sr_flags |= SR_OVERDUE;
629 break;
630 }
631 delay(100);
632 }
633
634 #ifdef DEBUG
635 if (sw_debug) {
636 printf("sw_dma_poll: done, csr=0x%x\n", csr);
637 }
638 #endif
639 }
640
641
642 /*
643 * This is called when the bus is going idle,
644 * so we want to enable the SBC interrupts.
645 * That is controlled by the DMA enable!
646 * Who would have guessed!
647 * What a NASTY trick!
648 *
649 * XXX THIS MIGHT NOT WORK RIGHT!
650 */
void
sw_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/*
	 * Park the DMA engine (zero address/count) first, so enabling
	 * the DMA bit only un-gates the 5380's interrupt line without
	 * starting a transfer.
	 */
	sw_dma_setup(ncr_sc);
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr |= SW_CSR_DMA_EN;	/* XXX - this bit is for vme only?! */
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}
662
663 /*
664 * This is called when the bus is idle and we are
665 * about to start playing with the SBC chip.
666 *
667 * XXX THIS MIGHT NOT WORK RIGHT!
668 */
void
sw_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/*
	 * Clear the DMA enable bit; per the hardware notes at the top
	 * of this file, the 5380 must not be touched while it is set.
	 */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}
679
680
681 /*
682 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
684 * to setup the DMA engine before the bus enters a DATA phase.
685 *
686 * On the OBIO version we just clear the DMA count and address
687 * here (to make sure it stays idle) and do the real setup
688 * later, in dma_start.
689 */
void
sw_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/* No FIFO to reset on "sw". */

	/* Set direction (assume recv here; sw_dma_start fixes it up) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_SEND;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/* Park the DMA engine until sw_dma_start() programs it. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
}
706
707
/*
 * Program the "sw" DMA engine and put the 5380 into DMA mode.
 * Called once the bus has entered a DATA phase; the transfer
 * parameters come from the DMA handle set up by sw_dma_alloc().
 */
void
sw_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	u_int32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("sw_dma_start: bad dva=0x%lx", dva);

	/* Transfer length, rounded down to an even byte count. */
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword aligned transfers.  We
	 * detect a shortword aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes.  These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("sw_dma_start: adjusted up %d bytes\n", adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		/* PIO out the odd leading bytes, if any (see above). */
		if (adj) {
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		/* PIO in the odd leading bytes, if any (see above). */
		if (adj) {
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}
821
822
void
sw_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/*
	 * Intentionally empty - DMA was stopped prior to examining
	 * sci_csr, so there is nothing to do at end-of-process time.
	 * Note that sw_attach() actually wires sc_dma_eop to
	 * sw_dma_stop, not to this stub.
	 */
}
830
831 #if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
832 #define COUNT_SW_LEFTOVERS
833 #endif
834 #ifdef COUNT_SW_LEFTOVERS
835 /*
836 * Let's find out how often these occur. Read these with DDB from time
837 * to time.
838 */
839 int sw_3_leftover = 0;
840 int sw_2_leftover = 0;
841 int sw_1_leftover = 0;
842 int sw_0_leftover = 0;
843 #endif
844
/*
 * Halt the DMA engine, figure out how many bytes actually moved,
 * advance the MI data pointer/length, mop up any "left-over" bytes
 * from the byte-pack register after a read, and put the 5380 back
 * into PIO mode.
 */
void
sw_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	u_int32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("sw_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense).  To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over.  Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted.  (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)) {
#else
	if (csr & (SW_CSR_DMA_CONFLICT)) {
#endif
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we've stored
	 * the starting PA of the transfer in the DMA handle,
	 * and subtract it from the ending PA left in the dma_addr
	 * register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("sw_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean-up
	 * "Left-over bytes" (yuck!)  The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this no matter what.  Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		u_int32_t bpr;

		/* The byte-pack register holds the residual bytes. */
		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

		/* Low 2 bits of the ending PA say how many are left. */
		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

out:
	/* Park the DMA engine again. */
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif
}
980