sw.c revision 1.14 1 /* $NetBSD: sw.c,v 1.14 2003/12/04 12:42:54 keihan Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * This file contains only the machine-dependent parts of the
41 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
42 * The machine-independent parts are in ncr5380sbc.c
43 *
44 * Supported hardware includes:
45 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
46 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
47 *
48 * The VME variant has a bit to enable or disable the DMA engine,
49 * but that bit also gates the interrupt line from the NCR5380!
50 * Therefore, in order to get any interrupt from the 5380, (i.e.
51 * for reselect) one must clear the DMA engine transfer count and
52 * then enable DMA. This has the further complication that you
53 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
54 * we have to turn DMA back off before we even look at the 5380.
55 *
56 * What wonderfully whacky hardware this is!
57 *
58 * David Jones wrote the initial version of this module for NetBSD/sun3,
59 * which included support for the VME adapter only. (no reselection).
60 *
61 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
62 * both the VME and OBIO code to support disconnect/reselect.
63 * (Required figuring out the hardware "features" noted above.)
64 *
65 * The autoconfiguration boilerplate came from Adam Glass.
66 *
67 * Jason R. Thorpe ported the autoconfiguration and VME portions to
68 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
69 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
70 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
71 * and Chris Torek for bits of insight needed along the way. Thanks to
72 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
73 * for the sake of testing. Andrew Gillham helped work out the bugs
74 * in the 4/100 DMA code.
75 */
76
77 /*
78 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
79 * works, but interrupts (and, thus, reselection) don't. I don't know
80 * why, and I don't have a machine to test this on further.
81 *
82 * DMA, DMA completion interrupts, and reselection work fine on my
83 * 4/260 with modern SCSI-II disks attached. I've had reports of
84 * reselection failing on Sun Shoebox-type configurations where
85 * there are multiple non-SCSI devices behind Emulex or Adaptec
86 * bridges. These devices pre-date the SCSI-I spec, and might not
87 * behave the way the 5380 code expects. For this reason, only
88 * DMA is enabled by default in this driver.
89 *
90 * Jason R. Thorpe <thorpej (at) NetBSD.org>
91 * December 8, 1995
92 */
93
94 #include <sys/cdefs.h>
95 __KERNEL_RCSID(0, "$NetBSD: sw.c,v 1.14 2003/12/04 12:42:54 keihan Exp $");
96
97 #include "opt_ddb.h"
98
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/systm.h>
102 #include <sys/kernel.h>
103 #include <sys/malloc.h>
104 #include <sys/errno.h>
105 #include <sys/device.h>
106 #include <sys/buf.h>
107
108 #include <machine/bus.h>
109 #include <machine/intr.h>
110 #include <machine/autoconf.h>
111
112 #include <dev/scsipi/scsi_all.h>
113 #include <dev/scsipi/scsipi_all.h>
114 #include <dev/scsipi/scsipi_debug.h>
115 #include <dev/scsipi/scsiconf.h>
116
117 #ifndef DDB
118 #define Debugger()
119 #endif
120
121 #ifndef DEBUG
122 #define DEBUG XXX
123 #endif
124
125 #define COUNT_SW_LEFTOVERS XXX /* See sw DMA completion code */
126
127 #include <dev/ic/ncr5380reg.h>
128 #include <dev/ic/ncr5380var.h>
129
130 #include <sparc/dev/swreg.h>
131
132 /*
133 * Transfers smaller than this are done using PIO
134 * (on assumption they're not worth DMA overhead)
135 */
136 #define MIN_DMA_LEN 128
137
138 /*
139 * Transfers larger than 65535 bytes need to be split-up.
140 * (Some of the FIFO logic has only 16 bits counters.)
141 * Make the size an integer multiple of the page size
142 * to avoid buf/cluster remap problems. (paranoid?)
143 */
144 #define MAX_DMA_LEN 0xE000
145
146 #ifdef DEBUG
147 int sw_debug = 0;
148 #endif
149
150 /*
151 * This structure is used to keep track of mapped DMA requests.
152 */
153 struct sw_dma_handle {
154 int dh_flags;
155 #define SIDH_BUSY 0x01 /* This DH is in use */
156 #define SIDH_OUT 0x02 /* DMA does data out (write) */
157 u_char *dh_addr; /* KVA of start of buffer */
158 int dh_maplen; /* Original data length */
159 long dh_startingpa; /* PA of buffer; for "sw" */
160 bus_dmamap_t dh_dmamap; /* bus_dma(9) map for this request's buffer */
161 #define dh_dvma dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
162 };
163
164 /*
165 * The first structure member has to be the ncr5380_softc
166 * so we can just cast to go back and forth between them.
167 */
168 struct sw_softc {
169 struct ncr5380_softc ncr_sc; /* MUST be first: code casts between the two */
170 bus_space_tag_t sc_bustag; /* bus tags */
171 bus_dma_tag_t sc_dmatag;
172
173 struct sw_dma_handle *sc_dma; /* array of SCI_OPENINGS DMA handles */
174 int sc_xlen; /* length of current DMA segment. */
175 int sc_options; /* options for this instance. */
176 };
177
178 /*
179 * Options. By default, DMA is enabled and DMA completion interrupts
180 * and reselect are disabled. You may enable additional features
181 * with the `flags' directive in your kernel's configuration file.
182 *
183 * Alternatively, you can patch your kernel with DDB or some other
184 * mechanism. The sc_options member of the softc is OR'd with
185 * the value in sw_options.
186 *
187 * On the "sw", interrupts (and thus reselection) don't work, so they're
188 * disabled by default. DMA is still a little dangerous, too.
189 *
190 * Note, there's a separate sw_options to make life easier.
191 */
192 #define SW_ENABLE_DMA 0x01 /* Use DMA (maybe polled) */
193 #define SW_DMA_INTR 0x02 /* DMA completion interrupts */
194 #define SW_DO_RESELECT 0x04 /* Allow disconnect/reselect */
195 #define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
196 #define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
197 int sw_options = SW_ENABLE_DMA;
198
199 static int sw_match __P((struct device *, struct cfdata *, void *));
200 static void sw_attach __P((struct device *, struct device *, void *));
201 static int sw_intr __P((void *));
202 static void sw_reset_adapter __P((struct ncr5380_softc *));
203 static void sw_minphys __P((struct buf *));
204
205 void sw_dma_alloc __P((struct ncr5380_softc *));
206 void sw_dma_free __P((struct ncr5380_softc *));
207 void sw_dma_poll __P((struct ncr5380_softc *));
208
209 void sw_dma_setup __P((struct ncr5380_softc *));
210 void sw_dma_start __P((struct ncr5380_softc *));
211 void sw_dma_eop __P((struct ncr5380_softc *));
212 void sw_dma_stop __P((struct ncr5380_softc *));
213
214 void sw_intr_on __P((struct ncr5380_softc *));
215 void sw_intr_off __P((struct ncr5380_softc *));
216
217 /* Shorthand bus space access */
218 #define SWREG_READ(sc, index) \
219 bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
220 #define SWREG_WRITE(sc, index, v) \
221 bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
222
223
224 /* The Sun "SCSI Weird" 4/100 obio controller. */
225 CFATTACH_DECL(sw, sizeof(struct sw_softc),
226 sw_match, sw_attach, NULL, NULL);
227
228 static int
229 sw_match(parent, cf, aux)
230 struct device *parent;
231 struct cfdata *cf;
232 void *aux;
233 {
234 union obio_attach_args *uoba = aux;
235 struct obio4_attach_args *oba;
236
237 /* Nothing but a Sun 4/100 is going to have these devices. */
238 if (cpuinfo.cpu_type != CPUTYP_4_100)
239 return (0);
240
241 if (uoba->uoba_isobio4 == 0)
242 return (0);
243
244 /* Make sure there is something there... */
245 oba = &uoba->uoba_oba4;
246 return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
247 1, /* probe size */
248 1, /* offset */
249 0, /* flags */
250 NULL, NULL));
251 }
252
253 static void
254 sw_attach(parent, self, aux)
255 struct device *parent, *self;
256 void *aux;
257 {
258 struct sw_softc *sc = (struct sw_softc *) self;
259 struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
260 union obio_attach_args *uoba = aux;
261 struct obio4_attach_args *oba = &uoba->uoba_oba4;
262 bus_space_handle_t bh;
263 char bits[64];
264 int i;
265
266 sc->sc_dmatag = oba->oba_dmatag;
267
268 /* Map the controller registers. */
269 if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
270 SWREG_BANK_SZ,
271 BUS_SPACE_MAP_LINEAR,
272 &bh) != 0) {
273 printf("%s: cannot map registers\n", self->dv_xname);
274 return;
275 }
276
277 ncr_sc->sc_regt = oba->oba_bustag;
278 ncr_sc->sc_regh = bh;
279
280 sc->sc_options = sw_options;
281
282 ncr_sc->sc_dma_setup = sw_dma_setup;
283 ncr_sc->sc_dma_start = sw_dma_start;
284 ncr_sc->sc_dma_eop = sw_dma_stop;
285 ncr_sc->sc_dma_stop = sw_dma_stop;
286 ncr_sc->sc_intr_on = sw_intr_on;
287 ncr_sc->sc_intr_off = sw_intr_off;
288
289 /*
290 * Establish interrupt channel.
291 * Default interrupt priority always is 3. At least, that's
292 * what my board seems to be at. --thorpej
293 */
294 if (oba->oba_pri == -1)
295 oba->oba_pri = 3;
296
297 (void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
298 sw_intr, sc);
299
300 printf(" pri %d\n", oba->oba_pri);
301
302
303 /*
304 * Pull in the options flags. Allow the user to completely
305 * override the default values.
306 */
307 if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK) != 0)
308 sc->sc_options =
309 (ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK);
310
311 /*
312 * Initialize fields used by the MI code
313 */
314
315 /* NCR5380 register bank offsets */
316 ncr_sc->sci_r0 = 0;
317 ncr_sc->sci_r1 = 1;
318 ncr_sc->sci_r2 = 2;
319 ncr_sc->sci_r3 = 3;
320 ncr_sc->sci_r4 = 4;
321 ncr_sc->sci_r5 = 5;
322 ncr_sc->sci_r6 = 6;
323 ncr_sc->sci_r7 = 7;
324
325 ncr_sc->sc_rev = NCR_VARIANT_NCR5380;
326
327 /*
328 * MD function pointers used by the MI code.
329 */
330 ncr_sc->sc_pio_out = ncr5380_pio_out;
331 ncr_sc->sc_pio_in = ncr5380_pio_in;
332 ncr_sc->sc_dma_alloc = sw_dma_alloc;
333 ncr_sc->sc_dma_free = sw_dma_free;
334 ncr_sc->sc_dma_poll = sw_dma_poll;
335
336 ncr_sc->sc_flags = 0;
337 if ((sc->sc_options & SW_DO_RESELECT) == 0)
338 ncr_sc->sc_no_disconnect = 0xFF;
339 if ((sc->sc_options & SW_DMA_INTR) == 0)
340 ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
341 ncr_sc->sc_min_dma_len = MIN_DMA_LEN;
342
343
344 /*
345 * Allocate DMA handles.
346 */
347 i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
348 sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
349 if (sc->sc_dma == NULL)
350 panic("sw: DMA handle malloc failed");
351
352 for (i = 0; i < SCI_OPENINGS; i++) {
353 sc->sc_dma[i].dh_flags = 0;
354
355 /* Allocate a DMA handle */
356 if (bus_dmamap_create(
357 sc->sc_dmatag, /* tag */
358 MAXPHYS, /* size */
359 1, /* nsegments */
360 MAXPHYS, /* maxsegsz */
361 0, /* boundary */
362 BUS_DMA_NOWAIT,
363 &sc->sc_dma[i].dh_dmamap) != 0) {
364
365 printf("%s: DMA buffer map create error\n",
366 ncr_sc->sc_dev.dv_xname);
367 return;
368 }
369 }
370
371 if (sc->sc_options) {
372 printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
373 bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
374 bits, sizeof(bits)));
375 }
376
377 ncr_sc->sc_channel.chan_id = 7;
378 ncr_sc->sc_adapter.adapt_minphys = sw_minphys;
379
380 /* Initialize sw board */
381 sw_reset_adapter(ncr_sc);
382
383 /* Attach the ncr5380 chip driver */
384 ncr5380_attach(ncr_sc);
385 }
386
387 static void
388 sw_minphys(struct buf *bp)
389 {
390 if (bp->b_bcount > MAX_DMA_LEN) {
391 #ifdef DEBUG
392 if (sw_debug) {
393 printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
394 Debugger();
395 }
396 #endif
397 bp->b_bcount = MAX_DMA_LEN;
398 }
399 minphys(bp);
400 }
401
402 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
403 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
404
405 static int
406 sw_intr(void *arg)
407 {
408 struct sw_softc *sc = arg;
409 struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
410 int dma_error, claimed;
411 u_short csr;
412
413 claimed = 0;
414 dma_error = 0;
415
416 /* SBC interrupt? DMA interrupt? */
417 csr = SWREG_READ(ncr_sc, SWREG_CSR);
418
419 NCR_TRACE("sw_intr: csr=0x%x\n", csr);
420
421 if (csr & SW_CSR_DMA_CONFLICT) {
422 dma_error |= SW_CSR_DMA_CONFLICT;
423 printf("sw_intr: DMA conflict\n");
424 }
425 if (csr & SW_CSR_DMA_BUS_ERR) {
426 dma_error |= SW_CSR_DMA_BUS_ERR;
427 printf("sw_intr: DMA bus error\n");
428 }
429 if (dma_error) {
430 if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
431 sc->ncr_sc.sc_state |= NCR_ABORTING;
432 /* Make sure we will call the main isr. */
433 csr |= SW_CSR_DMA_IP;
434 }
435
436 if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
437 claimed = ncr5380_intr(&sc->ncr_sc);
438 #ifdef DEBUG
439 if (!claimed) {
440 printf("sw_intr: spurious from SBC\n");
441 if (sw_debug & 4) {
442 Debugger(); /* XXX */
443 }
444 }
445 #endif
446 }
447
448 return (claimed);
449 }
450
451
452 static void
453 sw_reset_adapter(struct ncr5380_softc *ncr_sc)
454 {
455
456 #ifdef DEBUG
457 if (sw_debug) {
458 printf("sw_reset_adapter\n");
459 }
460 #endif
461
462 /*
463 * The reset bits in the CSR are active low.
464 */
465 SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
466 delay(10);
467 SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);
468
469 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
470 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
471 delay(10);
472 SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);
473
474 SCI_CLR_INTR(ncr_sc);
475 }
476
477
478 /*****************************************************************
479 * Common functions for DMA
480 ****************************************************************/
481
482 /*
483 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
484 * for DMA transfer. On the Sun4, this means mapping the buffer
485 * into DVMA space.
486 */
487 void
488 sw_dma_alloc(ncr_sc)
489 struct ncr5380_softc *ncr_sc;
490 {
491 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
492 struct sci_req *sr = ncr_sc->sc_current;
493 struct scsipi_xfer *xs = sr->sr_xs;
494 struct sw_dma_handle *dh;
495 int i, xlen;
496 u_long addr;
497
498 #ifdef DIAGNOSTIC
499 if (sr->sr_dma_hand != NULL)
500 panic("sw_dma_alloc: already have DMA handle");
501 #endif
502
503 #if 1 /* XXX - Temporary */
504 /* XXX - In case we think DMA is completely broken... */
505 if ((sc->sc_options & SW_ENABLE_DMA) == 0)
506 return;
507 #endif
508
509 addr = (u_long) ncr_sc->sc_dataptr;
510 xlen = ncr_sc->sc_datalen;
511
512 /* If the DMA start addr is misaligned then do PIO */
513 if ((addr & 1) || (xlen & 1)) {
514 printf("sw_dma_alloc: misaligned.\n");
515 return;
516 }
517
518 /* Make sure our caller checked sc_min_dma_len. */
519 if (xlen < MIN_DMA_LEN)
520 panic("sw_dma_alloc: xlen=0x%x", xlen);
521
522 /* Find free DMA handle. Guaranteed to find one since we have
523 as many DMA handles as the driver has processes. */
524 for (i = 0; i < SCI_OPENINGS; i++) {
525 if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
526 goto found;
527 }
528 panic("sw: no free DMA handles.");
529
530 found:
531 dh = &sc->sc_dma[i];
532 dh->dh_flags = SIDH_BUSY;
533 dh->dh_addr = (u_char *)addr;
534 dh->dh_maplen = xlen;
535
536 /* Copy the "write" flag for convenience. */
537 if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
538 dh->dh_flags |= SIDH_OUT;
539
540 /*
541 * Double-map the buffer into DVMA space. If we can't re-map
542 * the buffer, we print a warning and fall back to PIO mode.
543 *
544 * NOTE: it is not safe to sleep here!
545 */
546 if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
547 (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
548 /* Can't remap segment */
549 printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
550 addr, dh->dh_maplen);
551 dh->dh_flags = 0;
552 return;
553 }
554 bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
555 (dh->dh_flags & SIDH_OUT)
556 ? BUS_DMASYNC_PREWRITE
557 : BUS_DMASYNC_PREREAD);
558
559 /* success */
560 sr->sr_dma_hand = dh;
561
562 return;
563 }
564
565
566 void
567 sw_dma_free(ncr_sc)
568 struct ncr5380_softc *ncr_sc;
569 {
570 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
571 struct sci_req *sr = ncr_sc->sc_current;
572 struct sw_dma_handle *dh = sr->sr_dma_hand;
573
574 #ifdef DIAGNOSTIC
575 if (dh == NULL)
576 panic("sw_dma_free: no DMA handle");
577 #endif
578
579 if (ncr_sc->sc_state & NCR_DOINGDMA)
580 panic("sw_dma_free: free while in progress");
581
582 if (dh->dh_flags & SIDH_BUSY) {
583 /* Give back the DVMA space. */
584 bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
585 dh->dh_dvma, dh->dh_maplen,
586 (dh->dh_flags & SIDH_OUT)
587 ? BUS_DMASYNC_POSTWRITE
588 : BUS_DMASYNC_POSTREAD);
589 bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
590 dh->dh_flags = 0;
591 }
592 sr->sr_dma_hand = NULL;
593 }
594
595
596 /*
597 * Poll (spin-wait) for DMA completion.
598 * Called right after xx_dma_start(), and
599 * xx_dma_stop() will be called next.
600 * Same for either VME or OBIO.
601 */
602 void
603 sw_dma_poll(ncr_sc)
604 struct ncr5380_softc *ncr_sc;
605 {
606 struct sci_req *sr = ncr_sc->sc_current;
607 int tmo, csr_mask, csr;
608
609 /* Make sure DMA started successfully. */
610 if (ncr_sc->sc_state & NCR_ABORTING)
611 return;
612
613 csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
614 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
615
616 tmo = 50000; /* X100 = 5 sec. */
617 for (;;) {
618 csr = SWREG_READ(ncr_sc, SWREG_CSR);
619 if (csr & csr_mask)
620 break;
621 if (--tmo <= 0) {
622 printf("%s: DMA timeout (while polling)\n",
623 ncr_sc->sc_dev.dv_xname);
624 /* Indicate timeout as MI code would. */
625 sr->sr_flags |= SR_OVERDUE;
626 break;
627 }
628 delay(100);
629 }
630
631 #ifdef DEBUG
632 if (sw_debug) {
633 printf("sw_dma_poll: done, csr=0x%x\n", csr);
634 }
635 #endif
636 }
637
638
639 /*
640 * This is called when the bus is going idle,
641 * so we want to enable the SBC interrupts.
642 * That is controlled by the DMA enable!
643 * Who would have guessed!
644 * What a NASTY trick!
645 *
646 * XXX THIS MIGHT NOT WORK RIGHT!
647 */
648 void
649 sw_intr_on(ncr_sc)
650 struct ncr5380_softc *ncr_sc;
651 {
652 u_int32_t csr;
653
654 sw_dma_setup(ncr_sc);
655 csr = SWREG_READ(ncr_sc, SWREG_CSR);
656 csr |= SW_CSR_DMA_EN; /* XXX - this bit is for vme only?! */
657 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
658 }
659
660 /*
661 * This is called when the bus is idle and we are
662 * about to start playing with the SBC chip.
663 *
664 * XXX THIS MIGHT NOT WORK RIGHT!
665 */
666 void
667 sw_intr_off(ncr_sc)
668 struct ncr5380_softc *ncr_sc;
669 {
670 u_int32_t csr;
671
672 csr = SWREG_READ(ncr_sc, SWREG_CSR);
673 csr &= ~SW_CSR_DMA_EN;
674 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
675 }
676
677
678 /*
679 * This function is called during the COMMAND or MSG_IN phase
680 * that precedes a DATA_IN or DATA_OUT phase, in case we need
681 * to setup the DMA engine before the bus enters a DATA phase.
682 *
683 * On the OBIO version we just clear the DMA count and address
684 * here (to make sure it stays idle) and do the real setup
685 * later, in dma_start.
686 */
687 void
688 sw_dma_setup(ncr_sc)
689 struct ncr5380_softc *ncr_sc;
690 {
691 u_int32_t csr;
692
693 /* No FIFO to reset on "sw". */
694
695 /* Set direction (assume recv here) */
696 csr = SWREG_READ(ncr_sc, SWREG_CSR);
697 csr &= ~SW_CSR_SEND;
698 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
699
700 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
701 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
702 }
703
704
705 void
706 sw_dma_start(ncr_sc)
707 struct ncr5380_softc *ncr_sc;
708 {
709 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
710 struct sci_req *sr = ncr_sc->sc_current;
711 struct sw_dma_handle *dh = sr->sr_dma_hand;
712 u_long dva;
713 int xlen, adj, adjlen;
714 u_int mode;
715 u_int32_t csr;
716
717 /*
718 * Get the DVMA mapping for this segment.
719 */
720 dva = (u_long)(dh->dh_dvma);
721 if (dva & 1)
722 panic("sw_dma_start: bad dva=0x%lx", dva);
723
724 xlen = ncr_sc->sc_datalen;
725 xlen &= ~1;
726 sc->sc_xlen = xlen; /* XXX: or less... */
727
728 #ifdef DEBUG
729 if (sw_debug & 2) {
730 printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
731 dh, dva, xlen);
732 }
733 #endif
734
735 /*
736 * Set up the DMA controller.
737 * Note that (dh->dh_len < sc_datalen)
738 */
739
740 /* Set direction (send/recv) */
741 csr = SWREG_READ(ncr_sc, SWREG_CSR);
742 if (dh->dh_flags & SIDH_OUT) {
743 csr |= SW_CSR_SEND;
744 } else {
745 csr &= ~SW_CSR_SEND;
746 }
747 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
748
749 /*
750 * The "sw" needs longword aligned transfers. We
751 * detect a shortword aligned transfer here, and adjust the
752 * DMA transfer by 2 bytes. These two bytes are read/written
753 * in PIO mode just before the DMA is started.
754 */
755 adj = 0;
756 if (dva & 2) {
757 adj = 2;
758 #ifdef DEBUG
759 if (sw_debug & 2)
760 printf("sw_dma_start: adjusted up %d bytes\n", adj);
761 #endif
762 }
763
764 /* We have to frob the address on the "sw". */
765 dh->dh_startingpa = (dva | 0xF00000);
766 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
767 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);
768
769 /*
770 * Acknowledge the phase change. (After DMA setup!)
771 * Put the SBIC into DMA mode, and start the transfer.
772 */
773 if (dh->dh_flags & SIDH_OUT) {
774 NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
775 if (adj) {
776 adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
777 adj, dh->dh_addr);
778 if (adjlen != adj)
779 printf("%s: bad outgoing adj, %d != %d\n",
780 ncr_sc->sc_dev.dv_xname, adjlen, adj);
781 }
782 SCI_CLR_INTR(ncr_sc);
783 NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
784 mode = NCR5380_READ(ncr_sc, sci_mode);
785 mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
786 NCR5380_WRITE(ncr_sc, sci_mode, mode);
787 NCR5380_WRITE(ncr_sc, sci_dma_send, 0); /* start it */
788 } else {
789 NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
790 if (adj) {
791 adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
792 adj, dh->dh_addr);
793 if (adjlen != adj)
794 printf("%s: bad incoming adj, %d != %d\n",
795 ncr_sc->sc_dev.dv_xname, adjlen, adj);
796 }
797 SCI_CLR_INTR(ncr_sc);
798 NCR5380_WRITE(ncr_sc, sci_icmd, 0);
799 mode = NCR5380_READ(ncr_sc, sci_mode);
800 mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
801 NCR5380_WRITE(ncr_sc, sci_mode, mode);
802 NCR5380_WRITE(ncr_sc, sci_irecv, 0); /* start it */
803 }
804
805 /* Let'er rip! */
806 csr |= SW_CSR_DMA_EN;
807 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
808
809 ncr_sc->sc_state |= NCR_DOINGDMA;
810
811 #ifdef DEBUG
812 if (sw_debug & 2) {
813 printf("sw_dma_start: started, flags=0x%x\n",
814 ncr_sc->sc_state);
815 }
816 #endif
817 }
818
819
void
sw_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Nothing to do: DMA is always halted before sci_csr is examined. */
}
827
828 #if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
829 #define COUNT_SW_LEFTOVERS
830 #endif
831 #ifdef COUNT_SW_LEFTOVERS
832 /*
833 * Let's find out how often these occur. Read these with DDB from time
834 * to time.
835 */
836 int sw_3_leftover = 0;
837 int sw_2_leftover = 0;
838 int sw_1_leftover = 0;
839 int sw_0_leftover = 0;
840 #endif
841
842 void
843 sw_dma_stop(ncr_sc)
844 struct ncr5380_softc *ncr_sc;
845 {
846 struct sci_req *sr = ncr_sc->sc_current;
847 struct sw_dma_handle *dh = sr->sr_dma_hand;
848 int ntrans = 0, dva;
849 u_int mode;
850 u_int32_t csr;
851
852 if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
853 #ifdef DEBUG
854 printf("sw_dma_stop: DMA not running\n");
855 #endif
856 return;
857 }
858 ncr_sc->sc_state &= ~NCR_DOINGDMA;
859
860 /* First, halt the DMA engine. */
861 csr = SWREG_READ(ncr_sc, SWREG_CSR);
862 csr &= ~SW_CSR_DMA_EN;
863 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
864
865 /*
866 * XXX HARDWARE BUG!
867 * Apparently, some early 4/100 SCSI controllers had a hardware
868 * bug that caused the controller to do illegal memory access.
869 * We see this as SW_CSR_DMA_BUS_ERR (makes sense). To work around
870 * this, we simply need to clean up after ourselves ... there will
871 * be as many as 3 bytes left over. Since we clean up "left-over"
872 * bytes on every read anyway, we just continue to chug along
873 * if SW_CSR_DMA_BUS_ERR is asserted. (This was probably worked
874 * around in hardware later with the "left-over byte" indicator
875 * in the VME controller.)
876 */
877 #if 0
878 if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)) {
879 #else
880 if (csr & (SW_CSR_DMA_CONFLICT)) {
881 #endif
882 printf("sw: DMA error, csr=0x%x, reset\n", csr);
883 sr->sr_xs->error = XS_DRIVER_STUFFUP;
884 ncr_sc->sc_state |= NCR_ABORTING;
885 sw_reset_adapter(ncr_sc);
886 }
887
888 /* Note that timeout may have set the error flag. */
889 if (ncr_sc->sc_state & NCR_ABORTING)
890 goto out;
891
892 /*
893 * Now try to figure out how much actually transferred
894 *
895 * The "sw" doesn't have a FIFO or a bcr, so we've stored
896 * the starting PA of the transfer in the DMA handle,
897 * and subtract it from the ending PA left in the dma_addr
898 * register.
899 */
900 dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
901 ntrans = (dva - dh->dh_startingpa);
902
903 #ifdef DEBUG
904 if (sw_debug & 2) {
905 printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
906 }
907 #endif
908
909 if (ntrans > ncr_sc->sc_datalen)
910 panic("sw_dma_stop: excess transfer");
911
912 /* Adjust data pointer */
913 ncr_sc->sc_dataptr += ntrans;
914 ncr_sc->sc_datalen -= ntrans;
915
916 /*
917 * After a read, we may need to clean-up
918 * "Left-over bytes" (yuck!) The "sw" doesn't
919 * have a "left-over" indicator, so we have to so
920 * this no matter what. Ick.
921 */
922 if ((dh->dh_flags & SIDH_OUT) == 0) {
923 char *cp = ncr_sc->sc_dataptr;
924 u_int32_t bpr;
925
926 bpr = SWREG_READ(ncr_sc, SWREG_BPR);
927
928 switch (dva & 3) {
929 case 3:
930 cp[0] = (bpr & 0xff000000) >> 24;
931 cp[1] = (bpr & 0x00ff0000) >> 16;
932 cp[2] = (bpr & 0x0000ff00) >> 8;
933 #ifdef COUNT_SW_LEFTOVERS
934 ++sw_3_leftover;
935 #endif
936 break;
937
938 case 2:
939 cp[0] = (bpr & 0xff000000) >> 24;
940 cp[1] = (bpr & 0x00ff0000) >> 16;
941 #ifdef COUNT_SW_LEFTOVERS
942 ++sw_2_leftover;
943 #endif
944 break;
945
946 case 1:
947 cp[0] = (bpr & 0xff000000) >> 24;
948 #ifdef COUNT_SW_LEFTOVERS
949 ++sw_1_leftover;
950 #endif
951 break;
952
953 #ifdef COUNT_SW_LEFTOVERS
954 default:
955 ++sw_0_leftover;
956 break;
957 #endif
958 }
959 }
960
961 out:
962 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
963 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
964
965 /* Put SBIC back in PIO mode. */
966 mode = NCR5380_READ(ncr_sc, sci_mode);
967 mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
968 NCR5380_WRITE(ncr_sc, sci_mode, mode);
969 NCR5380_WRITE(ncr_sc, sci_icmd, 0);
970
971 #ifdef DEBUG
972 if (sw_debug & 2) {
973 printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
974 }
975 #endif
976 }
977