/*	$NetBSD: sw.c,v 1.1 2000/06/26 19:54:09 pk Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains only the machine-dependent parts of the
 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
 * The machine-independent parts are in ncr5380sbc.c
 *
 * Supported hardware includes:
 *	Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
 *	Sun SCSI-3 on VME (si: Sun 4/200-series, others)
 *
 * The VME variant has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380, (i.e.
 * for reselect) one must clear the DMA engine transfer count and
 * then enable DMA. This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way. Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing. Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

/*
 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
 * works, but interrupts (and, thus, reselection) don't. I don't know
 * why, and I don't have a machine to test this on further.
 *
 * DMA, DMA completion interrupts, and reselection work fine on my
 * 4/260 with modern SCSI-II disks attached. I've had reports of
 * reselection failing on Sun Shoebox-type configurations where
 * there are multiple non-SCSI devices behind Emulex or Adaptec
 * bridges. These devices pre-date the SCSI-I spec, and might not
 * behave the way the 5380 code expects. For this reason, only
 * DMA is enabled by default in this driver.
 *
 *	Jason R. Thorpe <thorpej@NetBSD.ORG>
 *	December 8, 1995
 */

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef DDB
#define	Debugger()
#endif

#ifndef DEBUG
#define	DEBUG XXX
#endif

#define	COUNT_SW_LEFTOVERS	XXX	/* See sw DMA completion code */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <sparc/dev/swreg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption they're not worth the DMA overhead)
 */
#define MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems. (paranoid?)
 */
#define MAX_DMA_LEN 0xE000
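/* (0xE000 is 57344 bytes, a multiple of both 4KB and 8KB page sizes, and well under the 65535-byte counter limit.) */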

#ifdef DEBUG
int sw_debug = 0;
static int sw_link_flags = 0 /* | SDEV_DB2 */ ;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct sw_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Original data length */
	long		dh_startingpa;	/* PA of buffer; for "sw" */
	bus_dmamap_t	dh_dmamap;
#define	dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct sw_softc {
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;

	struct sw_dma_handle	*sc_dma;
	int			sc_xlen;	/* length of current DMA segment. */
	int			sc_options;	/* options for this instance. */
};

/*
 * Options. By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled. You may enable additional features
 * with the `flags' directive in your kernel's configuration file.
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism. The sc_options member of the softc is OR'd with
 * the value in sw_options.
 *
 * On the "sw", interrupts (and thus reselection) don't work, so they're
 * disabled by default. DMA is still a little dangerous, too.
 *
 * Note, there's a separate sw_options to make life easier.
 */
#define	SW_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SW_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SW_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SW_OPTIONS_MASK	(SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
#define	SW_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
int sw_options = SW_ENABLE_DMA;
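/*
 * For example (illustrative), setting "flags 0x07" on the sw line of the
 * kernel config turns on all three options (DMA, DMA completion
 * interrupts, and reselection); sw_attach() below masks cf_flags with
 * SW_OPTIONS_MASK and uses the result as sc_options.
 */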

static int	sw_match __P((struct device *, struct cfdata *, void *));
static void	sw_attach __P((struct device *, struct device *, void *));
static int	sw_intr __P((void *));
static void	sw_reset_adapter __P((struct ncr5380_softc *));
static void	sw_minphys __P((struct buf *));

void sw_dma_alloc __P((struct ncr5380_softc *));
void sw_dma_free __P((struct ncr5380_softc *));
void sw_dma_poll __P((struct ncr5380_softc *));

void sw_dma_setup __P((struct ncr5380_softc *));
void sw_dma_start __P((struct ncr5380_softc *));
void sw_dma_eop __P((struct ncr5380_softc *));
void sw_dma_stop __P((struct ncr5380_softc *));

void sw_intr_on __P((struct ncr5380_softc *));
void sw_intr_off __P((struct ncr5380_softc *));

/* Shorthand bus space access */
#define SWREG_READ(sc, index) \
	bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
#define SWREG_WRITE(sc, index, v) \
	bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
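/* Both macros do 32-bit bus_space accesses; the sw registers are read and written as whole words. */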


/* The Sun "SCSI Weird" 4/100 obio controller. */
struct cfattach sw_ca = {
	sizeof(struct sw_softc), sw_match, sw_attach
};

static int
sw_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba;

	/* Nothing but a Sun 4/100 is going to have these devices. */
	if (cpuinfo.cpu_type != CPUTYP_4_100)
		return (0);

	if (uoba->uoba_isobio4 == 0)
		return (0);

	/* Make sure there is something there... */
	oba = &uoba->uoba_oba4;
	return (bus_space_probe(oba->oba_bustag, 0, oba->oba_paddr,
				1,	/* probe size */
				1,	/* offset */
				0,	/* flags */
				NULL, NULL));
}

static void
sw_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sw_softc *sc = (struct sw_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	union obio_attach_args *uoba = aux;
	struct obio4_attach_args *oba = &uoba->uoba_oba4;
	bus_space_handle_t bh;
	char bits[64];
	int i;

	sc->sc_dmatag = oba->oba_dmatag;

	/* Map the controller registers. */
	if (obio_bus_map(oba->oba_bustag, oba->oba_paddr,
			 0,
			 SWREG_BANK_SZ,
			 BUS_SPACE_MAP_LINEAR,
			 0, &bh) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	ncr_sc->sc_regt = oba->oba_bustag;
	ncr_sc->sc_regh = bh;

	sc->sc_options = sw_options;

	ncr_sc->sc_dma_setup = sw_dma_setup;
	ncr_sc->sc_dma_start = sw_dma_start;
	ncr_sc->sc_dma_eop = sw_dma_stop;
	ncr_sc->sc_dma_stop = sw_dma_stop;
	ncr_sc->sc_intr_on = sw_intr_on;
	ncr_sc->sc_intr_off = sw_intr_off;

	/*
	 * Establish interrupt channel.
	 * The default interrupt priority is always 3. At least, that's
	 * what my board seems to be at. --thorpej
	 */
	if (oba->oba_pri == -1)
		oba->oba_pri = 3;

	(void)bus_intr_establish(oba->oba_bustag,
				 oba->oba_pri, 0,
				 sw_intr, sc);

	printf(" pri %d\n", oba->oba_pri);


	/*
	 * Pull in the options flags. Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SW_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = sw_dma_alloc;
	ncr_sc->sc_dma_free = sw_dma_free;
	ncr_sc->sc_dma_poll = sw_dma_poll;

	ncr_sc->sc_flags = 0;
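	/*
	 * sc_no_disconnect is a per-target bitmask; with reselection
	 * disabled, mark all eight targets so the MI code never
	 * allows a disconnect.
	 */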
	if ((sc->sc_options & SW_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SW_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;


	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
	sc->sc_dma = (struct sw_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("sw: dma handle malloc failed\n");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (bus_dmamap_create(
				sc->sc_dmatag,	/* tag */
				MAXPHYS,	/* size */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SW_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}
#ifdef DEBUG
	ncr_sc->sc_link.flags |= sw_link_flags;
#endif

	ncr_sc->sc_link.scsipi_scsi.adapter_target = 7;
	ncr_sc->sc_adapter.scsipi_minphys = sw_minphys;

	/* Initialize sw board */
	sw_reset_adapter(ncr_sc);

	/* Attach the ncr5380 chip driver */
	ncr5380_attach(ncr_sc);
}

static void
sw_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef DEBUG
		if (sw_debug) {
			printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
			Debugger();
		}
#endif
		bp->b_bcount = MAX_DMA_LEN;
	}
	return (minphys(bp));
}

#define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
	SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )

static int
sw_intr(void *arg)
{
	struct sw_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);

	NCR_TRACE("sw_intr: csr=0x%x\n", csr);

	if (csr & SW_CSR_DMA_CONFLICT) {
		dma_error |= SW_CSR_DMA_CONFLICT;
		printf("sw_intr: DMA conflict\n");
	}
	if (csr & SW_CSR_DMA_BUS_ERR) {
		dma_error |= SW_CSR_DMA_BUS_ERR;
		printf("sw_intr: DMA bus error\n");
	}
	if (dma_error) {
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SW_CSR_DMA_IP;
	}

	if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("sw_intr: spurious from SBC\n");
			if (sw_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return (claimed);
}


static void
sw_reset_adapter(struct ncr5380_softc *ncr_sc)
{

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_reset_adapter\n");
	}
#endif

	/*
	 * The reset bits in the CSR are active low.
	 */
	SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);

	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
	delay(10);
	SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);

	SCI_CLR_INTR(ncr_sc);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
 * for DMA transfer. On the Sun4, this means mapping the buffer
 * into DVMA space.
 */
void
sw_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct sw_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("sw_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SW_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("sw_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("sw_dma_alloc: xlen=0x%x\n", xlen);

	/* Find free DMA handle. Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("sw: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_addr = (u_char *)addr;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space. If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
			    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("sw_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
			(dh->dh_flags & SIDH_OUT)
				? BUS_DMASYNC_PREWRITE
				: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
sw_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("sw_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("sw_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
				dh->dh_dvma, dh->dh_maplen,
				(dh->dh_flags & SIDH_OUT)
					? BUS_DMASYNC_POSTWRITE
					: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
sw_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	int tmo, csr_mask, csr;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
	    SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;

	tmo = 50000;	/* X100 = 5 sec. */
	for (;;) {
		csr = SWREG_READ(ncr_sc, SWREG_CSR);
		if (csr & csr_mask)
			break;
		if (--tmo <= 0) {
			printf("%s: DMA timeout (while polling)\n",
			    ncr_sc->sc_dev.dv_xname);
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}

#ifdef DEBUG
	if (sw_debug) {
		printf("sw_dma_poll: done, csr=0x%x\n", csr);
	}
#endif
}


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	sw_dma_setup(ncr_sc);
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr |= SW_CSR_DMA_EN;	/* XXX - this bit is for vme only?! */
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 *
 * XXX THIS MIGHT NOT WORK RIGHT!
 */
void
sw_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
}


/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the OBIO version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
sw_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int32_t csr;

	/* No FIFO to reset on "sw". */

	/* Set direction (assume recv here) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_SEND;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
}


void
sw_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sw_softc *sc = (struct sw_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen, adj, adjlen;
	u_int mode;
	u_int32_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("sw_dma_start: bad dva=0x%lx", dva);

	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: dh=%p, dva=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	/* Set direction (send/recv) */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SW_CSR_SEND;
	} else {
		csr &= ~SW_CSR_SEND;
	}
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * The "sw" needs longword aligned transfers. We
	 * detect a shortword aligned transfer here, and adjust the
	 * DMA transfer by 2 bytes. These two bytes are read/written
	 * in PIO mode just before the DMA is started.
	 */
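	/*
	 * For example, if the DVMA address ends in ...2 and xlen is 8192,
	 * the two leading bytes are moved by PIO below and the DMA engine
	 * is started at address ...4 with a count of 8190.
	 */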
	adj = 0;
	if (dva & 2) {
		adj = 2;
#ifdef DEBUG
		if (sw_debug & 2)
			printf("sw_dma_start: adjusted up %d bytes\n", adj);
#endif
	}

	/* We have to frob the address on the "sw". */
	dh->dh_startingpa = (dva | 0xF00000);
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);

	/*
	 * Acknowledge the phase change. (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		if (adj) {
			adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad outgoing adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		if (adj) {
			adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
			    adj, dh->dh_addr);
			if (adjlen != adj)
				printf("%s: bad incoming adj, %d != %d\n",
				    ncr_sc->sc_dev.dv_xname, adjlen, adj);
		}
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);
		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);
		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Let'er rip! */
	csr |= SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}


void
sw_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}

#if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
#define COUNT_SW_LEFTOVERS
#endif
#ifdef COUNT_SW_LEFTOVERS
/*
 * Let's find out how often these occur. Read these with DDB from time
 * to time.
 */
int sw_3_leftover = 0;
int sw_2_leftover = 0;
int sw_1_leftover = 0;
int sw_0_leftover = 0;
#endif

void
sw_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct sw_dma_handle *dh = sr->sr_dma_hand;
	int ntrans = 0, dva;
	u_int mode;
	u_int32_t csr;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("sw_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	csr = SWREG_READ(ncr_sc, SWREG_CSR);
	csr &= ~SW_CSR_DMA_EN;
	SWREG_WRITE(ncr_sc, SWREG_CSR, csr);

	/*
	 * XXX HARDWARE BUG!
	 * Apparently, some early 4/100 SCSI controllers had a hardware
	 * bug that caused the controller to do illegal memory access.
	 * We see this as SW_CSR_DMA_BUS_ERR (makes sense). To work around
	 * this, we simply need to clean up after ourselves ... there will
	 * be as many as 3 bytes left over. Since we clean up "left-over"
	 * bytes on every read anyway, we just continue to chug along
	 * if SW_CSR_DMA_BUS_ERR is asserted. (This was probably worked
	 * around in hardware later with the "left-over byte" indicator
	 * in the VME controller.)
	 */
#if 0
	if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR)) {
#else
	if (csr & (SW_CSR_DMA_CONFLICT)) {
#endif
		printf("sw: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		sw_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The "sw" doesn't have a FIFO or a bcr, so we've stored
	 * the starting PA of the transfer in the DMA handle,
	 * and subtract it from the ending PA left in the dma_addr
	 * register.
	 */
	dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
	ntrans = (dva - dh->dh_startingpa);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("sw: short transfer\n");
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}

	if (ntrans > ncr_sc->sc_datalen)
		panic("sw_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

	/*
	 * After a read, we may need to clean up
	 * "left-over bytes" (yuck!). The "sw" doesn't
	 * have a "left-over" indicator, so we have to do
	 * this no matter what. Ick.
	 */
	if ((dh->dh_flags & SIDH_OUT) == 0) {
		char *cp = ncr_sc->sc_dataptr;
		u_int32_t bpr;

		bpr = SWREG_READ(ncr_sc, SWREG_BPR);

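		/*
		 * The low two bits of the ending DMA address give the
		 * number of residual bytes; copy them out of the BPR,
		 * most significant byte first.
		 */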
		switch (dva & 3) {
		case 3:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
			cp[2] = (bpr & 0x0000ff00) >> 8;
#ifdef COUNT_SW_LEFTOVERS
			++sw_3_leftover;
#endif
			break;

		case 2:
			cp[0] = (bpr & 0xff000000) >> 24;
			cp[1] = (bpr & 0x00ff0000) >> 16;
#ifdef COUNT_SW_LEFTOVERS
			++sw_2_leftover;
#endif
			break;

		case 1:
			cp[0] = (bpr & 0xff000000) >> 24;
#ifdef COUNT_SW_LEFTOVERS
			++sw_1_leftover;
#endif
			break;

#ifdef COUNT_SW_LEFTOVERS
		default:
			++sw_0_leftover;
			break;
#endif
		}
	}

out:
	SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
	SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);

	/* Put SBIC back in PIO mode. */
	mode = NCR5380_READ(ncr_sc, sci_mode);
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);

#ifdef DEBUG
	if (sw_debug & 2) {
		printf("sw_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif
}