/*	$NetBSD: si.c,v 1.4 2001/08/20 12:00:53 wiz Exp $	*/

/*-
 * Copyright (c) 1996,2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, Jason R. Thorpe and
 * Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains VME bus-dependent portions of the `si' SCSI adapter.
 * This hardware is frequently found on Sun 3 and Sun 4 machines.
 *
 * The SCSI machinery on this adapter is implemented by an NCR5380,
 * which is taken care of by the chipset driver in /sys/dev/ic/ncr5380sbc.c
 *
 * The logic has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380 (i.e.
 * for reselect), one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 */
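/*
 * In this driver, that dance is handled by si_intr_on() and si_intr_off()
 * below: si_intr_on() clears the DMA counters via si_dma_setup() and then
 * sets SI_CSR_DMA_EN so the 5380 can interrupt for reselection, and
 * si_intr_off() clears SI_CSR_DMA_EN again before the 5380 is touched.
 */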

/*
 * This driver originated as an MD implementation for the sun3 and sun4
 * ports.  The notes pertaining to that history are included below.
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef DDB
#define	Debugger()
#endif

#ifndef DEBUG
#define	DEBUG	XXX
#endif

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <dev/vme/sireg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption that they're not worth the DMA overhead).
 */
#define	MIN_DMA_LEN	128

#ifdef DEBUG
int si_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct si_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	int		dh_maplen;	/* Original data length */
	bus_dmamap_t	dh_dmamap;
#define	dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct si_softc {
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;
	vme_chipset_tag_t	sc_vctag;

	int	sc_adapter_iv_am;	/* int. vec + address modifier */
	struct si_dma_handle *sc_dma;
	int	sc_xlen;		/* length of current DMA segment. */
	int	sc_options;		/* options for this instance. */
};

/*
 * Options.  By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled.  You may enable additional features
 * using the `flags' directive in your kernel's configuration file.
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism.  The sc_options member of the softc is OR'd with
 * the value in si_options.
 *
 * Note, there's a separate sw_options to make life easier.
 */
#define	SI_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SI_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SI_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SI_OPTIONS_MASK	(SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT)
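/*
 * Bit-name string for bitmask_snprintf(): the leading "\10" gives the
 * radix (octal) used when printing the value, and each "\<bit>NAME"
 * entry names that (1-origin) bit when it is set.
 */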
#define	SI_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
int si_options = SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT;

static int	si_match __P((struct device *, struct cfdata *, void *));
static void	si_attach __P((struct device *, struct device *, void *));
static int	si_intr __P((void *));
static void	si_reset_adapter __P((struct ncr5380_softc *));

void si_dma_alloc __P((struct ncr5380_softc *));
void si_dma_free __P((struct ncr5380_softc *));
void si_dma_poll __P((struct ncr5380_softc *));

void si_dma_setup __P((struct ncr5380_softc *));
void si_dma_start __P((struct ncr5380_softc *));
void si_dma_eop __P((struct ncr5380_softc *));
void si_dma_stop __P((struct ncr5380_softc *));

void si_intr_on __P((struct ncr5380_softc *));
void si_intr_off __P((struct ncr5380_softc *));

/*
 * Shorthand bus space access
 * XXX - must look into endian issues here.
 */
#define	SIREG_READ(sc, index) \
	bus_space_read_2((sc)->sc_regt, (sc)->sc_regh, index)
#define	SIREG_WRITE(sc, index, v) \
	bus_space_write_2((sc)->sc_regt, (sc)->sc_regh, index, v)


/* Auto-configuration glue. */
struct cfattach si_ca = {
	sizeof(struct si_softc), si_match, si_attach
};

static int
si_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct vme_attach_args *va = aux;
	vme_chipset_tag_t ct = va->va_vct;
	vme_am_t mod;
	vme_addr_t vme_addr;

	/* Make sure there is something there... */
	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
	vme_addr = va->r[0].offset;

	if (vme_probe(ct, vme_addr, 1, mod, VME_D8, NULL, 0) != 0)
		return (0);

	/*
	 * If this is a VME SCSI board, we have to determine whether
	 * it is an "sc" (Sun2) or "si" (Sun3) SCSI board.  This can
	 * be determined using the fact that the "sc" board occupies
	 * 4K bytes in VME space but the "si" board occupies 2K bytes.
	 */
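	/*
	 * So: if nothing responds at offset 0x801 the board is only 2K
	 * long and must be an "si"; if the probe succeeds there, it is
	 * a 4K "sc" board, which this driver does not match.
	 */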
	return (vme_probe(ct, vme_addr + 0x801, 1, mod, VME_D8, NULL, 0) != 0);
}

static void
si_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct si_softc *sc = (struct si_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct vme_attach_args *va = aux;
	vme_chipset_tag_t ct = va->va_vct;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	vme_mapresc_t resc;
	vme_intr_handle_t ih;
	vme_am_t mod;
	char bits[64];
	int i;

	sc->sc_dmatag = va->va_bdt;
	sc->sc_vctag = ct;

	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;

	if (vme_space_map(ct, va->r[0].offset, SIREG_BANK_SZ,
	    mod, VME_D8, 0, &bt, &bh, &resc) != 0)
		panic("%s: vme_space_map", ncr_sc->sc_dev.dv_xname);

	ncr_sc->sc_regt = bt;
	ncr_sc->sc_regh = bh;

	sc->sc_options = si_options;

	ncr_sc->sc_dma_setup = si_dma_setup;
	ncr_sc->sc_dma_start = si_dma_start;
	ncr_sc->sc_dma_eop = si_dma_stop;
	ncr_sc->sc_dma_stop = si_dma_stop;

	vme_intr_map(ct, va->ilevel, va->ivector, &ih);
	vme_intr_establish(ct, ih, IPL_BIO, si_intr, sc);

	printf("\n");

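	/*
	 * Interrupt vector in the low byte, VME address modifier in the
	 * high byte; si_reset_adapter() loads this into SIREG_IV_AM.
	 */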
	sc->sc_adapter_iv_am = (mod << 8) | (va->ivector & 0xFF);

	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = si_dma_alloc;
	ncr_sc->sc_dma_free = si_dma_free;
	ncr_sc->sc_dma_poll = si_dma_poll;

	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SI_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SI_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct si_dma_handle);
	sc->sc_dma = (struct si_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("si: dma handle malloc failed\n");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (vme_dmamap_create(
		    sc->sc_vctag,	/* VME chip tag */
		    MAXPHYS,		/* size */
		    VME_AM_A24,		/* address modifier */
		    VME_D16,		/* data size */
		    0,			/* swap */
		    1,			/* nsegments */
		    MAXPHYS,		/* maxsegsz */
		    0,			/* boundary */
		    BUS_DMA_NOWAIT,
		    &sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SI_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = minphys;

	/*
	 * Initialize si board itself.
	 */
	si_reset_adapter(ncr_sc);
	ncr5380_attach(ncr_sc);

	if (sc->sc_options & SI_DO_RESELECT) {
		/*
		 * Need to enable interrupts (and DMA!)
		 * on this H/W for reselect to work.
		 */
		ncr_sc->sc_intr_on = si_intr_on;
		ncr_sc->sc_intr_off = si_intr_off;
	}
}

#define	CSR_WANT (SI_CSR_SBC_IP | SI_CSR_DMA_IP | \
	SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR)

static int
si_intr(void *arg)
{
	struct si_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	NCR_TRACE("si_intr: csr=0x%x\n", csr);

	if (csr & SI_CSR_DMA_CONFLICT) {
		dma_error |= SI_CSR_DMA_CONFLICT;
		printf("si_intr: DMA conflict\n");
	}
	if (csr & SI_CSR_DMA_BUS_ERR) {
		dma_error |= SI_CSR_DMA_BUS_ERR;
		printf("si_intr: DMA bus error\n");
	}
	if (dma_error) {
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SI_CSR_DMA_IP;
	}

	if (csr & (SI_CSR_SBC_IP | SI_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("si_intr: spurious from SBC\n");
			if (si_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return (claimed);
}


static void
si_reset_adapter(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;

#ifdef DEBUG
	if (si_debug) {
		printf("si_reset_adapter\n");
	}
#endif

	/*
	 * The SCSI3 controller has an 8K FIFO to buffer data between the
	 * 5380 and the DMA.  Make sure it starts out empty.
	 *
	 * The reset bits in the CSR are active low.
	 */
	SIREG_WRITE(ncr_sc, SIREG_CSR, 0);
	delay(10);
	SIREG_WRITE(ncr_sc, SIREG_CSR,
	    SI_CSR_FIFO_RES | SI_CSR_SCSI_RES | SI_CSR_INTR_EN);
	delay(10);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
	SIREG_WRITE(ncr_sc, SIREG_IV_AM, sc->sc_adapter_iv_am);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);

	SCI_CLR_INTR(ncr_sc);
}

/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.
 */
void
si_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct si_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("si_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SI_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("si_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("si_dma_alloc: xlen=0x%x\n", xlen);

	/*
	 * Find free DMA handle.  Guaranteed to find one since we have
	 * as many DMA handles as the driver has processes.
	 */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("si: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
	    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("si_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
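	/*
	 * Sync the map before the device uses the buffer: PREWRITE
	 * flushes CPU writes out for a data-out transfer, PREREAD
	 * prepares the buffer to receive data from the device.
	 */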
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
	    (dh->dh_flags & SIDH_OUT)
		? BUS_DMASYNC_PREWRITE
		: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
si_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("si_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("si_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
		    dh->dh_dvma, dh->dh_maplen,
		    (dh->dh_flags & SIDH_OUT)
			? BUS_DMASYNC_POSTWRITE
			: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
si_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	int tmo, csr_mask, csr;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	csr_mask = SI_CSR_SBC_IP | SI_CSR_DMA_IP |
	    SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR;

	tmo = 50000;	/* X100 = 5 sec. */
	for (;;) {
		csr = SIREG_READ(ncr_sc, SIREG_CSR);
		if (csr & csr_mask)
			break;
		if (--tmo <= 0) {
			printf("%s: DMA timeout (while polling)\n",
			    ncr_sc->sc_dev.dv_xname);
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}

#ifdef DEBUG
	if (si_debug) {
		printf("si_dma_poll: done, csr=0x%x\n", csr);
	}
#endif
}


/*****************************************************************
 * VME functions for DMA
 ****************************************************************/


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
si_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int16_t csr;

	si_dma_setup(ncr_sc);
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
si_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int16_t csr;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 *
 * On the VME version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
si_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int16_t csr;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* Reset the FIFO */
	csr &= ~SI_CSR_FIFO_RES;	/* active low */
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
	csr |= SI_CSR_FIFO_RES;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/* Set direction (assume recv here) */
	csr &= ~SI_CSR_SEND;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
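	/*
	 * SI_CSR_BPCON controls how the DMA engine packs bytes when the
	 * buffer is not longword aligned; si_dma_start() sets or clears
	 * it for real based on bit 1 of the DVMA address.
	 */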
	/* Assume worst alignment */
	csr |= SI_CSR_BPCON;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);

	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* Clear FIFO counter. (also hits dma_count) */
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
}


void
si_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	u_long dva;
	int xlen;
	u_int mode;
	u_int16_t csr;

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("si_dma_start: bad dmaaddr=0x%lx", dva);
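	/*
	 * Transfers must be an even number of bytes at an even address
	 * (the engine works in 16-bit quantities), so round the count
	 * down; si_dma_alloc() already fell back to PIO for odd-length
	 * or odd-address buffers.
	 */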
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: dh=%p, dmaaddr=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* Disable DMA while we're setting up the transfer */
	csr &= ~SI_CSR_DMA_EN;

	/* Reset FIFO (again?) */
	csr &= ~SI_CSR_FIFO_RES;	/* active low */
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
	csr |= SI_CSR_FIFO_RES;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SI_CSR_SEND;
	} else {
		csr &= ~SI_CSR_SEND;
	}
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	if (dva & 2) {
		csr |= SI_CSR_BPCON;
	} else {
		csr &= ~SI_CSR_BPCON;
	}
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, (u_int16_t)(dva >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, (u_int16_t)(dva & 0xFFFF));
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, (u_int16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, (u_int16_t)(xlen & 0xFFFF));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, (u_int16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, (u_int16_t)(xlen & 0xFFFF));

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Enable DMA engine */
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}


void
si_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}


void
si_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int resid, ntrans;
	u_int16_t csr;
	u_int mode;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("si_dma_stop: dma not running\n");
#endif
		return;
	}

	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* First, halt the DMA engine. */
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	if (csr & (SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR)) {
		printf("si: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		si_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much was actually transferred.
	 *
	 * The fifo_count does not reflect how many bytes were
	 * actually transferred for VME.
	 *
	 * The SCSI-3 VME interface is a little funny on writes:
	 * if we have a disconnect, the DMA has overshot by
	 * one byte and the resid needs to be incremented.
	 * Only happens for partial transfers.
	 * (Thanks to Matt Jacob)
	 */

	resid = SIREG_READ(ncr_sc, SIREG_FIFO_CNTH) << 16;
	resid |= SIREG_READ(ncr_sc, SIREG_FIFO_CNT) & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_xlen))
			resid++;
	ntrans = sc->sc_xlen - resid;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: resid=0x%x ntrans=0x%x\n",
		    resid, ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("si_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	/*
	 * After a read, we may need to clean up
	 * "left-over bytes" (yuck!)
	 */
	if (((dh->dh_flags & SIDH_OUT) == 0) &&
	    ((csr & SI_CSR_LOB) != 0)) {
		char *cp = ncr_sc->sc_dataptr;
		u_int16_t bprh, bprl;

		bprh = SIREG_READ(ncr_sc, SIREG_BPRH);
		bprl = SIREG_READ(ncr_sc, SIREG_BPRL);

#ifdef DEBUG
		printf("si: got left-over bytes: bprh=%x, bprl=%x, csr=%x\n",
		    bprh, bprl, csr);
#endif

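		/*
		 * sc_dataptr was already advanced past this transfer above,
		 * so the bytes still sitting in the byte-pack registers are
		 * patched into the tail of the received data (cp[-1]..cp[-3]).
		 */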
		if (csr & SI_CSR_BPCON) {
			/* have SI_CSR_BPCON */
			cp[-1] = (bprl & 0xff00) >> 8;
		} else {
			switch (csr & SI_CSR_LOB) {
			case SI_CSR_LOB_THREE:
				cp[-3] = (bprh & 0xff00) >> 8;
				cp[-2] = (bprh & 0x00ff);
				cp[-1] = (bprl & 0xff00) >> 8;
				break;
			case SI_CSR_LOB_TWO:
				cp[-2] = (bprh & 0xff00) >> 8;
				cp[-1] = (bprh & 0x00ff);
				break;
			case SI_CSR_LOB_ONE:
				cp[-1] = (bprh & 0xff00) >> 8;
				break;
			}
		}
	}

out:
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);

	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);

	mode = NCR5380_READ(ncr_sc, sci_mode);
	/* Put SBIC back in PIO mode. */
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);
}