/*	$NetBSD: si.c,v 1.8 2002/03/26 23:14:49 fredette Exp $	*/

/*-
 * Copyright (c) 1996,2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, Jason R. Thorpe and
 * Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the VME bus-dependent code for the `si' SCSI adapter.
 * This hardware is frequently found on Sun 3 and Sun 4 machines.
 *
 * The SCSI machinery on this adapter is implemented by an NCR5380,
 * which is taken care of by the chipset driver in /sys/dev/ic/ncr5380sbc.c
 *
 * The logic has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380, (i.e.
 * for reselect) one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 *
 * What wonderfully whacky hardware this is!
 *
 */
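
/*
 * In this driver the dance described above is handled by si_intr_on()
 * and si_intr_off() below: si_intr_on() zeroes the DMA address and
 * count registers and then sets SI_CSR_DMA_EN, and si_intr_off()
 * clears SI_CSR_DMA_EN again before the 5380 registers are touched.
 */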

/*
 * This driver originated as an MD implementation for the sun3 and sun4
 * ports.  The notes pertaining to that history are included below.
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only. (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: si.c,v 1.8 2002/03/26 23:14:49 fredette Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef Debugger
#define	Debugger()
#endif

#ifndef DEBUG
#define	DEBUG	XXX
#endif

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <dev/vme/sireg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on assumption they're not worth DMA overhead)
 */
#define	MIN_DMA_LEN 128

#ifdef DEBUG
int si_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct si_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	int		dh_maplen;	/* Original data length */
	bus_dmamap_t	dh_dmamap;
#define	dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct si_softc {
	struct ncr5380_softc	ncr_sc;
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;
	vme_chipset_tag_t	sc_vctag;

	int	sc_adapter_iv_am;	/* int. vec + address modifier */
	struct si_dma_handle *sc_dma;
	int	sc_xlen;		/* length of current DMA segment. */
	int	sc_options;		/* options for this instance. */
};

/*
 * Options.  By default, DMA, DMA completion interrupts and
 * disconnect/reselect are all enabled (see si_options below).  You may
 * change the set of enabled features via the `flags' directive in your
 * kernel's configuration file; a non-zero `flags' value completely
 * replaces the defaults for that instance.
 *
 * Alternatively, you can patch si_options in your kernel with DDB or
 * some other mechanism.
 *
 * Note, there's a separate sw_options to make life easier.
 */
#define	SI_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SI_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SI_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SI_OPTIONS_MASK	(SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT)
#define	SI_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
int si_options = SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT;
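
/*
 * For example, with the option bits above, a kernel configuration line
 * along the lines of (locators shown here are only illustrative):
 *
 *	si0 at vme0 ... flags 0x07
 *
 * would request DMA, DMA completion interrupts and disconnect/reselect,
 * since 0x07 == SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT.
 */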

static int	si_match __P((struct device *, struct cfdata *, void *));
static void	si_attach __P((struct device *, struct device *, void *));
static int	si_intr __P((void *));
static void	si_reset_adapter __P((struct ncr5380_softc *));

void si_dma_alloc __P((struct ncr5380_softc *));
void si_dma_free __P((struct ncr5380_softc *));
void si_dma_poll __P((struct ncr5380_softc *));

void si_dma_setup __P((struct ncr5380_softc *));
void si_dma_start __P((struct ncr5380_softc *));
void si_dma_eop __P((struct ncr5380_softc *));
void si_dma_stop __P((struct ncr5380_softc *));

void si_intr_on __P((struct ncr5380_softc *));
void si_intr_off __P((struct ncr5380_softc *));

/*
 * Shorthand bus space access
 * XXX - must look into endian issues here.
 */
#define	SIREG_READ(sc, index) \
	bus_space_read_2((sc)->sc_regt, (sc)->sc_regh, index)
#define	SIREG_WRITE(sc, index, v) \
	bus_space_write_2((sc)->sc_regt, (sc)->sc_regh, index, v)


/* Auto-configuration glue. */
struct cfattach si_ca = {
	sizeof(struct si_softc), si_match, si_attach
};

static int
si_match(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct vme_attach_args *va = aux;
	vme_chipset_tag_t ct = va->va_vct;
	vme_am_t mod;
	vme_addr_t vme_addr;

	/* Make sure there is something there... */
	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
	vme_addr = va->r[0].offset;

	if (vme_probe(ct, vme_addr, 1, mod, VME_D8, NULL, 0) != 0)
		return (0);

	/*
	 * If this is a VME SCSI board, we have to determine whether
	 * it is an "sc" (Sun2) or "si" (Sun3) SCSI board.  This can
	 * be determined using the fact that the "sc" board occupies
	 * 4K bytes in VME space but the "si" board occupies 2K bytes.
	 */
	return (vme_probe(ct, vme_addr + 0x801, 1, mod, VME_D8, NULL, 0) != 0);
}

static void
si_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct si_softc *sc = (struct si_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct vme_attach_args *va = aux;
	vme_chipset_tag_t ct = va->va_vct;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	vme_mapresc_t resc;
	vme_intr_handle_t ih;
	vme_am_t mod;
	char bits[64];
	int i;

	sc->sc_dmatag = va->va_bdt;
	sc->sc_vctag = ct;

	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;

	if (vme_space_map(ct, va->r[0].offset, SIREG_BANK_SZ,
	    mod, VME_D8, 0, &bt, &bh, &resc) != 0)
		panic("%s: vme_space_map", ncr_sc->sc_dev.dv_xname);

	ncr_sc->sc_regt = bt;
	ncr_sc->sc_regh = bh;

	sc->sc_options = si_options;

	ncr_sc->sc_dma_setup = si_dma_setup;
	ncr_sc->sc_dma_start = si_dma_start;
	ncr_sc->sc_dma_eop = si_dma_stop;
	ncr_sc->sc_dma_stop = si_dma_stop;

	vme_intr_map(ct, va->ilevel, va->ivector, &ih);
	vme_intr_establish(ct, ih, IPL_BIO, si_intr, sc);

	printf("\n");

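	/*
	 * Keep the interrupt vector (low byte) and VME address modifier
	 * (high byte) together; si_reset_adapter() loads this value into
	 * the board's IV/AM register.
	 */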
	sc->sc_adapter_iv_am = (mod << 8) | (va->ivector & 0xFF);

	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = si_dma_alloc;
	ncr_sc->sc_dma_free = si_dma_free;
	ncr_sc->sc_dma_poll = si_dma_poll;

	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SI_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SI_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct si_dma_handle);
	sc->sc_dma = (struct si_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("si: dma handle malloc failed\n");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (vme_dmamap_create(
				sc->sc_vctag,	/* VME chip tag */
				MAXPHYS,	/* size */
				VME_AM_A24,	/* address modifier */
				VME_D16,	/* data size */
				0,		/* swap */
				1,		/* nsegments */
				MAXPHYS,	/* maxsegsz */
				0,		/* boundary */
				BUS_DMA_NOWAIT,
				&sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SI_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

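	/* Our own SCSI ID on the bus (the initiator ID) is hard-coded to 7. */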
	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = minphys;

	/*
	 * Initialize si board itself.
	 */
	si_reset_adapter(ncr_sc);
	ncr5380_attach(ncr_sc);

	if (sc->sc_options & SI_DO_RESELECT) {
		/*
		 * Need to enable interrupts (and DMA!)
		 * on this H/W for reselect to work.
		 */
		ncr_sc->sc_intr_on = si_intr_on;
		ncr_sc->sc_intr_off = si_intr_off;
	}
}

#define	CSR_WANT (SI_CSR_SBC_IP | SI_CSR_DMA_IP | \
	SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR )

static int
si_intr(void *arg)
{
	struct si_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	NCR_TRACE("si_intr: csr=0x%x\n", csr);

	if (csr & SI_CSR_DMA_CONFLICT) {
		dma_error |= SI_CSR_DMA_CONFLICT;
		printf("si_intr: DMA conflict\n");
	}
	if (csr & SI_CSR_DMA_BUS_ERR) {
		dma_error |= SI_CSR_DMA_BUS_ERR;
		printf("si_intr: DMA bus error\n");
	}
	if (dma_error) {
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SI_CSR_DMA_IP;
	}

	if (csr & (SI_CSR_SBC_IP | SI_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("si_intr: spurious from SBC\n");
			if (si_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return (claimed);
}


static void
si_reset_adapter(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;

#ifdef DEBUG
	if (si_debug) {
		printf("si_reset_adapter\n");
	}
#endif

	/*
	 * The SCSI3 controller has an 8K FIFO to buffer data between the
	 * 5380 and the DMA.  Make sure it starts out empty.
	 *
	 * The reset bits in the CSR are active low.
	 */
	SIREG_WRITE(ncr_sc, SIREG_CSR, 0);
	delay(10);
	SIREG_WRITE(ncr_sc, SIREG_CSR,
	    SI_CSR_FIFO_RES | SI_CSR_SCSI_RES | SI_CSR_INTR_EN);
	delay(10);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
	SIREG_WRITE(ncr_sc, SIREG_IV_AM, sc->sc_adapter_iv_am);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);

	SCI_CLR_INTR(ncr_sc);
}

/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.
 */
void
si_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct si_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("si_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SI_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("si_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("si_dma_alloc: xlen=0x%x\n", xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("si: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
	    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("si_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
	    (dh->dh_flags & SIDH_OUT)
		? BUS_DMASYNC_PREWRITE
		: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
si_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("si_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("si_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* Give back the DVMA space. */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
		    dh->dh_dvma, dh->dh_maplen,
		    (dh->dh_flags & SIDH_OUT)
			? BUS_DMASYNC_POSTWRITE
			: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
si_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	int tmo, csr_mask, csr;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	csr_mask = SI_CSR_SBC_IP | SI_CSR_DMA_IP |
	    SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR;

	tmo = 50000;	/* X100 = 5 sec. */
	for (;;) {
		csr = SIREG_READ(ncr_sc, SIREG_CSR);
		if (csr & csr_mask)
			break;
		if (--tmo <= 0) {
			printf("%s: DMA timeout (while polling)\n",
			    ncr_sc->sc_dev.dv_xname);
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}

#ifdef DEBUG
	if (si_debug) {
		printf("si_dma_poll: done, csr=0x%x\n", csr);
	}
#endif
}


/*****************************************************************
 * VME functions for DMA
 ****************************************************************/


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
si_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int16_t csr;

	/* Clear DMA start address and counters */
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* Enter receive mode (for safety) and enable DMA engine */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr &= ~SI_CSR_SEND;
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
si_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	u_int16_t csr;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to setup the DMA engine before the bus enters a DATA phase.
 *
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 *
 * On the VME version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
si_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	u_int16_t csr;
	u_long dva;
	int xlen;

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* Disable DMA while we're setting up the transfer */
	csr &= ~SI_CSR_DMA_EN;

	/* Reset the FIFO */
	csr &= ~SI_CSR_FIFO_RES;	/* active low */
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
	csr |= SI_CSR_FIFO_RES;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("si_dma_setup: bad dmaaddr=0x%lx", dva);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: dh=%p, dmaaddr=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif
	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SI_CSR_SEND;
	} else {
		csr &= ~SI_CSR_SEND;
	}

	/* Set byte-packing control */
	if (dva & 2) {
		csr |= SI_CSR_BPCON;
	} else {
		csr &= ~SI_CSR_BPCON;
	}

	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/* Load start address */
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, (u_int16_t)(dva >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, (u_int16_t)(dva & 0xFFFF));

	/* Clear DMA counters; these will be set in si_dma_start() */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* Clear FIFO counter. (also hits dma_count) */
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
}


void
si_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int xlen;
	u_int mode;
	u_int16_t csr;

	xlen = sc->sc_xlen;

	/* Load transfer length */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, (u_int16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, (u_int16_t)(xlen & 0xFFFF));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, (u_int16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, (u_int16_t)(xlen & 0xFFFF));

	/*
	 * Acknowledge the phase change. (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	ncr_sc->sc_state |= NCR_DOINGDMA;

	/* Enable DMA engine */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}


void
si_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}


void
si_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int resid, ntrans;
	u_int16_t csr;
	u_int mode;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("si_dma_stop: dma not running\n");
#endif
		return;
	}

	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* First, halt the DMA engine. */
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	if (csr & (SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR)) {
		printf("si: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		si_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The fifo_count does not reflect how many bytes were
	 * actually transferred for VME.
	 *
	 * SCSI-3 VME interface is a little funny on writes:
	 * if we have a disconnect, the dma has overshot by
	 * one byte and the resid needs to be incremented.
	 * Only happens for partial transfers.
	 * (Thanks to Matt Jacob)
	 */

	resid = SIREG_READ(ncr_sc, SIREG_FIFO_CNTH) << 16;
	resid |= SIREG_READ(ncr_sc, SIREG_FIFO_CNT) & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_xlen))
			resid++;
	ntrans = sc->sc_xlen - resid;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: resid=0x%x ntrans=0x%x\n",
		    resid, ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("si_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	/*
	 * After a read, we may need to clean-up
	 * "Left-over bytes"  (yuck!)
	 */
	if (((dh->dh_flags & SIDH_OUT) == 0) &&
	    ((csr & SI_CSR_LOB) != 0))
	{
		char *cp = ncr_sc->sc_dataptr;
		u_int16_t bprh, bprl;

		bprh = SIREG_READ(ncr_sc, SIREG_BPRH);
		bprl = SIREG_READ(ncr_sc, SIREG_BPRL);

#ifdef DEBUG
		printf("si: got left-over bytes: bprh=%x, bprl=%x, csr=%x\n",
		    bprh, bprl, csr);
#endif

		if (csr & SI_CSR_BPCON) {
			/* have SI_CSR_BPCON */
			cp[-1] = (bprl & 0xff00) >> 8;
		} else {
			switch (csr & SI_CSR_LOB) {
			case SI_CSR_LOB_THREE:
				cp[-3] = (bprh & 0xff00) >> 8;
				cp[-2] = (bprh & 0x00ff);
				cp[-1] = (bprl & 0xff00) >> 8;
				break;
			case SI_CSR_LOB_TWO:
				cp[-2] = (bprh & 0xff00) >> 8;
				cp[-1] = (bprh & 0x00ff);
				break;
			case SI_CSR_LOB_ONE:
				cp[-1] = (bprh & 0xff00) >> 8;
				break;
			}
		}
	}

out:
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);

	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);

	mode = NCR5380_READ(ncr_sc, sci_mode);
	/* Put SBIC back in PIO mode. */
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);
}