/*	$NetBSD: si.c,v 1.6 2001/11/13 06:17:07 lukem Exp $	*/

/*-
 * Copyright (c) 1996,2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Adam Glass, David Jones, Gordon W. Ross, Jason R. Thorpe and
 * Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the VME bus-dependent portions of the `si' SCSI
 * adapter.  This hardware is frequently found on Sun 3 and Sun 4 machines.
 *
 * The SCSI machinery on this adapter is implemented by an NCR5380,
 * which is taken care of by the chipset driver in /sys/dev/ic/ncr5380sbc.c
 *
 * The logic has a bit to enable or disable the DMA engine,
 * but that bit also gates the interrupt line from the NCR5380!
 * Therefore, in order to get any interrupt from the 5380 (i.e.
 * for reselect), one must clear the DMA engine transfer count and
 * then enable DMA.  This has the further complication that you
 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
 * we have to turn DMA back off before we even look at the 5380.
 * (A sketch of this dance follows this comment.)
 *
 * What wonderfully whacky hardware this is!
 *
 */
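/*
 * A minimal sketch of that dance, using the register access macros
 * defined later in this file; the authoritative code is in
 * si_intr_on() and si_intr_off() below.
 *
 * To let the 5380 interrupt (e.g. while waiting for reselect),
 * clear the transfer count and only then set the DMA enable bit:
 *
 *	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
 *	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
 *	csr = SIREG_READ(ncr_sc, SIREG_CSR);
 *	SIREG_WRITE(ncr_sc, SIREG_CSR, csr | SI_CSR_DMA_EN);
 *
 * Before touching any NCR5380 register again, drop the enable bit:
 *
 *	csr = SIREG_READ(ncr_sc, SIREG_CSR);
 *	SIREG_WRITE(ncr_sc, SIREG_CSR, csr & ~SI_CSR_DMA_EN);
 */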

/*
 * This driver originated as an MD implementation for the sun3 and sun4
 * ports.  The notes pertaining to that history are included below.
 *
 * David Jones wrote the initial version of this module for NetBSD/sun3,
 * which included support for the VME adapter only (no reselection).
 *
 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
 * both the VME and OBIO code to support disconnect/reselect.
 * (Required figuring out the hardware "features" noted above.)
 *
 * The autoconfiguration boilerplate came from Adam Glass.
 *
 * Jason R. Thorpe ported the autoconfiguration and VME portions to
 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
 * a wacky OBIO variant of the VME SCSI-3.  Many thanks to Chuck Cranor
 * for lots of helpful tips and suggestions.  Thanks also to Paul Kranenburg
 * and Chris Torek for bits of insight needed along the way.  Thanks to
 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
 * for the sake of testing.  Andrew Gillham helped work out the bugs
 * in the 4/100 DMA code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: si.c,v 1.6 2001/11/13 06:17:07 lukem Exp $");

#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/buf.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#ifndef DDB
#define Debugger()
#endif

#ifndef DEBUG
#define DEBUG XXX
#endif

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include <dev/vme/sireg.h>

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption that they're not worth the DMA overhead).
 */
#define MIN_DMA_LEN 128

#ifdef DEBUG
int si_debug = 0;
#endif

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct si_dma_handle {
        int             dh_flags;
#define SIDH_BUSY       0x01            /* This DH is in use */
#define SIDH_OUT        0x02            /* DMA does data out (write) */
        int             dh_maplen;      /* Original data length */
        bus_dmamap_t    dh_dmamap;
#define dh_dvma dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct si_softc {
        struct ncr5380_softc    ncr_sc;
        bus_space_tag_t         sc_bustag;      /* bus tags */
        bus_dma_tag_t           sc_dmatag;
        vme_chipset_tag_t       sc_vctag;

        int             sc_adapter_iv_am; /* int. vec + address modifier */
        struct si_dma_handle *sc_dma;
        int             sc_xlen;        /* length of current DMA segment. */
        int             sc_options;     /* options for this instance. */
};
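/*
 * A minimal sketch of the cast convention this layout allows (these
 * are the same conversions the functions below perform; nothing new
 * is defined here):
 *
 *	struct si_softc *sc = (struct si_softc *)ncr_sc;    MI -> MD
 *	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;          MD -> MI
 *
 * This is only valid while ncr_sc remains the first member of
 * struct si_softc, so that both pointers refer to the same address.
 */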

/*
 * Options.  By default, DMA is enabled and DMA completion interrupts
 * and reselect are disabled.  You may enable additional features
 * with the `flags' directive in your kernel's configuration file.
 *
 * Alternatively, you can patch your kernel with DDB or some other
 * mechanism.  The sc_options member of the softc is initialized from
 * si_options, and a non-zero `flags' value replaces it entirely
 * (see si_attach below).  An example follows the option flag
 * definitions below.
 *
 * Note, there's a separate sw_options to make life easier.
 */
#define SI_ENABLE_DMA   0x01    /* Use DMA (maybe polled) */
#define SI_DMA_INTR     0x02    /* DMA completion interrupts */
#define SI_DO_RESELECT  0x04    /* Allow disconnect/reselect */
#define SI_OPTIONS_MASK (SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT)
#define SI_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
int si_options = SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT;
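/*
 * Example (illustrative only; the VME locators below are invented and
 * must match whatever your port's config file actually uses):
 * enabling all three option bits (0x01|0x02|0x04 = 0x07) via `flags':
 *
 *	si0	at vme0 addr 0x200000 irq 3 vect 0x40 flags 0x07
 *
 * Alternatively, with a DDB-enabled kernel (and symbols available),
 * the compiled-in default can be patched before the device attaches:
 *
 *	db> write si_options 0x7
 *
 * Only bits within SI_OPTIONS_MASK are honored; a non-zero `flags'
 * value replaces the si_options default for that instance.
 */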

static int  si_match __P((struct device *, struct cfdata *, void *));
static void si_attach __P((struct device *, struct device *, void *));
static int  si_intr __P((void *));
static void si_reset_adapter __P((struct ncr5380_softc *));

void si_dma_alloc __P((struct ncr5380_softc *));
void si_dma_free __P((struct ncr5380_softc *));
void si_dma_poll __P((struct ncr5380_softc *));

void si_dma_setup __P((struct ncr5380_softc *));
void si_dma_start __P((struct ncr5380_softc *));
void si_dma_eop __P((struct ncr5380_softc *));
void si_dma_stop __P((struct ncr5380_softc *));

void si_intr_on __P((struct ncr5380_softc *));
void si_intr_off __P((struct ncr5380_softc *));

/*
 * Shorthand bus space access
 * XXX - must look into endian issues here.
 */
#define SIREG_READ(sc, index) \
        bus_space_read_2((sc)->sc_regt, (sc)->sc_regh, index)
#define SIREG_WRITE(sc, index, v) \
        bus_space_write_2((sc)->sc_regt, (sc)->sc_regh, index, v)


/* Auto-configuration glue. */
struct cfattach si_ca = {
        sizeof(struct si_softc), si_match, si_attach
};

static int
si_match(parent, cf, aux)
        struct device   *parent;
        struct cfdata   *cf;
        void            *aux;
{
        struct vme_attach_args  *va = aux;
        vme_chipset_tag_t       ct = va->va_vct;
        vme_am_t                mod;
        vme_addr_t              vme_addr;

        /* Make sure there is something there... */
        mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
        vme_addr = va->r[0].offset;

        if (vme_probe(ct, vme_addr, 1, mod, VME_D8, NULL, 0) != 0)
                return (0);

        /*
         * If this is a VME SCSI board, we have to determine whether
         * it is an "sc" (Sun2) or "si" (Sun3) SCSI board.  This can
         * be determined using the fact that the "sc" board occupies
         * 4K bytes in VME space but the "si" board occupies 2K bytes.
         */
        return (vme_probe(ct, vme_addr + 0x801, 1, mod, VME_D8, NULL, 0) != 0);
}

static void
si_attach(parent, self, aux)
        struct device   *parent, *self;
        void            *aux;
{
        struct si_softc         *sc = (struct si_softc *) self;
        struct ncr5380_softc    *ncr_sc = &sc->ncr_sc;
        struct vme_attach_args  *va = aux;
        vme_chipset_tag_t       ct = va->va_vct;
        bus_space_tag_t         bt;
        bus_space_handle_t      bh;
        vme_mapresc_t           resc;
        vme_intr_handle_t       ih;
        vme_am_t                mod;
        char bits[64];
        int i;

        sc->sc_dmatag = va->va_bdt;
        sc->sc_vctag = ct;

        mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;

        if (vme_space_map(ct, va->r[0].offset, SIREG_BANK_SZ,
            mod, VME_D8, 0, &bt, &bh, &resc) != 0)
                panic("%s: vme_space_map", ncr_sc->sc_dev.dv_xname);

        ncr_sc->sc_regt = bt;
        ncr_sc->sc_regh = bh;

        sc->sc_options = si_options;

        ncr_sc->sc_dma_setup = si_dma_setup;
        ncr_sc->sc_dma_start = si_dma_start;
        ncr_sc->sc_dma_eop   = si_dma_stop;
        ncr_sc->sc_dma_stop  = si_dma_stop;

        vme_intr_map(ct, va->ilevel, va->ivector, &ih);
        vme_intr_establish(ct, ih, IPL_BIO, si_intr, sc);

        printf("\n");

        sc->sc_adapter_iv_am = (mod << 8) | (va->ivector & 0xFF);

        /*
         * Pull in the options flags.  Allow the user to completely
         * override the default values.
         */
        if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK) != 0)
                sc->sc_options =
                    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK);

        /*
         * Initialize fields used by the MI code
         */

        /* NCR5380 register bank offsets */
        ncr_sc->sci_r0 = 0;
        ncr_sc->sci_r1 = 1;
        ncr_sc->sci_r2 = 2;
        ncr_sc->sci_r3 = 3;
        ncr_sc->sci_r4 = 4;
        ncr_sc->sci_r5 = 5;
        ncr_sc->sci_r6 = 6;
        ncr_sc->sci_r7 = 7;

        ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

        /*
         * MD function pointers used by the MI code.
         */
        ncr_sc->sc_pio_out = ncr5380_pio_out;
        ncr_sc->sc_pio_in = ncr5380_pio_in;
        ncr_sc->sc_dma_alloc = si_dma_alloc;
        ncr_sc->sc_dma_free = si_dma_free;
        ncr_sc->sc_dma_poll = si_dma_poll;

        ncr_sc->sc_flags = 0;
        if ((sc->sc_options & SI_DO_RESELECT) == 0)
                ncr_sc->sc_no_disconnect = 0xFF;
        if ((sc->sc_options & SI_DMA_INTR) == 0)
                ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
        ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

        /*
         * Allocate DMA handles.
         */
        i = SCI_OPENINGS * sizeof(struct si_dma_handle);
        sc->sc_dma = (struct si_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
        if (sc->sc_dma == NULL)
                panic("si: dma handle malloc failed\n");

        for (i = 0; i < SCI_OPENINGS; i++) {
                sc->sc_dma[i].dh_flags = 0;

                /* Allocate a DMA handle */
                if (vme_dmamap_create(
                        sc->sc_vctag,           /* VME chip tag */
                        MAXPHYS,                /* size */
                        VME_AM_A24,             /* address modifier */
                        VME_D16,                /* data size */
                        0,                      /* swap */
                        1,                      /* nsegments */
                        MAXPHYS,                /* maxsegsz */
                        0,                      /* boundary */
                        BUS_DMA_NOWAIT,
                        &sc->sc_dma[i].dh_dmamap) != 0) {

                        printf("%s: DMA buffer map create error\n",
                            ncr_sc->sc_dev.dv_xname);
                        return;
                }
        }

        if (sc->sc_options) {
                printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
                    bitmask_snprintf(sc->sc_options, SI_OPTIONS_BITS,
                    bits, sizeof(bits)));
        }

        ncr_sc->sc_channel.chan_id = 7;
        ncr_sc->sc_adapter.adapt_minphys = minphys;

        /*
         * Initialize si board itself.
         */
        si_reset_adapter(ncr_sc);
        ncr5380_attach(ncr_sc);

        if (sc->sc_options & SI_DO_RESELECT) {
                /*
                 * Need to enable interrupts (and DMA!)
                 * on this H/W for reselect to work.
                 */
                ncr_sc->sc_intr_on  = si_intr_on;
                ncr_sc->sc_intr_off = si_intr_off;
        }
}

#define CSR_WANT (SI_CSR_SBC_IP | SI_CSR_DMA_IP | \
        SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR )

static int
si_intr(void *arg)
{
        struct si_softc *sc = arg;
        struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
        int dma_error, claimed;
        u_short csr;

        claimed = 0;
        dma_error = 0;

        /* SBC interrupt? DMA interrupt? */
        csr = SIREG_READ(ncr_sc, SIREG_CSR);

        NCR_TRACE("si_intr: csr=0x%x\n", csr);

        if (csr & SI_CSR_DMA_CONFLICT) {
                dma_error |= SI_CSR_DMA_CONFLICT;
                printf("si_intr: DMA conflict\n");
        }
        if (csr & SI_CSR_DMA_BUS_ERR) {
                dma_error |= SI_CSR_DMA_BUS_ERR;
                printf("si_intr: DMA bus error\n");
        }
        if (dma_error) {
                if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
                        sc->ncr_sc.sc_state |= NCR_ABORTING;
                /* Make sure we will call the main isr. */
                csr |= SI_CSR_DMA_IP;
        }

        if (csr & (SI_CSR_SBC_IP | SI_CSR_DMA_IP)) {
                claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
                if (!claimed) {
                        printf("si_intr: spurious from SBC\n");
                        if (si_debug & 4) {
                                Debugger();     /* XXX */
                        }
                }
#endif
        }

        return (claimed);
}


static void
si_reset_adapter(struct ncr5380_softc *ncr_sc)
{
        struct si_softc *sc = (struct si_softc *)ncr_sc;

#ifdef DEBUG
        if (si_debug) {
                printf("si_reset_adapter\n");
        }
#endif

        /*
         * The SCSI3 controller has an 8K FIFO to buffer data between the
         * 5380 and the DMA.  Make sure it starts out empty.
         *
         * The reset bits in the CSR are active low.
         */
        SIREG_WRITE(ncr_sc, SIREG_CSR, 0);
        delay(10);
        SIREG_WRITE(ncr_sc, SIREG_CSR,
            SI_CSR_FIFO_RES | SI_CSR_SCSI_RES | SI_CSR_INTR_EN);
        delay(10);

        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
        SIREG_WRITE(ncr_sc, SIREG_IV_AM, sc->sc_adapter_iv_am);
        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);

        SCI_CLR_INTR(ncr_sc);
}

/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.
 */
void
si_dma_alloc(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        struct si_softc *sc = (struct si_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct scsipi_xfer *xs = sr->sr_xs;
        struct si_dma_handle *dh;
        int i, xlen;
        u_long addr;

#ifdef DIAGNOSTIC
        if (sr->sr_dma_hand != NULL)
                panic("si_dma_alloc: already have DMA handle");
#endif

#if 1   /* XXX - Temporary */
        /* XXX - In case we think DMA is completely broken... */
        if ((sc->sc_options & SI_ENABLE_DMA) == 0)
                return;
#endif

        addr = (u_long) ncr_sc->sc_dataptr;
        xlen = ncr_sc->sc_datalen;

        /* If the DMA start addr is misaligned then do PIO */
        if ((addr & 1) || (xlen & 1)) {
                printf("si_dma_alloc: misaligned.\n");
                return;
        }

        /* Make sure our caller checked sc_min_dma_len. */
        if (xlen < MIN_DMA_LEN)
                panic("si_dma_alloc: xlen=0x%x\n", xlen);

        /*
         * Find free DMA handle.  Guaranteed to find one since we have
         * as many DMA handles as the driver has processes.
         */
        for (i = 0; i < SCI_OPENINGS; i++) {
                if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
                        goto found;
        }
        panic("si: no free DMA handles.");

found:
        dh = &sc->sc_dma[i];
        dh->dh_flags = SIDH_BUSY;
        dh->dh_maplen = xlen;

        /* Copy the "write" flag for convenience. */
        if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
                dh->dh_flags |= SIDH_OUT;

        /*
         * Double-map the buffer into DVMA space.  If we can't re-map
         * the buffer, we print a warning and fall back to PIO mode.
         *
         * NOTE: it is not safe to sleep here!
         */
        if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
            (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
                /* Can't remap segment */
                printf("si_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
                    addr, dh->dh_maplen);
                dh->dh_flags = 0;
                return;
        }
        bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
            (dh->dh_flags & SIDH_OUT)
                ? BUS_DMASYNC_PREWRITE
                : BUS_DMASYNC_PREREAD);

        /* success */
        sr->sr_dma_hand = dh;

        return;
}


void
si_dma_free(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        struct si_softc *sc = (struct si_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct si_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
        if (dh == NULL)
                panic("si_dma_free: no DMA handle");
#endif

        if (ncr_sc->sc_state & NCR_DOINGDMA)
                panic("si_dma_free: free while in progress");

        if (dh->dh_flags & SIDH_BUSY) {
                /* Give back the DVMA space. */
                bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
                    dh->dh_dvma, dh->dh_maplen,
                    (dh->dh_flags & SIDH_OUT)
                        ? BUS_DMASYNC_POSTWRITE
                        : BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
                dh->dh_flags = 0;
        }
        sr->sr_dma_hand = NULL;
}


/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
si_dma_poll(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        struct sci_req *sr = ncr_sc->sc_current;
        int tmo, csr_mask, csr;

        /* Make sure DMA started successfully. */
        if (ncr_sc->sc_state & NCR_ABORTING)
                return;

        csr_mask = SI_CSR_SBC_IP | SI_CSR_DMA_IP |
            SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR;

        tmo = 50000;    /* X100 = 5 sec. */
        for (;;) {
                csr = SIREG_READ(ncr_sc, SIREG_CSR);
                if (csr & csr_mask)
                        break;
                if (--tmo <= 0) {
                        printf("%s: DMA timeout (while polling)\n",
                            ncr_sc->sc_dev.dv_xname);
                        /* Indicate timeout as MI code would. */
                        sr->sr_flags |= SR_OVERDUE;
                        break;
                }
                delay(100);
        }

#ifdef DEBUG
        if (si_debug) {
                printf("si_dma_poll: done, csr=0x%x\n", csr);
        }
#endif
}


/*****************************************************************
 * VME functions for DMA
 ****************************************************************/


/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
si_intr_on(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        u_int16_t csr;

        /* Clear DMA start address and counters */
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

        /* Enter receive mode (for safety) and enable DMA engine */
        csr = SIREG_READ(ncr_sc, SIREG_CSR);
        csr &= ~SI_CSR_SEND;
        csr |= SI_CSR_DMA_EN;
        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
si_intr_off(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        u_int16_t csr;

        csr = SIREG_READ(ncr_sc, SIREG_CSR);
        csr &= ~SI_CSR_DMA_EN;
        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to setup the DMA engine before the bus enters a DATA phase.
 *
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 *
 * On the VME version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
si_dma_setup(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        struct si_softc *sc = (struct si_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct si_dma_handle *dh = sr->sr_dma_hand;
        u_int16_t csr;
        u_long dva;
        int xlen;

        /*
         * Set up the DMA controller.
         * Note that dh->dh_maplen may be larger than the current
         * sc_datalen.
         */

        csr = SIREG_READ(ncr_sc, SIREG_CSR);

        /* Disable DMA while we're setting up the transfer */
        csr &= ~SI_CSR_DMA_EN;

        /* Reset the FIFO */
        csr &= ~SI_CSR_FIFO_RES;        /* active low */
        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
        csr |= SI_CSR_FIFO_RES;
        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

        /*
         * Get the DVMA mapping for this segment.
         */
        dva = (u_long)(dh->dh_dvma);
        if (dva & 1)
                panic("si_dma_setup: bad dmaaddr=0x%lx", dva);
        xlen = ncr_sc->sc_datalen;
        xlen &= ~1;
        sc->sc_xlen = xlen;     /* XXX: or less... */

#ifdef DEBUG
        if (si_debug & 2) {
                printf("si_dma_setup: dh=%p, dmaaddr=0x%lx, xlen=%d\n",
                    dh, dva, xlen);
        }
#endif
        /* Set direction (send/recv) */
        if (dh->dh_flags & SIDH_OUT) {
                csr |= SI_CSR_SEND;
        } else {
                csr &= ~SI_CSR_SEND;
        }

        /* Set byte-packing control */
        if (dva & 2) {
                csr |= SI_CSR_BPCON;
        } else {
                csr &= ~SI_CSR_BPCON;
        }

        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

        /* Load start address */
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, (u_int16_t)(dva >> 16));
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, (u_int16_t)(dva & 0xFFFF));

        /* Clear DMA counters; these will be set in si_dma_start() */
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

        /* Clear FIFO counter. (also hits dma_count) */
        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
}


void
si_dma_start(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        struct si_softc *sc = (struct si_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct si_dma_handle *dh = sr->sr_dma_hand;
        int xlen;
        u_int mode;
        u_int16_t csr;

        xlen = sc->sc_xlen;

        /* Load transfer length */
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, (u_int16_t)(xlen >> 16));
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, (u_int16_t)(xlen & 0xFFFF));
        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, (u_int16_t)(xlen >> 16));
        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, (u_int16_t)(xlen & 0xFFFF));

        /*
         * Acknowledge the phase change.  (After DMA setup!)
         * Put the SBIC into DMA mode, and start the transfer.
         */
        if (dh->dh_flags & SIDH_OUT) {
                NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
                SCI_CLR_INTR(ncr_sc);
                NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);

                mode = NCR5380_READ(ncr_sc, sci_mode);
                mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
                NCR5380_WRITE(ncr_sc, sci_mode, mode);

                NCR5380_WRITE(ncr_sc, sci_dma_send, 0); /* start it */
        } else {
                NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
                SCI_CLR_INTR(ncr_sc);
                NCR5380_WRITE(ncr_sc, sci_icmd, 0);

                mode = NCR5380_READ(ncr_sc, sci_mode);
                mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
                NCR5380_WRITE(ncr_sc, sci_mode, mode);

                NCR5380_WRITE(ncr_sc, sci_irecv, 0);    /* start it */
        }

        ncr_sc->sc_state |= NCR_DOINGDMA;

        /* Enable DMA engine */
        csr = SIREG_READ(ncr_sc, SIREG_CSR);
        csr |= SI_CSR_DMA_EN;
        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

#ifdef DEBUG
        if (si_debug & 2) {
                printf("si_dma_start: started, flags=0x%x\n",
                    ncr_sc->sc_state);
        }
#endif
}


void
si_dma_eop(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{

        /* Not needed - DMA was stopped prior to examining sci_csr */
}


void
si_dma_stop(ncr_sc)
        struct ncr5380_softc *ncr_sc;
{
        struct si_softc *sc = (struct si_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct si_dma_handle *dh = sr->sr_dma_hand;
        int resid, ntrans;
        u_int16_t csr;
        u_int mode;

        if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
                printf("si_dma_stop: dma not running\n");
#endif
                return;
        }

        ncr_sc->sc_state &= ~NCR_DOINGDMA;

        csr = SIREG_READ(ncr_sc, SIREG_CSR);

        /* First, halt the DMA engine. */
        csr &= ~SI_CSR_DMA_EN;
        SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

        if (csr & (SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR)) {
                printf("si: DMA error, csr=0x%x, reset\n", csr);
                sr->sr_xs->error = XS_DRIVER_STUFFUP;
                ncr_sc->sc_state |= NCR_ABORTING;
                si_reset_adapter(ncr_sc);
        }

        /* Note that timeout may have set the error flag. */
        if (ncr_sc->sc_state & NCR_ABORTING)
                goto out;

        /*
         * Now try to figure out how much was actually transferred.
         *
         * The fifo_count does not reflect how many bytes were
         * actually transferred for VME.
         *
         * The SCSI-3 VME interface is a little funny on writes:
         * if we have a disconnect, the DMA has overshot by
         * one byte and the resid needs to be incremented.
         * This only happens for partial transfers.
         * (Thanks to Matt Jacob)
         */

        resid = SIREG_READ(ncr_sc, SIREG_FIFO_CNTH) << 16;
        resid |= SIREG_READ(ncr_sc, SIREG_FIFO_CNT) & 0xFFFF;
        if (dh->dh_flags & SIDH_OUT)
                if ((resid > 0) && (resid < sc->sc_xlen))
                        resid++;
        ntrans = sc->sc_xlen - resid;

#ifdef DEBUG
        if (si_debug & 2) {
                printf("si_dma_stop: resid=0x%x ntrans=0x%x\n",
                    resid, ntrans);
        }
#endif

        if (ntrans > ncr_sc->sc_datalen)
                panic("si_dma_stop: excess transfer");

        /* Adjust data pointer */
        ncr_sc->sc_dataptr += ntrans;
        ncr_sc->sc_datalen -= ntrans;

#ifdef DEBUG
        if (si_debug & 2) {
                printf("si_dma_stop: ntrans=0x%x\n", ntrans);
        }
#endif

        /*
         * After a read, we may need to clean up
         * "left-over bytes" (yuck!)
         */
        if (((dh->dh_flags & SIDH_OUT) == 0) &&
            ((csr & SI_CSR_LOB) != 0))
        {
                char *cp = ncr_sc->sc_dataptr;
                u_int16_t bprh, bprl;

                bprh = SIREG_READ(ncr_sc, SIREG_BPRH);
                bprl = SIREG_READ(ncr_sc, SIREG_BPRL);

#ifdef DEBUG
                printf("si: got left-over bytes: bprh=%x, bprl=%x, csr=%x\n",
                    bprh, bprl, csr);
#endif

                if (csr & SI_CSR_BPCON) {
                        /* have SI_CSR_BPCON */
                        cp[-1] = (bprl & 0xff00) >> 8;
                } else {
                        switch (csr & SI_CSR_LOB) {
                        case SI_CSR_LOB_THREE:
                                cp[-3] = (bprh & 0xff00) >> 8;
                                cp[-2] = (bprh & 0x00ff);
                                cp[-1] = (bprl & 0xff00) >> 8;
                                break;
                        case SI_CSR_LOB_TWO:
                                cp[-2] = (bprh & 0xff00) >> 8;
                                cp[-1] = (bprh & 0x00ff);
                                break;
                        case SI_CSR_LOB_ONE:
                                cp[-1] = (bprh & 0xff00) >> 8;
                                break;
                        }
                }
        }

out:
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);

        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
        SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
        SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);

        mode = NCR5380_READ(ncr_sc, sci_mode);
        /* Put SBIC back in PIO mode. */
        mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
        NCR5380_WRITE(ncr_sc, sci_mode, mode);
        NCR5380_WRITE(ncr_sc, sci_icmd, 0);
}