/*	$NetBSD: si_sebuf.c,v 1.1 1997/10/17 03:39:46 gwr Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sun3/E SCSI driver (machine-dependent portion).
 * The machine-independent parts are in ncr5380sbc.c
 *
 * XXX - Mostly from the si driver.  Merge?
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#include <machine/autoconf.h>

#define DEBUG XXX

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include "sereg.h"
#include "sevar.h"

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption they're not worth the DMA overhead).
 */
#define	MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define	MAX_DMA_LEN 0xE000

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct se_dma_handle {
	int		dh_flags;
#define	SIDH_BUSY	1		/* This DH is in use */
#define	SIDH_OUT	2		/* DMA does data out (write) */
	u_char		*dh_addr;	/* KVA of start of buffer */
	int		dh_maplen;	/* Length of KVA mapping. */
	long		dh_dma;		/* Offset in DMA buffer. */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct se_softc {
	struct ncr5380_softc	ncr_sc;
	volatile struct se_regs	*sc_regs;
	int		sc_adapter_type;
	int		sc_adapter_iv;	/* int. vec */
	int		sc_options;	/* options for this instance */
	int		sc_reqlen;	/* requested transfer length */
	struct se_dma_handle	*sc_dma;
	/* DMA command block for the OBIO controller. */
	void		*sc_dmacmd;
};
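
/*
 * Example of the first-member cast described above (the idiom used
 * throughout the functions below):
 *	struct se_softc *sc = (struct se_softc *)ncr_sc;
 */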

/* Options for disconnect/reselect, DMA, and interrupts. */
#define	SE_NO_DISCONNECT	0xff
#define	SE_NO_PARITY_CHK	0xff00
#define	SE_FORCE_POLLING	0x10000
#define	SE_DISABLE_DMA		0x20000
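
/*
 * Hypothetical example: a kernel config line such as
 *	se0 at sebuf0 flags 0x10000
 * would set SE_FORCE_POLLING for that instance; se_attach() copies
 * cf_flags into sc_options (falling back to the se_options global).
 */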

void se_dma_alloc __P((struct ncr5380_softc *));
void se_dma_free __P((struct ncr5380_softc *));
void se_dma_poll __P((struct ncr5380_softc *));

void se_dma_setup __P((struct ncr5380_softc *));
void se_dma_start __P((struct ncr5380_softc *));
void se_dma_eop __P((struct ncr5380_softc *));
void se_dma_stop __P((struct ncr5380_softc *));

void se_intr_on  __P((struct ncr5380_softc *));
void se_intr_off __P((struct ncr5380_softc *));

static int  se_intr __P((void *));
static void se_reset __P((struct ncr5380_softc *));

/*
 * New-style autoconfig attachment
 */

static int	se_match __P((struct device *, struct cfdata *, void *));
static void	se_attach __P((struct device *, struct device *, void *));

struct cfattach si_sebuf_ca = {
	sizeof(struct se_softc), se_match, se_attach
};

static void	se_minphys __P((struct buf *));
static struct scsipi_adapter se_ops = {
	ncr5380_scsi_cmd,	/* scsi_cmd() */
	se_minphys,		/* scsi_minphys() */
	NULL,			/* open_target_lu() */
	NULL,			/* close_target_lu() */
};

/* This is copied from julian's bt driver */
/* "so we have a default dev struct for our link struct." */
static struct scsipi_device se_dev = {
	NULL,		/* Use default error handler. */
	NULL,		/* Use default start handler. */
	NULL,		/* Use default async handler. */
	NULL,		/* Use default "done" routine. */
};

/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = 0;

/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */

int se_debug = 0;
#ifdef	DEBUG
static int se_link_flags = 0 /* | SDEV_DB2 */ ;
#endif


static int
se_match(parent, cf, args)
	struct device *parent;
	struct cfdata *cf;
	void *args;
{
	struct sebuf_attach_args *aa = args;

	/* Match by name. */
	if (strcmp(aa->name, "se"))
		return (0);

	/* Force same unit number as parent. */
	if (parent->dv_unit != cf->cf_unit)
		return (0);

	return (1);
}

static void
se_attach(parent, self, args)
	struct device *parent, *self;
	void *args;
{
	struct se_softc *sc = (struct se_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = self->dv_cfdata;
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	printf(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in  = ncr5380_pio_in;

#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */

	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
	    aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
	    (sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Fill in the prototype scsi_link.
	 */
	ncr_sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
	ncr_sc->sc_link.adapter_softc = sc;
	ncr_sc->sc_link.scsipi_scsi.adapter_target = 7;
	ncr_sc->sc_link.adapter = &se_ops;
	ncr_sc->sc_link.device = &se_dev;
	ncr_sc->sc_link.type = BUS_SCSI;

#ifdef	DEBUG
	if (se_debug)
		printf("se: Set TheSoftC=%p TheRegs=%p\n", sc, regs);
	ncr_sc->sc_link.flags |= se_link_flags;
#endif

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = (struct se_dma_handle *)
	    malloc(i, M_DEVBUF, M_WAITOK);
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed\n");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	/*
	 * Initialize se board itself.
	 */
	ncr5380_init(ncr_sc);
	ncr5380_reset_scsibus(ncr_sc);
	config_found(&(ncr_sc->sc_dev), &(ncr_sc->sc_link), scsiprint);
}

static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("se_reset\n");
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
se_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine. */
	se->se_csr |= SE_CSR_INTR_EN;
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
se_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	se->se_csr &= ~SE_CSR_INTR_EN;
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the VME version, set up the start address, but clear the
 * count (to make sure it stays idle) and set that later.
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 * XXX: Need to copy data into the DMA buffer...
 */
void
se_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 */
	data_pa = 0;	/* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("se_dma_start: bad pa=0x%lx", data_pa);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;		/* XXX: necessary? */
	sc->sc_reqlen = xlen;	/* XXX: or less? */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_setup: dh=%p, pa=0x%lx, xlen=0x%x\n",
		    dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 */
	se->dma_cntr = 0;
}


void
se_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;	/* start it */
	}

	/* Let'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}


void
se_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}


void
se_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("se_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much was actually transferred.
	 */
	resid = se->dma_cntr & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_stop: resid=0x%x ntrans=0x%x\n",
		    resid, ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("se_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}

/*****************************************************************/

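/*
 * Clamp the transfer size to what the DMA engine can handle
 * (MAX_DMA_LEN), then apply the generic minphys() limit as well.
 */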
static void
se_minphys(struct buf *bp)
{
	if (bp->b_bcount > MAX_DMA_LEN) {
#ifdef	DEBUG
		if (se_debug) {
			printf("se_minphys len = 0x%x.\n", bp->b_bcount);
			Debugger();
		}
#endif
		bp->b_bcount = MAX_DMA_LEN;
	}
	return (minphys(bp));
}


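/*
 * Interrupt handler: read the board CSR and, if the 5380 (SBC) is
 * asserting its interrupt, hand it off to the MI ncr5380_intr() code.
 */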
int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	volatile struct se_regs *se = sc->sc_regs;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = se->se_csr;
	NCR_TRACE("se_intr: csr=0x%x\n", csr);

	if (csr & SE_CSR_SBC_IP) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef	DEBUG
		if (!claimed) {
			printf("se_intr: spurious from SBC\n");
			if (se_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
		/* Yes, we DID cause this interrupt. */
		claimed = 1;
	}

	return (claimed);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 */
void
se_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("se_dma_alloc: already have DMA handle");
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("se_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("se_dma_alloc: xlen=0x%x\n", xlen);

	/*
	 * Never attempt single transfers of more than 63k, because
	 * our count register may be only 16 bits (an OBIO adapter).
	 * This should never happen since it is already bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("se_dma_alloc: excessive xlen=0x%x\n", xlen);
		Debugger();
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find a free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->flags & SCSI_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (u_char*) addr;
	dh->dh_maplen = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("se_dma_alloc: can't remap %p/0x%x\n",
		    dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
se_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("se_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("se_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* XXX: Should separate allocation and mapping. */
		/* XXX: Give back the DMA space. */
		/* XXX: free((caddr_t)dh->dh_dma, dh->dh_maplen); */
		dh->dh_dma = 0;
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


#define	CSR_MASK	SE_CSR_SBC_IP
#define	POLL_TIMO	50000	/* X100 = 5 sec. */
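/*
 * That is: POLL_TIMO iterations of delay(100) microseconds in the
 * polling loop below, i.e. about 5 seconds total.
 */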

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
se_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or even worse (on vme) a 10 ms delay!
	 * XXX: I really doubt that is necessary...
	 */

	/* Wait for any "dma complete" or error bits. */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
	    POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_poll: done, csr=0x%x\n", se->se_csr);
	}
#endif
}
