/*	$NetBSD: vme_machdep.c,v 1.5 1998/02/06 00:24:42 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/malloc.h>
43
44 #include <sys/proc.h>
45 #include <sys/user.h>
46 #include <sys/syslog.h>
47
48 #include <vm/vm.h>
49
50 #define _SPARC_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52 #include <machine/autoconf.h>
53 #include <machine/pmap.h>
54 #include <machine/oldmon.h>
55 #include <machine/cpu.h>
56 #include <machine/ctlreg.h>
57
58 #include <dev/vme/vmevar.h>
59
60 #include <sparc/sparc/asm.h>
61 #include <sparc/sparc/vaddrs.h>
62 #include <sparc/sparc/cpuvar.h>
63 #include <sparc/dev/vmereg.h>
64
65 struct vmebus_softc {
66 struct device sc_dev; /* base device */
67 struct vmebusreg *sc_reg; /* VME control registers */
68 struct vmebusvec *sc_vec; /* VME interrupt vector */
69 struct rom_range *sc_range; /* ROM range property */
70 int sc_nrange;
71 volatile u_int32_t *sc_ioctags; /* VME IO-cache tag registers */
72 volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
73 int (*sc_vmeintr) __P((void *));
74 struct bootpath *sc_bp;
75 };
76 struct vmebus_softc *vmebus_sc;/*XXX*/
77
78 /* autoconfiguration driver */
79 static int vmematch __P((struct device *, struct cfdata *, void *));
80 static void vmeattach __P((struct device *, struct device *, void *));
81 #if defined(SUN4)
82 static void vmeattach4 __P((struct device *, struct device *, void *));
83 int vmeintr4 __P((void *));
84 #endif
85 #if defined(SUN4M)
86 static void vmeattach4m __P((struct device *, struct device *, void *));
87 int vmeintr4m __P((void *));
88 #endif
89
90
91 static int sparc_vme_probe __P((void *, bus_space_tag_t, vme_addr_t,
92 size_t, vme_size_t, vme_mod_t,
93 int (*) __P((void *, void *)), void *));
94 static int sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_mod_t,
95 bus_space_tag_t, bus_space_handle_t *));
96 static void sparc_vme_unmap __P((void *));
97 static int sparc_vme_mmap_cookie __P((void *, vme_addr_t, vme_mod_t,
98 bus_space_tag_t, int *));
99 static int sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
100 static void * sparc_vme_intr_establish __P((void *, vme_intr_handle_t,
101 int (*) __P((void *)), void *));
102 static void sparc_vme_intr_disestablish __P((void *, void *));
103
104 static void vmebus_translate __P((struct vmebus_softc *, vme_mod_t,
105 struct rom_reg *));
106 static void sparc_vme_bus_establish __P((void *, struct device *));
107 #if defined(SUN4M)
108 static void sparc_vme4m_barrier __P((void *));
109 #endif
110
111 /*
112 * DMA functions.
113 */
114 #if defined(SUN4)
115 static int sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
116 bus_size_t, struct proc *, int));
117 static void sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
118 static void sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
119 bus_addr_t, bus_size_t, int));
120
121 static int sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
122 bus_size_t, bus_size_t, bus_dma_segment_t *,
123 int, int *, int));
124 static void sparc_vme4_dmamem_free __P((bus_dma_tag_t,
125 bus_dma_segment_t *, int));
126 #endif
127
128 #if defined(SUN4M)
129 static int sparc_vme4m_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
130 bus_size_t, bus_size_t, int, bus_dmamap_t *));
131
132 static int sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
133 bus_size_t, struct proc *, int));
134 static void sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
135 static void sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
136 bus_addr_t, bus_size_t, int));
137
138 static int sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
139 bus_size_t, bus_size_t, bus_dma_segment_t *,
140 int, int *, int));
141 static void sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
142 bus_dma_segment_t *, int));
143 #endif
144
145 #if 0
146 static void sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
147 static int sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
148 int, size_t, caddr_t *, int));
149 static void sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
150 static int sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
151 bus_dma_segment_t *, int, int, int, int));
152 #endif
153
154 struct cfattach vme_ca = {
155 sizeof(struct vmebus_softc), vmematch, vmeattach
156 };
157
158 struct sparc_bus_space_tag sparc_vme_bus_tag = {
159 NULL, /* cookie */
160 NULL, /* bus_map */
161 NULL, /* bus_unmap */
162 NULL, /* bus_subregion */
163 NULL /* barrier */
164 };
165
166 struct vme_chipset_tag sparc_vme_chipset_tag = {
167 NULL,
168 sparc_vme_probe,
169 sparc_vme_map,
170 sparc_vme_unmap,
171 sparc_vme_mmap_cookie,
172 sparc_vme_intr_map,
173 sparc_vme_intr_establish,
174 sparc_vme_intr_disestablish,
175 sparc_vme_bus_establish
176 };
177
178
179 #if defined(SUN4)
180 struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
181 NULL, /* cookie */
182 _bus_dmamap_create,
183 _bus_dmamap_destroy,
184 sparc_vme4_dmamap_load,
185 _bus_dmamap_load_mbuf,
186 _bus_dmamap_load_uio,
187 _bus_dmamap_load_raw,
188 sparc_vme4_dmamap_unload,
189 sparc_vme4_dmamap_sync,
190
191 sparc_vme4_dmamem_alloc,
192 sparc_vme4_dmamem_free,
193 _bus_dmamem_map,
194 _bus_dmamem_unmap,
195 _bus_dmamem_mmap
196 };
197 #endif
198
199 #if defined(SUN4M)
200 struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
201 NULL, /* cookie */
202 sparc_vme4m_dmamap_create,
203 _bus_dmamap_destroy,
204 sparc_vme4m_dmamap_load,
205 _bus_dmamap_load_mbuf,
206 _bus_dmamap_load_uio,
207 _bus_dmamap_load_raw,
208 sparc_vme4m_dmamap_unload,
209 sparc_vme4m_dmamap_sync,
210
211 sparc_vme4m_dmamem_alloc,
212 sparc_vme4m_dmamem_free,
213 _bus_dmamem_map,
214 _bus_dmamem_unmap,
215 _bus_dmamem_mmap
216 };
217 #endif
218
219
220 void
221 sparc_vme_bus_establish(cookie, dev)
222 void *cookie;
223 struct device *dev;
224 {
225 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
226 struct bootpath *bp = sc->sc_bp;
227 char *name;
228
229 name = dev->dv_cfdata->cf_driver->cd_name;
230 #ifdef DEBUG
231 printf("sparc_vme_bus_establish: %s%d\n", name, dev->dv_unit);
232 #endif
233 if (bp != NULL && strcmp(bp->name, name) == 0 &&
234 dev->dv_unit == bp->val[1]) {
235 bp->dev = dev;
236 #ifdef DEBUG
237 printf("sparc_vme_bus_establish: on the boot path\n");
238 #endif
239 sc->sc_bp++;
240 bootpath_store(1, sc->sc_bp);
241 }
242 }
243
244
245 int
246 vmematch(parent, cf, aux)
247 struct device *parent;
248 struct cfdata *cf;
249 void *aux;
250 {
251 register struct confargs *ca = aux;
252 register struct romaux *ra = &ca->ca_ra;
253
254 if (CPU_ISSUN4C)
255 return (0);
256
257 return (strcmp(cf->cf_driver->cd_name, ra->ra_name) == 0);
258 }
259
260 void
261 vmeattach(parent, self, aux)
262 struct device *parent, *self;
263 void *aux;
264 {
265 struct vmebus_softc *sc = (struct vmebus_softc *)self;
266 struct confargs *ca = aux;
267 register struct romaux *ra = &ca->ca_ra;
268
269 if (ra->ra_bp != NULL && strcmp(ra->ra_bp->name, "vme") == 0) {
270 sc->sc_bp = ra->ra_bp + 1;
271 bootpath_store(1, sc->sc_bp);
272 }
273
274 #if defined(SUN4)
275 if (CPU_ISSUN4)
276 vmeattach4(parent, self, aux);
277 #endif
278
279 #if defined(SUN4M)
280 if (CPU_ISSUN4M)
281 vmeattach4m(parent, self, aux);
282 #endif
283
284 bootpath_store(1, NULL);
285 }
286
287 #if defined(SUN4)
288 void
289 vmeattach4(parent, self, aux)
290 struct device *parent, *self;
291 void *aux;
292 {
293 struct vmebus_softc *sc = (struct vmebus_softc *)self;
294 struct vme_busattach_args vba;
295
296 if (self->dv_unit > 0) {
297 printf(" unsupported\n");
298 return;
299 }
300
301 /* VME interrupt entry point */
302 sc->sc_vmeintr = vmeintr4;
303
304 /*XXX*/ sparc_vme_chipset_tag.cookie = self;
305 /*XXX*/ sparc_vme4_dma_tag._cookie = self;
306
307 vba.vba_bustag = &sparc_vme_bus_tag;
308 vba.vba_chipset_tag = &sparc_vme_chipset_tag;
309 vba.vba_dmatag = &sparc_vme4_dma_tag;
310
311 printf("\n");
312 (void)config_search(vmesearch, self, &vba);
313 return;
314 }
315 #endif
316
317 #if defined(SUN4M)
318 /* sun4m vmebus */
319 void
320 vmeattach4m(parent, self, aux)
321 struct device *parent, *self;
322 void *aux;
323 {
324 struct vmebus_softc *sc = (struct vmebus_softc *)self;
325 struct confargs *ca = aux;
326 register struct romaux *ra = &ca->ca_ra;
327 int node, rlen;
328 struct vme_busattach_args vba;
329 int cline;
330
331 if (self->dv_unit > 0) {
332 printf(" unsupported\n");
333 return;
334 }
335
336 /* VME interrupt entry point */
337 sc->sc_vmeintr = vmeintr4m;
338
339 /*XXX*/ sparc_vme_chipset_tag.cookie = self;
340 /*XXX*/ sparc_vme4m_dma_tag._cookie = self;
341 sparc_vme_bus_tag.sparc_barrier = sparc_vme4m_barrier;
342
343 vba.vba_bustag = &sparc_vme_bus_tag;
344 vba.vba_chipset_tag = &sparc_vme_chipset_tag;
345 vba.vba_dmatag = &sparc_vme4m_dma_tag;
346
347 node = ra->ra_node;
348
349 /* Map VME control space */
350 sc->sc_reg = (struct vmebusreg *)
351 mapdev(&ra->ra_reg[0], 0, 0, ra->ra_reg[0].rr_len);
352 sc->sc_vec = (struct vmebusvec *)
353 mapdev(&ra->ra_reg[1], 0, 0, ra->ra_reg[1].rr_len);
354 sc->sc_ioctags = (u_int32_t *)
355 mapdev(&ra->ra_reg[1], 0, VME_IOC_TAGOFFSET, VME_IOC_SIZE);
356 sc->sc_iocflush = (u_int32_t *)
357 mapdev(&ra->ra_reg[1], 0, VME_IOC_FLUSHOFFSET, VME_IOC_SIZE);
358
359 /*XXX*/ sparc_vme_bus_tag.cookie = sc->sc_reg;
360
361 /*
362 * Get "range" property.
363 */
364 rlen = getproplen(node, "ranges");
365 if (rlen > 0) {
366 sc->sc_nrange = rlen / sizeof(struct rom_range);
367 sc->sc_range =
368 (struct rom_range *)malloc(rlen, M_DEVBUF, M_NOWAIT);
369 if (sc->sc_range == 0)
370 panic("vme: PROM ranges too large: %d", rlen);
371 (void)getprop(node, "ranges", sc->sc_range, rlen);
372 }
373
374 vmebus_sc = sc;
375
376 /*
377 * Invalidate all IO-cache entries.
378 */
379 for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
380 sc->sc_ioctags[--cline] = 0;
381 }
382
383 /* Enable IO-cache */
384 sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;
385
386 printf(": version 0x%x\n",
387 sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);
388
389 (void)config_search(vmesearch, self, &vba);
390 }
391 #endif
392
393 void sparc_vme_async_fault __P((void));
394 void
395 sparc_vme_async_fault()
396 {
397 struct vmebus_softc *sc = vmebus_sc;
398 u_int32_t addr;
399
400 addr = sc->sc_reg->vmebus_afar;
401 printf("vme afsr: %x; addr %x\n", sc->sc_reg->vmebus_afsr, addr);
402 }
403
404 int
405 sparc_vme_probe(cookie, tag, addr, offset, size, mod, callback, arg)
406 void *cookie;
407 bus_space_tag_t tag;
408 vme_addr_t addr;
409 size_t offset;
410 vme_size_t size;
411 int mod;
412 int (*callback) __P((void *, void *));
413 void *arg;
414 {
415 struct rom_reg reg;
416 caddr_t tmp;
417 int result;
418 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
419
420 /* XXX - Use bus_space_[un]map() etc. */
421 reg.rr_paddr = (void *)addr;
422 vmebus_translate(sc, mod, ®);
423 tmp = (caddr_t)mapdev(®, TMPMAP_VA, 0, NBPG);
424 result = probeget(tmp + offset, size) != -1;
425 if (result && callback != NULL)
426 result = (*callback)(tmp, arg);
427 pmap_remove(pmap_kernel(), TMPMAP_VA, TMPMAP_VA+NBPG);
428 return (result);
429 }
430
431 int
432 sparc_vme_map(cookie, addr, size, mod, tag, handlep)
433 void *cookie;
434 vme_addr_t addr;
435 vme_size_t size;
436 int mod;
437 bus_space_tag_t tag;
438 bus_space_handle_t *handlep;
439 {
440 struct rom_reg reg;
441 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
442
443 reg.rr_paddr = (void *)addr;
444 vmebus_translate(sc, mod, ®);
445 *handlep = (bus_space_handle_t)mapdev(®, 0, 0, size);
446 return (0);
447 }
448
449 int
450 sparc_vme_mmap_cookie(cookie, addr, mod, tag, handlep)
451 void *cookie;
452 vme_addr_t addr;
453 int mod;
454 bus_space_tag_t tag;
455 int *handlep;
456 {
457 struct rom_reg reg;
458 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
459
460 reg.rr_paddr = (void *)addr;
461 vmebus_translate(sc, mod, ®);
462 *handlep = (int)reg.rr_paddr | PMAP_IOENC(reg.rr_iospace) | PMAP_NC;
463 return (0);
464 }
465
466 void
467 vmebus_translate(sc, mod, rr)
468 struct vmebus_softc *sc;
469 vme_mod_t mod;
470 struct rom_reg *rr;
471 {
472 register int j;
473
474 if (CPU_ISSUN4) {
475 (int)rr->rr_iospace = (mod & VMEMOD_D32)
476 ? PMAP_VME32
477 : PMAP_VME16;
478
479 switch (mod & ~VMEMOD_D32) {
480 case VMEMOD_A16|VMEMOD_D|VMEMOD_S:
481 rr->rr_paddr += 0xffff0000;
482 break;
483 case VMEMOD_A24|VMEMOD_D|VMEMOD_S:
484 rr->rr_paddr += 0xff000000;
485 break;
486 case VMEMOD_A32|VMEMOD_D|VMEMOD_S:
487 break;
488 default:
489 panic("vmebus_translate: unsupported VME modifier: %x",
490 mod);
491 }
492 return;
493 }
494
495
496 /* sun4m VME node: translate through "ranges" property */
497 if (sc->sc_nrange == 0)
498 panic("vmebus: no ranges");
499
500 /* Translate into parent address spaces */
501 for (j = 0; j < sc->sc_nrange; j++) {
502 if (sc->sc_range[j].cspace == mod) {
503 (int)rr->rr_paddr +=
504 sc->sc_range[j].poffset;
505 (int)rr->rr_iospace =
506 sc->sc_range[j].pspace;
507 return;
508 }
509 }
510 panic("sparc_vme_translate: modifier %x not supported", mod);
511 }
512
513 #if defined(SUN4M)
514 void
515 sparc_vme4m_barrier(cookie)
516 void *cookie;
517 {
518 struct vmebusreg *vbp = (struct vmebusreg *)cookie;
519
520 /* Read async fault status to flush write-buffers */
521 (*(volatile int *)&vbp->vmebus_afsr);
522 }
523 #endif
524
525
526
527 /*
528 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
529 */
530 static int vme_ipl_to_pil[] = {
531 0,
532 2,
533 3,
534 5,
535 7,
536 9,
537 11,
538 13
539 };
540
541
542 /*
543 * All VME device interrupts go through vmeintr(). This function reads
544 * the VME vector from the bus, then dispatches the device interrupt
545 * handler. All handlers for devices that map to the same Processor
546 * Interrupt Level (according to the table above) are on a linked list
547 * of `sparc_vme_intr_handle' structures. The head of which is passed
548 * down as the argument to `vmeintr(void *arg)'.
549 */
550 struct sparc_vme_intr_handle {
551 struct intrhand ih;
552 struct sparc_vme_intr_handle *next;
553 int vec; /* VME interrupt vector */
554 int pri; /* VME interrupt priority */
555 struct vmebus_softc *sc;/*XXX*/
556 };
557
558 #if defined(SUN4)
559 int
560 vmeintr4(arg)
561 void *arg;
562 {
563 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
564 int level, vec;
565 int i = 0;
566
567 level = (ihp->pri << 1) | 1;
568
569 vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));
570
571 if (vec == -1) {
572 printf("vme: spurious interrupt\n");
573 return 1; /* XXX - pretend we handled it, for now */
574 }
575
576 for (; ihp; ihp = ihp->next)
577 if (ihp->vec == vec && ihp->ih.ih_fun)
578 i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
579 return (i);
580 }
581 #endif
582
583 #if defined(SUN4M)
584 int
585 vmeintr4m(arg)
586 void *arg;
587 {
588 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
589 int level, vec;
590 int i = 0;
591
592 level = (ihp->pri << 1) | 1;
593
594 #if 0
595 int pending;
596
597 /* Flush VME <=> Sbus write buffers */
598 (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);
599
600 pending = *((int*)ICR_SI_PEND);
601 if ((pending & SINTR_VME(ihp->pri)) == 0) {
602 printf("vmeintr: non pending at pri %x(p 0x%x)\n",
603 ihp->pri, pending);
604 return (0);
605 }
606 #endif
607 #if 0
608 /* Why gives this a bus timeout sometimes? */
609 vec = ihp->sc->sc_vec->vmebusvec[level];
610 #else
611 /* so, arrange to catch the fault... */
612 {
613 extern struct user *proc0paddr;
614 extern int fkbyte __P((caddr_t, struct pcb *));
615 caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
616 struct pcb *xpcb;
617 u_long saveonfault;
618 int s;
619
620 s = splhigh();
621 if (curproc == NULL)
622 xpcb = (struct pcb *)proc0paddr;
623 else
624 xpcb = &curproc->p_addr->u_pcb;
625
626 saveonfault = (u_long)xpcb->pcb_onfault;
627 vec = fkbyte(addr, xpcb);
628 xpcb->pcb_onfault = (caddr_t)saveonfault;
629
630 splx(s);
631 }
632 #endif
633
634 if (vec == -1) {
635 printf("vme: spurious interrupt: ");
636 printf("SI: 0x%x, VME AFSR: 0x%x, VME AFAR 0x%x\n",
637 *((int*)ICR_SI_PEND),
638 ihp->sc->sc_reg->vmebus_afsr,
639 ihp->sc->sc_reg->vmebus_afar);
640 return 1; /* XXX - pretend we handled it, for now */
641 }
642
643 for (; ihp; ihp = ihp->next)
644 if (ihp->vec == vec && ihp->ih.ih_fun)
645 i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
646 return (i);
647 }
648 #endif
649
650 int
651 sparc_vme_intr_map(cookie, vec, pri, ihp)
652 void *cookie;
653 int vec;
654 int pri;
655 vme_intr_handle_t *ihp;
656 {
657 struct sparc_vme_intr_handle *ih;
658
659 ih = (vme_intr_handle_t)
660 malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
661 ih->pri = pri;
662 ih->vec = vec;
663 ih->sc = cookie;/*XXX*/
664 *ihp = ih;
665 return (0);
666 }
667
668 void *
669 sparc_vme_intr_establish(cookie, vih, func, arg)
670 void *cookie;
671 vme_intr_handle_t vih;
672 int (*func) __P((void *));
673 void *arg;
674 {
675 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
676 struct sparc_vme_intr_handle *svih =
677 (struct sparc_vme_intr_handle *)vih;
678 struct intrhand *ih;
679 int level;
680
681 /* Translate VME priority to processor IPL */
682 level = vme_ipl_to_pil[svih->pri];
683
684 svih->ih.ih_fun = func;
685 svih->ih.ih_arg = arg;
686 svih->next = NULL;
687
688 /* ensure the interrupt subsystem will call us at this level */
689 for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
690 if (ih->ih_fun == sc->sc_vmeintr)
691 break;
692
693 if (ih == NULL) {
694 ih = (struct intrhand *)
695 malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
696 if (ih == NULL)
697 panic("vme_addirq");
698 bzero(ih, sizeof *ih);
699 ih->ih_fun = sc->sc_vmeintr;
700 ih->ih_arg = vih;
701 intr_establish(level, ih);
702 } else {
703 svih->next = (vme_intr_handle_t)ih->ih_arg;
704 ih->ih_arg = vih;
705 }
706 return (NULL);
707 }
708
709 void
710 sparc_vme_unmap(cookie)
711 void * cookie;
712 {
713 /* Not implemented */
714 panic("sparc_vme_unmap");
715 }
716
717 void
718 sparc_vme_intr_disestablish(cookie, a)
719 void *cookie;
720 void *a;
721 {
722 /* Not implemented */
723 panic("sparc_vme_intr_disestablish");
724 }
725
726
727
728 /*
729 * VME DMA functions.
730 */
731
732 #if defined(SUN4)
733 int
734 sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
735 bus_dma_tag_t t;
736 bus_dmamap_t map;
737 void *buf;
738 bus_size_t buflen;
739 struct proc *p;
740 int flags;
741 {
742 int error;
743
744 error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
745 if (error != 0)
746 return (error);
747
748 /* Adjust DVMA address to VME view */
749 map->dm_segs[0].ds_addr -= DVMA_BASE;
750 return (0);
751 }
752
753 void
754 sparc_vme4_dmamap_unload(t, map)
755 bus_dma_tag_t t;
756 bus_dmamap_t map;
757 {
758 map->dm_segs[0].ds_addr += DVMA_BASE;
759 _bus_dmamap_unload(t, map);
760 }
761
762 int
763 sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
764 bus_dma_tag_t t;
765 bus_size_t size, alignment, boundary;
766 bus_dma_segment_t *segs;
767 int nsegs;
768 int *rsegs;
769 int flags;
770 {
771 int error;
772
773 error = _bus_dmamem_alloc(t, size, alignment, boundary,
774 segs, nsegs, rsegs, flags);
775 if (error != 0)
776 return (error);
777
778 segs[0].ds_addr -= DVMA_BASE;
779 return (0);
780 }
781
782 void
783 sparc_vme4_dmamem_free(t, segs, nsegs)
784 bus_dma_tag_t t;
785 bus_dma_segment_t *segs;
786 int nsegs;
787 {
788 segs[0].ds_addr += DVMA_BASE;
789 _bus_dmamem_free(t, segs, nsegs);
790 }
791
792 void
793 sparc_vme4_dmamap_sync(t, map, offset, len, ops)
794 bus_dma_tag_t t;
795 bus_dmamap_t map;
796 bus_addr_t offset;
797 bus_size_t len;
798 int ops;
799 {
800
801 /*
802 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
803 */
804 }
805 #endif /* SUN4 */
806
807 #if defined(SUN4M)
808 static int
809 sparc_vme4m_dmamap_create (t, size, nsegments, maxsegsz, boundary, flags, dmamp)
810 bus_dma_tag_t t;
811 bus_size_t size;
812 int nsegments;
813 bus_size_t maxsegsz;
814 bus_size_t boundary;
815 int flags;
816 bus_dmamap_t *dmamp;
817 {
818 int align;
819
820 /* VME DVMA addresses must always be 8K aligned */
821 align = 8192;
822
823 /* XXX - todo: allocate DVMA addresses from assigned ranges:
824 upper 8MB for A32 space; upper 1MB for A24 space */
825 return (_bus_dmamap_create(t, size, nsegments, maxsegsz,
826 boundary, /*align,*/ flags, dmamp));
827 }
828
829 int
830 sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
831 bus_dma_tag_t t;
832 bus_dmamap_t map;
833 void *buf;
834 bus_size_t buflen;
835 struct proc *p;
836 int flags;
837 {
838 struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
839 volatile u_int32_t *ioctags;
840 int error;
841
842 buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
843 error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
844 if (error != 0)
845 return (error);
846
847 /* allocate IO cache entries for this range */
848 ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
849 for (;buflen > 0;) {
850 *ioctags = VME_IOC_IC | VME_IOC_W;
851 ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
852 buflen -= VME_IOC_PAGESZ;
853 }
854 return (0);
855 }
856
857
858 void
859 sparc_vme4m_dmamap_unload(t, map)
860 bus_dma_tag_t t;
861 bus_dmamap_t map;
862 {
863 struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
864 volatile u_int32_t *flushregs;
865 int len;
866
867 /* Flush VME IO cache */
868 len = map->dm_segs[0].ds_len;
869 flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
870 for (;len > 0;) {
871 *flushregs = 0;
872 flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
873 len -= VME_IOC_PAGESZ;
874 }
875 /* Read a tag to synchronize the IOC flushes */
876 (*sc->sc_ioctags);
877
878 _bus_dmamap_unload(t, map);
879 }
880
881 int
882 sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
883 bus_dma_tag_t t;
884 bus_size_t size, alignmnt, boundary;
885 bus_dma_segment_t *segs;
886 int nsegs;
887 int *rsegs;
888 int flags;
889 {
890 int error;
891
892 error = _bus_dmamem_alloc(t, size, alignmnt, boundary,
893 segs, nsegs, rsegs, flags);
894 if (error != 0)
895 return (error);
896
897 return (0);
898 }
899
900 void
901 sparc_vme4m_dmamem_free(t, segs, nsegs)
902 bus_dma_tag_t t;
903 bus_dma_segment_t *segs;
904 int nsegs;
905 {
906 _bus_dmamem_free(t, segs, nsegs);
907 }
908
909 void
910 sparc_vme4m_dmamap_sync(t, map, offset, len, ops)
911 bus_dma_tag_t t;
912 bus_dmamap_t map;
913 bus_addr_t offset;
914 bus_size_t len;
915 int ops;
916 {
917
918 /*
919 * XXX Should perform cache flushes as necessary.
920 */
921 }
922 #endif /* SUN4M */
923