/*	$NetBSD: vme_machdep.c,v 1.2 1998/02/04 01:01:14 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/malloc.h>
43
44 #include <sys/proc.h>
45 #include <sys/user.h>
46 #include <sys/syslog.h>
47
48 #include <vm/vm.h>
49
50 #define _SPARC_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52 #include <machine/autoconf.h>
53 #include <machine/pmap.h>
54 #include <machine/oldmon.h>
55 #include <machine/cpu.h>
56 #include <machine/ctlreg.h>
57
58 #include <dev/vme/vmevar.h>
59
60 #include <sparc/sparc/asm.h>
61 #include <sparc/sparc/vaddrs.h>
62 #include <sparc/sparc/cpuvar.h>
63 #include <sparc/dev/vmereg.h>
64
/*
 * Per-controller software state for the VME bus.  Only one unit is
 * supported (see the dv_unit checks in the attach routines); the
 * sun4m-only register/IO-cache fields are left NULL on sun4.
 */
struct vmebus_softc {
	struct device	sc_dev;		/* base device */
	struct vmebusreg *sc_reg;	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		sc_nrange;	/* number of entries in sc_range */
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int		(*sc_vmeintr) __P((void *));	/* CPU-specific interrupt handler */
	struct bootpath	*sc_bp;		/* current boot path component, or NULL */
};
struct vmebus_softc *vmebus_sc;/*XXX - for sparc_vme_async_fault() */
77
/* autoconfiguration driver */
static int	vmematch __P((struct device *, struct cfdata *, void *));
static void	vmeattach __P((struct device *, struct device *, void *));
#if defined(SUN4)
static void	vmeattach4 __P((struct device *, struct device *, void *));
int		vmeintr4 __P((void *));
#endif
#if defined(SUN4M)
static void	vmeattach4m __P((struct device *, struct device *, void *));
int		vmeintr4m __P((void *));
#endif


/* Bus-space / chipset-tag methods exported through sparc_vme_chipset_tag */
static int	sparc_vme_probe __P((void *, bus_space_tag_t, vme_addr_t,
				     vme_size_t, vme_mod_t,
				     int (*) __P((void *, void *)), void *));
static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_mod_t,
				   bus_space_tag_t, bus_space_handle_t *));
static void	sparc_vme_unmap __P((void *));
static int	sparc_vme_mmap_cookie __P((void *, vme_addr_t, vme_mod_t,
					   bus_space_tag_t, int *));
static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t,
					      int (*) __P((void *)), void *));
static void	sparc_vme_intr_disestablish __P((void *, void *));

static void	vmebus_translate __P((struct vmebus_softc *, vme_mod_t,
				      struct rom_reg *));
static void	sparc_vme_bus_establish __P((void *, struct device *));
#if defined(SUN4M)
static void	sparc_vme4m_barrier __P((void *));
#endif

/*
 * DMA functions.
 */
#if defined(SUN4)
static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
					    bus_size_t, struct proc *, int));
static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
					    bus_dmasync_op_t));

static int	sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
					     bus_size_t, bus_size_t,
					     bus_dma_segment_t *,
					     int, int *, int));
static void	sparc_vme4_dmamem_free __P((bus_dma_tag_t,
					    bus_dma_segment_t *, int));
#endif

#if defined(SUN4M)
static int	sparc_vme4m_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
					       bus_size_t, bus_size_t, int,
					       bus_dmamap_t *));

static int	sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t,
					     void *, bus_size_t,
					     struct proc *, int));
static void	sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
					     bus_dmasync_op_t));

static int	sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
					      bus_size_t, bus_size_t,
					      bus_dma_segment_t *,
					      int, int *, int));
static void	sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
					     bus_dma_segment_t *, int));
#endif

/* Not yet implemented; the generic _bus_* versions are used instead */
#if 0
static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
					  int, size_t, caddr_t *, int));
static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
static int	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
					   bus_dma_segment_t *, int, int,
					   int, int));
#endif
153
/* Autoconfiguration glue: match/attach entry points for the "vme" device. */
struct cfattach vme_ca = {
	sizeof(struct vmebus_softc), vmematch, vmeattach
};
157
/*
 * Bus space tag handed to VME device drivers.  Methods are left NULL
 * here; the sun4m attach code installs a barrier method and a cookie
 * (see vmeattach4m()).
 */
struct sparc_bus_space_tag sparc_vme_bus_tag = {
	NULL, /* cookie */
	NULL, /* bus_map */
	NULL, /* bus_unmap */
	NULL, /* bus_subregion */
	NULL  /* barrier */
};
165
/*
 * VME chipset tag: entry points the machine-independent VME layer
 * calls back through.  The cookie is filled in at attach time.
 */
struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,			/* cookie; set in vmeattach4/vmeattach4m */
	sparc_vme_probe,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_mmap_cookie,
	sparc_vme_intr_map,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	sparc_vme_bus_establish
};
177
178
#if defined(SUN4)
/*
 * sun4 VME DMA tag: mostly the generic _bus_* implementations, with
 * load/unload and alloc/free overridden to rebase DVMA addresses to
 * the VME master's view (see the DVMA_BASE adjustments below).
 */
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie; set in vmeattach4 */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	sparc_vme4_dmamem_alloc,
	sparc_vme4_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif
198
#if defined(SUN4M)
/*
 * sun4m VME DMA tag: overrides the map create/load/unload/sync methods
 * to manage the VME IO-cache; everything else is generic.
 */
struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
	NULL,	/* cookie; set in vmeattach4m */
	sparc_vme4m_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4m_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4m_dmamap_unload,
	sparc_vme4m_dmamap_sync,

	sparc_vme4m_dmamem_alloc,
	sparc_vme4m_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif
218
219
/*
 * Called for each device that attaches on our bus.  If the device is
 * the current component of the PROM boot path, record it in the
 * bootpath entry and advance sc_bp to the next component so that the
 * next level of attachment can match against it.
 */
void
sparc_vme_bus_establish(cookie, dev)
	void *cookie;
	struct device *dev;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct bootpath *bp = sc->sc_bp;
	char *name;

	name = dev->dv_cfdata->cf_driver->cd_name;
#ifdef DEBUG
	printf("sparc_vme_bus_establish: %s%d\n", name, dev->dv_unit);
#endif
	/* Match on driver name and unit number of the boot path component */
	if (bp != NULL && strcmp(bp->name, name) == 0 &&
	    dev->dv_unit == bp->val[1]) {
		bp->dev = dev;
#ifdef DEBUG
		printf("sparc_vme_bus_establish: on the boot path\n");
#endif
		/* Advance to the next boot path component */
		sc->sc_bp++;
		bootpath_store(1, sc->sc_bp);
	}
}
243
244
245 int
246 vmematch(parent, cf, aux)
247 struct device *parent;
248 struct cfdata *cf;
249 void *aux;
250 {
251 register struct confargs *ca = aux;
252 register struct romaux *ra = &ca->ca_ra;
253
254 if (CPU_ISSUN4C)
255 return (0);
256
257 return (strcmp(cf->cf_driver->cd_name, ra->ra_name) == 0);
258 }
259
/*
 * Common attach entry point.  If the PROM boot path runs through a
 * "vme" node, record the remainder of the path for boot device
 * matching, then dispatch to the CPU-model-specific attach routine.
 */
void
vmeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct confargs *ca = aux;
	register struct romaux *ra = &ca->ca_ra;

	/* We are on the boot path: export the next component to children */
	if (ra->ra_bp != NULL && strcmp(ra->ra_bp->name, "vme") == 0) {
		sc->sc_bp = ra->ra_bp + 1;
		bootpath_store(1, sc->sc_bp);
	}

#if defined(SUN4)
	if (CPU_ISSUN4)
		vmeattach4(parent, self, aux);
#endif

#if defined(SUN4M)
	if (CPU_ISSUN4M)
		vmeattach4m(parent, self, aux);
#endif

	/* Done attaching children; stop exporting the boot path */
	bootpath_store(1, NULL);
}
286
#if defined(SUN4)
/*
 * sun4-specific attachment: install the sun4 interrupt handler and
 * hand the bus/chipset/DMA tags to child devices via config_search().
 * Only unit 0 is supported.
 */
void
vmeattach4(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct vme_busattach_args vba;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4_dma_tag;

	printf("\n");
	(void)config_search(vmesearch, self, &vba);
	return;
}
#endif
316
#if defined(SUN4M)
/*
 * sun4m-specific attachment: map the VME controller and interrupt
 * vector registers, fetch the PROM "ranges" property used for address
 * translation, invalidate and enable the VME IO-cache, then search
 * for child devices.  Only unit 0 is supported.
 */
void
vmeattach4m(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct confargs *ca = aux;
	register struct romaux *ra = &ca->ca_ra;
	int node, rlen;
	struct vme_busattach_args vba;
	int cline;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4m_dma_tag._cookie = self;
	sparc_vme_bus_tag.sparc_barrier = sparc_vme4m_barrier;

	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4m_dma_tag;

	node = ra->ra_node;

	/*
	 * Map VME control space: control registers (reg[0]) and the
	 * interrupt vector, IO-cache tag and IO-cache flush registers,
	 * all within reg[1] at fixed offsets.
	 */
	sc->sc_reg = (struct vmebusreg *)
		mapdev(&ra->ra_reg[0], 0, 0, ra->ra_reg[0].rr_len);
	sc->sc_vec = (struct vmebusvec *)
		mapdev(&ra->ra_reg[1], 0, 0, ra->ra_reg[1].rr_len);
	sc->sc_ioctags = (u_int32_t *)
		mapdev(&ra->ra_reg[1], 0, VME_IOC_TAGOFFSET, VME_IOC_SIZE);
	sc->sc_iocflush = (u_int32_t *)
		mapdev(&ra->ra_reg[1], 0, VME_IOC_FLUSHOFFSET, VME_IOC_SIZE);

/*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;

	/*
	 * Get "range" property.
	 */
	rlen = getproplen(node, "ranges");
	if (rlen > 0) {
		sc->sc_nrange = rlen / sizeof(struct rom_range);
		sc->sc_range =
			(struct rom_range *)malloc(rlen, M_DEVBUF, M_NOWAIT);
		if (sc->sc_range == 0)
			panic("vme: PROM ranges too large: %d", rlen);
		(void)getprop(node, "ranges", sc->sc_range, rlen);
	}

	/* Export for sparc_vme_async_fault() */
	vmebus_sc = sc;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_search(vmesearch, self, &vba);
}
#endif
392
void	sparc_vme_async_fault __P((void));
/*
 * Report an asynchronous VME fault: print the async fault status and
 * address registers.  Uses the global vmebus_sc since faults arrive
 * without a driver context.
 */
void
sparc_vme_async_fault()
{
	struct vmebus_softc *sc = vmebus_sc;
	u_int32_t addr;

	addr = sc->sc_reg->vmebus_afar;
	printf("vme afsr: %x; addr %x\n", sc->sc_reg->vmebus_afsr, addr);
}
403
404 int
405 sparc_vme_probe(cookie, tag, addr, size, mod, callback, arg)
406 void *cookie;
407 bus_space_tag_t tag;
408 vme_addr_t addr;
409 vme_size_t size;
410 int mod;
411 int (*callback) __P((void *, void *));
412 void *arg;
413 {
414 struct rom_reg reg;
415 caddr_t tmp;
416 int result;
417 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
418
419 /* XXX - Use bus_space_[un]map() etc. */
420 reg.rr_paddr = (void *)addr;
421 vmebus_translate(sc, mod, ®);
422 tmp = (caddr_t)mapdev(®, TMPMAP_VA, 0, NBPG);
423 result = probeget(tmp, size) != -1;
424 if (result && callback != NULL)
425 result = (*callback)(tmp, arg);
426 pmap_remove(pmap_kernel(), TMPMAP_VA, TMPMAP_VA+NBPG);
427 return (result);
428 }
429
430 int
431 sparc_vme_map(cookie, addr, size, mod, tag, handlep)
432 void *cookie;
433 vme_addr_t addr;
434 vme_size_t size;
435 int mod;
436 bus_space_tag_t tag;
437 bus_space_handle_t *handlep;
438 {
439 struct rom_reg reg;
440 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
441
442 reg.rr_paddr = (void *)addr;
443 vmebus_translate(sc, mod, ®);
444 *handlep = (bus_space_handle_t)mapdev(®, 0, 0, size);
445 return (0);
446 }
447
448 int
449 sparc_vme_mmap_cookie(cookie, addr, mod, tag, handlep)
450 void *cookie;
451 vme_addr_t addr;
452 int mod;
453 bus_space_tag_t tag;
454 int *handlep;
455 {
456 struct rom_reg reg;
457 struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
458
459 reg.rr_paddr = (void *)addr;
460 vmebus_translate(sc, mod, ®);
461 *handlep = (int)reg.rr_paddr | PMAP_IOENC(reg.rr_iospace) | PMAP_NC;
462 return (0);
463 }
464
/*
 * Translate a VME address (in rr->rr_paddr) plus address modifier into
 * a parent-bus physical address and I/O space.  On sun4 the mapping is
 * hard-coded; on sun4m it is driven by the PROM "ranges" property.
 * Panics on an unsupported modifier.
 *
 * NOTE(review): the `(int)lvalue = ...' casts below are an old GCC
 * "cast as lvalue" extension; modern compilers reject them.
 */
void
vmebus_translate(sc, mod, rr)
	struct vmebus_softc *sc;
	vme_mod_t mod;
	struct rom_reg *rr;
{
	register int j;

	if (CPU_ISSUN4) {
		/* D32 cycles use a separate PMAP I/O space on sun4 */
		(int)rr->rr_iospace = (mod & VMEMOD_D32)
			? PMAP_VME32
			: PMAP_VME16;

		/* Rebase the address by addressing width (A16/A24/A32) */
		switch (mod & ~VMEMOD_D32) {
		case VMEMOD_A16|VMEMOD_D|VMEMOD_S:
			rr->rr_paddr += 0xffff0000;
			break;
		case VMEMOD_A24|VMEMOD_D|VMEMOD_S:
			rr->rr_paddr += 0xff000000;
			break;
		case VMEMOD_A32|VMEMOD_D|VMEMOD_S:
			break;
		default:
			panic("vmebus_translate: unsupported VME modifier: %x",
			      mod);
		}
		return;
	}


	/* sun4m VME node: translate through "ranges" property */
	if (sc->sc_nrange == 0)
		panic("vmebus: no ranges");

	/* Translate into parent address spaces */
	for (j = 0; j < sc->sc_nrange; j++) {
		if (sc->sc_range[j].cspace == mod) {
			(int)rr->rr_paddr +=
				sc->sc_range[j].poffset;
			(int)rr->rr_iospace =
				sc->sc_range[j].pspace;
			return;
		}
	}
	panic("sparc_vme_translate: modifier %x not supported", mod);
}
511
#if defined(SUN4M)
/*
 * Bus-space barrier for sun4m: reading the async fault status register
 * forces outstanding write buffers between the SBus and the VME bus to
 * drain (side effect of the register access).
 */
void
sparc_vme4m_barrier(cookie)
	void *cookie;
{
	struct vmebusreg *vbp = (struct vmebusreg *)cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif
523
524
525
/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 * Indexed by VME IPL (0-7); entry 0 is unused.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};
539
540
/*
 * All VME device interrupts go through vmeintr(). This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler. All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures. The head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;	/* generic interrupt handler record */
	struct sparc_vme_intr_handle *next;	/* next handler at same PIL */
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct vmebus_softc *sc;/*XXX - to reach controller registers */
};
556
#if defined(SUN4)
/*
 * sun4 VME interrupt dispatcher.  Fetches the interrupt vector from
 * control space (AC_VMEINTVEC | level, where level encodes the VME
 * priority), then calls every registered handler on this PIL chain
 * whose vector matches.  Returns the number of handlers that claimed
 * the interrupt.
 */
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	/* Encode VME priority into the interrupt-acknowledge address */
	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
		printf("vme: spurious interrupt\n");
		return 1; /* XXX - pretend we handled it, for now */
	}

	/* Dispatch to all matching handlers on this PIL's chain */
	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif
581
#if defined(SUN4M)
/*
 * sun4m VME interrupt dispatcher.  Reads the interrupt vector from the
 * controller's vector registers and dispatches all matching handlers
 * on this PIL chain.  Because the vector read can fault on the bus,
 * it is done through fkbyte() with pcb_onfault protection rather than
 * a direct dereference.
 */
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: non pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why gives this a bus timeout sometimes? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern struct user *proc0paddr;
	extern int fkbyte __P((caddr_t, struct pcb *));
	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	/* Block interrupts while fiddling with the fault handler */
	s = splhigh();
	if (curproc == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curproc->p_addr->u_pcb;

	/* fkbyte() installs its own onfault handler; save and restore ours */
	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
		/* The vector read faulted: report controller state */
		printf("vme: spurious interrupt: ");
		printf("SI: 0x%x, VME AFSR: 0x%x, VME AFAR 0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
		return 1; /* XXX - pretend we handled it, for now */
	}

	/* Dispatch to all matching handlers on this PIL's chain */
	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif
648
649 int
650 sparc_vme_intr_map(cookie, vec, pri, ihp)
651 void *cookie;
652 int vec;
653 int pri;
654 vme_intr_handle_t *ihp;
655 {
656 struct sparc_vme_intr_handle *ih;
657
658 ih = (vme_intr_handle_t)
659 malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
660 ih->pri = pri;
661 ih->vec = vec;
662 ih->sc = cookie;/*XXX*/
663 *ihp = ih;
664 return (0);
665 }
666
/*
 * Register an interrupt handler for a previously mapped VME interrupt.
 * One `struct intrhand' running sc_vmeintr is installed per processor
 * interrupt level; additional handlers at the same level are chained
 * onto its argument list and dispatched by vector in vmeintr4/4m.
 * Always returns NULL (no disestablish cookie is implemented).
 */
void *
sparc_vme_intr_establish(cookie, vih, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int (*func) __P((void *));
	void *arg;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int level;

	/* Translate VME priority to processor IPL */
	level = vme_ipl_to_pil[svih->pri];

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		/* First VME handler at this level: install the dispatcher */
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(level, ih);
	} else {
		/* Dispatcher present: prepend to its handler chain */
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}
707
/*
 * Unmap a VME address range mapped by sparc_vme_map().
 * Not implemented yet: panics if called.
 */
void
sparc_vme_unmap(cookie)
	void * cookie;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}
715
/*
 * Remove an interrupt handler established above.
 * Not implemented yet: panics if called.
 */
void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}
724
725
726
727 /*
728 * VME DMA functions.
729 */
730
731 #if defined(SUN4)
/*
 * sun4 DMA map load: do the generic DVMA load, then rebase the
 * resulting address by -DVMA_BASE to get the address as seen from
 * the VME master side.
 */
int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	int error;

	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr -= DVMA_BASE;
	return (0);
}
751
/*
 * sun4 DMA map unload: undo the VME-view address adjustment made in
 * sparc_vme4_dmamap_load() before handing the map to the generic
 * unload routine.
 */
void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	map->dm_segs[0].ds_addr += DVMA_BASE;
	_bus_dmamap_unload(t, map);
}
760
/*
 * sun4 DMA memory allocation: generic allocation followed by the same
 * DVMA-to-VME address rebasing as in the map load path.
 */
int
sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	int error;

	error = _bus_dmamem_alloc(t, size, alignment, boundary,
				  segs, nsegs, rsegs, flags);
	if (error != 0)
		return (error);

	/* Adjust DVMA address to VME view */
	segs[0].ds_addr -= DVMA_BASE;
	return (0);
}
780
/*
 * sun4 DMA memory free: restore the DVMA-view address before freeing
 * through the generic routine.
 */
void
sparc_vme4_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	segs[0].ds_addr += DVMA_BASE;
	_bus_dmamem_free(t, segs, nsegs);
}
790
791 void
792 sparc_vme4_dmamap_sync(t, map, op)
793 bus_dma_tag_t t;
794 bus_dmamap_t map;
795 bus_dmasync_op_t op;
796 {
797 switch (op) {
798 default:
799 }
800 }
801 #endif /* SUN4 */
802
803 #if defined(SUN4M)
/*
 * sun4m DMA map creation.  Currently just defers to the generic
 * routine; the computed 8K alignment is not yet passed through (see
 * the commented-out argument), which leaves `align' unused for now.
 */
static int
sparc_vme4m_dmamap_create (t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	int align;

	/* VME DVMA addresses must always be 8K aligned */
	align = 8192;

	/* XXX - todo: allocate DVMA addresses from assigned ranges:
		 upper 8MB for A32 space; upper 1MB for A24 space */
	return (_bus_dmamap_create(t, size, nsegments, maxsegsz,
				   boundary, /*align,*/ flags, dmamp));
}
824
/*
 * sun4m DMA map load: round the length up to the IO-cache page size,
 * do the generic load, then mark the IO-cache line for each covered
 * page as cacheable and write-enabled.
 */
int
sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t *ioctags;
	int error;

	/* Round length up to a whole number of IO-cache pages */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* allocate IO cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (;buflen > 0;) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}
	return (0);
}
852
853
/*
 * sun4m DMA map unload: flush the IO-cache lines covering the mapped
 * range (writing the flush register for each line), synchronize the
 * flushes with a tag read, then do the generic unload.
 */
void
sparc_vme4m_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t *flushregs;
	int len;

	/* Flush VME IO cache */
	len = map->dm_segs[0].ds_len;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (;len > 0;) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}
	/* Read a tag to synchronize the IOC flushes */
	(*sc->sc_ioctags);

	_bus_dmamap_unload(t, map);
}
876
877 int
878 sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
879 bus_dma_tag_t t;
880 bus_size_t size, alignmnt, boundary;
881 bus_dma_segment_t *segs;
882 int nsegs;
883 int *rsegs;
884 int flags;
885 {
886 int error;
887
888 error = _bus_dmamem_alloc(t, size, alignmnt, boundary,
889 segs, nsegs, rsegs, flags);
890 if (error != 0)
891 return (error);
892
893 return (0);
894 }
895
/*
 * sun4m DMA memory free: pure pass-through to the generic routine
 * (no address rebasing was applied at allocation time).
 */
void
sparc_vme4m_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	_bus_dmamem_free(t, segs, nsegs);
}
904
905 void
906 sparc_vme4m_dmamap_sync(t, map, op)
907 bus_dma_tag_t t;
908 bus_dmamap_t map;
909 bus_dmasync_op_t op;
910 {
911 switch (op) {
912 default:
913 }
914 }
915 #endif /* SUN4M */
916