/*	$NetBSD: vme_machdep.c,v 1.7 1998/04/07 20:21:55 pk Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/pmap.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct vmebus_softc {
	struct device	 sc_dev;	/* base device */
	bus_space_tag_t	 sc_bustag;
	struct vmebusreg *sc_reg;	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		 sc_nrange;
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int		 (*sc_vmeintr) __P((void *));
	struct bootpath	 *sc_bp;
};
struct vmebus_softc *vmebus_sc;/*XXX*/

/* autoconfiguration driver */
static int	vmematch_iommu __P((struct device *, struct cfdata *, void *));
static void	vmeattach_iommu __P((struct device *, struct device *, void *));
static int	vmematch_mainbus __P((struct device *, struct cfdata *, void *));
static void	vmeattach_mainbus __P((struct device *, struct device *, void *));
#if defined(SUN4)
int		vmeintr4 __P((void *));
#endif
#if defined(SUN4M)
int		vmeintr4m __P((void *));
#endif


static int	sparc_vme_probe __P((void *, bus_space_tag_t, vme_addr_t,
				     size_t, vme_size_t, vme_mod_t,
				     int (*) __P((void *, void *)), void *));
static int	sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_mod_t,
				   bus_space_tag_t, bus_space_handle_t *));
static void	sparc_vme_unmap __P((void *));
static int	sparc_vme_mmap_cookie __P((void *, vme_addr_t, vme_mod_t,
				   bus_space_tag_t, bus_space_handle_t *));
static int	sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
static void *	sparc_vme_intr_establish __P((void *, vme_intr_handle_t,
				   int (*) __P((void *)), void *));
static void	sparc_vme_intr_disestablish __P((void *, void *));

static int	vmebus_translate __P((struct vmebus_softc *, vme_mod_t,
				      vme_addr_t, bus_type_t *, bus_addr_t *));
static void	sparc_vme_bus_establish __P((void *, struct device *));
#if defined(SUN4M)
static void	sparc_vme4m_barrier __P((bus_space_tag_t, bus_space_handle_t,
					 bus_size_t, bus_size_t, int));

#endif

/*
 * DMA functions.
 */
#if defined(SUN4)
static int	sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
			bus_size_t, struct proc *, int));
static void	sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
			bus_addr_t, bus_size_t, int));

static int	sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
			bus_size_t, bus_size_t, bus_dma_segment_t *,
			int, int *, int));
static void	sparc_vme4_dmamem_free __P((bus_dma_tag_t,
			bus_dma_segment_t *, int));
#endif

#if defined(SUN4M)
static int	sparc_vme4m_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
			bus_size_t, bus_size_t, int, bus_dmamap_t *));

static int	sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
			bus_size_t, struct proc *, int));
static void	sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
static void	sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
			bus_addr_t, bus_size_t, int));

static int	sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
			bus_size_t, bus_size_t, bus_dma_segment_t *,
			int, int *, int));
static void	sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
			bus_dma_segment_t *, int));
#endif

#if 0
static void	sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
static int	sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
			int, size_t, caddr_t *, int));
static void	sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
static int	sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
			bus_dma_segment_t *, int, int, int, int));
#endif

struct cfattach vme_mainbus_ca = {
	sizeof(struct vmebus_softc), vmematch_mainbus, vmeattach_mainbus
};

struct cfattach vme_iommu_ca = {
	sizeof(struct vmebus_softc), vmematch_iommu, vmeattach_iommu
};

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VMEMOD_D|VMEMOD_S)
	{ VMEMOD_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 },
	{ VMEMOD_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 },
	{ VMEMOD_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 },
	{ VMEMOD_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
	{ VMEMOD_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
	{ VMEMOD_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};
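
/*
 * Example (fallback table above): an A24 data/supervisor access,
 * i.e. mod == (VMEMOD_A24|VMEMOD_D|VMEMOD_S), to VME address 0x200000
 * matches the second entry, so vmebus_translate() below yields bus type
 * PMAP_VME16 and parent address 0xff000000 + 0x200000 = 0xff200000.
 */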

struct sparc_bus_space_tag sparc_vme_bus_tag = {
	NULL, /* cookie */
	NULL, /* parent bus tag */
	NULL, /* bus_map */
	NULL, /* bus_unmap */
	NULL, /* bus_subregion */
	NULL  /* barrier */
};

struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,
	sparc_vme_probe,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_mmap_cookie,
	sparc_vme_intr_map,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	sparc_vme_bus_establish
};
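
/*
 * These chipset entry points, together with the bus tag above and the
 * DMA tags below, are handed to child VME drivers through the
 * vme_busattach_args assembled in vmeattach_mainbus() and
 * vmeattach_iommu(). The DMA tags override only the operations that
 * need VME-specific treatment; all other entries fall through to the
 * generic _bus_dma*() implementations.
 */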


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	sparc_vme4_dmamem_alloc,
	sparc_vme4_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
	NULL,	/* cookie */
	sparc_vme4m_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4m_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4m_dmamap_unload,
	sparc_vme4m_dmamap_sync,

	sparc_vme4m_dmamem_alloc,
	sparc_vme4m_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

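/*
 * Called for each device attached to the VME bus: if the device matches
 * the next component of the boot path, record it and advance the stored
 * boot path to its next component.
 */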
void
sparc_vme_bus_establish(cookie, dev)
	void *cookie;
	struct device *dev;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct bootpath *bp = sc->sc_bp;
	char *name;

	name = dev->dv_cfdata->cf_driver->cd_name;
#ifdef DEBUG
	printf("sparc_vme_bus_establish: %s%d\n", name, dev->dv_unit);
#endif
	if (bp != NULL && strcmp(bp->name, name) == 0 &&
	    dev->dv_unit == bp->val[1]) {
		bp->dev = dev;
#ifdef DEBUG
		printf("sparc_vme_bus_establish: on the boot path\n");
#endif
		sc->sc_bp++;
		bootpath_store(1, sc->sc_bp);
	}
}


int
vmematch_mainbus(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{

	if (!CPU_ISSUN4)
		return (0);

	return (1);
}

int
vmematch_iommu(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct mainbus_attach_args *ma = aux;

	return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
}


void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct vme_busattach_args vba;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ma->ma_bustag;

	if (ma->ma_bp != NULL && strcmp(ma->ma_bp->name, "vme") == 0) {
		sc->sc_bp = ma->ma_bp + 1;
		bootpath_store(1, sc->sc_bp);
	}

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4_dma_tag;

	/* Fall back to our own `ranges' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
	    sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	printf("\n");
	(void)config_search(vmesearch, self, &vba);

	bootpath_store(1, NULL);
#endif
	return;
}
/* sun4m vmebus */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vme_busattach_args vba;
	bus_space_handle_t bh;
	struct rom_reg *rr;
	int nreg;
	int node;
	int cline;

	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ia->iom_bustag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4m_dma_tag._cookie = self;
	sparc_vme_bus_tag.sparc_bus_barrier = sparc_vme4m_barrier;

	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4m_dma_tag;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	rr = NULL;
	if (getpropA(node, "reg", sizeof(*rr), &nreg, (void**)&rr) != 0) {
		printf("%s: can't get register property\n", self->dv_xname);
		return;
	}
	if (nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname, nreg);
		return;
	}

	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)rr[0].rr_iospace,
			   (bus_addr_t)rr[0].rr_paddr,
			   (bus_size_t)rr[0].rr_len,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)rr[1].rr_iospace,
			   (bus_addr_t)rr[1].rr_paddr,
			   (bus_size_t)rr[1].rr_len,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)rr[1].rr_iospace,
			   (bus_addr_t)rr[1].rr_paddr + VME_IOC_TAGOFFSET,
			   VME_IOC_SIZE,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	if (bus_space_map2(ia->iom_bustag,
			   (bus_type_t)rr[1].rr_iospace,
			   (bus_addr_t)rr[1].rr_paddr + VME_IOC_FLUSHOFFSET,
			   VME_IOC_SIZE,
			   BUS_SPACE_MAP_LINEAR,
			   0, &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

/*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;

	/*
	 * Get the `ranges' property.
	 */
	if (getpropA(node, "ranges", sizeof(struct rom_range),
		     &sc->sc_nrange, (void **)&sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	vmebus_sc = sc;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_search(vmesearch, self, &vba);
#endif
}

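/*
 * Report an asynchronous VME bus fault: print the fault status and
 * address latched in the VME control registers. (Presumably hooked
 * into the memory-fault handling path; no caller appears in this file.)
 */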
void	sparc_vme_async_fault __P((void));
void
sparc_vme_async_fault()
{
	struct vmebus_softc *sc = vmebus_sc;
	u_int32_t addr;

	addr = sc->sc_reg->vmebus_afar;
	printf("vme afsr: %x; addr %x\n", sc->sc_reg->vmebus_afsr, addr);
}

int
vmebus_translate(sc, mod, addr, btp, bap)
	struct vmebus_softc *sc;
	vme_mod_t mod;
	vme_addr_t addr;
	bus_type_t *btp;
	bus_addr_t *bap;
{
	int i;

	for (i = 0; i < sc->sc_nrange; i++) {

		if (sc->sc_range[i].cspace != mod)
			continue;

		/* We've found the connection to the parent bus */
		*bap = sc->sc_range[i].poffset + addr;
		*btp = sc->sc_range[i].pspace;
		return (0);
	}
	return (ENOENT);
}

int
sparc_vme_probe(cookie, tag, addr, offset, size, mod, callback, arg)
	void *cookie;
	bus_space_tag_t tag;
	vme_addr_t addr;
	size_t offset;
	vme_size_t size;
	int mod;
	int (*callback) __P((void *, void *));
	void *arg;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	bus_type_t iospace;
	bus_addr_t paddr;

	if (vmebus_translate(sc, mod, addr, &iospace, &paddr) != 0)
		return (0);

	return (bus_space_probe(sc->sc_bustag, iospace, paddr, size, offset,
				0, callback, arg));
}

int
sparc_vme_map(cookie, addr, size, mod, tag, hp)
	void *cookie;
	vme_addr_t addr;
	vme_size_t size;
	int mod;
	bus_space_tag_t tag;
	bus_space_handle_t *hp;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	bus_type_t iospace;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &iospace, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_map2(sc->sc_bustag, iospace, paddr, size, 0, 0, hp));
}

int
sparc_vme_mmap_cookie(cookie, addr, mod, tag, hp)
	void *cookie;
	vme_addr_t addr;
	int mod;
	bus_space_tag_t tag;
	bus_space_handle_t *hp;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	bus_type_t iospace;
	bus_addr_t paddr;
	int error;

	error = vmebus_translate(sc, mod, addr, &iospace, &paddr);
	if (error != 0)
		return (error);

	return (bus_space_mmap(sc->sc_bustag, iospace, paddr, 0, hp));
}

#if defined(SUN4M)
void
sparc_vme4m_barrier(t, h, offset, size, flags)
	bus_space_tag_t t;
	bus_space_handle_t h;
	bus_size_t offset;
	bus_size_t size;
	int flags;
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};
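
/*
 * Example: a device interrupting at VME IPL 3 has its handler
 * dispatched at SPARC processor interrupt level vme_ipl_to_pil[3] == 5.
 */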


/*
 * All VME device interrupts go through vmeintr(). This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler. All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are kept on a linked
 * list of `sparc_vme_intr_handle' structures, the head of which is
 * passed as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;
	struct sparc_vme_intr_handle *next;
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct vmebus_softc *sc;/*XXX*/
};

#if defined(SUN4)
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	level = (ihp->pri << 1) | 1;

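	/* Fetch this level's interrupt vector from VME control space */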
	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
		printf("vme: spurious interrupt\n");
		return 1; /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif

#if defined(SUN4M)
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: no interrupt pending at pri %x (pending 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why does this read sometimes cause a bus timeout? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/*
	 * ...so fetch the vector with a fault handler installed; fkbyte()
	 * returns -1 on a bus error instead of taking a kernel fault.
	 */
	{
		extern struct user *proc0paddr;
		extern int fkbyte __P((caddr_t, struct pcb *));
		caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
		struct pcb *xpcb;
		u_long saveonfault;
		int s;

		s = splhigh();
		if (curproc == NULL)
			xpcb = (struct pcb *)proc0paddr;
		else
			xpcb = &curproc->p_addr->u_pcb;

		saveonfault = (u_long)xpcb->pcb_onfault;
		vec = fkbyte(addr, xpcb);
		xpcb->pcb_onfault = (caddr_t)saveonfault;

		splx(s);
	}
#endif

	if (vec == -1) {
		printf("vme: spurious interrupt: ");
		printf("SI: 0x%x, VME AFSR: 0x%x, VME AFAR 0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
		return 1; /* XXX - pretend we handled it, for now */
	}

	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif

int
sparc_vme_intr_map(cookie, vec, pri, ihp)
	void *cookie;
	int vec;
	int pri;
	vme_intr_handle_t *ihp;
{
	struct sparc_vme_intr_handle *ih;

	ih = (vme_intr_handle_t)
	    malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (ENOMEM);
	ih->pri = pri;
	ih->vec = vec;
	ih->sc = cookie;/*XXX*/
	*ihp = ih;
	return (0);
}

void *
sparc_vme_intr_establish(cookie, vih, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int (*func) __P((void *));
	void *arg;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
	    (struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int level;

	/* Translate VME priority to processor IPL */
	level = vme_ipl_to_pil[svih->pri];

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		ih = (struct intrhand *)
		    malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(level, ih);
	} else {
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}

void
sparc_vme_unmap(cookie)
	void *cookie;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}

void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */

#if defined(SUN4)
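/*
 * On sun4, the DVMA region appears at DVMA_BASE in the kernel's address
 * map, while a VME master apparently addresses the same memory relative
 * to 0; hence the DVMA_BASE adjustments in the load/alloc routines below.
 */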
int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	int error;

	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr -= DVMA_BASE;
	return (0);
}

void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	map->dm_segs[0].ds_addr += DVMA_BASE;
	_bus_dmamap_unload(t, map);
}

int
sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	int error;

	error = _bus_dmamem_alloc(t, size, alignment, boundary,
				  segs, nsegs, rsegs, flags);
	if (error != 0)
		return (error);

	segs[0].ds_addr -= DVMA_BASE;
	return (0);
}

void
sparc_vme4_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	segs[0].ds_addr += DVMA_BASE;
	_bus_dmamem_free(t, segs, nsegs);
}

void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 */
}
#endif /* SUN4 */

#if defined(SUN4M)
static int
sparc_vme4m_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	int align;

	/* VME DVMA addresses must always be 8K aligned */
	align = 8192;

	/* XXX - todo: allocate DVMA addresses from assigned ranges:
		 upper 8MB for A32 space; upper 1MB for A24 space */
	return (_bus_dmamap_create(t, size, nsegments, maxsegsz,
				   boundary, /*align,*/ flags, dmamp));
}
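
/*
 * The sun4m VME interface runs DMA through a small IO-cache: dmamap_load
 * below marks the cache lines covering the transfer as cacheable and
 * writable, and dmamap_unload flushes those lines again before the map
 * is torn down.
 */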

int
sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t *ioctags;
	int error;

	buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
	error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* allocate IO cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (; buflen > 0;) {
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}
	return (0);
}


void
sparc_vme4m_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t *flushregs;
	int len;

	/* Flush VME IO cache */
	len = map->dm_segs[0].ds_len;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (; len > 0;) {
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}
	/* Read a tag to synchronize the IOC flushes */
	(*sc->sc_ioctags);

	_bus_dmamap_unload(t, map);
}

int
sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignmnt, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	int error;

	error = _bus_dmamem_alloc(t, size, alignmnt, boundary,
				  segs, nsegs, rsegs, flags);
	if (error != 0)
		return (error);

	return (0);
}

void
sparc_vme4m_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	_bus_dmamem_free(t, segs, nsegs);
}

void
sparc_vme4m_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
#endif /* SUN4M */