vme_machdep.c revision 1.14 1 1.14 pk /* $NetBSD: vme_machdep.c,v 1.14 1998/09/19 16:45:43 pk Exp $ */
2 1.1 pk
3 1.1 pk /*-
4 1.4 thorpej * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 1.1 pk * All rights reserved.
6 1.1 pk *
7 1.1 pk * This code is derived from software contributed to The NetBSD Foundation
8 1.1 pk * by Paul Kranenburg.
9 1.1 pk *
10 1.1 pk * Redistribution and use in source and binary forms, with or without
11 1.1 pk * modification, are permitted provided that the following conditions
12 1.1 pk * are met:
13 1.1 pk * 1. Redistributions of source code must retain the above copyright
14 1.1 pk * notice, this list of conditions and the following disclaimer.
15 1.1 pk * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 pk * notice, this list of conditions and the following disclaimer in the
17 1.1 pk * documentation and/or other materials provided with the distribution.
18 1.1 pk * 3. All advertising materials mentioning features or use of this software
19 1.1 pk * must display the following acknowledgement:
20 1.1 pk * This product includes software developed by the NetBSD
21 1.1 pk * Foundation, Inc. and its contributors.
22 1.1 pk * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1 pk * contributors may be used to endorse or promote products derived
24 1.1 pk * from this software without specific prior written permission.
25 1.1 pk *
26 1.1 pk * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1 pk * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 pk * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 pk * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1 pk * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 pk * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 pk * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 pk * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 pk * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 pk * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 pk * POSSIBILITY OF SUCH DAMAGE.
37 1.1 pk */
38 1.1 pk
39 1.1 pk #include <sys/param.h>
40 1.10 pk #include <sys/extent.h>
41 1.1 pk #include <sys/systm.h>
42 1.1 pk #include <sys/device.h>
43 1.1 pk #include <sys/malloc.h>
44 1.1 pk
45 1.1 pk #include <sys/proc.h>
46 1.1 pk #include <sys/user.h>
47 1.1 pk #include <sys/syslog.h>
48 1.1 pk
49 1.1 pk #include <vm/vm.h>
50 1.1 pk
51 1.1 pk #define _SPARC_BUS_DMA_PRIVATE
52 1.1 pk #include <machine/bus.h>
53 1.6 pk #include <sparc/sparc/iommuvar.h>
54 1.1 pk #include <machine/autoconf.h>
55 1.1 pk #include <machine/pmap.h>
56 1.1 pk #include <machine/oldmon.h>
57 1.1 pk #include <machine/cpu.h>
58 1.1 pk #include <machine/ctlreg.h>
59 1.1 pk
60 1.1 pk #include <dev/vme/vmevar.h>
61 1.1 pk
62 1.1 pk #include <sparc/sparc/asm.h>
63 1.1 pk #include <sparc/sparc/vaddrs.h>
64 1.1 pk #include <sparc/sparc/cpuvar.h>
65 1.1 pk #include <sparc/dev/vmereg.h>
66 1.1 pk
/*
 * Per-bus software state for the VME controller.  Attached either
 * directly at mainbus (sun4) or below the IOMMU (sun4m).
 */
struct vmebus_softc {
	struct device	 sc_dev;	/* base device */
	bus_space_tag_t	 sc_bustag;	/* parent bus space tag */
	bus_dma_tag_t	 sc_dmatag;	/* parent bus DMA tag */
	struct vmebusreg *sc_reg; 	/* VME control registers */
	struct vmebusvec *sc_vec;	/* VME interrupt vector */
	struct rom_range *sc_range;	/* ROM range property */
	int		 sc_nrange;	/* number of entries in sc_range */
	volatile u_int32_t *sc_ioctags;	/* VME IO-cache tag registers */
	volatile u_int32_t *sc_iocflush;/* VME IO-cache flush registers */
	int (*sc_vmeintr) __P((void *));/* model-specific interrupt entry */
	struct bootpath *sc_bp;		/* current boot path component */
};
struct vmebus_softc *vmebus_sc;/*XXX -- single global instance, used by
				  sparc_vme_error(); assumes one VME bus */
81 1.1 pk
82 1.1 pk /* autoconfiguration driver */
83 1.6 pk static int vmematch_iommu __P((struct device *, struct cfdata *, void *));
84 1.6 pk static void vmeattach_iommu __P((struct device *, struct device *, void *));
85 1.6 pk static int vmematch_mainbus __P((struct device *, struct cfdata *, void *));
86 1.6 pk static void vmeattach_mainbus __P((struct device *, struct device *, void *));
87 1.1 pk #if defined(SUN4)
88 1.1 pk int vmeintr4 __P((void *));
89 1.1 pk #endif
90 1.1 pk #if defined(SUN4M)
91 1.1 pk int vmeintr4m __P((void *));
92 1.1 pk #endif
93 1.14 pk static int sparc_vme_error __P((void));
94 1.1 pk
95 1.1 pk
96 1.1 pk static int sparc_vme_probe __P((void *, bus_space_tag_t, vme_addr_t,
97 1.5 pk size_t, vme_size_t, vme_mod_t,
98 1.2 pk int (*) __P((void *, void *)), void *));
99 1.1 pk static int sparc_vme_map __P((void *, vme_addr_t, vme_size_t, vme_mod_t,
100 1.1 pk bus_space_tag_t, bus_space_handle_t *));
101 1.1 pk static void sparc_vme_unmap __P((void *));
102 1.1 pk static int sparc_vme_mmap_cookie __P((void *, vme_addr_t, vme_mod_t,
103 1.7 pk bus_space_tag_t, bus_space_handle_t *));
104 1.1 pk static int sparc_vme_intr_map __P((void *, int, int, vme_intr_handle_t *));
105 1.1 pk static void * sparc_vme_intr_establish __P((void *, vme_intr_handle_t,
106 1.1 pk int (*) __P((void *)), void *));
107 1.1 pk static void sparc_vme_intr_disestablish __P((void *, void *));
108 1.1 pk
109 1.7 pk static int vmebus_translate __P((struct vmebus_softc *, vme_mod_t,
110 1.7 pk vme_addr_t, bus_type_t *, bus_addr_t *));
111 1.1 pk static void sparc_vme_bus_establish __P((void *, struct device *));
112 1.1 pk #if defined(SUN4M)
113 1.7 pk static void sparc_vme4m_barrier __P(( bus_space_tag_t, bus_space_handle_t,
114 1.7 pk bus_size_t, bus_size_t, int));
115 1.7 pk
116 1.1 pk #endif
117 1.1 pk
118 1.1 pk /*
119 1.1 pk * DMA functions.
120 1.1 pk */
121 1.1 pk #if defined(SUN4)
122 1.1 pk static int sparc_vme4_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
123 1.1 pk bus_size_t, struct proc *, int));
124 1.1 pk static void sparc_vme4_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
125 1.1 pk static void sparc_vme4_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
126 1.4 thorpej bus_addr_t, bus_size_t, int));
127 1.1 pk
128 1.1 pk static int sparc_vme4_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
129 1.1 pk bus_size_t, bus_size_t, bus_dma_segment_t *,
130 1.1 pk int, int *, int));
131 1.1 pk static void sparc_vme4_dmamem_free __P((bus_dma_tag_t,
132 1.1 pk bus_dma_segment_t *, int));
133 1.1 pk #endif
134 1.1 pk
135 1.1 pk #if defined(SUN4M)
136 1.1 pk static int sparc_vme4m_dmamap_create __P((bus_dma_tag_t, bus_size_t, int,
137 1.1 pk bus_size_t, bus_size_t, int, bus_dmamap_t *));
138 1.1 pk
139 1.1 pk static int sparc_vme4m_dmamap_load __P((bus_dma_tag_t, bus_dmamap_t, void *,
140 1.1 pk bus_size_t, struct proc *, int));
141 1.1 pk static void sparc_vme4m_dmamap_unload __P((bus_dma_tag_t, bus_dmamap_t));
142 1.1 pk static void sparc_vme4m_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t,
143 1.4 thorpej bus_addr_t, bus_size_t, int));
144 1.1 pk
145 1.1 pk static int sparc_vme4m_dmamem_alloc __P((bus_dma_tag_t, bus_size_t,
146 1.1 pk bus_size_t, bus_size_t, bus_dma_segment_t *,
147 1.1 pk int, int *, int));
148 1.1 pk static void sparc_vme4m_dmamem_free __P((bus_dma_tag_t,
149 1.1 pk bus_dma_segment_t *, int));
150 1.1 pk #endif
151 1.1 pk
152 1.9 pk static int sparc_vme_dmamem_map __P((bus_dma_tag_t, bus_dma_segment_t *,
153 1.9 pk int, size_t, caddr_t *, int));
154 1.1 pk #if 0
155 1.1 pk static void sparc_vme_dmamap_destroy __P((bus_dma_tag_t, bus_dmamap_t));
156 1.1 pk static void sparc_vme_dmamem_unmap __P((bus_dma_tag_t, caddr_t, size_t));
157 1.1 pk static int sparc_vme_dmamem_mmap __P((bus_dma_tag_t,
158 1.1 pk bus_dma_segment_t *, int, int, int, int));
159 1.1 pk #endif
160 1.1 pk
/* Autoconfiguration glue: on sun4 the VME bus attaches at mainbus... */
struct cfattach vme_mainbus_ca = {
	sizeof(struct vmebus_softc), vmematch_mainbus, vmeattach_mainbus
};

/* ...while on sun4m it attaches below the IOMMU. */
struct cfattach vme_iommu_ca = {
	sizeof(struct vmebus_softc), vmematch_iommu, vmeattach_iommu
};
168 1.1 pk
/*
 * Hook installed by vmeattach_iommu(); presumably called from the
 * machine's asynchronous fault handler on VME bus errors -- the
 * caller is not visible in this file (TODO confirm against trap code).
 */
int (*vmeerr_handler) __P((void));

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VMEMOD_D|VMEMOD_S)
	{ VMEMOD_A16|_DS,		0, PMAP_VME16, 0xffff0000, 0 },
	{ VMEMOD_A24|_DS,		0, PMAP_VME16, 0xff000000, 0 },
	{ VMEMOD_A32|_DS,		0, PMAP_VME16, 0x00000000, 0 },
	{ VMEMOD_A16|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xffff0000, 0 },
	{ VMEMOD_A24|VMEMOD_D32|_DS,	0, PMAP_VME32, 0xff000000, 0 },
	{ VMEMOD_A32|VMEMOD_D32|_DS,	0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * DMA on sun4 VME devices use the last MB of virtual space, which
 * is mapped by hardware onto the first MB of VME space.
 * The extent arena hands out addresses from that range.
 */
struct extent *vme_dvmamap;
188 1.10 pk
/*
 * Bus space tag handed to children; most methods are filled in at
 * attach time (only the barrier op is set, and only on sun4m).
 */
struct sparc_bus_space_tag sparc_vme_bus_tag = {
	NULL, /* cookie */
	NULL, /* parent bus tag */
	NULL, /* bus_map */
	NULL, /* bus_unmap */
	NULL, /* bus_subregion */
	NULL  /* barrier */
};

/* VME chipset operations vector exported to MI VME code */
struct vme_chipset_tag sparc_vme_chipset_tag = {
	NULL,				/* cookie, set at attach */
	sparc_vme_probe,
	sparc_vme_map,
	sparc_vme_unmap,
	sparc_vme_mmap_cookie,
	sparc_vme_intr_map,
	sparc_vme_intr_establish,
	sparc_vme_intr_disestablish,
	sparc_vme_bus_establish
};
209 1.1 pk
210 1.1 pk
#if defined(SUN4)
/*
 * sun4 DMA method table: map creation/destruction and the mbuf/uio/raw
 * loaders come from the generic machine-dependent code; the plain
 * loader, unload, sync and memory alloc/free are VME-specific.
 */
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
	NULL,	/* cookie */
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4_dmamap_unload,
	sparc_vme4_dmamap_sync,

	sparc_vme4_dmamem_alloc,
	sparc_vme4_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
/*
 * sun4m DMA method table: additionally overrides map creation,
 * since DVMA goes through the IOMMU on these machines.
 */
struct sparc_bus_dma_tag sparc_vme4m_dma_tag = {
	NULL,	/* cookie */
	sparc_vme4m_dmamap_create,
	_bus_dmamap_destroy,
	sparc_vme4m_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	sparc_vme4m_dmamap_unload,
	sparc_vme4m_dmamap_sync,

	sparc_vme4m_dmamem_alloc,
	sparc_vme4m_dmamem_free,
	sparc_vme_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap
};
#endif
250 1.1 pk
251 1.1 pk
/*
 * Called by child drivers once they have attached.  If the child is
 * the next component on the boot path, record its device and advance
 * the stored boot path so the child's own children can match in turn.
 */
void
sparc_vme_bus_establish(cookie, dev)
	void *cookie;
	struct device *dev;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct bootpath *bp = sc->sc_bp;
	char *name;

	name = dev->dv_cfdata->cf_driver->cd_name;
#ifdef DEBUG
	printf("sparc_vme_bus_establish: %s%d\n", name, dev->dv_unit);
#endif
	/* bp->val[1] presumably holds the unit number -- TODO confirm */
	if (bp != NULL && strcmp(bp->name, name) == 0 &&
	    dev->dv_unit == bp->val[1]) {
		bp->dev = dev;
#ifdef DEBUG
		printf("sparc_vme_bus_establish: on the boot path\n");
#endif
		/* Advance to the next boot path component */
		sc->sc_bp++;
		bootpath_store(1, sc->sc_bp);
	}
}
275 1.1 pk
276 1.1 pk
277 1.1 pk int
278 1.6 pk vmematch_mainbus(parent, cf, aux)
279 1.1 pk struct device *parent;
280 1.1 pk struct cfdata *cf;
281 1.1 pk void *aux;
282 1.1 pk {
283 1.1 pk
284 1.6 pk if (!CPU_ISSUN4)
285 1.1 pk return (0);
286 1.1 pk
287 1.6 pk return (1);
288 1.1 pk }
289 1.1 pk
290 1.6 pk int
291 1.6 pk vmematch_iommu(parent, cf, aux)
292 1.6 pk struct device *parent;
293 1.6 pk struct cfdata *cf;
294 1.1 pk void *aux;
295 1.1 pk {
296 1.6 pk struct mainbus_attach_args *ma = aux;
297 1.1 pk
298 1.6 pk return (strcmp(cf->cf_driver->cd_name, ma->ma_name) == 0);
299 1.6 pk }
300 1.1 pk
301 1.1 pk
/*
 * Attach the VME bus at mainbus (sun4).  Records parent tags, tracks
 * the boot path, installs the hard-wired address translations, creates
 * the DVMA address arena, and searches for child devices.
 */
void
vmeattach_mainbus(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4)
	struct mainbus_attach_args *ma = aux;
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct vme_busattach_args vba;

	/* Only a single VME bus instance is supported */
	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ma->ma_bustag;
	sc->sc_dmatag = ma->ma_dmatag;

	/* If we are on the boot path, advance it past the "vme" component */
	if (ma->ma_bp != NULL && strcmp(ma->ma_bp->name, "vme") == 0) {
		sc->sc_bp = ma->ma_bp + 1;
		bootpath_store(1, sc->sc_bp);
	}

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4_dma_tag._cookie = self;

	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4_dma_tag;

	/* Fall back to our own `range' construction */
	sc->sc_range = vmebus_translations;
	sc->sc_nrange =
		sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

	/* Arena for the hardware DVMA window (see comment at vme_dvmamap) */
	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
				    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (vme_dvmamap == NULL)
		panic("vme: unable to allocate DVMA map");

	printf("\n");
	(void)config_search(vmesearch, self, &vba);

	/* Done walking the boot path below this bus */
	bootpath_store(1, NULL);
#endif
	return;
}
352 1.1 pk
353 1.1 pk /* sun4m vmebus */
/* sun4m vmebus */
/*
 * Attach the VME bus below the IOMMU (sun4m).  Maps the VME control
 * and interrupt-vector register banks plus the IO-cache tag/flush
 * registers, reads the PROM `ranges' property, installs the bus-error
 * handler, invalidates and enables the IO-cache, then searches for
 * children.
 */
void
vmeattach_iommu(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
#if defined(SUN4M)
	struct vmebus_softc *sc = (struct vmebus_softc *)self;
	struct iommu_attach_args *ia = aux;
	struct vme_busattach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	/* Only a single VME bus instance is supported */
	if (self->dv_unit > 0) {
		printf(" unsupported\n");
		return;
	}

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = self;
/*XXX*/	sparc_vme4m_dma_tag._cookie = self;
	sparc_vme_bus_tag.sparc_bus_barrier = sparc_vme4m_barrier;

	vba.vba_bustag = &sparc_vme_bus_tag;
	vba.vba_chipset_tag = &sparc_vme_chipset_tag;
	vba.vba_dmatag = &sparc_vme4m_dma_tag;

	node = ia->iom_node;

	/*
	 * Map VME control space.
	 * Register set 0 is the control registers; set 1 holds the
	 * interrupt vectors and (at fixed offsets) the IO-cache banks.
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", self->dv_xname,
			ia->iom_nreg);
		return;
	}

	if (bus_space_map2(ia->iom_bustag,
			  (bus_type_t)ia->iom_reg[0].ior_iospace,
			  (bus_addr_t)ia->iom_reg[0].ior_pa,
			  (bus_size_t)ia->iom_reg[0].ior_size,
			  BUS_SPACE_MAP_LINEAR,
			  0, &bh) != 0) {
		panic("%s: can't map vmebusreg", self->dv_xname);
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map2(ia->iom_bustag,
			  (bus_type_t)ia->iom_reg[1].ior_iospace,
			  (bus_addr_t)ia->iom_reg[1].ior_pa,
			  (bus_size_t)ia->iom_reg[1].ior_size,
			  BUS_SPACE_MAP_LINEAR,
			  0, &bh) != 0) {
		panic("%s: can't map vmebusvec", self->dv_xname);
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map2(ia->iom_bustag,
			  (bus_type_t)ia->iom_reg[1].ior_iospace,
			  (bus_addr_t)ia->iom_reg[1].ior_pa + VME_IOC_TAGOFFSET,
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  0, &bh) != 0) {
		panic("%s: can't map IOC tags", self->dv_xname);
	}
	sc->sc_ioctags = (u_int32_t *)bh;

	if (bus_space_map2(ia->iom_bustag,
			  (bus_type_t)ia->iom_reg[1].ior_iospace,
			  (bus_addr_t)ia->iom_reg[1].ior_pa+VME_IOC_FLUSHOFFSET,
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  0, &bh) != 0) {
		panic("%s: can't map IOC flush registers", self->dv_xname);
	}
	sc->sc_iocflush = (u_int32_t *)bh;

	/* The barrier op reads the AFSR through this cookie */
/*XXX*/	sparc_vme_bus_tag.cookie = sc->sc_reg;

	/*
	 * Get "range" property.
	 */
	if (getprop(node, "ranges", sizeof(struct rom_range),
		    &sc->sc_nrange, (void **)&sc->sc_range) != 0) {
		panic("%s: can't get ranges property", self->dv_xname);
	}

	vmebus_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_search(vmesearch, self, &vba);
#endif
}
469 1.1 pk
470 1.14 pk int
471 1.14 pk sparc_vme_error()
472 1.1 pk {
473 1.1 pk struct vmebus_softc *sc = vmebus_sc;
474 1.14 pk u_int32_t afsr, afpa;
475 1.14 pk char bits[64];
476 1.1 pk
477 1.14 pk afsr = sc->sc_reg->vmebus_afsr,
478 1.14 pk afpa = sc->sc_reg->vmebus_afar;
479 1.14 pk printf("VME error:\n\tAFSR %s\n",
480 1.14 pk bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
481 1.14 pk printf("\taddress: 0x%x%x\n", afsr, afpa);
482 1.14 pk return (0);
483 1.1 pk }
484 1.1 pk
485 1.1 pk int
486 1.7 pk vmebus_translate(sc, mod, addr, btp, bap)
487 1.7 pk struct vmebus_softc *sc;
488 1.7 pk vme_mod_t mod;
489 1.7 pk vme_addr_t addr;
490 1.7 pk bus_type_t *btp;
491 1.7 pk bus_addr_t *bap;
492 1.7 pk {
493 1.7 pk int i;
494 1.7 pk
495 1.7 pk for (i = 0; i < sc->sc_nrange; i++) {
496 1.7 pk
497 1.7 pk if (sc->sc_range[i].cspace != mod)
498 1.7 pk continue;
499 1.7 pk
500 1.7 pk /* We've found the connection to the parent bus */
501 1.7 pk *bap = sc->sc_range[i].poffset + addr;
502 1.7 pk *btp = sc->sc_range[i].pspace;
503 1.7 pk return (0);
504 1.7 pk }
505 1.7 pk return (ENOENT);
506 1.7 pk }
507 1.7 pk
508 1.7 pk int
509 1.5 pk sparc_vme_probe(cookie, tag, addr, offset, size, mod, callback, arg)
510 1.1 pk void *cookie;
511 1.1 pk bus_space_tag_t tag;
512 1.1 pk vme_addr_t addr;
513 1.5 pk size_t offset;
514 1.1 pk vme_size_t size;
515 1.1 pk int mod;
516 1.2 pk int (*callback) __P((void *, void *));
517 1.2 pk void *arg;
518 1.1 pk {
519 1.1 pk struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
520 1.7 pk bus_type_t iospace;
521 1.7 pk bus_addr_t paddr;
522 1.1 pk
523 1.7 pk if (vmebus_translate(sc, mod, addr, &iospace, &paddr) != 0)
524 1.7 pk return (0);
525 1.7 pk
526 1.7 pk return (bus_space_probe(sc->sc_bustag, iospace, paddr, size, offset,
527 1.7 pk 0, callback, arg));
528 1.1 pk }
529 1.1 pk
530 1.1 pk int
531 1.7 pk sparc_vme_map(cookie, addr, size, mod, tag, hp)
532 1.1 pk void *cookie;
533 1.1 pk vme_addr_t addr;
534 1.1 pk vme_size_t size;
535 1.1 pk int mod;
536 1.1 pk bus_space_tag_t tag;
537 1.7 pk bus_space_handle_t *hp;
538 1.1 pk {
539 1.1 pk struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
540 1.7 pk bus_type_t iospace;
541 1.7 pk bus_addr_t paddr;
542 1.7 pk int error;
543 1.7 pk
544 1.7 pk error = vmebus_translate(sc, mod, addr, &iospace, &paddr);
545 1.7 pk if (error != 0)
546 1.7 pk return (error);
547 1.1 pk
548 1.7 pk return (bus_space_map2(sc->sc_bustag, iospace, paddr, size, 0, 0, hp));
549 1.1 pk }
550 1.1 pk
551 1.1 pk int
552 1.7 pk sparc_vme_mmap_cookie(cookie, addr, mod, tag, hp)
553 1.1 pk void *cookie;
554 1.1 pk vme_addr_t addr;
555 1.1 pk int mod;
556 1.1 pk bus_space_tag_t tag;
557 1.7 pk bus_space_handle_t *hp;
558 1.1 pk {
559 1.1 pk struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
560 1.7 pk bus_type_t iospace;
561 1.7 pk bus_addr_t paddr;
562 1.7 pk int error;
563 1.7 pk
564 1.7 pk error = vmebus_translate(sc, mod, addr, &iospace, &paddr);
565 1.7 pk if (error != 0)
566 1.7 pk return (error);
567 1.1 pk
568 1.7 pk return (bus_space_mmap(sc->sc_bustag, iospace, paddr, 0, hp));
569 1.1 pk }
570 1.1 pk
#if defined(SUN4M)
/*
 * Bus-space barrier for sun4m: the side effect of reading the
 * asynchronous fault status register flushes the VME write buffers.
 * The offset/size/flags arguments are ignored.
 */
void
sparc_vme4m_barrier(t, h, offset, size, flags)
	bus_space_tag_t t;
	bus_space_handle_t h;
	bus_size_t offset;
	bus_size_t size;
	int flags;
{
	struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

	/* Read async fault status to flush write-buffers */
	(*(volatile int *)&vbp->vmebus_afsr);
}
#endif
586 1.1 pk
587 1.1 pk
588 1.1 pk
/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 * Indexed by VME IPL 0-7; entry 0 is unused.
 */
static int vme_ipl_to_pil[] = {
	0,
	2,
	3,
	5,
	7,
	9,
	11,
	13
};
602 1.1 pk
603 1.1 pk
/*
 * All VME device interrupts go through vmeintr(). This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler. All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures. The head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
	struct intrhand ih;	/* must be first: generic handler record */
	struct sparc_vme_intr_handle *next;	/* next on same PIL chain */
	int	vec;		/* VME interrupt vector */
	int	pri;		/* VME interrupt priority */
	struct vmebus_softc *sc;/*XXX*/
};
619 1.1 pk
#if defined(SUN4)
/*
 * sun4 VME interrupt dispatcher: read the interrupt vector via the
 * control space, then run every handler on this PIL chain whose
 * vector matches.  Returns the number of handlers that claimed it.
 */
int
vmeintr4(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	/* Interrupt-acknowledge cycle address: (pri << 1) | 1 */
	level = (ihp->pri << 1) | 1;

	vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

	if (vec == -1) {
		printf("vme: spurious interrupt\n");
		return 1; /* XXX - pretend we handled it, for now */
	}

	/* Dispatch every registered handler with a matching vector */
	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif
644 1.1 pk
#if defined(SUN4M)
/*
 * sun4m VME interrupt dispatcher: fetch the interrupt vector from the
 * vector registers -- with a fault handler installed, since the read
 * has been seen to bus-timeout -- then run all matching handlers on
 * this PIL chain.  Returns the number of handlers that claimed it.
 */
int
vmeintr4m(arg)
	void *arg;
{
	struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
	int level, vec;
	int i = 0;

	/* Interrupt-acknowledge cycle address: (pri << 1) | 1 */
	level = (ihp->pri << 1) | 1;

#if 0
	int pending;

	/* Flush VME <=> Sbus write buffers */
	(*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

	pending = *((int*)ICR_SI_PEND);
	if ((pending & SINTR_VME(ihp->pri)) == 0) {
		printf("vmeintr: non pending at pri %x(p 0x%x)\n",
			ihp->pri, pending);
		return (0);
	}
#endif
#if 0
	/* Why gives this a bus timeout sometimes? */
	vec = ihp->sc->sc_vec->vmebusvec[level];
#else
	/* so, arrange to catch the fault... */
	{
	extern struct user *proc0paddr;
	extern int fkbyte __P((caddr_t, struct pcb *));
	caddr_t addr = (caddr_t)&ihp->sc->sc_vec->vmebusvec[level];
	struct pcb *xpcb;
	u_long saveonfault;
	int s;

	/* Block interrupts while pcb_onfault is borrowed */
	s = splhigh();
	if (curproc == NULL)
		xpcb = (struct pcb *)proc0paddr;
	else
		xpcb = &curproc->p_addr->u_pcb;

	/* fkbyte() presumably reads one byte, returning -1 on fault */
	saveonfault = (u_long)xpcb->pcb_onfault;
	vec = fkbyte(addr, xpcb);
	xpcb->pcb_onfault = (caddr_t)saveonfault;

	splx(s);
	}
#endif

	if (vec == -1) {
		printf("vme: spurious interrupt: ");
		printf("SI: 0x%x, VME AFSR: 0x%x, VME AFAR 0x%x\n",
			*((int*)ICR_SI_PEND),
			ihp->sc->sc_reg->vmebus_afsr,
			ihp->sc->sc_reg->vmebus_afar);
		return (1); /* XXX - pretend we handled it, for now */
	}

	/* Dispatch every registered handler with a matching vector */
	for (; ihp; ihp = ihp->next)
		if (ihp->vec == vec && ihp->ih.ih_fun)
			i += (ihp->ih.ih_fun)(ihp->ih.ih_arg);
	return (i);
}
#endif
711 1.1 pk
712 1.1 pk int
713 1.1 pk sparc_vme_intr_map(cookie, vec, pri, ihp)
714 1.1 pk void *cookie;
715 1.1 pk int vec;
716 1.1 pk int pri;
717 1.1 pk vme_intr_handle_t *ihp;
718 1.1 pk {
719 1.1 pk struct sparc_vme_intr_handle *ih;
720 1.1 pk
721 1.1 pk ih = (vme_intr_handle_t)
722 1.1 pk malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
723 1.1 pk ih->pri = pri;
724 1.1 pk ih->vec = vec;
725 1.1 pk ih->sc = cookie;/*XXX*/
726 1.1 pk *ihp = ih;
727 1.1 pk return (0);
728 1.1 pk }
729 1.1 pk
/*
 * Register an interrupt handler on a previously mapped handle.
 * All VME handles sharing a processor level hang off one `intrhand'
 * whose function is the bus dispatcher (sc_vmeintr) and whose
 * argument is the head of the handle chain.  Always returns NULL.
 */
void *
sparc_vme_intr_establish(cookie, vih, func, arg)
	void *cookie;
	vme_intr_handle_t vih;
	int (*func) __P((void *));
	void *arg;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)cookie;
	struct sparc_vme_intr_handle *svih =
			(struct sparc_vme_intr_handle *)vih;
	struct intrhand *ih;
	int level;

	/* Translate VME priority to processor IPL */
	level = vme_ipl_to_pil[svih->pri];

	svih->ih.ih_fun = func;
	svih->ih.ih_arg = arg;
	svih->next = NULL;

	/* ensure the interrupt subsystem will call us at this level */
	for (ih = intrhand[level]; ih != NULL; ih = ih->ih_next)
		if (ih->ih_fun == sc->sc_vmeintr)
			break;

	if (ih == NULL) {
		/* First VME handler at this level: install the dispatcher */
		ih = (struct intrhand *)
			malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
		if (ih == NULL)
			panic("vme_addirq");
		bzero(ih, sizeof *ih);
		ih->ih_fun = sc->sc_vmeintr;
		ih->ih_arg = vih;
		intr_establish(level, ih);
	} else {
		/* Splice the new handle at the head of the existing chain */
		svih->next = (vme_intr_handle_t)ih->ih_arg;
		ih->ih_arg = vih;
	}
	return (NULL);
}
770 1.1 pk
/*
 * Unmap a VME register mapping -- unimplemented; panics if called.
 */
void
sparc_vme_unmap(cookie)
	void * cookie;
{
	/* Not implemented */
	panic("sparc_vme_unmap");
}
778 1.1 pk
/*
 * Remove an interrupt handler -- unimplemented; panics if called.
 */
void
sparc_vme_intr_disestablish(cookie, a)
	void *cookie;
	void *a;
{
	/* Not implemented */
	panic("sparc_vme_intr_disestablish");
}
787 1.1 pk
788 1.1 pk
789 1.1 pk
790 1.1 pk /*
791 1.1 pk * VME DMA functions.
792 1.1 pk */
793 1.1 pk
794 1.1 pk #if defined(SUN4)
/*
 * Load a DMA map with a linear buffer (sun4): allocate a DVMA range
 * and double-map each page of the buffer there, uncached.  The single
 * resulting segment address is rebased to the VME view of DVMA space.
 */
int
sparc_vme4_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	bus_addr_t dvmaddr;
	bus_size_t sgsize;
	vaddr_t vaddr;
	pmap_t pmap;
	int pagesz = PAGE_SIZE;
	int error;

	/* Carve a buffer-sized chunk out of the DVMA arena */
	error = extent_alloc(vme_dvmamap, round_page(buflen), NBPG,
			     map->_dm_boundary,
			     (flags & BUS_DMA_NOWAIT) == 0
					? EX_WAITOK
					: EX_NOWAIT,
			     (u_long *)&dvmaddr);
	if (error != 0)
		return (error);

	vaddr = (vaddr_t)buf;
	map->dm_mapsize = buflen;
	map->dm_nsegs = 1;
	/* Keep the buffer's in-page offset in the segment address */
	map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
	map->dm_segs[0].ds_len = buflen;

	pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

	for (; buflen > 0; ) {
		paddr_t pa;
		/*
		 * Get the physical address for this page.
		 */
		pa = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = pagesz - ((u_long)vaddr & (pagesz - 1));
		if (buflen < sgsize)
			sgsize = buflen;

#ifdef notyet
		if (have_iocache)
			curaddr |= PG_IOC;
#endif
		/* Enter an uncached (PMAP_NC) alias in the kernel pmap */
		pmap_enter(pmap_kernel(), dvmaddr,
			   (pa & ~(pagesz-1)) | PMAP_NC,
			   VM_PROT_READ|VM_PROT_WRITE, 1);

		dvmaddr += pagesz;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/* Adjust DVMA address to VME view */
	map->dm_segs[0].ds_addr -= VME4_DVMA_BASE;
	return (0);
}
859 1.1 pk
/*
 * Unload a DMA map (sun4): undo sparc_vme4_dmamap_load() by removing
 * the kernel double-mapping and returning the range to the DVMA arena.
 */
void
sparc_vme4_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	bus_addr_t addr;
	bus_size_t len;

	/* Go from VME to CPU view */
	map->dm_segs[0].ds_addr += VME4_DVMA_BASE;

	/* Page-align, since load kept the in-page offset in ds_addr */
	addr = map->dm_segs[0].ds_addr & ~PGOFSET;
	len = round_page(map->dm_segs[0].ds_len);

	/* Remove double-mapping in DVMA space */
	pmap_remove(pmap_kernel(), addr, addr + len);

	/* Release DVMA space */
	if (extent_free(vme_dvmamap, addr, len, EX_NOWAIT) != 0)
		printf("warning: %ld of DVMA space lost\n", len);

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
885 1.1 pk
886 1.1 pk int
887 1.1 pk sparc_vme4_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
888 1.1 pk bus_dma_tag_t t;
889 1.1 pk bus_size_t size, alignment, boundary;
890 1.1 pk bus_dma_segment_t *segs;
891 1.1 pk int nsegs;
892 1.1 pk int *rsegs;
893 1.1 pk int flags;
894 1.1 pk {
895 1.10 pk bus_addr_t dvmaddr;
896 1.10 pk struct pglist *mlist;
897 1.10 pk vm_page_t m;
898 1.10 pk paddr_t pa;
899 1.1 pk int error;
900 1.1 pk
901 1.10 pk size = round_page(size);
902 1.10 pk error = _bus_dmamem_alloc_common(t, size, alignment, boundary,
903 1.10 pk segs, nsegs, rsegs, flags);
904 1.1 pk if (error != 0)
905 1.1 pk return (error);
906 1.1 pk
907 1.10 pk if (extent_alloc(vme_dvmamap, size, alignment, boundary,
908 1.10 pk (flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT,
909 1.10 pk (u_long *)&dvmaddr) != 0)
910 1.10 pk return (ENOMEM);
911 1.10 pk
912 1.10 pk /*
913 1.10 pk * Compute the location, size, and number of segments actually
914 1.10 pk * returned by the VM code.
915 1.10 pk */
916 1.11 pk segs[0].ds_addr = dvmaddr - VME4_DVMA_BASE;
917 1.10 pk segs[0].ds_len = size;
918 1.10 pk *rsegs = 1;
919 1.10 pk
920 1.10 pk /* Map memory into DVMA space */
921 1.10 pk mlist = segs[0]._ds_mlist;
922 1.10 pk for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
923 1.10 pk pa = VM_PAGE_TO_PHYS(m);
924 1.10 pk
925 1.10 pk #ifdef notyet
926 1.10 pk if (have_iocache)
927 1.10 pk pa |= PG_IOC;
928 1.10 pk #endif
929 1.10 pk pmap_enter(pmap_kernel(), dvmaddr, pa | PMAP_NC,
930 1.10 pk VM_PROT_READ|VM_PROT_WRITE, 1);
931 1.10 pk dvmaddr += PAGE_SIZE;
932 1.10 pk }
933 1.10 pk
934 1.1 pk return (0);
935 1.1 pk }
936 1.1 pk
937 1.1 pk void
938 1.1 pk sparc_vme4_dmamem_free(t, segs, nsegs)
939 1.1 pk bus_dma_tag_t t;
940 1.1 pk bus_dma_segment_t *segs;
941 1.1 pk int nsegs;
942 1.1 pk {
943 1.10 pk bus_addr_t addr;
944 1.10 pk bus_size_t len;
945 1.10 pk
946 1.11 pk addr = segs[0].ds_addr + VME4_DVMA_BASE;
947 1.10 pk len = round_page(segs[0].ds_len);
948 1.10 pk
949 1.10 pk /* Remove DVMA kernel map */
950 1.10 pk pmap_remove(pmap_kernel(), addr, addr + len);
951 1.10 pk
952 1.10 pk /* Release DVMA address range */
953 1.10 pk if (extent_free(vme_dvmamap, addr, len, EX_NOWAIT) != 0)
954 1.10 pk printf("warning: %ld of DVMA space lost\n", len);
955 1.8 pk
956 1.10 pk /*
957 1.10 pk * Return the list of pages back to the VM system.
958 1.10 pk */
959 1.10 pk _bus_dmamem_free_common(t, segs, nsegs);
960 1.1 pk }
961 1.1 pk
/*
 * DMA map synchronization for the sun4 VME bus.  Deliberately a no-op
 * at present; see the XXX note below.  The `offset', `len' and `ops'
 * arguments are accepted (per the bus_dmamap_sync contract) but unused.
 */
void
sparc_vme4_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
	 * Currently the cache is flushed in bus_dma_load()...
	 */
}
976 1.1 pk #endif /* SUN4 */
977 1.1 pk
978 1.1 pk #if defined(SUN4M)
979 1.1 pk static int
980 1.1 pk sparc_vme4m_dmamap_create (t, size, nsegments, maxsegsz, boundary, flags, dmamp)
981 1.1 pk bus_dma_tag_t t;
982 1.1 pk bus_size_t size;
983 1.1 pk int nsegments;
984 1.1 pk bus_size_t maxsegsz;
985 1.1 pk bus_size_t boundary;
986 1.1 pk int flags;
987 1.1 pk bus_dmamap_t *dmamp;
988 1.1 pk {
989 1.8 pk struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
990 1.10 pk int error;
991 1.1 pk
992 1.10 pk /* XXX - todo: allocate DVMA addresses from assigned ranges:
993 1.10 pk upper 8MB for A32 space; upper 1MB for A24 space */
994 1.10 pk error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
995 1.10 pk boundary, flags, dmamp);
996 1.10 pk if (error != 0)
997 1.10 pk return (error);
998 1.10 pk
999 1.10 pk #if 0
1000 1.1 pk /* VME DVMA addresses must always be 8K aligned */
1001 1.10 pk (*dmamp)->_dm_align = 8192;
1002 1.10 pk #endif
1003 1.1 pk
1004 1.10 pk return (0);
1005 1.1 pk }
1006 1.1 pk
/*
 * Load a DMA map on the sun4m VME bus: delegate the actual mapping to
 * the parent bus DMA tag, then program the VME IO-cache tags covering
 * the mapped range.  Returns 0 on success or the parent tag's errno.
 */
int
sparc_vme4m_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t *ioctags;
	int error;

	/* Round the length up to a multiple of the IO-cache page size */
	buflen = (buflen + VME_IOC_PAGESZ - 1) & ~(VME_IOC_PAGESZ - 1);
	error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
	if (error != 0)
		return (error);

	/* allocate IO cache entries for this range */
	ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (;buflen > 0;) {
		/*
		 * Set one tag per IOC page; VME_IOC_IC | VME_IOC_W
		 * presumably enables caching with write access for the
		 * line — NOTE(review): confirm against the IOC hardware
		 * documentation.
		 */
		*ioctags = VME_IOC_IC | VME_IOC_W;
		ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
		buflen -= VME_IOC_PAGESZ;
	}
	return (0);
}
1034 1.1 pk
1035 1.1 pk
/*
 * Unload a DMA map on the sun4m VME bus: flush the VME IO-cache lines
 * covering the mapped range, then delegate the unload to the parent
 * bus DMA tag.
 */
void
sparc_vme4m_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{
	struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
	volatile u_int32_t *flushregs;
	int len;

	/* Flush VME IO cache */
	len = map->dm_segs[0].ds_len;
	flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
	for (;len > 0;) {
		/* Writing 0 to a flush register flushes that IOC line */
		*flushregs = 0;
		flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
		len -= VME_IOC_PAGESZ;
	}
	/* Read a tag to synchronize the IOC flushes */
	(*sc->sc_ioctags);

	bus_dmamap_unload(sc->sc_dmatag, map);
}
1058 1.1 pk
1059 1.1 pk int
1060 1.1 pk sparc_vme4m_dmamem_alloc(t, size, alignmnt, boundary, segs, nsegs, rsegs, flags)
1061 1.1 pk bus_dma_tag_t t;
1062 1.1 pk bus_size_t size, alignmnt, boundary;
1063 1.1 pk bus_dma_segment_t *segs;
1064 1.1 pk int nsegs;
1065 1.1 pk int *rsegs;
1066 1.1 pk int flags;
1067 1.1 pk {
1068 1.8 pk struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
1069 1.1 pk int error;
1070 1.1 pk
1071 1.8 pk error = bus_dmamem_alloc(sc->sc_dmatag, size, alignmnt, boundary,
1072 1.1 pk segs, nsegs, rsegs, flags);
1073 1.1 pk if (error != 0)
1074 1.1 pk return (error);
1075 1.1 pk
1076 1.1 pk return (0);
1077 1.1 pk }
1078 1.1 pk
1079 1.1 pk void
1080 1.1 pk sparc_vme4m_dmamem_free(t, segs, nsegs)
1081 1.1 pk bus_dma_tag_t t;
1082 1.1 pk bus_dma_segment_t *segs;
1083 1.1 pk int nsegs;
1084 1.1 pk {
1085 1.8 pk struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
1086 1.8 pk
1087 1.8 pk bus_dmamem_free(sc->sc_dmatag, segs, nsegs);
1088 1.9 pk }
1089 1.9 pk
/*
 * DMA map synchronization for the sun4m VME bus.  Deliberately a no-op
 * at present; see the XXX note below.  The `offset', `len' and `ops'
 * arguments are accepted (per the bus_dmamap_sync contract) but unused.
 */
void
sparc_vme4m_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{

	/*
	 * XXX Should perform cache flushes as necessary.
	 */
}
1103 1.1 pk #endif /* SUN4M */
1104 1.12 pk
1105 1.12 pk int
1106 1.12 pk sparc_vme_dmamem_map(t, segs, nsegs, size, kvap, flags)
1107 1.12 pk bus_dma_tag_t t;
1108 1.12 pk bus_dma_segment_t *segs;
1109 1.12 pk int nsegs;
1110 1.12 pk size_t size;
1111 1.12 pk caddr_t *kvap;
1112 1.12 pk int flags;
1113 1.12 pk {
1114 1.12 pk struct vmebus_softc *sc = (struct vmebus_softc *)t->_cookie;
1115 1.12 pk
1116 1.12 pk return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
1117 1.12 pk }
1118