/* $NetBSD: vme_machdep.c,v 1.52.8.1 2005/11/22 16:08:02 yamt Exp $ */

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.52.8.1 2005/11/22 16:08:02 yamt Exp $");

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/errno.h>

#include <sys/proc.h>
#include <sys/user.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#define _SPARC_BUS_DMA_PRIVATE
#include <machine/bus.h>
#include <sparc/sparc/iommuvar.h>
#include <machine/autoconf.h>
#include <machine/oldmon.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <sparc/sparc/asm.h>
#include <sparc/sparc/vaddrs.h>
#include <sparc/sparc/cpuvar.h>
#include <sparc/dev/vmereg.h>

struct sparcvme_softc {
        struct device      sc_dev;      /* base device */
        bus_space_tag_t    sc_bustag;
        bus_dma_tag_t      sc_dmatag;
        struct vmebusreg   *sc_reg;     /* VME control registers */
        struct vmebusvec   *sc_vec;     /* VME interrupt vector */
        struct rom_range   *sc_range;   /* ROM range property */
        int                sc_nrange;
        volatile uint32_t  *sc_ioctags; /* VME IO-cache tag registers */
        volatile uint32_t  *sc_iocflush;/* VME IO-cache flush registers */
        int                (*sc_vmeintr)(void *);
};
struct sparcvme_softc *sparcvme_sc;/*XXX*/

/* autoconfiguration driver */
static int  vmematch_iommu(struct device *, struct cfdata *, void *);
static void vmeattach_iommu(struct device *, struct device *, void *);
static int  vmematch_mainbus(struct device *, struct cfdata *, void *);
static void vmeattach_mainbus(struct device *, struct device *, void *);
#if defined(SUN4)
int vmeintr4(void *);
#endif
#if defined(SUN4M)
int vmeintr4m(void *);
static int sparc_vme_error(void);
#endif


static int  sparc_vme_probe(void *, vme_addr_t, vme_size_t,
                            vme_am_t, vme_datasize_t,
                            int (*)(void *,
                                    bus_space_tag_t, bus_space_handle_t),
                            void *);
static int  sparc_vme_map(void *, vme_addr_t, vme_size_t, vme_am_t,
                          vme_datasize_t, vme_swap_t,
                          bus_space_tag_t *, bus_space_handle_t *,
                          vme_mapresc_t *);
static void sparc_vme_unmap(void *, vme_mapresc_t);
static int  sparc_vme_intr_map(void *, int, int, vme_intr_handle_t *);
static const struct evcnt *sparc_vme_intr_evcnt(void *, vme_intr_handle_t);
static void *sparc_vme_intr_establish(void *, vme_intr_handle_t, int,
                                      int (*)(void *), void *);
static void sparc_vme_intr_disestablish(void *, void *);

static int  vmebus_translate(struct sparcvme_softc *, vme_am_t,
                             vme_addr_t, bus_addr_t *);
#ifdef notyet
#if defined(SUN4M)
static void sparc_vme_iommu_barrier(bus_space_tag_t, bus_space_handle_t,
                                    bus_size_t, bus_size_t, int);

#endif /* SUN4M */
#endif

/*
 * DMA functions.
 */
#if defined(SUN4) || defined(SUN4M)
static void sparc_vct_dmamap_destroy(void *, bus_dmamap_t);
#endif

#if defined(SUN4)
static int  sparc_vct4_dmamap_create(void *, vme_size_t, vme_am_t,
                vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
                int, bus_dmamap_t *);
static int  sparc_vme4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
                bus_size_t, struct proc *, int);
static void sparc_vme4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void sparc_vme4_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
                bus_addr_t, bus_size_t, int);
#endif /* SUN4 */

#if defined(SUN4M)
static int  sparc_vct_iommu_dmamap_create(void *, vme_size_t, vme_am_t,
                vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
                int, bus_dmamap_t *);
static int  sparc_vme_iommu_dmamap_create(bus_dma_tag_t, bus_size_t,
                int, bus_size_t, bus_size_t, int, bus_dmamap_t *);

static int  sparc_vme_iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t,
                void *, bus_size_t, struct proc *, int);
static void sparc_vme_iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
static void sparc_vme_iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
                bus_addr_t, bus_size_t, int);
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int  sparc_vme_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
                int, size_t, caddr_t *, int);
#endif

#if 0
static void   sparc_vme_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
static void   sparc_vme_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
static paddr_t sparc_vme_dmamem_mmap(bus_dma_tag_t,
                bus_dma_segment_t *, int, off_t, int, int);
#endif

int sparc_vme_mmap_cookie(vme_addr_t, vme_am_t, bus_space_handle_t *);

CFATTACH_DECL(vme_mainbus, sizeof(struct sparcvme_softc),
    vmematch_mainbus, vmeattach_mainbus, NULL, NULL);

CFATTACH_DECL(vme_iommu, sizeof(struct sparcvme_softc),
    vmematch_iommu, vmeattach_iommu, NULL, NULL);

static int vme_attached;

int (*vmeerr_handler)(void);

#define VMEMOD_D32      0x40    /* ??? */

/* If the PROM does not provide the `ranges' property, we make up our own */
struct rom_range vmebus_translations[] = {
#define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
        { VME_AM_A16|_DS,            0, PMAP_VME16, 0xffff0000, 0 },
        { VME_AM_A24|_DS,            0, PMAP_VME16, 0xff000000, 0 },
        { VME_AM_A32|_DS,            0, PMAP_VME16, 0x00000000, 0 },
        { VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
        { VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
        { VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
#undef _DS
};

/*
 * The VME bus logic on sun4 machines maps DMA requests in the first MB
 * of VME space to the last MB of DVMA space. `vme_dvmamap' is used
 * for DVMA space allocations. The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
 */
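/*
 * For example, a buffer double-mapped at DVMA address
 * VME4_DVMA_BASE + 0x2000 is presented to a VME master as address
 * 0x2000 in the first MB of VME space.
 */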
struct extent *vme_dvmamap;

/*
 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
 * VME space to the last 8MB of DVMA space and the first 1MB of
 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
 * The following constants define subregions in the IOMMU DVMA map
 * for VME DVMA allocations. The DMA addresses returned by
 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
 */
#define VME_IOMMU_DVMA_BASE             0xff800000
#define VME_IOMMU_DVMA_AM24_BASE        VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM24_END         0xff900000
#define VME_IOMMU_DVMA_AM32_BASE        VME_IOMMU_DVMA_BASE
#define VME_IOMMU_DVMA_AM32_END         IOMMU_DVMA_END
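
/*
 * For example, an A32 mapping placed by the IOMMU at DVMA address
 * 0xff802000 is presented to a VME master as
 * 0xff802000 - VME_IOMMU_DVMA_BASE = 0x00002000.
 */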

struct vme_chipset_tag sparc_vme_chipset_tag = {
        NULL,
        sparc_vme_map,
        sparc_vme_unmap,
        sparc_vme_probe,
        sparc_vme_intr_map,
        sparc_vme_intr_evcnt,
        sparc_vme_intr_establish,
        sparc_vme_intr_disestablish,
        0, 0, 0                 /* bus specific DMA stuff */
};


#if defined(SUN4)
struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
        NULL,   /* cookie */
        _bus_dmamap_create,
        _bus_dmamap_destroy,
        sparc_vme4_dmamap_load,
        _bus_dmamap_load_mbuf,
        _bus_dmamap_load_uio,
        _bus_dmamap_load_raw,
        sparc_vme4_dmamap_unload,
        sparc_vme4_dmamap_sync,

        _bus_dmamem_alloc,
        _bus_dmamem_free,
        sparc_vme_dmamem_map,
        _bus_dmamem_unmap,
        _bus_dmamem_mmap
};
#endif

#if defined(SUN4M)
struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
        NULL,   /* cookie */
        sparc_vme_iommu_dmamap_create,
        _bus_dmamap_destroy,
        sparc_vme_iommu_dmamap_load,
        _bus_dmamap_load_mbuf,
        _bus_dmamap_load_uio,
        _bus_dmamap_load_raw,
        sparc_vme_iommu_dmamap_unload,
        sparc_vme_iommu_dmamap_sync,

        _bus_dmamem_alloc,
        _bus_dmamem_free,
        sparc_vme_dmamem_map,
        _bus_dmamem_unmap,
        _bus_dmamem_mmap
};
#endif


static int
vmematch_mainbus(struct device *parent, struct cfdata *cf, void *aux)
{
        struct mainbus_attach_args *ma = aux;

        if (!CPU_ISSUN4 || vme_attached)
                return (0);

        return (strcmp("vme", ma->ma_name) == 0);
}

static int
vmematch_iommu(struct device *parent, struct cfdata *cf, void *aux)
{
        struct iommu_attach_args *ia = aux;

        if (vme_attached)
                return 0;

        return (strcmp("vme", ia->iom_name) == 0);
}


static void
vmeattach_mainbus(struct device *parent, struct device *self, void *aux)
{
#if defined(SUN4)
        struct mainbus_attach_args *ma = aux;
        struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
        struct vmebus_attach_args vba;

        vme_attached = 1;

        sc->sc_bustag = ma->ma_bustag;
        sc->sc_dmatag = ma->ma_dmatag;

        /* VME interrupt entry point */
        sc->sc_vmeintr = vmeintr4;

/*XXX*/ sparc_vme_chipset_tag.cookie = self;
/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/ sparc_vme4_dma_tag._cookie = self;

        vba.va_vct = &sparc_vme_chipset_tag;
        vba.va_bdt = &sparc_vme4_dma_tag;
        vba.va_slaveconfig = 0;

        /* Fall back to our own `range' construction */
        sc->sc_range = vmebus_translations;
        sc->sc_nrange =
                sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);

        vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
                                    M_DEVBUF, 0, 0, EX_NOWAIT);
        if (vme_dvmamap == NULL)
                panic("vme: unable to allocate DVMA map");

        printf("\n");
        (void)config_found(self, &vba, 0);

#endif /* SUN4 */
        return;
}

/* sun4m vmebus */
static void
vmeattach_iommu(struct device *parent, struct device *self, void *aux)
{
#if defined(SUN4M)
        struct sparcvme_softc *sc = (struct sparcvme_softc *)self;
        struct iommu_attach_args *ia = aux;
        struct vmebus_attach_args vba;
        bus_space_handle_t bh;
        int node;
        int cline;

        sc->sc_bustag = ia->iom_bustag;
        sc->sc_dmatag = ia->iom_dmatag;

        /* VME interrupt entry point */
        sc->sc_vmeintr = vmeintr4m;

/*XXX*/ sparc_vme_chipset_tag.cookie = self;
/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/ sparc_vme_iommu_dma_tag._cookie = self;

        vba.va_vct = &sparc_vme_chipset_tag;
        vba.va_bdt = &sparc_vme_iommu_dma_tag;
        vba.va_slaveconfig = 0;

        node = ia->iom_node;

        /*
         * Map VME control space
         */
        if (ia->iom_nreg < 2) {
                printf("%s: only %d register sets\n", self->dv_xname,
                       ia->iom_nreg);
                return;
        }

        if (bus_space_map(ia->iom_bustag,
                          (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
                                                ia->iom_reg[0].oa_base),
                          (bus_size_t)ia->iom_reg[0].oa_size,
                          BUS_SPACE_MAP_LINEAR,
                          &bh) != 0) {
                panic("%s: can't map vmebusreg", self->dv_xname);
        }
        sc->sc_reg = (struct vmebusreg *)bh;

        if (bus_space_map(ia->iom_bustag,
                          (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
                                                ia->iom_reg[1].oa_base),
                          (bus_size_t)ia->iom_reg[1].oa_size,
                          BUS_SPACE_MAP_LINEAR,
                          &bh) != 0) {
                panic("%s: can't map vmebusvec", self->dv_xname);
        }
        sc->sc_vec = (struct vmebusvec *)bh;

        /*
         * Map VME IO cache tags and flush control.
         */
        if (bus_space_map(ia->iom_bustag,
                          (bus_addr_t) BUS_ADDR(
                                ia->iom_reg[1].oa_space,
                                ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
                          VME_IOC_SIZE,
                          BUS_SPACE_MAP_LINEAR,
                          &bh) != 0) {
                panic("%s: can't map IOC tags", self->dv_xname);
        }
        sc->sc_ioctags = (uint32_t *)bh;

        if (bus_space_map(ia->iom_bustag,
                          (bus_addr_t) BUS_ADDR(
                                ia->iom_reg[1].oa_space,
                                ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
                          VME_IOC_SIZE,
                          BUS_SPACE_MAP_LINEAR,
                          &bh) != 0) {
                panic("%s: can't map IOC flush registers", self->dv_xname);
        }
        sc->sc_iocflush = (uint32_t *)bh;

        /*
         * Get "range" property.
         */
        if (prom_getprop(node, "ranges", sizeof(struct rom_range),
                         &sc->sc_nrange, &sc->sc_range) != 0) {
                panic("%s: can't get ranges property", self->dv_xname);
        }

        sparcvme_sc = sc;
        vmeerr_handler = sparc_vme_error;

        /*
         * Invalidate all IO-cache entries.
         */
        for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
                sc->sc_ioctags[--cline] = 0;
        }

        /* Enable IO-cache */
        sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

        printf(": version 0x%x\n",
               sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

        (void)config_found(self, &vba, 0);
#endif /* SUN4M */
}

#if defined(SUN4M)
static int
sparc_vme_error(void)
{
        struct sparcvme_softc *sc = sparcvme_sc;
        uint32_t afsr, afpa;
        char bits[64];

        afsr = sc->sc_reg->vmebus_afsr;
        afpa = sc->sc_reg->vmebus_afar;
        printf("VME error:\n\tAFSR %s\n",
               bitmask_snprintf(afsr, VMEBUS_AFSR_BITS, bits, sizeof(bits)));
        printf("\taddress: 0x%x%x\n", afsr, afpa);
        return (0);
}
#endif

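/*
 * Translate a VME address and address modifier to an address on the
 * parent bus, using the `ranges' table.
 */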
static int
vmebus_translate(struct sparcvme_softc *sc, vme_am_t mod, vme_addr_t addr,
                 bus_addr_t *bap)
{
        int i;

        for (i = 0; i < sc->sc_nrange; i++) {
                struct rom_range *rp = &sc->sc_range[i];

                if (rp->cspace != mod)
                        continue;

                /* We've found the connection to the parent bus */
                *bap = BUS_ADDR(rp->pspace, rp->poffset + addr);
                return (0);
        }
        return (ENOENT);
}

struct vmeprobe_myarg {
        int (*cb)(void *, bus_space_tag_t, bus_space_handle_t);
        void *cbarg;
        bus_space_tag_t tag;
        int res; /* backwards */
};

static int vmeprobe_mycb(void *, void *);

static int
vmeprobe_mycb(void *bh, void *arg)
{
        struct vmeprobe_myarg *a = arg;

        a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
        return (!a->res);
}

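/*
 * Probe a VME location: translate the VME address and use
 * bus_space_probe() on the parent bus, either handing the mapped
 * location to the caller's callback or simply peeking each cell in
 * the requested range.
 */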
static int
sparc_vme_probe(void *cookie, vme_addr_t addr, vme_size_t len, vme_am_t mod,
                vme_datasize_t datasize,
                int (*callback)(void *, bus_space_tag_t, bus_space_handle_t),
                void *arg)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
        bus_addr_t paddr;
        bus_size_t size;
        struct vmeprobe_myarg myarg;
        int res, i;

        if (vmebus_translate(sc, mod, addr, &paddr) != 0)
                return (EINVAL);

        size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));

        if (callback) {
                myarg.cb = callback;
                myarg.cbarg = arg;
                myarg.tag = sc->sc_bustag;
                myarg.res = 0;
                res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
                                      0, vmeprobe_mycb, &myarg);
                return (res ? 0 : (myarg.res ? myarg.res : EIO));
        }

        for (i = 0; i < len / size; i++) {
                myarg.res = 0;
                res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
                                      0, 0, 0);
                if (res == 0)
                        return (EIO);
                paddr += size;
        }
        return (0);
}

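/*
 * Map a range of VME space: translate the VME address to a parent
 * bus address and map it through the parent bus space tag.
 */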
static int
sparc_vme_map(void *cookie, vme_addr_t addr, vme_size_t size, vme_am_t mod,
              vme_datasize_t datasize, vme_swap_t swap,
              bus_space_tag_t *tp, bus_space_handle_t *hp, vme_mapresc_t *rp)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
        bus_addr_t paddr;
        int error;

        error = vmebus_translate(sc, mod, addr, &paddr);
        if (error != 0)
                return (error);

        *tp = sc->sc_bustag;
        return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp));
}

int
sparc_vme_mmap_cookie(vme_addr_t addr, vme_am_t mod, bus_space_handle_t *hp)
{
        struct sparcvme_softc *sc = sparcvme_sc;
        bus_addr_t paddr;
        int error;

        error = vmebus_translate(sc, mod, addr, &paddr);
        if (error != 0)
                return (error);

        return (bus_space_mmap(sc->sc_bustag, paddr, 0,
                               0/*prot is ignored*/, 0));
}

#ifdef notyet
#if defined(SUN4M)
static void
sparc_vme_iommu_barrier(bus_space_tag_t t, bus_space_handle_t h,
                        bus_size_t offset, bus_size_t size,
                        int flags)
{
        struct vmebusreg *vbp = (struct vmebusreg *)t->cookie;

        /* Read async fault status to flush write-buffers */
        (*(volatile int *)&vbp->vmebus_afsr);
}
#endif /* SUN4M */
#endif



/*
 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
 */
static int vme_ipl_to_pil[] = {
        0,
        2,
        3,
        5,
        7,
        9,
        11,
        13
};


/*
 * All VME device interrupts go through vmeintr(). This function reads
 * the VME vector from the bus, then dispatches the device interrupt
 * handler. All handlers for devices that map to the same Processor
 * Interrupt Level (according to the table above) are on a linked list
 * of `sparc_vme_intr_handle' structures, the head of which is passed
 * down as the argument to `vmeintr(void *arg)'.
 */
struct sparc_vme_intr_handle {
        struct intrhand ih;
        struct sparc_vme_intr_handle *next;
        int     vec;            /* VME interrupt vector */
        int     pri;            /* VME interrupt priority */
        struct sparcvme_softc *sc;/*XXX*/
};

#if defined(SUN4)
int
vmeintr4(void *arg)
{
        struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
        int level, vec;
        int rv = 0;

        level = (ihp->pri << 1) | 1;

        vec = ldcontrolb((caddr_t)(AC_VMEINTVEC | level));

        if (vec == -1) {
#ifdef DEBUG
                /*
                 * This seems to happen only with the i82586 based
                 * `ie1' boards.
                 */
                printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
#endif
                return (1); /* XXX - pretend we handled it, for now */
        }

        for (; ihp; ihp = ihp->next)
                if (ihp->vec == vec && ihp->ih.ih_fun) {
                        splx(ihp->ih.ih_classipl);
                        rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
                }

        return (rv);
}
#endif

#if defined(SUN4M)
int
vmeintr4m(void *arg)
{
        struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
        int level, vec;
        int rv = 0;

        level = (ihp->pri << 1) | 1;

#if 0
        int pending;

        /* Flush VME <=> Sbus write buffers */
        (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);

        pending = *((int*)ICR_SI_PEND);
        if ((pending & SINTR_VME(ihp->pri)) == 0) {
                printf("vmeintr: non pending at pri %x(p 0x%x)\n",
                        ihp->pri, pending);
                return (0);
        }
#endif
#if 0
        /* Why does this sometimes give a bus timeout? */
        vec = ihp->sc->sc_vec->vmebusvec[level];
#else
        /* so, arrange to catch the fault... */
        {
        extern struct user *proc0paddr;
        extern int fkbyte(volatile char *, struct pcb *);
        volatile char *addr = &ihp->sc->sc_vec->vmebusvec[level];
        struct pcb *xpcb;
        u_long saveonfault;
        int s;

        s = splhigh();
        if (curlwp == NULL)
                xpcb = (struct pcb *)proc0paddr;
        else
                xpcb = &curlwp->l_addr->u_pcb;

        saveonfault = (u_long)xpcb->pcb_onfault;
        vec = fkbyte(addr, xpcb);
        xpcb->pcb_onfault = (caddr_t)saveonfault;

        splx(s);
        }
#endif

        if (vec == -1) {
#ifdef DEBUG
                /*
                 * This seems to happen only with the i82586 based
                 * `ie1' boards.
                 */
                printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
                printf("    ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
                        *((int*)ICR_SI_PEND),
                        ihp->sc->sc_reg->vmebus_afsr,
                        ihp->sc->sc_reg->vmebus_afar);
#endif
                return (1); /* XXX - pretend we handled it, for now */
        }

        for (; ihp; ihp = ihp->next)
                if (ihp->vec == vec && ihp->ih.ih_fun) {
                        splx(ihp->ih.ih_classipl);
                        rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
                }

        return (rv);
}
#endif /* SUN4M */

static int
sparc_vme_intr_map(void *cookie, int level, int vec,
                   vme_intr_handle_t *ihp)
{
        struct sparc_vme_intr_handle *ih;

        ih = (vme_intr_handle_t)
            malloc(sizeof(struct sparc_vme_intr_handle), M_DEVBUF, M_NOWAIT);
        ih->pri = level;
        ih->vec = vec;
        ih->sc = cookie;/*XXX*/
        *ihp = ih;
        return (0);
}

static const struct evcnt *
sparc_vme_intr_evcnt(void *cookie, vme_intr_handle_t vih)
{

        /* XXX for now, no evcnt parent reported */
        return NULL;
}

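/*
 * Establish a VME interrupt.  All handlers that map to the same
 * processor interrupt level share one intrhand whose handler is the
 * VME dispatch routine (vmeintr4/vmeintr4m); see the comment above
 * `struct sparc_vme_intr_handle'.
 */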
static void *
sparc_vme_intr_establish(void *cookie, vme_intr_handle_t vih, int level,
                         int (*func)(void *), void *arg)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
        struct sparc_vme_intr_handle *svih =
                        (struct sparc_vme_intr_handle *)vih;
        struct intrhand *ih;
        int pil;

        /* Translate VME priority to processor IPL */
        pil = vme_ipl_to_pil[svih->pri];

        if (level < pil)
                panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
                        level, pil);

        svih->ih.ih_fun = func;
        svih->ih.ih_arg = arg;
        svih->ih.ih_classipl = level;   /* note: used slightly differently
                                           than in intr.c (no shift) */
        svih->next = NULL;

        /* ensure the interrupt subsystem will call us at this level */
        for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next)
                if (ih->ih_fun == sc->sc_vmeintr)
                        break;

        if (ih == NULL) {
                ih = (struct intrhand *)
                        malloc(sizeof(struct intrhand), M_DEVBUF, M_NOWAIT);
                if (ih == NULL)
                        panic("vme_addirq");
                bzero(ih, sizeof *ih);
                ih->ih_fun = sc->sc_vmeintr;
                ih->ih_arg = vih;
                intr_establish(pil, 0, ih, NULL);
        } else {
                svih->next = (vme_intr_handle_t)ih->ih_arg;
                ih->ih_arg = vih;
        }
        return (NULL);
}

static void
sparc_vme_unmap(void *cookie, vme_mapresc_t resc)
{

        /* Not implemented */
        panic("sparc_vme_unmap");
}

static void
sparc_vme_intr_disestablish(void *cookie, void *a)
{

        /* Not implemented */
        panic("sparc_vme_intr_disestablish");
}



/*
 * VME DMA functions.
 */
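
/*
 * On sun4, DVMA space is managed with the `vme_dvmamap' extent map and
 * buffers are double-mapped (uncached) into that space by
 * sparc_vme4_dmamap_load().  On sun4m, the parent IOMMU bus_dma tag
 * does the real mapping work; the functions below only reserve and
 * flush VME I/O-cache lines and adjust addresses to the VME master's
 * view of DVMA space.
 */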

#if defined(SUN4) || defined(SUN4M)
static void
sparc_vct_dmamap_destroy(void *cookie, bus_dmamap_t map)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

        bus_dmamap_destroy(sc->sc_dmatag, map);
}
#endif

#if defined(SUN4)
static int
sparc_vct4_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
                         vme_datasize_t datasize, vme_swap_t swap,
                         int nsegments, vme_size_t maxsegsz,
                         vme_addr_t boundary, int flags,
                         bus_dmamap_t *dmamp)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;

        /* Allocate a base map through parent bus ops */
        return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
                                  boundary, flags, dmamp));
}

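/*
 * sun4: load a DMA map.  Allocate DVMA space covering the buffer,
 * double-map the underlying pages (uncached) at the allocated DVMA
 * addresses, and return an address adjusted to the VME master's view,
 * i.e. relocated by -VME4_DVMA_BASE.
 */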
static int
sparc_vme4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
                       void *buf, bus_size_t buflen,
                       struct proc *p, int flags)
{
        bus_addr_t dva;
        bus_size_t sgsize;
        u_long ldva;
        vaddr_t va, voff;
        pmap_t pmap;
        int pagesz = PAGE_SIZE;
        int error;

        cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */

        va = (vaddr_t)buf;
        voff = va & (pagesz - 1);
        va &= -pagesz;

        /*
         * Allocate an integral number of pages from DVMA space
         * covering the passed buffer.
         */
        sgsize = (buflen + voff + pagesz - 1) & -pagesz;
        error = extent_alloc(vme_dvmamap, sgsize, pagesz,
                             map->_dm_boundary,
                             (flags & BUS_DMA_NOWAIT) == 0
                                        ? EX_WAITOK
                                        : EX_NOWAIT,
                             &ldva);
        if (error != 0)
                return (error);
        dva = (bus_addr_t)ldva;

        map->dm_mapsize = buflen;
        map->dm_nsegs = 1;
        /* Adjust DVMA address to VME view */
        map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
        map->dm_segs[0].ds_len = buflen;
        map->dm_segs[0]._ds_sgsize = sgsize;

        pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;

        for (; sgsize != 0; ) {
                paddr_t pa;
                /*
                 * Get the physical address for this page.
                 */
                (void) pmap_extract(pmap, va, &pa);

#ifdef notyet
                if (have_iocache)
                        pa |= PG_IOC;
#endif
                pmap_enter(pmap_kernel(), dva,
                           pa | PMAP_NC,
                           VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);

                dva += pagesz;
                va += pagesz;
                sgsize -= pagesz;
        }
        pmap_update(pmap_kernel());

        return (0);
}

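/*
 * sun4: unload a DMA map.  Remove the DVMA double-mappings and return
 * the DVMA range to the extent map.
 */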
static void
sparc_vme4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        bus_dma_segment_t *segs = map->dm_segs;
        int nsegs = map->dm_nsegs;
        bus_addr_t dva;
        bus_size_t len;
        int i, s, error;

        for (i = 0; i < nsegs; i++) {
                /* Go from VME to CPU view */
                dva = segs[i].ds_addr + VME4_DVMA_BASE;
                dva &= -PAGE_SIZE;
                len = segs[i]._ds_sgsize;

                /* Remove double-mapping in DVMA space */
                pmap_remove(pmap_kernel(), dva, dva + len);

                /* Release DVMA space */
                s = splhigh();
                error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
                splx(s);
                if (error != 0)
                        printf("warning: %ld of DVMA space lost\n", len);
        }
        pmap_update(pmap_kernel());

        /* Mark the mappings as invalid. */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
}

static void
sparc_vme4_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
                       bus_addr_t offset, bus_size_t len, int ops)
{

        /*
         * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
         *     Currently the cache is flushed in bus_dmamap_load()...
         */
}
#endif /* SUN4 */

#if defined(SUN4M)
static int
sparc_vme_iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size,
                              int nsegments, bus_size_t maxsegsz,
                              bus_size_t boundary, int flags,
                              bus_dmamap_t *dmamp)
{

        printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
        return (EINVAL);
}

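/*
 * sun4m: create a DMA map.  The base map comes from the parent bus;
 * it is constrained to 8K alignment (one I/O-cache line) and to the
 * DVMA subregion that corresponds to the requested address modifier.
 */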
static int
sparc_vct_iommu_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
                              vme_datasize_t datasize, vme_swap_t swap,
                              int nsegments, vme_size_t maxsegsz,
                              vme_addr_t boundary, int flags,
                              bus_dmamap_t *dmamp)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)cookie;
        bus_dmamap_t map;
        int error;

        /* Allocate a base map through parent bus ops */
        error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
                                  boundary, flags, &map);
        if (error != 0)
                return (error);

        /*
         * Each I/O cache line maps to an 8K section of VME DVMA space, so
         * we must ensure that DVMA allocations are always 8K aligned.
         */
        map->_dm_align = VME_IOC_PAGESZ;

        /* Set map region based on Address Modifier */
        switch ((am & VME_AM_ADRSIZEMASK)) {
        case VME_AM_A16:
        case VME_AM_A24:
                /* 1 MB of DVMA space */
                map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
                map->_dm_ex_end   = VME_IOMMU_DVMA_AM24_END;
                break;
        case VME_AM_A32:
                /* 8 MB of DVMA space */
                map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
                map->_dm_ex_end   = VME_IOMMU_DVMA_AM32_END;
                break;
        }

        *dmamp = map;
        return (0);
}

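/*
 * sun4m: load a DMA map through the parent IOMMU dma tag, then mark
 * the VME I/O-cache lines covering the mapping and convert the DVMA
 * address to the VME master's view.
 */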
static int
sparc_vme_iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
                            void *buf, bus_size_t buflen,
                            struct proc *p, int flags)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
        volatile uint32_t *ioctags;
        int error;

        /* Round request to a multiple of the I/O cache size */
        buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
        error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
        if (error != 0)
                return (error);

        /* Allocate I/O cache entries for this range */
        ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
        while (buflen > 0) {
                *ioctags = VME_IOC_IC | VME_IOC_W;
                ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
                buflen -= VME_IOC_PAGESZ;
        }

        /*
         * Adjust DVMA address to VME view.
         * Note: the DVMA base address is the same for all
         * VME address spaces.
         */
        map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
        return (0);
}


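/*
 * sun4m: unload a DMA map.  Convert the address back to the CPU view,
 * flush the VME I/O-cache lines that covered the mapping, and hand
 * the map back to the parent IOMMU dma tag.
 */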
static void
sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;
        volatile uint32_t *flushregs;
        int len;

        /* Go from VME to CPU view */
        map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;

        /* Flush VME I/O cache */
        len = map->dm_segs[0]._ds_sgsize;
        flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
        while (len > 0) {
                *flushregs = 0;
                flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
                len -= VME_IOC_PAGESZ;
        }

        /*
         * Start a read from `tag space' which will not complete until
         * all cache flushes have finished
         */
        (*sc->sc_ioctags);

        bus_dmamap_unload(sc->sc_dmatag, map);
}

static void
sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
                            bus_addr_t offset, bus_size_t len, int ops)
{

        /*
         * XXX Should perform cache flushes as necessary.
         */
}
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4M)
static int
sparc_vme_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
                     size_t size, caddr_t *kvap, int flags)
{
        struct sparcvme_softc *sc = (struct sparcvme_softc *)t->_cookie;

        return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
}
#endif /* SUN4 || SUN4M */