/*	$NetBSD: vme_machdep.c,v 1.78 2024/05/13 00:01:53 msaitoh Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
30 1.1 pk */ 31 1.46 lukem 32 1.46 lukem #include <sys/cdefs.h> 33 1.78 msaitoh __KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.78 2024/05/13 00:01:53 msaitoh Exp $"); 34 1.1 pk 35 1.1 pk #include <sys/param.h> 36 1.1 pk #include <sys/systm.h> 37 1.1 pk #include <sys/device.h> 38 1.71 thorpej #include <sys/kmem.h> 39 1.19 drochner #include <sys/errno.h> 40 1.77 thorpej #include <sys/vmem.h> 41 1.1 pk 42 1.1 pk #include <sys/proc.h> 43 1.1 pk #include <sys/syslog.h> 44 1.1 pk 45 1.29 mrg #include <uvm/uvm_extern.h> 46 1.1 pk 47 1.1 pk #define _SPARC_BUS_DMA_PRIVATE 48 1.64 dyoung #include <sys/bus.h> 49 1.6 pk #include <sparc/sparc/iommuvar.h> 50 1.1 pk #include <machine/autoconf.h> 51 1.1 pk #include <machine/oldmon.h> 52 1.1 pk #include <machine/cpu.h> 53 1.1 pk #include <machine/ctlreg.h> 54 1.63 rmind #include <machine/pcb.h> 55 1.1 pk 56 1.19 drochner #include <dev/vme/vmereg.h> 57 1.1 pk #include <dev/vme/vmevar.h> 58 1.1 pk 59 1.1 pk #include <sparc/sparc/asm.h> 60 1.1 pk #include <sparc/sparc/vaddrs.h> 61 1.1 pk #include <sparc/sparc/cpuvar.h> 62 1.1 pk #include <sparc/dev/vmereg.h> 63 1.1 pk 64 1.19 drochner struct sparcvme_softc { 65 1.7 pk bus_space_tag_t sc_bustag; 66 1.8 pk bus_dma_tag_t sc_dmatag; 67 1.1 pk struct vmebusreg *sc_reg; /* VME control registers */ 68 1.1 pk struct vmebusvec *sc_vec; /* VME interrupt vector */ 69 1.1 pk struct rom_range *sc_range; /* ROM range property */ 70 1.1 pk int sc_nrange; 71 1.53 uwe volatile uint32_t *sc_ioctags; /* VME IO-cache tag registers */ 72 1.53 uwe volatile uint32_t *sc_iocflush;/* VME IO-cache flush registers */ 73 1.53 uwe int (*sc_vmeintr)(void *); 74 1.1 pk }; 75 1.19 drochner struct sparcvme_softc *sparcvme_sc;/*XXX*/ 76 1.1 pk 77 1.1 pk /* autoconfiguration driver */ 78 1.60 tsutsui static int vmematch_iommu(device_t, cfdata_t, void *); 79 1.60 tsutsui static void vmeattach_iommu(device_t, device_t, void *); 80 1.60 tsutsui static int vmematch_mainbus(device_t, cfdata_t, void *); 81 1.60 tsutsui static 
void vmeattach_mainbus(device_t, device_t, void *); 82 1.1 pk #if defined(SUN4) 83 1.53 uwe int vmeintr4(void *); 84 1.1 pk #endif 85 1.1 pk #if defined(SUN4M) 86 1.53 uwe int vmeintr4m(void *); 87 1.53 uwe static int sparc_vme_error(void); 88 1.1 pk #endif 89 1.1 pk 90 1.1 pk 91 1.53 uwe static int sparc_vme_probe(void *, vme_addr_t, vme_size_t, 92 1.28 pk vme_am_t, vme_datasize_t, 93 1.53 uwe int (*)(void *, 94 1.53 uwe bus_space_tag_t, bus_space_handle_t), 95 1.53 uwe void *); 96 1.53 uwe static int sparc_vme_map(void *, vme_addr_t, vme_size_t, vme_am_t, 97 1.53 uwe vme_datasize_t, vme_swap_t, 98 1.53 uwe bus_space_tag_t *, bus_space_handle_t *, 99 1.53 uwe vme_mapresc_t *); 100 1.53 uwe static void sparc_vme_unmap(void *, vme_mapresc_t); 101 1.53 uwe static int sparc_vme_intr_map(void *, int, int, vme_intr_handle_t *); 102 1.53 uwe static const struct evcnt *sparc_vme_intr_evcnt(void *, vme_intr_handle_t); 103 1.53 uwe static void * sparc_vme_intr_establish(void *, vme_intr_handle_t, int, 104 1.53 uwe int (*)(void *), void *); 105 1.53 uwe static void sparc_vme_intr_disestablish(void *, void *); 106 1.1 pk 107 1.53 uwe static int vmebus_translate(struct sparcvme_softc *, vme_am_t, 108 1.53 uwe vme_addr_t, bus_addr_t *); 109 1.50 pk #ifdef notyet 110 1.1 pk #if defined(SUN4M) 111 1.53 uwe static void sparc_vme_iommu_barrier(bus_space_tag_t, bus_space_handle_t, 112 1.53 uwe bus_size_t, bus_size_t, int); 113 1.7 pk 114 1.50 pk #endif /* SUN4M */ 115 1.1 pk #endif 116 1.1 pk 117 1.1 pk /* 118 1.1 pk * DMA functions. 
119 1.1 pk */ 120 1.47 pk #if defined(SUN4) || defined(SUN4M) 121 1.53 uwe static void sparc_vct_dmamap_destroy(void *, bus_dmamap_t); 122 1.47 pk #endif 123 1.26 pk 124 1.1 pk #if defined(SUN4) 125 1.53 uwe static int sparc_vct4_dmamap_create(void *, vme_size_t, vme_am_t, 126 1.26 pk vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 127 1.53 uwe int, bus_dmamap_t *); 128 1.53 uwe static int sparc_vme4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, 129 1.53 uwe bus_size_t, struct proc *, int); 130 1.53 uwe static void sparc_vme4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); 131 1.53 uwe static void sparc_vme4_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, 132 1.53 uwe bus_addr_t, bus_size_t, int); 133 1.47 pk #endif /* SUN4 */ 134 1.1 pk 135 1.1 pk #if defined(SUN4M) 136 1.53 uwe static int sparc_vct_iommu_dmamap_create(void *, vme_size_t, vme_am_t, 137 1.26 pk vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t, 138 1.53 uwe int, bus_dmamap_t *); 139 1.53 uwe static int sparc_vme_iommu_dmamap_create(bus_dma_tag_t, bus_size_t, 140 1.53 uwe int, bus_size_t, bus_size_t, int, bus_dmamap_t *); 141 1.53 uwe 142 1.53 uwe static int sparc_vme_iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t, 143 1.53 uwe void *, bus_size_t, struct proc *, int); 144 1.53 uwe static void sparc_vme_iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); 145 1.53 uwe static void sparc_vme_iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, 146 1.53 uwe bus_addr_t, bus_size_t, int); 147 1.47 pk #endif /* SUN4M */ 148 1.1 pk 149 1.47 pk #if defined(SUN4) || defined(SUN4M) 150 1.53 uwe static int sparc_vme_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, 151 1.54 christos int, size_t, void **, int); 152 1.47 pk #endif 153 1.47 pk 154 1.1 pk #if 0 155 1.53 uwe static void sparc_vme_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t); 156 1.54 christos static void sparc_vme_dmamem_unmap(bus_dma_tag_t, void *, size_t); 157 1.53 uwe static paddr_t sparc_vme_dmamem_mmap(bus_dma_tag_t, 158 1.53 uwe bus_dma_segment_t *, 
int, off_t, int, int); 159 1.1 pk #endif 160 1.1 pk 161 1.53 uwe int sparc_vme_mmap_cookie(vme_addr_t, vme_am_t, bus_space_handle_t *); 162 1.19 drochner 163 1.65 mrg CFATTACH_DECL_NEW(vme_mainbus, sizeof(struct sparcvme_softc), 164 1.39 thorpej vmematch_mainbus, vmeattach_mainbus, NULL, NULL); 165 1.6 pk 166 1.65 mrg CFATTACH_DECL_NEW(vme_iommu, sizeof(struct sparcvme_softc), 167 1.39 thorpej vmematch_iommu, vmeattach_iommu, NULL, NULL); 168 1.1 pk 169 1.51 chs static int vme_attached; 170 1.51 chs 171 1.67 matt extern int (*vmeerr_handler)(void); 172 1.14 pk 173 1.19 drochner #define VMEMOD_D32 0x40 /* ??? */ 174 1.19 drochner 175 1.7 pk /* If the PROM does not provide the `ranges' property, we make up our own */ 176 1.7 pk struct rom_range vmebus_translations[] = { 177 1.19 drochner #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA) 178 1.19 drochner { VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 }, 179 1.19 drochner { VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 }, 180 1.19 drochner { VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 }, 181 1.19 drochner { VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 }, 182 1.19 drochner { VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 }, 183 1.19 drochner { VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 } 184 1.7 pk #undef _DS 185 1.7 pk }; 186 1.7 pk 187 1.11 pk /* 188 1.28 pk * The VME bus logic on sun4 machines maps DMA requests in the first MB 189 1.28 pk * of VME space to the last MB of DVMA space. `vme_dvmamap' is used 190 1.28 pk * for DVMA space allocations. The DMA addresses returned by 191 1.28 pk * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE. 
192 1.11 pk */ 193 1.77 thorpej vmem_t *vme_dvmamap; 194 1.10 pk 195 1.28 pk /* 196 1.28 pk * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit 197 1.28 pk * VME space to the last 8MB of DVMA space and the first 1MB of 198 1.28 pk * 24-bit VME space to the first 1MB of the last 8MB of DVMA space 199 1.78 msaitoh * (thus 24-bit VME space overlaps the first 1MB of 32-bit space). 200 1.28 pk * The following constants define subregions in the IOMMU DVMA map 201 1.28 pk * for VME DVMA allocations. The DMA addresses returned by 202 1.28 pk * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE. 203 1.28 pk */ 204 1.28 pk #define VME_IOMMU_DVMA_BASE 0xff800000 205 1.28 pk #define VME_IOMMU_DVMA_AM24_BASE VME_IOMMU_DVMA_BASE 206 1.28 pk #define VME_IOMMU_DVMA_AM24_END 0xff900000 207 1.28 pk #define VME_IOMMU_DVMA_AM32_BASE VME_IOMMU_DVMA_BASE 208 1.28 pk #define VME_IOMMU_DVMA_AM32_END IOMMU_DVMA_END 209 1.28 pk 210 1.1 pk struct vme_chipset_tag sparc_vme_chipset_tag = { 211 1.1 pk NULL, 212 1.1 pk sparc_vme_map, 213 1.1 pk sparc_vme_unmap, 214 1.19 drochner sparc_vme_probe, 215 1.1 pk sparc_vme_intr_map, 216 1.24 cgd sparc_vme_intr_evcnt, 217 1.1 pk sparc_vme_intr_establish, 218 1.1 pk sparc_vme_intr_disestablish, 219 1.19 drochner 0, 0, 0 /* bus specific DMA stuff */ 220 1.1 pk }; 221 1.1 pk 222 1.1 pk 223 1.1 pk #if defined(SUN4) 224 1.1 pk struct sparc_bus_dma_tag sparc_vme4_dma_tag = { 225 1.1 pk NULL, /* cookie */ 226 1.1 pk _bus_dmamap_create, 227 1.1 pk _bus_dmamap_destroy, 228 1.1 pk sparc_vme4_dmamap_load, 229 1.1 pk _bus_dmamap_load_mbuf, 230 1.1 pk _bus_dmamap_load_uio, 231 1.1 pk _bus_dmamap_load_raw, 232 1.1 pk sparc_vme4_dmamap_unload, 233 1.1 pk sparc_vme4_dmamap_sync, 234 1.1 pk 235 1.23 pk _bus_dmamem_alloc, 236 1.23 pk _bus_dmamem_free, 237 1.9 pk sparc_vme_dmamem_map, 238 1.1 pk _bus_dmamem_unmap, 239 1.1 pk _bus_dmamem_mmap 240 1.1 pk }; 241 1.1 pk #endif 242 1.1 pk 243 1.1 pk #if defined(SUN4M) 244 1.28 pk struct 
sparc_bus_dma_tag sparc_vme_iommu_dma_tag = { 245 1.1 pk NULL, /* cookie */ 246 1.28 pk sparc_vme_iommu_dmamap_create, 247 1.1 pk _bus_dmamap_destroy, 248 1.28 pk sparc_vme_iommu_dmamap_load, 249 1.1 pk _bus_dmamap_load_mbuf, 250 1.1 pk _bus_dmamap_load_uio, 251 1.1 pk _bus_dmamap_load_raw, 252 1.28 pk sparc_vme_iommu_dmamap_unload, 253 1.28 pk sparc_vme_iommu_dmamap_sync, 254 1.1 pk 255 1.23 pk _bus_dmamem_alloc, 256 1.23 pk _bus_dmamem_free, 257 1.9 pk sparc_vme_dmamem_map, 258 1.1 pk _bus_dmamem_unmap, 259 1.1 pk _bus_dmamem_mmap 260 1.1 pk }; 261 1.1 pk #endif 262 1.1 pk 263 1.1 pk 264 1.53 uwe static int 265 1.60 tsutsui vmematch_mainbus(device_t parent, cfdata_t cf, void *aux) 266 1.1 pk { 267 1.15 pk struct mainbus_attach_args *ma = aux; 268 1.1 pk 269 1.51 chs if (!CPU_ISSUN4 || vme_attached) 270 1.1 pk return (0); 271 1.1 pk 272 1.19 drochner return (strcmp("vme", ma->ma_name) == 0); 273 1.1 pk } 274 1.1 pk 275 1.53 uwe static int 276 1.60 tsutsui vmematch_iommu(device_t parent, cfdata_t cf, void *aux) 277 1.1 pk { 278 1.15 pk struct iommu_attach_args *ia = aux; 279 1.1 pk 280 1.51 chs if (vme_attached) 281 1.51 chs return 0; 282 1.51 chs 283 1.19 drochner return (strcmp("vme", ia->iom_name) == 0); 284 1.6 pk } 285 1.1 pk 286 1.1 pk 287 1.53 uwe static void 288 1.60 tsutsui vmeattach_mainbus(device_t parent, device_t self, void *aux) 289 1.1 pk { 290 1.6 pk #if defined(SUN4) 291 1.6 pk struct mainbus_attach_args *ma = aux; 292 1.60 tsutsui struct sparcvme_softc *sc = device_private(self); 293 1.19 drochner struct vmebus_attach_args vba; 294 1.1 pk 295 1.51 chs vme_attached = 1; 296 1.1 pk 297 1.7 pk sc->sc_bustag = ma->ma_bustag; 298 1.8 pk sc->sc_dmatag = ma->ma_dmatag; 299 1.7 pk 300 1.1 pk /* VME interrupt entry point */ 301 1.1 pk sc->sc_vmeintr = vmeintr4; 302 1.1 pk 303 1.60 tsutsui /*XXX*/ sparc_vme_chipset_tag.cookie = sc; 304 1.26 pk /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create; 305 1.26 pk /*XXX*/ 
sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 306 1.60 tsutsui /*XXX*/ sparc_vme4_dma_tag._cookie = sc; 307 1.1 pk 308 1.19 drochner vba.va_vct = &sparc_vme_chipset_tag; 309 1.19 drochner vba.va_bdt = &sparc_vme4_dma_tag; 310 1.19 drochner vba.va_slaveconfig = 0; 311 1.1 pk 312 1.7 pk /* Fall back to our own `range' construction */ 313 1.7 pk sc->sc_range = vmebus_translations; 314 1.7 pk sc->sc_nrange = 315 1.7 pk sizeof(vmebus_translations)/sizeof(vmebus_translations[0]); 316 1.7 pk 317 1.77 thorpej vme_dvmamap = vmem_create("vmedvma", 318 1.77 thorpej VME4_DVMA_BASE, 319 1.77 thorpej VME4_DVMA_END - VME4_DVMA_BASE, 320 1.77 thorpej PAGE_SIZE, /* quantum */ 321 1.77 thorpej NULL, /* importfn */ 322 1.77 thorpej NULL, /* releasefn */ 323 1.77 thorpej NULL, /* source */ 324 1.77 thorpej 0, /* qcache_max */ 325 1.77 thorpej VM_SLEEP, 326 1.77 thorpej IPL_VM); 327 1.10 pk 328 1.1 pk printf("\n"); 329 1.74 thorpej (void)config_found(self, &vba, 0, CFARGS_NONE); 330 1.6 pk 331 1.53 uwe #endif /* SUN4 */ 332 1.1 pk return; 333 1.1 pk } 334 1.1 pk 335 1.1 pk /* sun4m vmebus */ 336 1.53 uwe static void 337 1.68 chs vmeattach_iommu(device_t parent, device_t self, void *aux) 338 1.1 pk { 339 1.6 pk #if defined(SUN4M) 340 1.60 tsutsui struct sparcvme_softc *sc = device_private(self); 341 1.6 pk struct iommu_attach_args *ia = aux; 342 1.19 drochner struct vmebus_attach_args vba; 343 1.6 pk bus_space_handle_t bh; 344 1.6 pk int node; 345 1.1 pk int cline; 346 1.1 pk 347 1.7 pk sc->sc_bustag = ia->iom_bustag; 348 1.8 pk sc->sc_dmatag = ia->iom_dmatag; 349 1.7 pk 350 1.1 pk /* VME interrupt entry point */ 351 1.1 pk sc->sc_vmeintr = vmeintr4m; 352 1.1 pk 353 1.60 tsutsui /*XXX*/ sparc_vme_chipset_tag.cookie = sc; 354 1.28 pk /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create; 355 1.26 pk /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy; 356 1.60 tsutsui /*XXX*/ sparc_vme_iommu_dma_tag._cookie = sc; 
357 1.1 pk 358 1.19 drochner vba.va_vct = &sparc_vme_chipset_tag; 359 1.28 pk vba.va_bdt = &sparc_vme_iommu_dma_tag; 360 1.19 drochner vba.va_slaveconfig = 0; 361 1.1 pk 362 1.6 pk node = ia->iom_node; 363 1.1 pk 364 1.7 pk /* 365 1.7 pk * Map VME control space 366 1.7 pk */ 367 1.14 pk if (ia->iom_nreg < 2) { 368 1.60 tsutsui printf("%s: only %d register sets\n", device_xname(self), 369 1.14 pk ia->iom_nreg); 370 1.6 pk return; 371 1.6 pk } 372 1.6 pk 373 1.35 pk if (bus_space_map(ia->iom_bustag, 374 1.36 thorpej (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space, 375 1.36 thorpej ia->iom_reg[0].oa_base), 376 1.36 thorpej (bus_size_t)ia->iom_reg[0].oa_size, 377 1.7 pk BUS_SPACE_MAP_LINEAR, 378 1.35 pk &bh) != 0) { 379 1.60 tsutsui panic("%s: can't map vmebusreg", device_xname(self)); 380 1.6 pk } 381 1.6 pk sc->sc_reg = (struct vmebusreg *)bh; 382 1.6 pk 383 1.35 pk if (bus_space_map(ia->iom_bustag, 384 1.36 thorpej (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space, 385 1.36 thorpej ia->iom_reg[1].oa_base), 386 1.36 thorpej (bus_size_t)ia->iom_reg[1].oa_size, 387 1.7 pk BUS_SPACE_MAP_LINEAR, 388 1.35 pk &bh) != 0) { 389 1.60 tsutsui panic("%s: can't map vmebusvec", device_xname(self)); 390 1.6 pk } 391 1.6 pk sc->sc_vec = (struct vmebusvec *)bh; 392 1.6 pk 393 1.7 pk /* 394 1.7 pk * Map VME IO cache tags and flush control. 
395 1.7 pk */ 396 1.35 pk if (bus_space_map(ia->iom_bustag, 397 1.35 pk (bus_addr_t) BUS_ADDR( 398 1.36 thorpej ia->iom_reg[1].oa_space, 399 1.36 thorpej ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET), 400 1.7 pk VME_IOC_SIZE, 401 1.7 pk BUS_SPACE_MAP_LINEAR, 402 1.35 pk &bh) != 0) { 403 1.60 tsutsui panic("%s: can't map IOC tags", device_xname(self)); 404 1.6 pk } 405 1.53 uwe sc->sc_ioctags = (uint32_t *)bh; 406 1.6 pk 407 1.35 pk if (bus_space_map(ia->iom_bustag, 408 1.35 pk (bus_addr_t) BUS_ADDR( 409 1.36 thorpej ia->iom_reg[1].oa_space, 410 1.36 thorpej ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET), 411 1.7 pk VME_IOC_SIZE, 412 1.7 pk BUS_SPACE_MAP_LINEAR, 413 1.35 pk &bh) != 0) { 414 1.60 tsutsui panic("%s: can't map IOC flush registers", device_xname(self)); 415 1.6 pk } 416 1.53 uwe sc->sc_iocflush = (uint32_t *)bh; 417 1.1 pk 418 1.1 pk /* 419 1.1 pk * Get "range" property. 420 1.1 pk */ 421 1.49 pk if (prom_getprop(node, "ranges", sizeof(struct rom_range), 422 1.48 mrg &sc->sc_nrange, &sc->sc_range) != 0) { 423 1.60 tsutsui panic("%s: can't get ranges property", device_xname(self)); 424 1.1 pk } 425 1.1 pk 426 1.19 drochner sparcvme_sc = sc; 427 1.14 pk vmeerr_handler = sparc_vme_error; 428 1.1 pk 429 1.1 pk /* 430 1.1 pk * Invalidate all IO-cache entries. 
431 1.1 pk */ 432 1.1 pk for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) { 433 1.1 pk sc->sc_ioctags[--cline] = 0; 434 1.1 pk } 435 1.1 pk 436 1.1 pk /* Enable IO-cache */ 437 1.1 pk sc->sc_reg->vmebus_cr |= VMEBUS_CR_C; 438 1.1 pk 439 1.1 pk printf(": version 0x%x\n", 440 1.1 pk sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL); 441 1.1 pk 442 1.73 thorpej (void)config_found(self, &vba, 0, 443 1.76 thorpej CFARGS(.devhandle = device_handle(self))); 444 1.47 pk #endif /* SUN4M */ 445 1.1 pk } 446 1.1 pk 447 1.16 fvdl #if defined(SUN4M) 448 1.16 fvdl static int 449 1.53 uwe sparc_vme_error(void) 450 1.1 pk { 451 1.19 drochner struct sparcvme_softc *sc = sparcvme_sc; 452 1.53 uwe uint32_t afsr, afpa; 453 1.14 pk char bits[64]; 454 1.1 pk 455 1.19 drochner afsr = sc->sc_reg->vmebus_afsr; 456 1.14 pk afpa = sc->sc_reg->vmebus_afar; 457 1.58 christos snprintb(bits, sizeof(bits), VMEBUS_AFSR_BITS, afsr); 458 1.58 christos printf("VME error:\n\tAFSR %s\n", bits); 459 1.14 pk printf("\taddress: 0x%x%x\n", afsr, afpa); 460 1.14 pk return (0); 461 1.1 pk } 462 1.16 fvdl #endif 463 1.1 pk 464 1.53 uwe static int 465 1.53 uwe vmebus_translate(struct sparcvme_softc *sc, vme_am_t mod, vme_addr_t addr, 466 1.53 uwe bus_addr_t *bap) 467 1.7 pk { 468 1.7 pk int i; 469 1.7 pk 470 1.7 pk for (i = 0; i < sc->sc_nrange; i++) { 471 1.35 pk struct rom_range *rp = &sc->sc_range[i]; 472 1.7 pk 473 1.35 pk if (rp->cspace != mod) 474 1.7 pk continue; 475 1.7 pk 476 1.7 pk /* We've found the connection to the parent bus */ 477 1.35 pk *bap = BUS_ADDR(rp->pspace, rp->poffset + addr); 478 1.7 pk return (0); 479 1.7 pk } 480 1.7 pk return (ENOENT); 481 1.7 pk } 482 1.7 pk 483 1.19 drochner struct vmeprobe_myarg { 484 1.53 uwe int (*cb)(void *, bus_space_tag_t, bus_space_handle_t); 485 1.19 drochner void *cbarg; 486 1.19 drochner bus_space_tag_t tag; 487 1.19 drochner int res; /* backwards */ 488 1.19 drochner }; 489 1.19 drochner 490 1.53 uwe static int vmeprobe_mycb(void *, void *); 491 1.53 uwe 
492 1.19 drochner static int 493 1.53 uwe vmeprobe_mycb(void *bh, void *arg) 494 1.19 drochner { 495 1.19 drochner struct vmeprobe_myarg *a = arg; 496 1.19 drochner 497 1.19 drochner a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh); 498 1.19 drochner return (!a->res); 499 1.19 drochner } 500 1.19 drochner 501 1.53 uwe static int 502 1.53 uwe sparc_vme_probe(void *cookie, vme_addr_t addr, vme_size_t len, vme_am_t mod, 503 1.53 uwe vme_datasize_t datasize, 504 1.53 uwe int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), 505 1.53 uwe void *arg) 506 1.1 pk { 507 1.60 tsutsui struct sparcvme_softc *sc = cookie; 508 1.7 pk bus_addr_t paddr; 509 1.19 drochner bus_size_t size; 510 1.19 drochner struct vmeprobe_myarg myarg; 511 1.19 drochner int res, i; 512 1.1 pk 513 1.35 pk if (vmebus_translate(sc, mod, addr, &paddr) != 0) 514 1.19 drochner return (EINVAL); 515 1.19 drochner 516 1.19 drochner size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4)); 517 1.7 pk 518 1.19 drochner if (callback) { 519 1.19 drochner myarg.cb = callback; 520 1.19 drochner myarg.cbarg = arg; 521 1.19 drochner myarg.tag = sc->sc_bustag; 522 1.19 drochner myarg.res = 0; 523 1.35 pk res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 524 1.19 drochner 0, vmeprobe_mycb, &myarg); 525 1.19 drochner return (res ? 0 : (myarg.res ? 
myarg.res : EIO)); 526 1.19 drochner } 527 1.19 drochner 528 1.19 drochner for (i = 0; i < len / size; i++) { 529 1.19 drochner myarg.res = 0; 530 1.35 pk res = bus_space_probe(sc->sc_bustag, paddr, size, 0, 531 1.19 drochner 0, 0, 0); 532 1.19 drochner if (res == 0) 533 1.19 drochner return (EIO); 534 1.19 drochner paddr += size; 535 1.19 drochner } 536 1.19 drochner return (0); 537 1.1 pk } 538 1.1 pk 539 1.53 uwe static int 540 1.53 uwe sparc_vme_map(void *cookie, vme_addr_t addr, vme_size_t size, vme_am_t mod, 541 1.53 uwe vme_datasize_t datasize, vme_swap_t swap, 542 1.53 uwe bus_space_tag_t *tp, bus_space_handle_t *hp, vme_mapresc_t *rp) 543 1.1 pk { 544 1.60 tsutsui struct sparcvme_softc *sc = cookie; 545 1.7 pk bus_addr_t paddr; 546 1.7 pk int error; 547 1.7 pk 548 1.35 pk error = vmebus_translate(sc, mod, addr, &paddr); 549 1.7 pk if (error != 0) 550 1.7 pk return (error); 551 1.1 pk 552 1.19 drochner *tp = sc->sc_bustag; 553 1.35 pk return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp)); 554 1.1 pk } 555 1.1 pk 556 1.1 pk int 557 1.53 uwe sparc_vme_mmap_cookie(vme_addr_t addr, vme_am_t mod, bus_space_handle_t *hp) 558 1.1 pk { 559 1.19 drochner struct sparcvme_softc *sc = sparcvme_sc; 560 1.7 pk bus_addr_t paddr; 561 1.7 pk int error; 562 1.7 pk 563 1.35 pk error = vmebus_translate(sc, mod, addr, &paddr); 564 1.7 pk if (error != 0) 565 1.7 pk return (error); 566 1.1 pk 567 1.53 uwe return (bus_space_mmap(sc->sc_bustag, paddr, 0, 568 1.33 eeh 0/*prot is ignored*/, 0)); 569 1.1 pk } 570 1.1 pk 571 1.50 pk #ifdef notyet 572 1.1 pk #if defined(SUN4M) 573 1.53 uwe static void 574 1.53 uwe sparc_vme_iommu_barrier(bus_space_tag_t t, bus_space_handle_t h, 575 1.53 uwe bus_size_t offset, bus_size_t size. 
576 1.53 uwe int flags) 577 1.1 pk { 578 1.60 tsutsui struct vmebusreg *vbp = t->cookie; 579 1.1 pk 580 1.1 pk /* Read async fault status to flush write-buffers */ 581 1.1 pk (*(volatile int *)&vbp->vmebus_afsr); 582 1.1 pk } 583 1.50 pk #endif /* SUN4M */ 584 1.1 pk #endif 585 1.1 pk 586 1.1 pk 587 1.1 pk 588 1.1 pk /* 589 1.1 pk * VME Interrupt Priority Level to sparc Processor Interrupt Level. 590 1.1 pk */ 591 1.1 pk static int vme_ipl_to_pil[] = { 592 1.1 pk 0, 593 1.1 pk 2, 594 1.1 pk 3, 595 1.1 pk 5, 596 1.1 pk 7, 597 1.1 pk 9, 598 1.1 pk 11, 599 1.1 pk 13 600 1.1 pk }; 601 1.1 pk 602 1.1 pk 603 1.1 pk /* 604 1.1 pk * All VME device interrupts go through vmeintr(). This function reads 605 1.1 pk * the VME vector from the bus, then dispatches the device interrupt 606 1.1 pk * handler. All handlers for devices that map to the same Processor 607 1.1 pk * Interrupt Level (according to the table above) are on a linked list 608 1.1 pk * of `sparc_vme_intr_handle' structures. The head of which is passed 609 1.1 pk * down as the argument to `vmeintr(void *arg)'. 610 1.1 pk */ 611 1.1 pk struct sparc_vme_intr_handle { 612 1.1 pk struct intrhand ih; 613 1.1 pk struct sparc_vme_intr_handle *next; 614 1.1 pk int vec; /* VME interrupt vector */ 615 1.1 pk int pri; /* VME interrupt priority */ 616 1.19 drochner struct sparcvme_softc *sc;/*XXX*/ 617 1.1 pk }; 618 1.1 pk 619 1.1 pk #if defined(SUN4) 620 1.1 pk int 621 1.53 uwe vmeintr4(void *arg) 622 1.1 pk { 623 1.1 pk struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 624 1.1 pk int level, vec; 625 1.30 pk int rv = 0; 626 1.1 pk 627 1.1 pk level = (ihp->pri << 1) | 1; 628 1.1 pk 629 1.54 christos vec = ldcontrolb((void *)(AC_VMEINTVEC | level)); 630 1.1 pk 631 1.1 pk if (vec == -1) { 632 1.30 pk #ifdef DEBUG 633 1.30 pk /* 634 1.30 pk * This seems to happen only with the i82586 based 635 1.30 pk * `ie1' boards. 
636 1.30 pk */ 637 1.30 pk printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 638 1.30 pk #endif 639 1.30 pk return (1); /* XXX - pretend we handled it, for now */ 640 1.1 pk } 641 1.1 pk 642 1.1 pk for (; ihp; ihp = ihp->next) 643 1.40 pk if (ihp->vec == vec && ihp->ih.ih_fun) { 644 1.40 pk splx(ihp->ih.ih_classipl); 645 1.30 pk rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 646 1.40 pk } 647 1.30 pk 648 1.30 pk return (rv); 649 1.1 pk } 650 1.1 pk #endif 651 1.1 pk 652 1.1 pk #if defined(SUN4M) 653 1.1 pk int 654 1.53 uwe vmeintr4m(void *arg) 655 1.1 pk { 656 1.1 pk struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg; 657 1.1 pk int level, vec; 658 1.30 pk int rv = 0; 659 1.1 pk 660 1.1 pk level = (ihp->pri << 1) | 1; 661 1.1 pk 662 1.1 pk #if 0 663 1.1 pk int pending; 664 1.1 pk 665 1.1 pk /* Flush VME <=> Sbus write buffers */ 666 1.1 pk (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr); 667 1.1 pk 668 1.1 pk pending = *((int*)ICR_SI_PEND); 669 1.1 pk if ((pending & SINTR_VME(ihp->pri)) == 0) { 670 1.1 pk printf("vmeintr: non pending at pri %x(p 0x%x)\n", 671 1.1 pk ihp->pri, pending); 672 1.1 pk return (0); 673 1.1 pk } 674 1.1 pk #endif 675 1.1 pk #if 0 676 1.1 pk /* Why gives this a bus timeout sometimes? */ 677 1.1 pk vec = ihp->sc->sc_vec->vmebusvec[level]; 678 1.1 pk #else 679 1.1 pk /* so, arrange to catch the fault... 
*/ 680 1.1 pk { 681 1.53 uwe extern int fkbyte(volatile char *, struct pcb *); 682 1.52 tsutsui volatile char *addr = &ihp->sc->sc_vec->vmebusvec[level]; 683 1.1 pk struct pcb *xpcb; 684 1.63 rmind void *saveonfault; 685 1.1 pk int s; 686 1.1 pk 687 1.1 pk s = splhigh(); 688 1.1 pk 689 1.61 rmind xpcb = lwp_getpcb(curlwp); 690 1.63 rmind saveonfault = xpcb->pcb_onfault; 691 1.1 pk vec = fkbyte(addr, xpcb); 692 1.63 rmind xpcb->pcb_onfault = saveonfault; 693 1.1 pk 694 1.1 pk splx(s); 695 1.1 pk } 696 1.1 pk #endif 697 1.1 pk 698 1.1 pk if (vec == -1) { 699 1.30 pk #ifdef DEBUG 700 1.30 pk /* 701 1.30 pk * This seems to happen only with the i82586 based 702 1.30 pk * `ie1' boards. 703 1.30 pk */ 704 1.30 pk printf("vme: spurious interrupt at VME level %d\n", ihp->pri); 705 1.30 pk printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n", 706 1.1 pk *((int*)ICR_SI_PEND), 707 1.1 pk ihp->sc->sc_reg->vmebus_afsr, 708 1.1 pk ihp->sc->sc_reg->vmebus_afar); 709 1.30 pk #endif 710 1.14 pk return (1); /* XXX - pretend we handled it, for now */ 711 1.1 pk } 712 1.1 pk 713 1.1 pk for (; ihp; ihp = ihp->next) 714 1.40 pk if (ihp->vec == vec && ihp->ih.ih_fun) { 715 1.40 pk splx(ihp->ih.ih_classipl); 716 1.30 pk rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg); 717 1.40 pk } 718 1.30 pk 719 1.30 pk return (rv); 720 1.1 pk } 721 1.53 uwe #endif /* SUN4M */ 722 1.1 pk 723 1.53 uwe static int 724 1.53 uwe sparc_vme_intr_map(void *cookie, int level, int vec, 725 1.53 uwe vme_intr_handle_t *ihp) 726 1.1 pk { 727 1.1 pk struct sparc_vme_intr_handle *ih; 728 1.1 pk 729 1.71 thorpej ih = kmem_alloc(sizeof(*ih), KM_SLEEP); 730 1.19 drochner ih->pri = level; 731 1.1 pk ih->vec = vec; 732 1.1 pk ih->sc = cookie;/*XXX*/ 733 1.1 pk *ihp = ih; 734 1.1 pk return (0); 735 1.24 cgd } 736 1.24 cgd 737 1.53 uwe static const struct evcnt * 738 1.53 uwe sparc_vme_intr_evcnt(void *cookie, vme_intr_handle_t vih) 739 1.24 cgd { 740 1.24 cgd 741 1.24 cgd /* XXX for now, no evcnt parent reported */ 742 1.24 cgd 
return NULL; 743 1.1 pk } 744 1.1 pk 745 1.53 uwe static void * 746 1.53 uwe sparc_vme_intr_establish(void *cookie, vme_intr_handle_t vih, int level, 747 1.53 uwe int (*func)(void *), void *arg) 748 1.1 pk { 749 1.60 tsutsui struct sparcvme_softc *sc = cookie; 750 1.1 pk struct sparc_vme_intr_handle *svih = 751 1.1 pk (struct sparc_vme_intr_handle *)vih; 752 1.1 pk struct intrhand *ih; 753 1.40 pk int pil; 754 1.1 pk 755 1.40 pk /* Translate VME priority to processor IPL */ 756 1.40 pk pil = vme_ipl_to_pil[svih->pri]; 757 1.19 drochner 758 1.40 pk if (level < pil) 759 1.40 pk panic("vme_intr_establish: class lvl (%d) < pil (%d)\n", 760 1.40 pk level, pil); 761 1.1 pk 762 1.1 pk svih->ih.ih_fun = func; 763 1.1 pk svih->ih.ih_arg = arg; 764 1.40 pk svih->ih.ih_classipl = level; /* note: used slightly differently 765 1.40 pk than in intr.c (no shift) */ 766 1.1 pk svih->next = NULL; 767 1.1 pk 768 1.1 pk /* ensure the interrupt subsystem will call us at this level */ 769 1.40 pk for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next) 770 1.1 pk if (ih->ih_fun == sc->sc_vmeintr) 771 1.1 pk break; 772 1.1 pk 773 1.1 pk if (ih == NULL) { 774 1.71 thorpej ih = kmem_zalloc(sizeof(*ih), KM_SLEEP); 775 1.1 pk ih->ih_fun = sc->sc_vmeintr; 776 1.1 pk ih->ih_arg = vih; 777 1.62 mrg intr_establish(pil, 0, ih, NULL, false); 778 1.1 pk } else { 779 1.1 pk svih->next = (vme_intr_handle_t)ih->ih_arg; 780 1.1 pk ih->ih_arg = vih; 781 1.1 pk } 782 1.1 pk return (NULL); 783 1.1 pk } 784 1.1 pk 785 1.53 uwe static void 786 1.53 uwe sparc_vme_unmap(void *cookie, vme_mapresc_t resc) 787 1.1 pk { 788 1.53 uwe 789 1.1 pk /* Not implemented */ 790 1.1 pk panic("sparc_vme_unmap"); 791 1.1 pk } 792 1.1 pk 793 1.53 uwe static void 794 1.53 uwe sparc_vme_intr_disestablish(void *cookie, void *a) 795 1.1 pk { 796 1.53 uwe 797 1.1 pk /* Not implemented */ 798 1.1 pk panic("sparc_vme_intr_disestablish"); 799 1.1 pk } 800 1.1 pk 801 1.1 pk 802 1.1 pk 803 1.1 pk /* 804 1.1 pk * VME DMA functions. 
805 1.1 pk */ 806 1.1 pk 807 1.47 pk #if defined(SUN4) || defined(SUN4M) 808 1.26 pk static void 809 1.53 uwe sparc_vct_dmamap_destroy(void *cookie, bus_dmamap_t map) 810 1.26 pk { 811 1.60 tsutsui struct sparcvme_softc *sc = cookie; 812 1.53 uwe 813 1.26 pk bus_dmamap_destroy(sc->sc_dmatag, map); 814 1.26 pk } 815 1.47 pk #endif 816 1.26 pk 817 1.1 pk #if defined(SUN4) 818 1.26 pk static int 819 1.53 uwe sparc_vct4_dmamap_create(void *cookie, vme_size_t size, vme_am_t am, 820 1.53 uwe vme_datasize_t datasize, vme_swap_t swap, 821 1.53 uwe int nsegments, vme_size_t maxsegsz, 822 1.53 uwe vme_addr_t boundary, int flags, 823 1.53 uwe bus_dmamap_t *dmamp) 824 1.26 pk { 825 1.60 tsutsui struct sparcvme_softc *sc = cookie; 826 1.26 pk 827 1.26 pk /* Allocate a base map through parent bus ops */ 828 1.26 pk return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 829 1.26 pk boundary, flags, dmamp)); 830 1.26 pk } 831 1.26 pk 832 1.53 uwe static int 833 1.53 uwe sparc_vme4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, 834 1.53 uwe void *buf, bus_size_t buflen, 835 1.53 uwe struct proc *p, int flags) 836 1.1 pk { 837 1.25 pk bus_addr_t dva; 838 1.10 pk bus_size_t sgsize; 839 1.77 thorpej vmem_addr_t ldva; 840 1.25 pk vaddr_t va, voff; 841 1.10 pk pmap_t pmap; 842 1.10 pk int pagesz = PAGE_SIZE; 843 1.1 pk int error; 844 1.1 pk 845 1.42 pk cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */ 846 1.25 pk 847 1.25 pk va = (vaddr_t)buf; 848 1.25 pk voff = va & (pagesz - 1); 849 1.25 pk va &= -pagesz; 850 1.25 pk 851 1.25 pk /* 852 1.25 pk * Allocate an integral number of pages from DVMA space 853 1.25 pk * covering the passed buffer. 854 1.25 pk */ 855 1.25 pk sgsize = (buflen + voff + pagesz - 1) & -pagesz; 856 1.77 thorpej 857 1.77 thorpej const vm_flag_t vmflags = VM_BESTFIT | 858 1.77 thorpej ((flags & BUS_DMA_NOWAIT) ? 
VM_NOSLEEP : VM_SLEEP); 859 1.77 thorpej 860 1.77 thorpej error = vmem_xalloc(vme_dvmamap, sgsize, 861 1.77 thorpej 0, /* alignment */ 862 1.77 thorpej 0, /* phase */ 863 1.77 thorpej map->_dm_boundary, /* nocross */ 864 1.77 thorpej VMEM_ADDR_MIN, /* minaddr */ 865 1.77 thorpej VMEM_ADDR_MAX, /* maxaddr */ 866 1.77 thorpej vmflags, 867 1.77 thorpej &ldva); 868 1.1 pk if (error != 0) 869 1.1 pk return (error); 870 1.48 mrg dva = (bus_addr_t)ldva; 871 1.1 pk 872 1.10 pk map->dm_mapsize = buflen; 873 1.10 pk map->dm_nsegs = 1; 874 1.25 pk /* Adjust DVMA address to VME view */ 875 1.25 pk map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE; 876 1.10 pk map->dm_segs[0].ds_len = buflen; 877 1.25 pk map->dm_segs[0]._ds_sgsize = sgsize; 878 1.10 pk 879 1.10 pk pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap; 880 1.10 pk 881 1.25 pk for (; sgsize != 0; ) { 882 1.10 pk paddr_t pa; 883 1.10 pk /* 884 1.10 pk * Get the physical address for this page. 885 1.10 pk */ 886 1.25 pk (void) pmap_extract(pmap, va, &pa); 887 1.10 pk 888 1.10 pk #ifdef notyet 889 1.10 pk if (have_iocache) 890 1.25 pk pa |= PG_IOC; 891 1.10 pk #endif 892 1.25 pk pmap_enter(pmap_kernel(), dva, 893 1.25 pk pa | PMAP_NC, 894 1.25 pk VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED); 895 1.25 pk 896 1.25 pk dva += pagesz; 897 1.25 pk va += pagesz; 898 1.25 pk sgsize -= pagesz; 899 1.10 pk } 900 1.32 chris pmap_update(pmap_kernel()); 901 1.10 pk 902 1.1 pk return (0); 903 1.1 pk } 904 1.1 pk 905 1.53 uwe static void 906 1.53 uwe sparc_vme4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 907 1.1 pk { 908 1.23 pk bus_dma_segment_t *segs = map->dm_segs; 909 1.23 pk int nsegs = map->dm_nsegs; 910 1.23 pk bus_addr_t dva; 911 1.10 pk bus_size_t len; 912 1.77 thorpej int i; 913 1.8 pk 914 1.23 pk for (i = 0; i < nsegs; i++) { 915 1.23 pk /* Go from VME to CPU view */ 916 1.23 pk dva = segs[i].ds_addr + VME4_DVMA_BASE; 917 1.25 pk dva &= -PAGE_SIZE; 918 1.25 pk len = segs[i]._ds_sgsize; 919 1.23 pk 920 1.23 
pk /* Remove double-mapping in DVMA space */ 921 1.23 pk pmap_remove(pmap_kernel(), dva, dva + len); 922 1.23 pk 923 1.23 pk /* Release DVMA space */ 924 1.77 thorpej vmem_xfree(vme_dvmamap, dva, len); 925 1.23 pk } 926 1.32 chris pmap_update(pmap_kernel()); 927 1.10 pk 928 1.10 pk /* Mark the mappings as invalid. */ 929 1.10 pk map->dm_mapsize = 0; 930 1.10 pk map->dm_nsegs = 0; 931 1.1 pk } 932 1.1 pk 933 1.53 uwe static void 934 1.53 uwe sparc_vme4_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, 935 1.53 uwe bus_addr_t offset, bus_size_t len, int ops) 936 1.1 pk { 937 1.3 thorpej 938 1.3 thorpej /* 939 1.3 thorpej * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B). 940 1.10 pk * Currently the cache is flushed in bus_dma_load()... 941 1.3 thorpej */ 942 1.1 pk } 943 1.1 pk #endif /* SUN4 */ 944 1.1 pk 945 1.1 pk #if defined(SUN4M) 946 1.1 pk static int 947 1.53 uwe sparc_vme_iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size, 948 1.53 uwe int nsegments, bus_size_t maxsegsz, 949 1.53 uwe bus_size_t boundary, int flags, 950 1.53 uwe bus_dmamap_t *dmamp) 951 1.1 pk { 952 1.26 pk 953 1.28 pk printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n"); 954 1.26 pk return (EINVAL); 955 1.26 pk } 956 1.26 pk 957 1.26 pk static int 958 1.53 uwe sparc_vct_iommu_dmamap_create(void *cookie, vme_size_t size, vme_am_t am, 959 1.53 uwe vme_datasize_t datasize, vme_swap_t swap, 960 1.53 uwe int nsegments, vme_size_t maxsegsz, 961 1.53 uwe vme_addr_t boundary, int flags, 962 1.53 uwe bus_dmamap_t *dmamp) 963 1.26 pk { 964 1.60 tsutsui struct sparcvme_softc *sc = cookie; 965 1.26 pk bus_dmamap_t map; 966 1.10 pk int error; 967 1.1 pk 968 1.26 pk /* Allocate a base map through parent bus ops */ 969 1.10 pk error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz, 970 1.26 pk boundary, flags, &map); 971 1.10 pk if (error != 0) 972 1.10 pk return (error); 973 1.10 pk 974 1.26 pk /* 975 1.26 pk * Each I/O cache line maps to a 8K section of VME DVMA 
space, so 976 1.75 msaitoh * we must ensure that DVMA allocations are always 8K aligned. 977 1.26 pk */ 978 1.26 pk map->_dm_align = VME_IOC_PAGESZ; 979 1.26 pk 980 1.26 pk /* Set map region based on Address Modifier */ 981 1.26 pk switch ((am & VME_AM_ADRSIZEMASK)) { 982 1.26 pk case VME_AM_A16: 983 1.26 pk case VME_AM_A24: 984 1.26 pk /* 1 MB of DVMA space */ 985 1.28 pk map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE; 986 1.28 pk map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END; 987 1.26 pk break; 988 1.26 pk case VME_AM_A32: 989 1.26 pk /* 8 MB of DVMA space */ 990 1.28 pk map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE; 991 1.28 pk map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END; 992 1.26 pk break; 993 1.26 pk } 994 1.1 pk 995 1.26 pk *dmamp = map; 996 1.10 pk return (0); 997 1.1 pk } 998 1.1 pk 999 1.53 uwe static int 1000 1.53 uwe sparc_vme_iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, 1001 1.53 uwe void *buf, bus_size_t buflen, 1002 1.53 uwe struct proc *p, int flags) 1003 1.1 pk { 1004 1.60 tsutsui struct sparcvme_softc *sc = t->_cookie; 1005 1.53 uwe volatile uint32_t *ioctags; 1006 1.1 pk int error; 1007 1.1 pk 1008 1.26 pk /* Round request to a multiple of the I/O cache size */ 1009 1.23 pk buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ; 1010 1.8 pk error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags); 1011 1.1 pk if (error != 0) 1012 1.1 pk return (error); 1013 1.1 pk 1014 1.26 pk /* Allocate I/O cache entries for this range */ 1015 1.1 pk ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1016 1.26 pk while (buflen > 0) { 1017 1.1 pk *ioctags = VME_IOC_IC | VME_IOC_W; 1018 1.1 pk ioctags += VME_IOC_LINESZ/sizeof(*ioctags); 1019 1.1 pk buflen -= VME_IOC_PAGESZ; 1020 1.1 pk } 1021 1.28 pk 1022 1.28 pk /* 1023 1.28 pk * Adjust DVMA address to VME view. 1024 1.28 pk * Note: the DVMA base address is the same for all 1025 1.28 pk * VME address spaces. 
1026 1.28 pk */ 1027 1.28 pk map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE; 1028 1.1 pk return (0); 1029 1.1 pk } 1030 1.1 pk 1031 1.1 pk 1032 1.53 uwe static void 1033 1.53 uwe sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map) 1034 1.1 pk { 1035 1.60 tsutsui struct sparcvme_softc *sc = t->_cookie; 1036 1.53 uwe volatile uint32_t *flushregs; 1037 1.1 pk int len; 1038 1.1 pk 1039 1.28 pk /* Go from VME to CPU view */ 1040 1.28 pk map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE; 1041 1.28 pk 1042 1.26 pk /* Flush VME I/O cache */ 1043 1.26 pk len = map->dm_segs[0]._ds_sgsize; 1044 1.1 pk flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr); 1045 1.26 pk while (len > 0) { 1046 1.1 pk *flushregs = 0; 1047 1.1 pk flushregs += VME_IOC_LINESZ/sizeof(*flushregs); 1048 1.1 pk len -= VME_IOC_PAGESZ; 1049 1.1 pk } 1050 1.26 pk 1051 1.26 pk /* 1052 1.26 pk * Start a read from `tag space' which will not complete until 1053 1.26 pk * all cache flushes have finished 1054 1.26 pk */ 1055 1.1 pk (*sc->sc_ioctags); 1056 1.1 pk 1057 1.8 pk bus_dmamap_unload(sc->sc_dmatag, map); 1058 1.9 pk } 1059 1.9 pk 1060 1.53 uwe static void 1061 1.53 uwe sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, 1062 1.53 uwe bus_addr_t offset, bus_size_t len, int ops) 1063 1.1 pk { 1064 1.3 thorpej 1065 1.3 thorpej /* 1066 1.3 thorpej * XXX Should perform cache flushes as necessary. 1067 1.3 thorpej */ 1068 1.1 pk } 1069 1.1 pk #endif /* SUN4M */ 1070 1.12 pk 1071 1.47 pk #if defined(SUN4) || defined(SUN4M) 1072 1.53 uwe static int 1073 1.53 uwe sparc_vme_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, 1074 1.54 christos size_t size, void **kvap, int flags) 1075 1.12 pk { 1076 1.60 tsutsui struct sparcvme_softc *sc = t->_cookie; 1077 1.12 pk 1078 1.12 pk return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags)); 1079 1.12 pk } 1080 1.47 pk #endif /* SUN4 || SUN4M */ 1081