/*	$NetBSD: astro.c,v 1.6 2023/12/03 02:03:18 thorpej Exp $	*/

/*	$OpenBSD: astro.c,v 1.8 2007/10/06 23:50:54 krw Exp $	*/

/*
 * Copyright (c) 2007 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <sys/systm.h>
#include <sys/device.h>
#include <sys/vmem.h>
#include <sys/kmem.h>
#include <sys/reboot.h>
#include <sys/tree.h>

#include <uvm/uvm.h>

#include <machine/iomod.h>
#include <machine/autoconf.h>
#include <machine/pdc.h>
#include <machine/endian.h>

#include <hppa/dev/cpudevs.h>
#include <hppa/hppa/machdep.h>

struct astro_regs {
	uint32_t	rid;
	uint32_t	pad0000;
	uint32_t	ioc_ctrl;
	uint32_t	pad0008;
	uint8_t		resv1[0x0300 - 0x0010];
	uint64_t	lmmio_direct0_base;
	uint64_t	lmmio_direct0_mask;
	uint64_t	lmmio_direct0_route;
	uint64_t	lmmio_direct1_base;
	uint64_t	lmmio_direct1_mask;
	uint64_t	lmmio_direct1_route;
	uint64_t	lmmio_direct2_base;
	uint64_t	lmmio_direct2_mask;
	uint64_t	lmmio_direct2_route;
	uint64_t	lmmio_direct3_base;
	uint64_t	lmmio_direct3_mask;
	uint64_t	lmmio_direct3_route;
	uint64_t	lmmio_dist_base;
	uint64_t	lmmio_dist_mask;
	uint64_t	lmmio_dist_route;
	uint64_t	gmmio_dist_base;
	uint64_t	gmmio_dist_mask;
	uint64_t	gmmio_dist_route;
	uint64_t	ios_dist_base;
	uint64_t	ios_dist_mask;
	uint64_t	ios_dist_route;
	uint8_t		resv2[0x03c0 - 0x03a8];
	uint64_t	ios_direct_base;
	uint64_t	ios_direct_mask;
	uint64_t	ios_direct_route;
	uint8_t		resv3[0x22000 - 0x03d8];
	uint64_t	func_id;
	uint64_t	func_class;
	uint8_t		resv4[0x22040 - 0x22010];
	uint64_t	rope_config;
	uint8_t		resv5[0x22050 - 0x22048];
	uint64_t	rope_debug;
	uint8_t		resv6[0x22200 - 0x22058];
	uint64_t	rope0_control;
	uint64_t	rope1_control;
	uint64_t	rope2_control;
	uint64_t	rope3_control;
	uint64_t	rope4_control;
	uint64_t	rope5_control;
	uint64_t	rope6_control;
	uint64_t	rope7_control;
	uint8_t		resv7[0x22300 - 0x22240];
	uint32_t	tlb_ibase;
	uint32_t	pad22300;
	uint32_t	tlb_imask;
	uint32_t	pad22308;
	uint32_t	tlb_pcom;
	uint32_t	pad22310;
	uint32_t	tlb_tcnfg;
	uint32_t	pad22318;
	uint64_t	tlb_pdir_base;
};

#define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
#define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
#define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
#define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
#define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
#define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
#define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
#define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
#define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
#define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
#define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
#define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
#define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
#define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
#define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */

#define IOTTE_V		0x8000000000000000LL	/* Entry valid */
#define IOTTE_PAMASK	0x000000fffffff000LL
#define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */

struct astro_softc {
	device_t sc_dv;

	bus_dma_tag_t sc_dmat;
	struct astro_regs volatile *sc_regs;
	uint64_t *sc_pdir;

	char sc_dvmamapname[20];
	vmem_t *sc_dvmamap;
	struct hppa_bus_dma_tag sc_dmatag;
};

/*
 * per-map DVMA page table
 */
struct iommu_page_entry {
	SPLAY_ENTRY(iommu_page_entry) ipe_node;
	paddr_t		ipe_pa;
	vaddr_t		ipe_va;
	bus_addr_t	ipe_dva;
};

struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	struct iommu_page_entry ipm_map[1];
};

/*
 * per-map IOMMU state
 */
struct iommu_map_state {
	struct astro_softc *ims_sc;
	bus_addr_t ims_dvmastart;
	bus_size_t ims_dvmasize;
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};

int	astro_match(device_t, cfdata_t, void *);
void	astro_attach(device_t, device_t, void *);
static device_t	astro_callback(device_t self, struct confargs *ca);

CFATTACH_DECL_NEW(astro, sizeof(struct astro_softc),
    astro_match, astro_attach, NULL, NULL);

extern struct cfdriver astro_cd;

int	iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
	    int, bus_dmamap_t *);
void	iommu_dvmamap_destroy(void *, bus_dmamap_t);
int	iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int	iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int	iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	iommu_dvmamap_unload(void *, bus_dmamap_t);
void	iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int	iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
	    bus_dma_segment_t *, int, int *, int);
void	iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
int	iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
	    void **, int);
void	iommu_dvmamem_unmap(void *, void *, size_t);
paddr_t	iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

void	iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
void	iommu_remove(struct astro_softc *, bus_addr_t);

struct iommu_map_state *iommu_iomap_create(int, int);
void	iommu_iomap_destroy(struct iommu_map_state *);
int	iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
void	iommu_iomap_clear_pages(struct iommu_map_state *);

static int	iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
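
/*
 * bus_dma tag handed to child busses.  Each method wraps the
 * corresponding method of the parent tag and adds IOMMU handling
 * where needed; _cookie is pointed at our softc in astro_attach().
 */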
const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};

int
astro_match(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* Astro is a U-Turn variant. */
	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
		return 0;

	if (ca->ca_type.iodc_model == 0x58 &&
	    ca->ca_type.iodc_revision >= 0x20)
		return 1;

	return 0;
}

void
astro_attach(device_t parent, device_t self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct astro_softc *sc = device_private(self);
	volatile struct astro_regs *r;
	bus_space_handle_t ioh;
	uint32_t rid, ioc_ctrl;
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	void *p;
	struct vm_page *m;
	struct pglist mlist;
	int iova_bits;
	int pagezero_cookie;

	sc->sc_dv = self;
	sc->sc_dmat = ca->ca_dmatag;
	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
	    0, &ioh)) {
		aprint_error(": can't map IO space\n");
		return;
	}
	p = bus_space_vaddr(ca->ca_iot, ioh);
	sc->sc_regs = r = p;
	rid = le32toh(r->rid);
	aprint_normal(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);

	ioc_ctrl = le32toh(r->ioc_ctrl);
	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
	r->ioc_ctrl = htole32(ioc_ctrl);

	/*
	 * Setup the iommu.
	 */

	/* XXX This gives us 256MB of iova space. */
	iova_bits = 28;

	r->tlb_ibase = htole32(0);
	r->tlb_imask = htole32(0xffffffff << iova_bits);

	/* Page size is 4K. */
	r->tlb_tcnfg = htole32(0);

	/* Flush TLB. */
	r->tlb_pcom = htole32(31);

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(uint64_t);
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist, 1, 0) != 0) {
		aprint_error(": can't allocate PDIR\n");
		return;
	}

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);

	if (va == 0) {
		aprint_error(": can't map PDIR\n");
		return;
	}
	sc->sc_pdir = (uint64_t *)va;

	m = TAILQ_FIRST(&mlist);
	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));

	/* Map the pages. */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(sc->sc_pdir, 0, size);

	/*
	 * The PDC might have set up some devices to do DMA.  It will do
	 * this for the onboard USB controller if a USB keyboard is used
	 * for console input.  In that case, bad things will happen if we
	 * enable iova space.  So reset the PDC devices before we do that.
	 * Don't do this if we're using a serial console though, since it
	 * will stop working if we do.  This is fine since the serial port
	 * doesn't do DMA.
	 */
	pagezero_cookie = hppa_pagezero_map();
	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
		pdcproc_ioreset();
	hppa_pagezero_unmap(pagezero_cookie);

	/* Enable iova space. */
	r->tlb_ibase = htole32(1);

	/*
	 * Now that all the hardware is working, we need to allocate a
	 * dvma map.
	 */
	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
	    "%s_dvma", device_xname(sc->sc_dv));
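
	/*
	 * The DVMA arena below manages the entire IOVA window set up
	 * above (1 << iova_bits bytes) in PAGE_SIZE quanta, matching
	 * the 4K IOMMU page size.
	 */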
	sc->sc_dvmamap = vmem_create(sc->sc_dvmamapname,
	    0,			/* base */
	    (1 << iova_bits),	/* size */
	    PAGE_SIZE,		/* quantum */
	    NULL,		/* allocfn */
	    NULL,		/* freefn */
	    NULL,		/* source */
	    0,			/* qcache_max */
	    VM_SLEEP,
	    IPL_VM);
	KASSERT(sc->sc_dvmamap != NULL);

	sc->sc_dmatag = astro_dmat;
	sc->sc_dmatag._cookie = sc;

	nca = *ca;	/* clone from us */
	nca.ca_dmatag = &sc->sc_dmatag;
	nca.ca_hpabase = IOMOD_IO_IO_LOW(p);
	nca.ca_nmodules = MAXMODBUS;
	pdc_scanbus(self, &nca, astro_callback);
}

static device_t
astro_callback(device_t self, struct confargs *ca)
{

	return config_found(self, ca, mbprint,
	    CFARGS(.submatch = mbsubmatch));
}

int
iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct astro_softc *sc = v;
	bus_dmamap_t map;
	struct iommu_map_state *ims;
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	ims = iommu_iomap_create(atop(round_page(size)), flags);
	if (ims == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (ENOMEM);
	}

	ims->ims_sc = sc;
	map->_dm_cookie = ims;
	*dmamap = map;

	return (0);
}

void
iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
{
	struct astro_softc *sc = v;

	/*
	 * The specification (man page) requires a loaded
	 * map to be unloaded before it is destroyed.
	 */
	if (map->dm_nsegs)
		iommu_dvmamap_unload(sc, map);

	if (map->_dm_cookie)
		iommu_iomap_destroy(map->_dm_cookie);
	map->_dm_cookie = NULL;

	bus_dmamap_destroy(sc->sc_dmat, map);
}
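
/*
 * Program the IOMMU for a loaded map: gather the per-page PA/VA pairs
 * from the map's segments, allocate a contiguous range of DVMA space
 * from the arena, enter each page into the PDIR, and rewrite the
 * segment addresses so the device sees DVMA addresses.
 */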
static int
iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
{
	struct iommu_map_state *ims = map->_dm_cookie;
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	int err, seg;
	paddr_t pa, paend;
	vaddr_t va;
	bus_size_t sgsize;
	bus_size_t align, boundary;
	vmem_addr_t dvmaddr;
	bus_addr_t dva;
	int i;

	/* XXX */
	boundary = map->_dm_boundary;
	align = 0;	/* align to quantum */

	iommu_iomap_clear_pages(ims);

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];

		paend = round_page(ds->ds_addr + ds->ds_len);
		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = iommu_iomap_insert_page(ims, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				iommu_iomap_clear_pages(ims);
			}
		}
	}

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
	err = vmem_xalloc(sc->sc_dvmamap, sgsize,
	    align,		/* align */
	    0,			/* phase */
	    boundary,		/* nocross */
	    VMEM_ADDR_MIN,	/* minaddr */
	    VMEM_ADDR_MAX,	/* maxaddr */
	    vmflags,
	    &dvmaddr);
	if (err)
		return (err);

	ims->ims_dvmastart = dvmaddr;
	ims->ims_dvmasize = sgsize;

	dva = dvmaddr;
	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
		e->ipe_dva = dva;
		iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
		dva += PAGE_SIZE;
	}

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
		ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
	}

	return (0);
}

int
iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    struct proc *p, int flags)
{
	struct astro_softc *sc = v;
	int err;

	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
	if (err)
		return (err);

	return iommu_iomap_load_map(sc, map, flags);
}

int
iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
{
	struct astro_softc *sc = v;
	int err;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
	if (err)
		return (err);

	return iommu_iomap_load_map(sc, map, flags);
}

int
iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct astro_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}

int
iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct astro_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}

void
iommu_dvmamap_unload(void *v, bus_dmamap_t map)
{
	struct astro_softc *sc = v;
	struct iommu_map_state *ims = map->_dm_cookie;
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	int i;

	/* Remove the IOMMU entries. */
	for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
		iommu_remove(sc, e->ipe_dva);

	/* Clear the iomap. */
	iommu_iomap_clear_pages(ims);

	bus_dmamap_unload(sc->sc_dmat, map);

	vmem_xfree(sc->sc_dvmamap, ims->ims_dvmastart, ims->ims_dvmasize);
	ims->ims_dvmastart = 0;
	ims->ims_dvmasize = 0;
}

void
iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}

int
iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
	    segs, nsegs, rsegs, flags));
}

void
iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct astro_softc *sc = v;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
}

void
iommu_dvmamem_unmap(void *v, void *kva, size_t size)
{
	struct astro_softc *sc = v;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	struct astro_softc *sc = v;

	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
}

/*
 * Utility function used by splay tree to order page entries by pa.
 */
static inline int
iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
{
	return ((a->ipe_pa > b->ipe_pa) ? 1 :
		(a->ipe_pa < b->ipe_pa) ? -1 : 0);
}

SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);

SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);

/*
 * Create a new iomap.
 */
struct iommu_map_state *
iommu_iomap_create(int n, int flags)
{
	struct iommu_map_state *ims;

	/* Safety for heavily fragmented data, such as mbufs */
	n += 4;
	if (n < 16)
		n = 16;

	const size_t sz =
	    sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]);

	ims = kmem_zalloc(sz, (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
	if (ims == NULL)
		return (NULL);

	/* Initialize the map. */
	ims->ims_map.ipm_maxpage = n;
	SPLAY_INIT(&ims->ims_map.ipm_tree);

	return (ims);
}

/*
 * Destroy an iomap.
 */
void
iommu_iomap_destroy(struct iommu_map_state *ims)
{
#ifdef DIAGNOSTIC
	if (ims->ims_map.ipm_pagecnt > 0)
		printf("iommu_iomap_destroy: %d page entries in use\n",
		    ims->ims_map.ipm_pagecnt);
#endif
	const int n = ims->ims_map.ipm_maxpage;
	const size_t sz =
	    sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]);

	kmem_free(ims, sz);
}

/*
 * Insert a pa entry in the iomap.
 */
int
iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;

	if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
		struct iommu_page_entry ipe;

		ipe.ipe_pa = pa;
		if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
			return (0);

		return (ENOMEM);
	}

	e = &ipm->ipm_map[ipm->ipm_pagecnt];

	e->ipe_pa = pa;
	e->ipe_va = va;
	e->ipe_dva = 0;

	e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);

	/* Duplicates are okay, but only count them once. */
	if (e)
		return (0);

	++ipm->ipm_pagecnt;

	return (0);
}

/*
 * Translate a physical address (pa) into a DVMA address.
 */
bus_addr_t
iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
{
	struct iommu_page_map *ipm = &ims->ims_map;
	struct iommu_page_entry *e;
	struct iommu_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	pe.ipe_pa = trunc_page(pa);

	e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);

	if (e == NULL) {
		panic("couldn't find pa %lx\n", pa);
		return 0;
	}

	return (e->ipe_dva | offset);
}

/*
 * Clear the iomap table and tree.
 */
void
iommu_iomap_clear_pages(struct iommu_map_state *ims)
{
	ims->ims_map.ipm_pagecnt = 0;
	SPLAY_INIT(&ims->ims_map.ipm_tree);
}
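
/*
 * IOMMU page directory entries: each 64-bit IOTTE holds the valid bit,
 * the physical page frame, and a coherence index computed from the
 * kernel virtual address with lci(); see the IOTTE_* bits above.
 */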
/*
 * Add an entry to the IOMMU table.
 */
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
    int flags)
{
	volatile uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;
	uint32_t ci;

#ifdef ASTRODEBUG
	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
#endif

#ifdef DIAGNOSTIC
	tte = le64toh(*tte_ptr);

	if (tte & IOTTE_V) {
		printf("Overwriting valid tte entry (dva %lx pa %lx "
		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
		/* vmem_print(sc->sc_dvmamap); XXX */
		panic("IOMMU overwrite");
	}
#endif

	ci = lci(HPPA_SID_KERNEL, va);

	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
	tte |= IOTTE_V;

	*tte_ptr = htole64(tte);
	fdcache(HPPA_SID_KERNEL, (vaddr_t)tte_ptr, sizeof(*tte_ptr));
}

/*
 * Remove an entry from the IOMMU table.
 */
void
iommu_remove(struct astro_softc *sc, bus_addr_t dva)
{
	volatile struct astro_regs *r = sc->sc_regs;
	uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;

#ifdef DIAGNOSTIC
	if (dva != trunc_page(dva)) {
		printf("iommu_remove: unaligned dva: %lx\n", dva);
		dva = trunc_page(dva);
	}
#endif

	tte = le64toh(*tte_ptr);

#ifdef DIAGNOSTIC
	if ((tte & IOTTE_V) == 0) {
		printf("Removing invalid tte entry (dva %lx &tte %p "
		    "tte %llx)\n", dva, tte_ptr, tte);
		/* vmem_print(sc->sc_dvmamap); XXX */
		panic("IOMMU remove overwrite");
	}
#endif

	*tte_ptr = htole64(tte & ~IOTTE_V);

	/* Flush IOMMU. */
	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
}