/src/sys/arch/amiga/dev/
zbusvar.h
    48 extern vaddr_t ZBUSADDR; /* kva of Zorro bus I/O pages */
    58  * maps a ztwo and/or A3000 builtin address into the mapped kva address
    72 #define isztwokva(kva) \
    73 ((u_int)(kva) >= ZTWOROMADDR && \
    74 (u_int)(kva) < \
    77 #define isztwomem(kva) \
    78 (ZTWOMEMADDR && (u_int)(kva) >= ZTWOMEMADDR && \
    79 (u_int)(kva) < (ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE))
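The amiga macros above are plain bounds checks of a kernel virtual address against the statically mapped Zorro II windows (the upper bound of isztwokva() is cut off in this index). A small, hypothetical usage sketch, assuming the amiga zbusvar.h definitions are in scope; is_zorro2_mapping() is an illustrative name, not something in the tree:

#include <sys/param.h>
#include <sys/types.h>
#include <amiga/dev/zbusvar.h>    /* assumed include path */

/* Illustrative helper: does this kva fall inside a Zorro II window? */
static bool
is_zorro2_mapping(vaddr_t kva)
{
    /* Both macros are simple range tests against pre-mapped kva windows. */
    return isztwokva(kva) || isztwomem(kva);
}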

/src/sys/arch/sun3/sun3x/
dvma.c
    137 dvma_kvtopa(void *kva, int bustype)
    141 addr = (u_long)kva;
    167 vaddr_t kva; local in function:dvma_mapin
    174 kva = (vaddr_t)kmem_va;
    175 KASSERT(kva >= VM_MIN_KERNEL_ADDRESS);
    180 off = kva & PGOFSET;
    181 kva -= off; /* Truncate starting address to nearest page. */
    210 for (; npf--; kva += PAGE_SIZE, tva += PAGE_SIZE) {
    217 rv = pmap_extract(pmap_kernel(), kva, &pa);
    233 * Remove double map of `va' in DVMA space at `kva'
    242 u_long kva; local in function:dvma_mapout
    302 vaddr_t kva; local in function:_bus_dmamap_load
    [all...]

/src/sys/arch/m68k/m68k/
vm_machdep.c
    153 vaddr_t kva; /* Kernel VA (new to) */ local in function:vmapbuf
    163 kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    164 bp->b_data = (void *)(kva + off);
    172 pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
    175 pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
    178 kva += PAGE_SIZE;
    192 vaddr_t kva; local in function:vunmapbuf
    198 kva = m68k_trunc_page(bp->b_data);
    199 off = (vaddr_t)bp->b_data - kva;
    203 pmap_remove(vm_map_pmap(phys_map), kva, kva + len)
    [all...]
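All the vmapbuf()/vunmapbuf() pairs indexed here (m68k, hppa, sparc, sparc64, mips, riscv below) share one idea: reserve kernel VA only, then alias each kernel page onto the physical page already backing the wired user buffer. A condensed sketch of that pattern, assuming the standard NetBSD UVM/pmap interfaces visible in the hits; map_user_pages()/unmap_user_pages() are illustrative names, and some ports use pmap_enter() on phys_map's pmap where this sketch uses pmap_kenter_pa():

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

/* vmapbuf() shape: double-map a wired user buffer into kernel VA. */
static vaddr_t
map_user_pages(struct vm_map *map, pmap_t upmap, vaddr_t uva, vsize_t len)
{
    const vaddr_t off = uva & PAGE_MASK;
    vaddr_t kva, va;
    paddr_t pa;

    uva = trunc_page(uva);
    len = round_page(off + len);

    /* VA only: no pages are allocated behind this range. */
    kva = uvm_km_alloc(map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);

    for (va = 0; va < len; va += PAGE_SIZE) {
        if (!pmap_extract(upmap, uva + va, &pa))
            panic("map_user_pages: user page not resident");
        /* Alias the kernel page onto the user buffer's physical page. */
        pmap_kenter_pa(kva + va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
    }
    pmap_update(pmap_kernel());

    return kva + off;        /* becomes bp->b_data */
}

/* vunmapbuf() shape: tear down the alias and hand the VA back. */
static void
unmap_user_pages(struct vm_map *map, vaddr_t data, vsize_t len)
{
    const vaddr_t kva = trunc_page(data);

    len = round_page((data - kva) + len);
    pmap_kremove(kva, len);
    pmap_update(pmap_kernel());
    uvm_km_free(map, kva, len, UVM_KMF_VAONLY);
}

Passing UVM_KMF_VAONLY to uvm_km_free() matches the allocation: only the address range is released, the user's pages stay where they are.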

/src/sys/arch/mips/mips/
vm_machdep.c
    294 vaddr_t kva; /* Kernel VA (new to) */ local in function:vmapbuf
    303 kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
    305 KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
    307 bp->b_data = (void *)(kva + off);
    313 pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
    316 kva += PAGE_SIZE;
    330 vaddr_t kva; local in function:vunmapbuf
    335 kva = mips_trunc_page(bp->b_data);
    336 len = mips_round_page((vaddr_t)bp->b_data - kva + len);
    337 pmap_kremove(kva, len)
    [all...]
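The mips (and riscv) variant additionally picks a kva with the same page colour as the user VA so the two aliases never fight in a virtually indexed cache. A short sketch of that reservation; UVM_KMF_COLORMATCH is an assumption, since the index entry truncates the flag list:

#include <sys/param.h>
#include <uvm/uvm_extern.h>

/*
 * Colour-matched KVA reservation (sketch): with UVM_KMF_COLORMATCH the
 * alignment argument is interpreted as the desired starting colour.
 */
static vaddr_t
alloc_colormatched_kva(struct vm_map *map, vaddr_t uva, vsize_t len)
{
    vaddr_t kva = uvm_km_alloc(map, len, atop(uva) & uvmexp.colormask,
        UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
    KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
    return kva;
}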

kgdb_machdep.c
    96 * Is kva a valid address to access? This is used by KGDB.
    99 kvacc(vaddr_t kva)
    101 if (pmap_md_direct_mapped_vaddr_p(kva))
    104 if (kva < VM_MIN_KERNEL_ADDRESS || kva >= VM_MAX_KERNEL_ADDRESS)
    107 const pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), kva);
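kvacc() answers one question for KGDB: may this kernel VA be dereferenced? The indexed checks reconstruct to roughly the sketch below; the final test via pte_valid_p() is an assumption, since the entry stops at the PTE lookup:

#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>    /* assumed: provides pmap_pte_lookup() etc. */

/* Sketch of the mips kvacc() shape indexed above. */
static int
kvacc_sketch(vaddr_t kva)
{
    /* Direct-mapped addresses (KSEG0/XKPHYS style) are always accessible. */
    if (pmap_md_direct_mapped_vaddr_p(kva))
        return 1;

    /* Otherwise the address must sit inside the mapped kernel window... */
    if (kva < VM_MIN_KERNEL_ADDRESS || kva >= VM_MAX_KERNEL_ADDRESS)
        return 0;

    /* ...and have a valid PTE behind it. */
    const pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), kva);
    return ptep != NULL && pte_valid_p(*ptep);
}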

/src/sys/arch/hp300/hp300/
bus_space.c
    54 vaddr_t kva; local in function:bus_space_map
    78 &kva);
    85 physaccess((void *)kva, (void *)bpa, size, PG_RW|PG_CI);
    90 *bshp = (bus_space_handle_t)(kva + offset);
    119 vaddr_t kva; local in function:bus_space_unmap
    134 kva = m68k_trunc_page(bsh);
    147 physunaccess((void *)kva, size);
    152 if (extent_free(extio_ex, kva, size,
    154 printf("%s: kva 0x%lx size 0x%lx: "

/src/sys/uvm/
uvm_io.c
    58 vaddr_t baseva, endva, pageoffset, kva; local in function:uvm_io
    104 error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
    126 error = uiomove((void *) (kva + pageoffset), sz, uio);
    135 uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries,
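uvm_io() moves data between a uio and another map's address space by borrowing the target pages into kernel_map for the duration of the copy. A simplified sketch of one chunk; the extract flags are the ones commonly used for this, and uvm_unmap() stands in for the uvm_unmap_remove()/uvm_unmap_detach() sequence the real loop performs:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <uvm/uvm.h>

/* One chunk of the uvm_io() loop (sketch: no chunking, clipping or locking). */
static int
uvm_io_chunk_sketch(struct vm_map *map, vaddr_t baseva, vaddr_t pageoffset,
    vsize_t chunksz, size_t sz, struct uio *uio)
{
    vaddr_t kva;
    int error;

    /* Borrow the source pages into kernel_map at some free kva. */
    error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
        UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
    if (error)
        return error;

    /* Copy through the temporary kernel mapping. */
    error = uiomove((void *)(kva + pageoffset), sz, uio);

    /* Drop the temporary mapping and give the kva back. */
    uvm_unmap(kernel_map, kva, kva + chunksz);
    return error;
}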

uvm_pager.c
    77 * the pager map: provides KVA for I/O
    161 * this is an abuse of pmap_direct_process(), since the kva is being grabbed
    166 uvm_pagermapdirect(void *kva, size_t sz, void *cookie)
    170 *(vaddr_t *)cookie = (vaddr_t)kva;
    176 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
    186 vaddr_t kva; local in function:uvm_pagermapin
    205 PAGE_SIZE, uvm_pagermapdirect, &kva);
    207 UVMHIST_LOG(maphist, "<- done, direct (KVA=%#jx)", kva,0,0,0);
    208 return kva;
    [all...]
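uvm_pagermapin() is the other big producer of temporary kva in this listing: it maps an array of vm_page pointers into pager_map (or, as the uvm_pagermapdirect() hits show, hands back a direct-map address when it can) so the caller can do I/O on those pages. A minimal usage sketch around a pagein; start_read_io() is an illustrative placeholder and the flag choice follows genfs_getpages_read() further down:

#include <sys/param.h>
#include <sys/errno.h>
#include <uvm/uvm.h>

/* Illustrative placeholder for the actual device I/O. */
extern void start_read_io(void *buf, size_t len);

static int
pager_read_sketch(struct vm_page **pgs, int npages)
{
    vaddr_t kva;

    kva = uvm_pagermapin(pgs, npages,
        UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    if (kva == 0)
        return EAGAIN;        /* pager_map exhausted */

    start_read_io((void *)kva, (size_t)npages << PAGE_SHIFT);

    /* Release the pager_map window as soon as the I/O is done. */
    uvm_pagermapout(kva, npages);
    return 0;
}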

uvm_km.c
    128 * Main arena controlling the kernel KVA used by other arenas.
    368 kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
    448 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
    606 * => we return KVA of memory allocated
    612 vaddr_t kva, loopva; local in function:uvm_km_alloc
    632 kva = vm_map_min(map); /* hint */
    643 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
    657 UVMHIST_LOG(maphist,"<- done valloc (kva=%#jx)", kva,0,0,0);
    658 return(kva);
    [all...]
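uvm_km_alloc() is where most of the kva in this listing comes from, and the flag decides what backs it: UVM_KMF_WIRED returns mapped, wired pages, while UVM_KMF_VAONLY returns bare address space for callers (the vmapbuf() variants, bus_dmamem_map(), the pt-page allocator further down) that enter their own mappings. A minimal usage sketch of the wired case; the matching flag must be passed back to uvm_km_free():

#include <sys/param.h>
#include <uvm/uvm_extern.h>

/* Wired, zeroed kernel memory: pages are allocated and mapped for the caller. */
static void *
km_wired_alloc_sketch(vsize_t len)
{
    vaddr_t kva = uvm_km_alloc(kernel_map, round_page(len), 0,
        UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
    return (void *)kva;
}

static void
km_wired_free_sketch(void *p, vsize_t len)
{
    /* The free flag has to match the allocation flag. */
    uvm_km_free(kernel_map, (vaddr_t)p, round_page(len), UVM_KMF_WIRED);
}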

/src/sys/arch/aarch64/aarch64/
efi_machdep.c
    76 /* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */
    77 bool kva = (va & AARCH64_ADDRTOP_TAG) != 0; local in function:cpu_efirt_map_range
    78 if (kva) {
    93 if (kva) {
    102 if (kva)

/src/sys/arch/hppa/hppa/
vm_machdep.c
    208 vaddr_t uva, kva; local in function:vmapbuf
    224 kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    225 bp->b_data = (void *)(kva + off);
    230 pmap_enter(kpmap, kva, pa,
    233 kva += PAGE_SIZE;
    247 vaddr_t kva; local in function:vunmapbuf
    254 kva = trunc_page((vaddr_t)bp->b_data);
    255 off = (vaddr_t)bp->b_data - kva;
    258 pmap_remove(pmap, kva, kva + len)
    [all...]

/src/sys/arch/sparc/sparc/
vm_machdep.c
    85 vaddr_t kva; /* Kernel VA (new to) */ local in function:vmapbuf
    100 kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    101 bp->b_data = (void *)(kva + off);
    117 pmap_enter(kpmap, kva, pa,
    120 kva += PAGE_SIZE;
    134 vaddr_t kva; local in function:vunmapbuf
    140 kva = trunc_page((vaddr_t)bp->b_data);
    141 off = (vaddr_t)bp->b_data - kva;
    143 pmap_remove(vm_map_pmap(kernel_map), kva, kva + len)
    [all...]

/src/sys/arch/riscv/riscv/
vm_machdep.c
    215 vaddr_t kva; /* Kernel VA (new to) */ local in function:vmapbuf
    224 kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
    226 KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
    228 bp->b_data = (void *)(kva + off);
    234 pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
    237 kva += PAGE_SIZE;
    251 vaddr_t kva; local in function:vunmapbuf
    255 kva = trunc_page((vaddr_t)bp->b_data);
    256 len = round_page((vaddr_t)bp->b_data - kva + len);
    257 pmap_kremove(kva, len)
    [all...]

/src/sys/arch/sh3/sh3/
kgdb_machdep.c
    89 * Is kva a valid address to access? This is used by KGDB.
    92 kvacc(vaddr_t kva)
    96 if (kva < SH3_P1SEG_BASE)
    99 if (kva < SH3_P2SEG_BASE)
    102 if (kva >= VM_MAX_KERNEL_ADDRESS)
    105 /* check kva is kernel virtual. */
    106 if ((kva < VM_MIN_KERNEL_ADDRESS) ||
    107 (kva >= VM_MAX_KERNEL_ADDRESS))
    110 /* check page which related kva is valid. */
    111 pte = __pmap_kpte_lookup(kva);
    [all...]

/src/sys/arch/sparc64/sparc64/
vm_machdep.c
    84 vaddr_t kva; /* Kernel VA (new to) */ local in function:vmapbuf
    95 kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    96 bp->b_data = (void *)(kva + off);
    103 pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
    106 kva += PAGE_SIZE;
    120 vaddr_t kva; local in function:vunmapbuf
    126 kva = trunc_page((vaddr_t)bp->b_data);
    127 off = (vaddr_t)bp->b_data - kva;
    129 pmap_kremove(kva, len);
    130 uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY)
    [all...]

/src/sys/arch/sun3/sun3/
dvma.c
    149 dvma_kvtopa(void *kva, int bustype)
    153 addr = (u_long)kva;
    177 dvma_mapin(void *kva, int len, int canwait /* ignored */)
    186 seg_kva = (vaddr_t)kva;
    289 vaddr_t kva; local in function:_bus_dmamap_load
    305 kva = (vaddr_t)buf;
    306 off = kva & PGOFSET;
    341 rv = pmap_extract(pmap, kva, &pa);
    348 kva += PAGE_SIZE;

/src/sys/dev/bus_dma/
bus_dmamem_common.c
    185 void *kva,
    189 KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);
    193 pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
    195 uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
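The common unmap helper is the mirror image of the mappings above: it only tears down the kernel mapping and returns the VA; the physical segments are released separately by bus_dmamem_free(). A hedged sketch of that teardown (the real helper may use pmap_kremove(), or skip the pmap call entirely for direct-mapped addresses):

#include <sys/param.h>
#include <uvm/uvm_extern.h>

/* Sketch of a bus_dmamem_unmap()-style teardown of a kva window. */
static void
dmamem_unmap_sketch(void *kva, size_t size)
{
    KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);
    size = round_page(size);

    /* Drop the kernel mappings covering the window... */
    pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
    pmap_update(pmap_kernel());

    /* ...and release the address range itself (VA only). */
    uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}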

/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_ih.c
    72 void *kva; local in function:amdgpu_ih_ring_init
    87 &kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT);
    93 r = -bus_dmamap_load(adev->ddev->dmat, ih->ring_map, kva, size,
    96 fail3: __unused bus_dmamem_unmap(adev->ddev->dmat, kva, size);
    99 ih->ring = kva;
    164 void *kva = __UNVOLATILE(ih->ring); local in function:amdgpu_ih_ring_fini
    166 bus_dmamem_unmap(adev->ddev->dmat, kva, size);
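The interrupt ring above is built with the standard NetBSD bus_dma bracket: allocate segments, map them at a kva, create a map, load it, and unwind in reverse order on failure (the fail3: hit is one step of that unwind). A condensed, hedged sketch of the whole bracket with illustrative parameter names:

#include <sys/param.h>
#include <sys/bus.h>

/* Allocate, map and load one DMA-safe ring buffer (sketch). */
static int
dma_ring_setup_sketch(bus_dma_tag_t tag, bus_size_t size,
    bus_dma_segment_t *seg, bus_dmamap_t *mapp, void **kvap)
{
    int nsegs, error;

    error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, seg, 1, &nsegs,
        BUS_DMA_WAITOK);
    if (error)
        return error;
    error = bus_dmamem_map(tag, seg, nsegs, size, kvap,
        BUS_DMA_WAITOK | BUS_DMA_COHERENT);
    if (error)
        goto free;
    error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_WAITOK, mapp);
    if (error)
        goto unmap;
    error = bus_dmamap_load(tag, *mapp, *kvap, size, NULL, BUS_DMA_WAITOK);
    if (error)
        goto destroy;
    return 0;

destroy:
    bus_dmamap_destroy(tag, *mapp);
unmap:
    bus_dmamem_unmap(tag, *kvap, size);
free:
    bus_dmamem_free(tag, seg, nsegs);
    return error;
}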

/src/sys/uvm/pmap/
pmap_segtab.c
    343 ustb->seg_seg[idx] = kstb->seg_seg[idx]; // copy KVA of PTP
    353 pmap_page_attach(pmap_t pmap, vaddr_t kva, struct vm_page *pg,
    357 UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx pg %#jx list %#jx",
    358 (uintptr_t)pmap, (uintptr_t)kva, (uintptr_t)pg, (uintptr_t)pglist);
    364 bool ok __diagused = pmap_extract(pmap_kernel(), kva, &pa);
    371 UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
    372 (uintptr_t)kva, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)pglist);
    392 UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx list %#jx",
    402 UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
    421 pmap_segtab_pagefree(pmap_t pmap, struct pglist *list, vaddr_t kva, size_t size
    491 vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE, local in function:pmap_ptpage_alloc
    516 const vaddr_t kva = (vaddr_t)ppg; local in function:pmap_ptpage_free
    806 const vaddr_t kva = (vaddr_t)ptb; local in function:pmap_pdetab_release
    865 const vaddr_t kva = (vaddr_t)stb; local in function:pmap_segtab_release
    1058 const vaddr_t kva = (vaddr_t)ptb; local in function:pmap_pdetab_reserve
    1101 const vaddr_t kva = (vaddr_t)stb; local in function:pmap_pdetab_reserve
    [all...]

/src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_phys.c
    69 bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
    74 KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
    77 sva = (vaddr_t)kva;
    89 pmap_kremove((vaddr_t)kva, size);
    91 uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
    157 obj->mm.u.phys.kva = vaddr;
    246 obj->mm.u.phys.kva = NULL;
    263 void *vaddr = obj->mm.u.phys.kva;
    [all...]

/src/sys/dev/tc/
pxg.c
    187 char *kva; local in function:pxg_init
    189 kva = (void *)si->si_slotbase;
    191 si->si_vdac = (uint32_t *)(kva + PXG_VDAC_OFFSET);
    192 si->si_vdac_reset = (uint32_t *)(kva + PXG_VDAC_RESET_OFFSET);
    193 si->si_stic = (volatile struct stic_regs *)(kva + PXG_STIC_OFFSET);
    194 si->si_stamp = (uint32_t *)(kva + PXG_STAMP_OFFSET);
    195 si->si_buf = (uint32_t *)(kva + PXG_SRAM_OFFSET);
    206 slot = (volatile uint32_t *)kva;
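px and pxg derive their register pointers straight from the mapped TURBOchannel slot: slot base kva plus a fixed offset per register block. A hypothetical sketch of the pattern; MY_DEV_*_OFFSET and the struct are illustrative, not the real PX/PXG definitions:

#include <sys/types.h>

#define MY_DEV_VDAC_OFFSET    0x200000    /* illustrative values only */
#define MY_DEV_SRAM_OFFSET    0x280000

struct my_dev_regs {
    volatile uint32_t *r_vdac;
    volatile uint32_t *r_sram;
};

/* Carve register pointers out of the mapped slot base address. */
static void
my_dev_map_regs(struct my_dev_regs *r, char *slot_kva)
{
    r->r_vdac = (volatile uint32_t *)(slot_kva + MY_DEV_VDAC_OFFSET);
    r->r_sram = (volatile uint32_t *)(slot_kva + MY_DEV_SRAM_OFFSET);
}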

bba.c
    75 void *kva; member in struct:bba_mem
    272 void *kva; local in function:bba_allocm
    289 &kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT)) {
    298 m->kva = kva;
    302 return (void *)kva;
    306 bus_dmamem_unmap(sc->sc_dmat, kva, size);
    319 void *kva; local in function:bba_freem
    322 kva = (void *)addr;
    323 for (mp = &sc->sc_mem_head; *mp && (*mp)->kva != kva
    [all...]
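bba_freem() here and _isa_free() below rely on the same bookkeeping: the allocation path records the returned kva in a singly linked list together with the DMA resources, and the free path walks that list keyed on the kva alone. A sketch with illustrative structure and field names:

#include <sys/param.h>

struct mem_rec {
    struct mem_rec *next;
    void *kva;
    size_t size;
    /* ...whatever DMA segments/maps were saved at allocation time... */
};

static struct mem_rec *mem_head;

/* Find the record for a kva handed back by the alloc path and unlink it. */
static struct mem_rec *
mem_lookup_and_unlink(void *kva)
{
    struct mem_rec **mp, *m;

    for (mp = &mem_head; (m = *mp) != NULL; mp = &m->next) {
        if (m->kva == kva) {
            *mp = m->next;    /* unlink; caller releases DMA resources */
            return m;
        }
    }
    return NULL;
}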

px.c
    184 char *kva, *bva; local in function:px_init
    187 kva = (void *)si->si_slotbase;
    212 si->si_vdac = (uint32_t *)(kva + PX_VDAC_OFFSET);
    213 si->si_vdac_reset = (uint32_t *)(kva + PX_VDAC_RESET_OFFSET);
    214 si->si_stic = (volatile struct stic_regs *)(kva + PX_STIC_OFFSET);
    215 si->si_stamp = (uint32_t *)(kva + PX_STAMP_OFFSET);

/src/sys/dev/isa/
isadma.c
    683 _isa_dmamem_unmap(struct isa_dma_state *ids, int chan, void *kva, size_t size)
    691 bus_dmamem_unmap(ids->ids_dmat, kva, size);
    729 void *kva; local in function:_isa_malloc
    737 if (_isa_dmamem_map(ids, chan, addr, size, &kva, bflags)) {
    743 _isa_dmamem_unmap(ids, chan, kva, size);
    751 m->kva = kva;
    754 return (void *)kva;
    761 void *kva = (void *)addr; local in function:_isa_free
    763 for(mp = &isa_mem_head; *mp && (*mp)->kva != kva
    [all...]

/src/sys/miscfs/genfs/
genfs_io.c
    587 vaddr_t kva; local in function:genfs_getpages_read
    604 kva = uvm_pagermapin(pgs, npages,
    606 if (kva == 0)
    611 mbp->b_data = (void *)kva;
    638 memset((void *)(kva + tailstart), 0, len);
    640 (uintptr_t)kva, tailstart, len, 0);
    732 memset((char *)kva + (offset - startoffset), 0,
    783 /* Remove the mapping (make KVA available as soon as possible) */
    784 uvm_pagermapout(kva, npages);
    1377 vaddr_t kva; local in function:genfs_gop_write
    1409 vaddr_t kva; local in function:genfs_gop_write_rwmap
    1613 vaddr_t kva; local in function:genfs_compat_getpages
    1696 vaddr_t kva; local in function:genfs_compat_gop_write
    1860 vaddr_t kva, puva; local in function:genfs_do_directio
    [all...]
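In genfs_getpages_read() the kva does double duty: it gives the buf its b_data to read into, and it lets the code clear whatever part of the mapped window the read will not fill (the memset() hits at lines 638 and 732), so stale data never becomes visible through the page cache. A minimal sketch of that tail-zeroing step with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>

/* Clear the slack between what was (or will be) read and the window end. */
static void
zero_read_tail_sketch(vaddr_t kva, size_t bytes_read, size_t window_len)
{
    if (bytes_read < window_len)
        memset((char *)kva + bytes_read, 0, window_len - bytes_read);
}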