/src/lib/libkvm/

kvm_i386pae.c
    72  pd_entry_t pde;    local
    80   * Find and read the PDE. Ignore the L3, as it is only a per-CPU
    83   * to increment pdppaddr to compute the address of the PDE.
    86  pde_pa = (cpu_kh->pdppaddr & PTE_FRAME) + (pl2_pi(va) * sizeof(pde));
    87  if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
    88      _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
    89  _kvm_syserr(kd, 0, "could not read PDE");
    96  if ((pde & PTE_P) == 0) {
    97  _kvm_err(kd, 0, "invalid translation (invalid PDE)");
   [all...]

kvm_x86_64.c
    97  pd_entry_t pde;    local
   116  if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
   117      _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
   121  if ((pde & PTE_P) == 0) {
   122  _kvm_err(kd, 0, "invalid translation (invalid level 4 PDE)");
   129  pde_pa = (pde & PTE_FRAME) + (pl3_pi(va) * sizeof(pd_entry_t));
   130  if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
   131      _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
   [all...]

kvm_i386.c
   140  pd_entry_t pde;    local
   151  pde_pa = (cpu_kh->pdppaddr & PTE_FRAME) + (pl2_pi(va) * sizeof(pde));
   152  if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
   153      _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
   154  _kvm_syserr(kd, 0, "could not read PDE");
   161  if ((pde & PTE_P) == 0) {
   162  _kvm_err(kd, 0, "invalid translation (invalid PDE)");
   165  if ((pde & PTE_PS) != 0) {
   170  *pa = (pde & PTE_LGFRAME) + page_off
   [all...]

kvm_hppa.c
   105  pd_entry_t pde;
   125  if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
   126      _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
   127  _kvm_syserr(kd, 0, "could not read PDE");
   134  if ((pde & PG_V) == 0) {
   135  _kvm_err(kd, 0, "invalid translation (invalid PDE)");
   138  pte_pa = (pde & PG_FRAME) + (ptei(va) * sizeof(pt_entry_t));
|
kvm_arm.c
    79  pd_entry_t pde;    local
   110  if (_kvm_pread(kd, kd->pmfd, (void *) &pde, sizeof(pd_entry_t),
   118  switch (pde & L1_TYPE_MASK) {
   120  *pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
   123  pte_pa = (pde & L1_C_ADDR_MASK)
   127  pte_pa = (pde & L1_S_ADDR_MASK)
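
All of the libkvm backends above translate a virtual address against a crash dump the same way: compute the physical address of the PDE from the directory base plus the directory index, read it out of the dump with _kvm_pread(), fail the translation if the present/valid bit (PTE_P, PG_V) is clear, and otherwise mask the entry down to its frame bits to find the next level. The sketch below shows that pattern for an invented two-level, 4 KiB-page layout; the masks, the flat dump[] buffer, and the read_phys() helper are assumptions made for the example, not the libkvm API.

/* Minimal sketch: translate a VA through a two-level table read from a
 * flat "dump" buffer.  Masks and layout are illustrative, not NetBSD's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENT_VALID  0x1ULL                     /* like PTE_P / PG_V */
#define ENT_FRAME  0x000ffffffffff000ULL      /* physical frame mask */
#define PGSHIFT    12
#define PGOFSET    0xfffULL
#define IDX_MASK   0x1ffULL                   /* 512 entries per level */

static uint8_t dump[4 * 4096];                /* fake "physical memory" */

static uint64_t
read_phys(uint64_t pa)                        /* stand-in for _kvm_pread */
{
	uint64_t v;
	memcpy(&v, &dump[pa], sizeof(v));
	return v;
}

static bool
va_to_pa(uint64_t pd_base_pa, uint64_t va, uint64_t *pa)
{
	/* Find and read the PDE; a clear valid bit ends the walk, exactly
	 * where the libkvm backends report "invalid PDE". */
	uint64_t pde_pa = (pd_base_pa & ENT_FRAME) +
	    (((va >> (PGSHIFT + 9)) & IDX_MASK) * sizeof(uint64_t));
	uint64_t pde = read_phys(pde_pa);
	if ((pde & ENT_VALID) == 0)
		return false;

	/* The PDE's frame bits give the page table; index it with the
	 * low-order VA bits to get the PTE. */
	uint64_t pte_pa = (pde & ENT_FRAME) +
	    (((va >> PGSHIFT) & IDX_MASK) * sizeof(uint64_t));
	uint64_t pte = read_phys(pte_pa);
	if ((pte & ENT_VALID) == 0)
		return false;

	*pa = (pte & ENT_FRAME) | (va & PGOFSET);
	return true;
}

int
main(void)
{
	/* Directory at PA 0x0000, page table at PA 0x1000, data page at
	 * PA 0x2000; map VA 0x0 through them. */
	uint64_t pde = 0x1000 | ENT_VALID, pte = 0x2000 | ENT_VALID, pa;
	memcpy(&dump[0x0000], &pde, sizeof(pde));
	memcpy(&dump[0x1000], &pte, sizeof(pte));

	if (va_to_pa(0, 0x123, &pa))
		printf("pa=%#llx\n", (unsigned long long)pa);  /* 0x2123 */
	else
		printf("invalid translation\n");
	return 0;
}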
|
/src/sys/arch/aarch64/include/

pmap.h
    91  #define lxpde_pa(pde) ((paddr_t)((pde) & LX_TBL_PA))
    92  #define lxpde_valid(pde) (((pde) & LX_VALID) == LX_VALID)
    93  #define l0pde_pa(pde) lxpde_pa(pde)
    95  #define l0pde_valid(pde) lxpde_valid(pde)
    98  #define l1pde_pa(pde) lxpde_pa(pde)
   [all...]

asan.h
   121  pd_entry_t pde;    local
   132  pde = l0[idx];
   133  if (!l0pde_valid(pde)) {
   137  pa = l0pde_pa(pde);
   146  pde = l1[idx];
   147  if (!l1pde_valid(pde)) {
   151  pa = l1pde_pa(pde);
   160  pde = l2[idx];
   161  if (!l2pde_valid(pde)) {
   173  } else if (l2pde_is_block(pde)) {
   [all...]

pmap_machdep.h
   355  pte_pde_valid_p(pd_entry_t pde)
   358  return lxpde_valid(pde);
   363  pte_pde_to_paddr(pd_entry_t pde)
   366  return lxpde_pa(pde);
|
/src/sys/arch/sun3/sun3x/

iommu.h
   129  #define IOMMU_PA_PDE(pde) ((pde).addr.raw & IOMMU_PDE_PA)
   130  #define IOMMU_VALID_DT(pde) ((pde).addr.raw & IOMMU_PDE_DT) /* X1 */
|
/src/tests/modules/x86_pte_tester/

x86_pte_tester.c
    87  scan_l1(paddr_t pa, walk_type (fn)(pd_entry_t pde, size_t slot, int lvl))
   107  scan_l2(paddr_t pa, walk_type (fn)(pd_entry_t pde, size_t slot, int lvl))
   134  scan_l3(paddr_t pa, walk_type (fn)(pd_entry_t pde, size_t slot, int lvl))
   161  scan_l4(paddr_t pa, walk_type (fn)(pd_entry_t pde, size_t slot, int lvl))
   188  scan_tree(paddr_t pa, walk_type (fn)(pd_entry_t pde, size_t slot, int lvl))
   199  count_krwx(pd_entry_t pde, size_t slot, int lvl)
   204  if (is_flag(pde, PTE_NX) || !is_flag(pde, PTE_W)) {
   207  if (lvl != 1 && !is_flag(pde, PTE_PS)) {
   228  count_kshstk(pd_entry_t pde, size_t slot, int lvl
   [all...]
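
x86_pte_tester drives its per-level scan_l*() routines with a caller-supplied callback of type walk_type (fn)(pd_entry_t pde, size_t slot, int lvl), so the same traversal can count writable+executable kernel mappings (count_krwx) or shadow-stack mappings (count_kshstk). Below is a minimal, self-contained imitation of that callback-driven scan; the entry encoding, the tiny 8-entry tables, and the WALK_STOP/WALK_DOWN result type are invented for the illustration.

/* Sketch of a callback-driven page-table scan: a generic traversal hands
 * every valid entry to a policy function that decides whether to descend. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ENT_VALID 0x1u
#define ENT_LEAF  0x2u          /* like PTE_PS: nothing below this entry */
#define NENT      8             /* tiny tables keep the demo small */

typedef uint32_t entry_t;
typedef enum { WALK_STOP, WALK_DOWN } walk_type;

struct table { entry_t e[NENT]; struct table *child[NENT]; };

/* Visit every entry of the tree, asking fn() whether to recurse. */
static void
scan(const struct table *t, int lvl,
    walk_type (*fn)(entry_t e, size_t slot, int lvl))
{
	for (size_t slot = 0; slot < NENT; slot++) {
		if ((t->e[slot] & ENT_VALID) == 0)
			continue;
		if (fn(t->e[slot], slot, lvl) == WALK_DOWN &&
		    lvl > 1 && t->child[slot] != NULL)
			scan(t->child[slot], lvl - 1, fn);
	}
}

static size_t nleaf;

/* Example policy: count leaf entries, descend through everything else. */
static walk_type
count_leaves(entry_t e, size_t slot, int lvl)
{
	(void)slot;
	if (lvl == 1 || (e & ENT_LEAF) != 0) {
		nleaf++;
		return WALK_STOP;
	}
	return WALK_DOWN;
}

int
main(void)
{
	struct table l1 = { .e = { [0] = ENT_VALID, [3] = ENT_VALID } };
	struct table l2 = { .e = { [5] = ENT_VALID }, .child = { [5] = &l1 } };

	scan(&l2, 2, count_leaves);
	printf("%zu leaf entries\n", nleaf);   /* prints 2 */
	return 0;
}
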
/src/sys/arch/aarch64/aarch64/

db_interface.c
   464  pd_entry_t pde;    local
   500  pde = l0[idx];
   502  pr("L0[%3d]=%016"PRIx64":", idx, pde);
   503  db_pte_print(pde, 0, pr);
   505  if (!l0pde_valid(pde))
   508  l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
   510  pde = l1[idx];
   512  pr(" L1[%3d]=%016"PRIx64":", idx, pde);
   513  db_pte_print(pde, 1, pr);
   515  if (!l1pde_valid(pde) || l1pde_is_block(pde)
   559  pd_entry_t pde;    local
   [all...]

pmap.c
   815  pd_entry_t pde;    local
   828  pde = *ptep;
   829  if (!l0pde_valid(pde))
   833  l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
   836  pde = *ptep;
   837  if (!l1pde_valid(pde) || l1pde_is_block(pde))
   841  l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
   844  pde = *ptep;
   845  if (!l2pde_valid(pde) || l2pde_is_block(pde)
  1717  pd_entry_t pde;    local
   [all...]
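
Both aarch64 hits above stop descending as soon as a level-1 or level-2 entry is a block descriptor (l1pde_is_block(), l2pde_is_block()), the same shortcut the i386 code takes for PTE_PS large pages. The sketch below shows that early exit in a generic loop-driven walk; the bit layout, the fixed levels[][] arrays, and the masks are illustrative assumptions, not the aarch64 pmap.

/* Sketch: a block entry at an upper level maps a large region directly
 * and ends the walk early.  A real walk would follow ent & ENT_PA to the
 * next table; the toy indexes one fixed array per level instead. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENT_VALID 0x1ULL
#define ENT_BLOCK 0x2ULL           /* maps a whole block at this level */
#define ENT_PA    0x000ffffffffff000ULL
#define PGSHIFT   12
#define BITS_PER_LEVEL 9

static uint64_t levels[4][1 << BITS_PER_LEVEL];

static bool
walk(uint64_t va, uint64_t *pa)
{
	for (int lvl = 3; lvl >= 0; lvl--) {
		unsigned shift = PGSHIFT + lvl * BITS_PER_LEVEL;
		uint64_t idx = (va >> shift) & ((1 << BITS_PER_LEVEL) - 1);
		uint64_t ent = levels[lvl][idx];

		if ((ent & ENT_VALID) == 0)
			return false;
		/* A block entry (or the last level) translates directly:
		 * the remaining low VA bits are the offset. */
		if (lvl == 0 || (ent & ENT_BLOCK) != 0) {
			uint64_t off_mask = ((uint64_t)1 << shift) - 1;
			*pa = (ent & ENT_PA & ~off_mask) | (va & off_mask);
			return true;
		}
		/* Otherwise keep descending to the next level. */
	}
	return false;
}

int
main(void)
{
	uint64_t pa;

	/* Map a 2 MiB block at level 1 covering VA 0x40000000. */
	levels[3][0] = ENT_VALID;
	levels[2][1] = ENT_VALID;
	levels[1][0] = 0x80000000ULL | ENT_VALID | ENT_BLOCK;

	if (walk(0x40012345ULL, &pa))
		printf("pa=%#llx\n", (unsigned long long)pa); /* 0x80012345 */
	return 0;
}
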
/src/sys/arch/hppa/hppa/

pmap.c
   366  UVMHIST_LOG(maphist, "pde %#jx", pa, 0, 0, 0);
   379  pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde)
   381  paddr_t pa = (paddr_t)pde;
   385  (uintptr_t)pde, 0, 0);
   420  pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
   423  return (pde[(va >> 12) & 0x3ff]);
   427  pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
   433  UVMHIST_CALLARGS(maphist, "pdep %#jx va %#jx pte %#jx", (uintptr_t)pde,
   437  KASSERT(pde != NULL);
   438  KASSERT(((paddr_t)pde & PGOFSET) == 0)
   474  volatile pt_entry_t *pde;    local
   487  volatile pt_entry_t *pde = NULL;    local
   565  volatile pt_entry_t *pde;    local
  1134  volatile pt_entry_t *pde;    local
  1265  pt_entry_t *pde, *epde;    local
  1369  volatile pt_entry_t *pde;    local
  1491  volatile pt_entry_t *pde = NULL;    local
  1558  volatile pt_entry_t *pde = NULL;    local
  1626  volatile pt_entry_t *pde;    local
  1718  volatile pt_entry_t *pde;    local
  1761  volatile pt_entry_t *pde;    local
  1987  volatile pt_entry_t *pde;    local
  2047  volatile pt_entry_t *pde = NULL;    local
   [all...]

/src/sys/arch/arm/include/arm32/

pmap.h
   534  #define l1pte_valid_p(pde) ((pde) != 0)
   535  #define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
   536  #define l1pte_supersection_p(pde) (l1pte_section_p(pde) \
   537      && ((pde) & L1_S_V6_SUPER) != 0)
   538  #define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
   539  #define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F
   [all...]
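
These arm32 predicates classify an L1 entry purely by its type field: a section maps 1 MiB directly, while coarse and fine descriptors point at an L2 table (kvm_arm.c above switches on the same L1_TYPE_MASK). A stand-alone sketch of that classification follows; the numeric type values are quoted from the classic ARM L1 descriptor layout for illustration only, the authoritative definitions being in arm32/pte.h.

/* Sketch: classify an ARMv5/v6-style L1 descriptor by its type bits,
 * as the l1pte_*_p() predicates above do. */
#include <stdint.h>
#include <stdio.h>

#define L1_TYPE_MASK 0x03u
#define L1_TYPE_INV  0x00u   /* invalid / fault */
#define L1_TYPE_C    0x01u   /* coarse page-table pointer */
#define L1_TYPE_S    0x02u   /* 1 MiB section mapping */
#define L1_TYPE_F    0x03u   /* fine page-table pointer */

static const char *
l1_classify(uint32_t pde)
{
	switch (pde & L1_TYPE_MASK) {
	case L1_TYPE_S:  return "section (maps 1 MiB directly)";
	case L1_TYPE_C:  return "coarse page table (points to L2)";
	case L1_TYPE_F:  return "fine page table (points to L2)";
	default:         return "invalid";
	}
}

int
main(void)
{
	uint32_t samples[] = { 0x80000002u, 0x80000401u, 0x0u };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#010x -> %s\n", (unsigned)samples[i],
		    l1_classify(samples[i]));
	return 0;
}
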
/src/sys/external/bsd/drm2/dist/drm/i915/gt/

gen6_ppgtt.c
    20  /* Write pde (index) from the page directory @pd to the page table @pt */
    22  const unsigned int pde,
    28  bus_space_write_4(ppgtt->pd_bst, ppgtt->pd_bsh, pde*sizeof(gen6_pte_t),
    32  ppgtt->pd_addr + pde);
    95  unsigned int pde = first_entry / GEN6_PTES;    local
   101  i915_pt_entry(ppgtt->base.pd, pde++);
   114  * Note that the hw doesn't support removing PDE on the fly
   194  unsigned int pde;    local
   201  gen6_for_each_pde(pt, pd, start, end, pde)
   202  gen6_write_pde(ppgtt, pde, pt)
   224  unsigned int pde;    local
   304  u32 pde;    local
   388  unsigned int pde;    local
   [all...]

gen8_ppgtt.c
    23  u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;    local
    26  pde |= PPAT_CACHED_PDE;
    28  pde |= PPAT_UNCACHED;
    30  return pde;
   154  void **pde = pd->entry;    local
   157  if (!*pde)
   160  __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
   161  } while (pde++, --count);
   713  struct i915_page_directory *pde;    local
   715  pde = alloc_pd(vm)
   [all...]

/src/common/lib/libprop/

prop_dictionary.c
   923  struct _prop_dict_entry *pde;    local
   934  pde = &pd->pd_array[idx];
   935  _PROP_ASSERT(pde->pde_key != NULL);
   936  res = strcmp(key, pde->pde_key->pdk_key);
   940  return (pde);
   957  const struct _prop_dict_entry *pde;    local
   966  pde = _prop_dict_lookup(pd, key, NULL);
   967  if (pde != NULL) {
   968  _PROP_ASSERT(pde->pde_objref != NULL);
   969  po = pde->pde_objref
  1025  struct _prop_dict_entry *pde;    local
  1165  struct _prop_dict_entry *pde;    local
   [all...]
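
The prop_dictionary hits show _prop_dict_lookup() binary-searching pd_array, which is kept sorted by key, and optionally reporting the slot where a missing key would be inserted. A self-contained sketch of that lookup pattern is below; the struct dict_entry type and the dict_lookup() name are invented for the example and are not the libprop interface.

/* Sketch: binary search over a key-sorted array of entries, returning the
 * match or, through idxp, the slot where a new key would be inserted. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dict_entry {
	const char *key;
	const char *value;
};

static struct dict_entry *
dict_lookup(struct dict_entry *array, size_t count, const char *key,
    size_t *idxp)
{
	size_t base = 0, lim;

	for (lim = count; lim != 0; lim >>= 1) {
		size_t idx = base + (lim >> 1);
		int res = strcmp(key, array[idx].key);

		if (res == 0) {
			if (idxp != NULL)
				*idxp = idx;
			return &array[idx];
		}
		if (res > 0) {          /* key sorts after array[idx] */
			base = idx + 1;
			lim--;
		}
	}
	if (idxp != NULL)
		*idxp = base;           /* insertion point for a new key */
	return NULL;
}

int
main(void)
{
	struct dict_entry d[] = {
		{ "alpha", "1" }, { "beta", "2" }, { "gamma", "3" },
	};
	size_t slot;
	struct dict_entry *e = dict_lookup(d, 3, "beta", NULL);

	printf("beta -> %s\n", e != NULL ? e->value : "(absent)");
	if (dict_lookup(d, 3, "delta", &slot) == NULL)
		printf("delta would be inserted at slot %zu\n", slot);
	return 0;
}
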
/src/sys/arch/arm/arm32/

vm_machdep.c
   280  pd_entry_t *pde, oldpde, tmppde;    local
   297  /* Get the PDE of the current VA. */
   298  if (pmap_get_pde_pte(pmap, (vaddr_t) dst, &pde, &pte) == false)
   300  switch ((oldpde = *pde) & L1_TYPE_MASK) {
   306  *pde = tmppde;
   307  PTE_SYNC(pde);
   347  *pde = oldpde;
   348  PTE_SYNC(pde);
|
/src/sys/arch/riscv/include/

pte.h
   306  pte_pde_valid_p(pd_entry_t pde)
   308  return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
   312  pte_pde_to_paddr(pd_entry_t pde)
   314  return pte_to_paddr((pt_entry_t)pde);
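
pte_pde_valid_p() above encodes the RISC-V rule that an entry with V set and R, W, X all clear is a pointer to the next-level table, whereas an entry with any of R/W/X set is a leaf mapping. A small stand-alone version of the two predicates, with the bit values repeated here only for illustration:

/* Sketch of the RISC-V leaf-vs-table convention used by pte_pde_valid_p(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_V 0x01u
#define PTE_R 0x02u
#define PTE_W 0x04u
#define PTE_X 0x08u

/* True if the entry is a valid pointer to a next-level page table. */
static bool
pde_is_table(uint64_t pde)
{
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

/* True if the entry is a valid leaf (it maps memory at this level). */
static bool
pde_is_leaf(uint64_t pde)
{
	return (pde & PTE_V) != 0 && (pde & (PTE_X | PTE_W | PTE_R)) != 0;
}

int
main(void)
{
	printf("%d %d\n", pde_is_table(PTE_V), pde_is_leaf(PTE_V | PTE_R));
	return 0;
}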
|
/src/sys/arch/x86/include/

pmap_private.h
   250  #define pmap_valid_entry(E) ((E) & PTE_P) /* is PDE or PTE valid? */
   329  pd_entry_t *pde;
   333  pde = L2_BASE + pl2_i(va);
   334  if (*pde & PTE_PS)
   335  return ((pt_entry_t *)pde);
|
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/

vmm.h
    34   * The array is indexed by PDE, and will either point to the
    35   * child page table, or indicate the PDE is marked as sparse.
    37  #define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
    38  #define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
    40  struct nvkm_vmm_pt **pde;    member in struct:nvkm_vmm_pt
    67  nvkm_vmm_pde_func pde;    member in struct:nvkm_vmm_desc_func
|
nouveau_nvkm_subdev_mmu_vmm.c
    39  kvfree(pgt->pde);
    69  pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
    70  if (!pgt->pde) {
   160  TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
   163  /* PDE no longer required. */
   167  pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
   170  pgd->pde[pdei] = NULL;
   177  func->pde(vmm, pgd, pdei);
   178  pgd->pde[pdei] = NULL
   [all...]
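
The nvkm_vmm_pt::pde array holds one pointer per PDE and distinguishes three states: NULL (nothing there), an error-pointer sentinel (the PDE is marked sparse, tested with NVKM_VMM_PDE_SPARSED/IS_ERR), or a real child page table. The sketch below imitates that three-state slot with a static dummy object as the sentinel instead of the kernel's ERR_PTR machinery; all names in it are invented for the example.

/* Sketch: a pointer slot that encodes "empty", "sparse", or "child table". */
#include <stdio.h>

struct pgt { int dummy; };

static struct pgt sparse_sentinel;            /* its address is the tag */
#define PDE_SPARSE      (&sparse_sentinel)
#define PDE_SPARSED(p)  ((p) == PDE_SPARSE)
#define PDE_INVALID(p)  ((p) == NULL || PDE_SPARSED(p))

int
main(void)
{
	struct pgt real;
	struct pgt *slots[3] = { NULL, PDE_SPARSE, &real };

	for (int i = 0; i < 3; i++)
		printf("slot %d: %s\n", i,
		    PDE_SPARSED(slots[i]) ? "sparse" :
		    PDE_INVALID(slots[i]) ? "empty" : "child table");
	return 0;
}
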
/src/sys/arch/riscv/riscv/

pmap_machdep.c
   259  pd_entry_t pde = pmap_kernel()->pm_pdetab->pde_pde[i];    local
   262  if (pde) {
   263  pmap->pm_pdetab->pde_pde[i] = pde;
|
/src/sys/external/bsd/drm2/include/

i915_trace.h
   362  "uint32_t"/*pde*/,
   366  trace_i915_page_table_entry_alloc(struct i915_address_space *vm, uint32_t pde,
   369  TRACE4(i915,, page_table_entry_alloc, vm, pde, start, pde_shift);
   374  "uint32_t"/*pde*/,
   380  trace_i915_page_table_entry_map(struct i915_address_space *vm, uint32_t pde,
   383  TRACE6(i915,, page_table_entry_map, vm, pde, pt, first, count, bits);
|
/src/sys/arch/xen/x86/

xen_pmap.c
   212  pd_entry_t pde;    local
   221  if (!pmap_pdes_valid(va, pdes, &pde, &lvl)) {
|