/src/sys/uvm/pmap/
vmpagemd.h
    79 #define VM_PAGEMD_VMPAGE_P(mdpg)     (((mdpg)->mdpg_attrs & VM_PAGEMD_VMPAGE) != 0)
    80 #define VM_PAGEMD_REFERENCED_P(mdpg) (((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
    81 #define VM_PAGEMD_MODIFIED_P(mdpg)   (((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
    82 #define VM_PAGEMD_POOLPAGE_P(mdpg)   (((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
    83 #define VM_PAGEMD_EXECPAGE_P(mdpg)   (((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0 [all...]

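These VM_PAGEMD_*_P() predicates are plain bit tests against the per-page
attribute word mdpg_attrs. A minimal usage sketch follows; the helper and the
include path are assumptions for illustration, not part of vmpagemd.h:

    #include <uvm/pmap/vmpagemd.h>	/* assumed include path */

    /* Hypothetical helper: decide whether a write fault still needs
     * software dirty-bit emulation for this page. */
    static inline bool
    page_needs_modified_emulation(const struct vm_page_md *mdpg)
    {
            /* VM_PAGEMD_MODIFIED_P() just tests the MODIFIED bit in
             * mdpg->mdpg_attrs, so this is a cheap, lock-free hint. */
            return !VM_PAGEMD_MODIFIED_P(mdpg);
    }
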
pmap.c
   418 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_long clear_attributes)
   420 volatile u_long * const attrp = &mdpg->mdpg_attrs;
   441 pmap_page_set_attributes(struct vm_page_md *mdpg, u_long set_attributes)
   444 atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
   446 mdpg->mdpg_attrs |= set_attributes;
   458 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_page_syncicache
   459 pv_entry_t pv = &mdpg->mdpg_first;
   467 VM_PAGEMD_PVLIST_READLOCK(mdpg);
   468 pmap_pvlist_check(mdpg);
   490 pmap_pvlist_check(mdpg);
  1037 struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp); local in function:pmap_pv_protect
  1191 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_page_protect
  1274 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_pte_protect
  1439 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); local in function:pmap_enter
  1584 struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL); local in function:pmap_kenter_pa
  1846 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_clear_reference
  1877 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_clear_modify
  1977 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_set_modified
  2163 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_remove_pv
  2410 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_map_poolpage
  2439 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); variable in typeref:struct:vm_page_md * const [all...]

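Lines 441-446 show both halves of the attribute-update pattern: atomic_or_ulong()
where other CPUs may update the word concurrently, a plain OR otherwise. The
clear side (line 418) goes through a volatile pointer to the same word (line
420), presumably so it can retry until its compare-and-swap wins. A condensed
sketch of the set side, assuming the split is on MULTIPROCESSOR (not the
verbatim function body):

    #include <sys/atomic.h>

    void
    pmap_page_set_attributes(struct vm_page_md *mdpg, u_long set_attributes)
    {
    #ifdef MULTIPROCESSOR	/* assumed condition */
            /* Other CPUs may race on mdpg_attrs: OR atomically. */
            atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
    #else
            /* Uniprocessor: a plain read-modify-write is safe. */
            mdpg->mdpg_attrs |= set_attributes;
    #endif
    }
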
/src/sys/arch/powerpc/include/booke/
pte.h
   199 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
   206 if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
   209 if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
   219 if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
   226 pte_flag_bits(struct vm_page_md *mdpg, int flags)
   229 if (__predict_true(mdpg != NULL)) {
   235 if (__predict_false(mdpg != NULL)) [all...]

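The tests at lines 206-219 do software referenced/modified/exec tracking at
PTE-construction time: execute permission is withheld until the page has been
marked EXECPAGE (so the first execute fault can sync the icache), and write
permission is withheld until MODIFIED is set (so the first store traps). A
paraphrased sketch with illustrative bit names; R_PERM/W_PERM/X_PERM are
stand-ins defined here, not the real booke PTE flags:

    /* Illustrative permission bits; the real names live in pte.h. */
    #define R_PERM  0x1
    #define W_PERM  0x2
    #define X_PERM  0x4

    static inline uint32_t
    sketch_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
    {
            uint32_t pt_entry = R_PERM;

            if ((prot & VM_PROT_EXECUTE) != 0 &&
                (mdpg == NULL || VM_PAGEMD_EXECPAGE_P(mdpg))) {
                    /* Grant execute only once the icache is known to be
                     * in sync with this page's contents. */
                    pt_entry |= X_PERM;
            }
            if ((prot & VM_PROT_WRITE) != 0 &&
                (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg))) {
                    /* Grant write only once the page is already dirty;
                     * otherwise the first store faults and the pmap
                     * records VM_PAGEMD_MODIFIED. */
                    pt_entry |= W_PERM;
            }
            return pt_entry;
    }
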
pmap.h
   129 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
   135 pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va, bool dirty)
   141 pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)

/src/sys/arch/m68k/include/
pte_coldfire.h
   181 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
   187 if (mdpg != NULL && VM_PAGEMD_EXECPAGE_P(mdpg))
   191 if (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg))
   198 pte_flag_bits(struct vm_page_md *mdpg, int flags)
   201 if (__predict_true(mdpg != NULL)) {
   207 if (__predict_false(mdpg != NULL)) {
   216 pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
   221 pt_entry |= pte_flag_bits(mdpg, flags) [all...]

pmap_coldfire.h
   117 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
   129 pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)

/src/sys/arch/mips/mips/
pmap_machdep.c
   173 pmap_md_map_ephemeral_page(struct vm_page_md *mdpg, bool locked_p, int prot,
   176 KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
   178 struct vm_page *pg = VM_MD_TO_PAGE(mdpg);
   180 pv_entry_t pv = &mdpg->mdpg_first;
   187 KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   226 const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, 0);
   244 (void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
   245 if (VM_PAGEMD_CACHED_P(mdpg)
   256 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   265 pmap_md_unmap_ephemeral_page(struct vm_page_md *mdpg, bool locked_p
   737 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_md_map_poolpage
   770 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_md_unmap_poolpage
  1043 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_md_vca_remove [all...]

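pmap_md_map_ephemeral_page() builds a short-lived kernel mapping of a managed
page (via pte_make_kenter_pa at line 226, honouring the pvlist lock and
cacheability checks at 244-256), and pmap_md_unmap_ephemeral_page() tears it
down. Both prototypes are truncated above; the return value and trailing
old-PTE parameter in this usage sketch are assumptions, and sketch_zero_page()
is a hypothetical caller:

    static void
    sketch_zero_page(struct vm_page *pg)
    {
            struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
            pt_entry_t old_pte;

            /* Map, touch, unmap: no long-lived KVA is consumed. */
            const vaddr_t va = pmap_md_map_ephemeral_page(mdpg, false,
                VM_PROT_READ | VM_PROT_WRITE, &old_pte);
            memset((void *)va, 0, PAGE_SIZE);
            pmap_md_unmap_ephemeral_page(mdpg, false, va, old_pte);
    }
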
mips_machdep.c
  2502 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:mm_md_page_color
  2503 *colorp = atop(mdpg->mdpg_first.pv_va);
  2504 return !mips_cache_badalias(pa, mdpg->mdpg_first.pv_va);

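mm_md_page_color() reports the page's color from the first pv entry's VA and
rejects pairings that would alias badly in a virtually indexed cache. The test
behind mips_cache_badalias() is roughly "do pa and va disagree in the
cache-index bits above the page offset"; a paraphrased sketch, where
sketch_alias_mask is a stand-in for the real cache-geometry-derived mask:

    /* Stand-in mask: e.g. a 16KB direct-mapped cache with 4KB pages
     * makes bits 12-13 the aliasing bits. */
    static const paddr_t sketch_alias_mask = 0x3000;

    static inline bool
    sketch_cache_badalias(paddr_t pa, vaddr_t va)
    {
            /* A bad alias exists when the two addresses select
             * different cache sets for the same physical page. */
            return ((pa ^ (paddr_t)va) & sketch_alias_mask) != 0;
    }
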
/src/sys/arch/riscv/include/
pte.h
   198 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
   214 pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
   230 pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
   236 pte |= pte_flag_bits(mdpg, flags, kernel_p);
   237 pte |= pte_prot_bits(mdpg, prot, kernel_p);
   240 if (mdpg != NULL) {
   243 ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
   250 } else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
   268 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,

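Lines 240-250 show the enter-time policy for RISC-V's A (accessed) and D
(dirty) PTE bits: they are pre-set only when the fault-time flags or the
recorded page attributes prove the access already happened, so clean or
unreferenced pages still trap and the software tracking stays exact. A
condensed sketch of that branch as it might sit inside pte_make_enter(),
paraphrased from the snippets rather than copied verbatim:

    if (mdpg != NULL) {
            if ((prot & VM_PROT_WRITE) != 0 &&
                ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
                    /* The page is being dirtied now or is already dirty:
                     * pre-set A and D so the store does not fault again. */
                    pte |= PTE_A | PTE_D;
            } else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
                    /* The access is known: pre-set A only. */
                    pte |= PTE_A;
            }
    }
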
pmap.h
   194 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
   203 pmap_md_vca_add(struct vm_page_md *mdpg, vaddr_t va, pt_entry_t *nptep)
   209 pmap_md_vca_remove(struct vm_page_md *mdpg, vaddr_t va)
   214 pmap_md_vca_clean(struct vm_page_md *mdpg, vaddr_t va, int op)

/src/sys/arch/aarch64/aarch64/
pmap_machdep.c
   169 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_fault_fixup
   178 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
   197 pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   284 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_md_map_poolpage
   285 const pv_entry_t pv = &mdpg->mdpg_first;
   291 KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
   310 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:pmap_md_unmap_poolpage
   312 KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
   314 const pv_entry_t pv = &mdpg->mdpg_first;
   624 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc [all...]

/src/sys/arch/mips/include/
pte.h
   368 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
   387 pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot,
   404 if (mdpg != NULL) {
   416 if (VM_PAGEMD_MODIFIED_P(mdpg)) {
   423 if (VM_PAGEMD_MODIFIED_P(mdpg)) {

/src/sys/arch/aarch64/include/
pmap_machdep.h
   415 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
   470 pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot,
   485 ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
   492 } else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {

/src/sys/arch/powerpc/booke/
trap.c
   242 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:dsi_exception
   244 if (!VM_PAGEMD_MODIFIED_P(mdpg)) {
   245 pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
   297 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg); local in function:isi_exception
   300 if (VM_PAGEMD_EXECPAGE_P(mdpg))
   312 if (!VM_PAGEMD_EXECPAGE_P(mdpg)) {
   316 pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);

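dsi_exception() and isi_exception() are the consumers of the deferred
permissions set up in booke pte.h above: a store fault records MODIFIED
(lines 244-245), and an execute fault syncs the icache once and records
EXECPAGE (lines 312-316) so later executions get execute permission directly.
A condensed sketch of the two fixups; the helpers are hypothetical reductions
of the real handlers, with the surrounding fault plumbing omitted:

    static void
    sketch_dsi_fixup(struct vm_page *pg)
    {
            struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

            /* First store to the page: record the dirty state so the
             * next PTE built for it carries write permission. */
            if (!VM_PAGEMD_MODIFIED_P(mdpg))
                    pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
    }

    static void
    sketch_isi_fixup(struct vm_page *pg)
    {
            struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

            /* First execute from the page: make the icache coherent
             * once, then remember that it has been synced. */
            if (!VM_PAGEMD_EXECPAGE_P(mdpg)) {
                    pmap_page_syncicache(pg);
                    pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);
            }
    }
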
booke_pmap.c
    90 pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
    92 KASSERT(VM_PAGEMD_VMPAGE_P(mdpg));
    94 struct vm_page * const pg = VM_MD_TO_PAGE(mdpg);