/src/sys/arch/riscv/riscv/

riscv_tlb.c
    55 tlb_set_asid(tlb_asid_t asid, struct pmap *pm)
    57 csr_asid_write(asid);
    78 tlb_asid_t asid;		local in function:tlb_invalidate_asids
    79 for (asid = lo; asid <= hi; asid++) {
    80 asm volatile("sfence.vma zero, %[asid]"
    82     : [asid] "r" (asid)
   100 for (asid = lo; asid <= hi; asid++)
   [all...]
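A note on the riscv_tlb.c matches: the per-ASID flush is a loop issuing
sfence.vma with rs1 = zero (all virtual addresses) and rs2 = the ASID.
A minimal self-contained sketch of that loop, assuming GCC-style inline
asm; tlb_asid_t and the function name are illustrative:

#include <stdint.h>

typedef uint16_t tlb_asid_t;        /* illustrative; real width is MD */

/*
 * Flush every TLB entry tagged with an ASID in [lo, hi].
 * "sfence.vma zero, rs2" invalidates all translations for the ASID
 * in rs2 at every virtual address (rs1 = x0 means "all VAs").
 */
static void
flush_asid_range(tlb_asid_t lo, tlb_asid_t hi)
{
    for (unsigned long asid = lo; asid <= hi; asid++) {
        __asm volatile("sfence.vma zero, %[asid]"
            : /* no outputs */
            : [asid] "r" (asid)
            : "memory");
    }
}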
sbi.c
   123 unsigned long size, unsigned long asid)
   126 hart_mask, hart_mask_base, start_addr, size, asid);
   150 unsigned long size, unsigned long asid)
   153 hart_mask, hart_mask_base, start_addr, size, asid);
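The sbi.c matches (lines 123 and 150 are the tails of two wrapper
prototypes) forward a remote per-ASID fence to machine mode through the
SBI RFENCE extension. A hedged sketch of the underlying ecall, using
the SBI calling convention (extension ID in a7, function ID in a6,
arguments in a0-a4); the two constants follow the published SBI spec,
everything else is illustrative:

#define SBI_EXT_RFENCE                  0x52464e43UL    /* "RFNC" */
#define SBI_FN_REMOTE_SFENCE_VMA_ASID   2

/*
 * Ask the SBI firmware to execute sfence.vma for one ASID over a VA
 * range on every hart selected by hart_mask/hart_mask_base.
 * Returns the SBI error code (0 on success).
 */
static long
remote_sfence_vma_asid(unsigned long hart_mask, unsigned long hart_mask_base,
    unsigned long start_addr, unsigned long size, unsigned long asid)
{
    register unsigned long a0 __asm("a0") = hart_mask;
    register unsigned long a1 __asm("a1") = hart_mask_base;
    register unsigned long a2 __asm("a2") = start_addr;
    register unsigned long a3 __asm("a3") = size;
    register unsigned long a4 __asm("a4") = asid;
    register unsigned long a6 __asm("a6") = SBI_FN_REMOTE_SFENCE_VMA_ASID;
    register unsigned long a7 __asm("a7") = SBI_EXT_RFENCE;

    __asm volatile("ecall"
        : "+r" (a0), "+r" (a1)
        : "r" (a2), "r" (a3), "r" (a4), "r" (a6), "r" (a7)
        : "memory");
    return (long)a0;                /* a0 = error, a1 = value */
}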
/src/sys/arch/aarch64/aarch64/

aarch64_tlb.c
    53 tlb_set_asid(tlb_asid_t asid, pmap_t pm)
    56 __SHIFTIN(asid, TTBR_ASID) |
    84 tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
    88 aarch64_tlbi_by_asid_va(asid, va);
    92 tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
    96 tlb_invalidate_addr(va, asid);
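For aarch64_tlb.c: the ASID rides in TTBR0_EL1 (the
__SHIFTIN(asid, TTBR_ASID) match), and a single-page shootdown packs
both the ASID and the page number into one TLBI operand. A sketch of a
by-ASID, by-VA invalidate in the inner-shareable broadcast form; the
helper name is illustrative:

#include <stdint.h>

/*
 * Invalidate the TLB entry for one page under a given ASID, broadcast
 * to all cores in the inner-shareable domain.  The TLBI VAE1IS operand
 * packs the ASID into bits [63:48] and VA[55:12] into bits [43:0].
 */
static inline void
tlbi_by_asid_va(uint64_t asid, uint64_t va)
{
    const uint64_t op = (asid << 48) | ((va >> 12) & 0xfffffffffffULL);

    __asm volatile(
        "dsb  ishst      \n\t"  /* drain pending PTE stores first */
        "tlbi vae1is, %0 \n\t"  /* by-VA + ASID, EL1, inner shareable */
        "dsb  ish        \n\t"  /* wait for broadcast completion */
        "isb"
        : : "r" (op) : "memory");
}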
/src/sys/arch/arm/arm32/

arm32_tlb.c
    53 tlb_set_asid(tlb_asid_t asid, pmap_t pm)
    56 if (asid == KERNEL_PID) {
    60 armreg_contextidr_write(asid);
   125 tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
   128 va = trunc_page(va) | asid;
   140 tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
   142 tlb_invalidate_addr(va, asid);
   163 const tlb_asid_t asid = __SHIFTOUT(d,		local in function:tlb_cortex_a5_record_asids
   165 const u_long mask = 1L << (asid & 31);
   166 const size_t idx = asid >> 5
   196 const tlb_asid_t asid = __SHIFTOUT(d01,		local in function:tlb_cortex_a7_record_asids
   [all...]
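Two things stand out in arm32_tlb.c: the current ASID is installed by
writing CONTEXTIDR, and the Cortex-A5/A7 record_asids routines mark
live ASIDs in a bitmap, 32 per word. A sketch of that bitmap
bookkeeping, with hypothetical container names:

#include <stddef.h>

#define TLB_NUM_ASIDS   256     /* 8-bit ASIDs on ARMv7 */

/* u_long is 32 bits on arm32, matching the >> 5 / & 31 split below. */
static unsigned long asid_bitmap[TLB_NUM_ASIDS / 32];

/*
 * Mark one ASID as in use: the word index is asid / 32 and the bit
 * within the word is asid % 32 -- exactly the asid >> 5 and asid & 31
 * arithmetic in the matched lines.
 */
static void
record_asid(unsigned int asid)
{
    const unsigned long mask = 1UL << (asid & 31);
    const size_t idx = asid >> 5;

    asid_bitmap[idx] |= mask;
}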
/src/sys/arch/sh3/sh3/

mmu_sh3.c
    49 /* Set current ASID to 0 - kernel */
    77 sh3_tlb_invalidate_asid(int asid)
    88 if ((_reg_read_4(aa) & SH3_MMUAA_D_ASID_MASK) == asid)
    96 sh3_tlb_invalidate_addr(int asid, vaddr_t va)
   102 match = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;
   123 sh3_tlb_update(int asid, vaddr_t va, uint32_t pte)
   133 KDASSERT(asid < 256 && (pte & ~PGOFSET) != 0 && va != 0);
   137 sh3_tlb_invalidate_addr(asid, va);
   156 match = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;
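On SH-3 the TLB is exposed through a memory-mapped "address array", so
invalidating an ASID is a linear walk that compares each entry's ASID
field and clears the valid bit on a match. A sketch of that walk; the
MMUAA_* layout constants and the slot-address encoding are illustrative
stand-ins for the SH3_MMUAA_* macros in the matches:

#include <stdint.h>

#define TLB_WAYS        4           /* SH-3: 4-way, 32 entries/way */
#define TLB_ENTRIES     32
#define MMUAA_BASE      0xf2000000u /* address-array window */
#define MMUAA_ASID_MASK 0x000000ffu /* illustrative field layout */
#define MMUAA_VALID     0x00000100u

static inline uint32_t
reg_read_4(uint32_t a)
{
    return *(volatile uint32_t *)(uintptr_t)a;
}

static inline void
reg_write_4(uint32_t a, uint32_t v)
{
    *(volatile uint32_t *)(uintptr_t)a = v;
}

/* Slot address for (way, entry); the bit positions are illustrative. */
static inline uint32_t
mmuaa_addr(int way, int entry)
{
    return MMUAA_BASE | ((uint32_t)entry << 12) | ((uint32_t)way << 8);
}

/* Drop every TLB entry whose ASID field matches the dying ASID. */
static void
tlb_invalidate_asid(int asid)
{
    for (int way = 0; way < TLB_WAYS; way++) {
        for (int e = 0; e < TLB_ENTRIES; e++) {
            const uint32_t aa = mmuaa_addr(way, e);
            const uint32_t v = reg_read_4(aa);
            if ((v & MMUAA_ASID_MASK) == (uint32_t)asid)
                reg_write_4(aa, v & ~MMUAA_VALID);
        }
    }
}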
mmu_sh4.c
    58 /* Set current ASID to 0 */
    75  * address. Note, the ASID match is against PTEH, not "va". The
    88 sh4_tlb_invalidate_addr(int asid, vaddr_t va)
    99 opteh = _reg_read_4(SH4_PTEH);	/* save current ASID */
   101 _reg_write_4(SH4_PTEH, asid);	/* set ASID for associative write */
   102 (*tlb_assoc_p2)(va);		/* invalidate { va, ASID } entry if exists */
   104 _reg_write_4(SH4_PTEH, opteh);	/* restore ASID */
   110 do_invalidate_asid(int asid)
   117 if ((aa & SH4_UTLB_AA_ASID_MASK) == asid)
   [all...]
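The SH-4 comment at line 75 is the subtle part: the associative write
matches the ASID currently held in PTEH, not bits of "va", so the live
ASID must be saved, swapped, and restored around the operation. A
minimal sketch of that sequence; the register address and the P2-area
indirection are illustrative:

#include <stdint.h>

#define SH4_PTEH    0xff000000u     /* page-table-entry-high register */

static inline uint32_t
reg_read_4(uint32_t a)
{
    return *(volatile uint32_t *)(uintptr_t)a;
}

static inline void
reg_write_4(uint32_t a, uint32_t v)
{
    *(volatile uint32_t *)(uintptr_t)a = v;
}

/* Associative UTLB write; must run from the uncached P2 area on SH-4. */
extern void (*tlb_assoc_p2)(uint32_t va);

/*
 * Invalidate the { va, asid } entry, if present.  The hardware compares
 * the ASID in PTEH, so PTEH is swapped to the target ASID for the
 * duration of the associative write and then restored.
 */
static void
tlb_invalidate_addr(int asid, uint32_t va)
{
    const uint32_t opteh = reg_read_4(SH4_PTEH);  /* save current ASID */

    reg_write_4(SH4_PTEH, (uint32_t)asid);
    (*tlb_assoc_p2)(va);                          /* match-and-clear */
    reg_write_4(SH4_PTEH, opteh);                 /* restore ASID */
}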
mmu.c
    86 r & SH3_MMUCR_IX ? "ASID+VPN" : "VPN",
   108 sh_tlb_set_asid(int asid)
   111 _reg_write_4(SH_(PTEH), asid);
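The shared mmu.c shows that a context switch on sh3 reduces to writing
the incoming ASID into PTEH, after which TLB fills and lookups are
tagged with it. A one-function sketch under the assumption of an 8-bit
ASID field; the address constant stands in for SH_(PTEH):

#include <stdint.h>

#define PTEH_ADDR       0xff000000u /* illustrative; SH_(PTEH) in the source */
#define PTEH_ASID_MASK  0x000000ffu /* 8-bit ASID field */

/* Make "asid" the current address-space tag for TLB fills and lookups. */
static void
tlb_set_asid(int asid)
{
    *(volatile uint32_t *)(uintptr_t)PTEH_ADDR =
        (uint32_t)asid & PTEH_ASID_MASK;
}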
/src/sys/arch/powerpc/booke/

booke_stubs.c
    51 tlb_set_asid(tlb_asid_t asid, struct pmap *pm)
    53 (*cpu_md_ops.md_tlb_ops->md_tlb_set_asid)(asid);
    91 tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
    93 (*cpu_md_ops.md_tlb_ops->md_tlb_invalidate_addr)(va, asid);
    99 tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
   101 return (*cpu_md_ops.md_tlb_ops->md_tlb_update_addr)(va, asid, pte, insert_p);
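Unlike the other ports, booke_stubs.c routes every MI call through a
per-CPU-model ops table, letting one kernel drive different Book E TLB
implementations. A sketch of that indirection with hypothetical and
simplified types (the real tlb_set_asid also takes the pmap):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t  tlb_asid_t;
typedef uintptr_t vaddr_t;
typedef uint32_t  pt_entry_t;

/* Per-CPU-model TLB operations, installed at attach time. */
struct tlb_md_ops {
    void (*md_tlb_set_asid)(tlb_asid_t);
    void (*md_tlb_invalidate_addr)(vaddr_t, tlb_asid_t);
    bool (*md_tlb_update_addr)(vaddr_t, tlb_asid_t, pt_entry_t, bool);
};

static struct {
    const struct tlb_md_ops *md_tlb_ops;
} cpu_md_ops;

/* The MI entry points are one-line trampolines into the ops table. */
void
tlb_set_asid(tlb_asid_t asid)
{
    (*cpu_md_ops.md_tlb_ops->md_tlb_set_asid)(asid);
}

bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
{
    return (*cpu_md_ops.md_tlb_ops->md_tlb_update_addr)(va, asid, pte,
        insert_p);
}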
/src/sys/uvm/pmap/

pmap_tlb.c
    46  * that have a valid ASID.
    49  * then reinitialize the ASID space, and start allocating again at 1.  When
    50  * allocating from the ASID bitmap, we skip any ASID who has a corresponding
    51  * bit set in the ASID bitmap.  Eventually this causes the ASID bitmap to fill
    52  * and, when completely filled, a reinitialization of the ASID space.
    54  * To reinitialize the ASID space, the ASID bitmap is reset and then the ASIDs
    55  * of non-kernel TLB entries get recorded in the ASID bitmap.  If the entrie
   425 for (tlb_asid_t asid = 1; asid <= ti->ti_asid_max; asid++) {	local in function:pmap_tlb_asid_count
   [all...]
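The pmap_tlb.c block comment spells out the MI allocation policy: hand
out ASIDs starting at 1, skip any ID whose bit is already set in the
bitmap, and when the space is exhausted reinitialize it (clear the
bitmap, then re-record the ASIDs still live in the TLB). A compact
sketch of that allocator with hypothetical structure names; the real
reinitialization also flushes stale user entries:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef uint16_t tlb_asid_t;

#define ASID_MAX        255         /* illustrative; ti_asid_max in the source */
#define BITMAP_WORDS    ((ASID_MAX + 1 + 31) / 32)

struct tlb_info {
    uint32_t    ti_asid_bitmap[BITMAP_WORDS];
    tlb_asid_t  ti_asid_hint;       /* next candidate to try */
};

static inline bool
asid_in_use(const struct tlb_info *ti, tlb_asid_t a)
{
    return (ti->ti_asid_bitmap[a >> 5] >> (a & 31)) & 1;
}

/* Reset the bitmap; callers re-record ASIDs still live in the TLB. */
static void
asid_space_reinit(struct tlb_info *ti)
{
    memset(ti->ti_asid_bitmap, 0, sizeof(ti->ti_asid_bitmap));
    ti->ti_asid_bitmap[0] = 1;      /* ASID 0 is reserved (kernel) */
    ti->ti_asid_hint = 1;
}

/* Allocate the next free ASID >= 1, reinitializing when exhausted. */
static tlb_asid_t
asid_alloc(struct tlb_info *ti)
{
    for (tlb_asid_t a = ti->ti_asid_hint; a <= ASID_MAX; a++) {
        if (!asid_in_use(ti, a)) {
            ti->ti_asid_bitmap[a >> 5] |= 1u << (a & 31);
            ti->ti_asid_hint = a + 1;
            return a;
        }
    }
    asid_space_reinit(ti);          /* ran past the end: restart at 1 */
    return asid_alloc(ti);
}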