
Lines Matching defs:tlb

110 	struct e500_tlb tlb;
114 tlb.tlb_va = MAS2_EPN & hwtlb.hwtlb_mas2;
115 tlb.tlb_size = 1024 << (2 * MASX_TSIZE_GET(hwtlb.hwtlb_mas1));
116 tlb.tlb_asid = MASX_TID_GET(hwtlb.hwtlb_mas1);
117 tlb.tlb_pte = (hwtlb.hwtlb_mas2 & MAS2_WIMGE)
126 tlb.tlb_pte |= (prot_mask & hwtlb.hwtlb_mas3) << prot_shift;
127 return tlb;
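
The size decode on line 115 is the key arithmetic here: MAS1.TSIZE counts page sizes in powers of four starting at 1 KB. A minimal standalone sketch of that decode (the function name is hypothetical):

    #include <stdint.h>

    /* Line-115 arithmetic: size = 1 KB * 4^TSIZE. */
    static inline uint32_t
    tsize_to_bytes(unsigned int tsize)
    {
    	return 1024u << (2 * tsize);	/* TSIZE=1 -> 4 KB, TSIZE=7 -> 16 MB */
    }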
147 * If tlbassoc is the same as tlbentries (like in TLB1) then the TLB is
149 * is less than the number of tlb entries, the slot is split in two
150 fields. Since the TLB is M rows by N ways, the lower bits are for
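
A sketch of the slot split that comment describes. The listing cuts off before saying whether the low bits select the row or the way, so the row-major choice below is an assumption, as are the names:

    /* Split a TLB0 slot into (row, way) for an M-row, N-way TLB;
     * assumes both counts are powers of two and low bits = row. */
    static inline void
    slot_split(u_int slot, u_int tlbentries, u_int tlbassoc,
        u_int *row, u_int *way)
    {
    	const u_int nrows = tlbentries / tlbassoc;

    	*row = slot & (nrows - 1);
    	*way = slot / nrows;
    }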
229 tlb_to_hwtlb(const struct e500_tlb tlb)
233 KASSERT(trunc_page(tlb.tlb_va) == tlb.tlb_va);
234 KASSERT(tlb.tlb_size != 0);
235 KASSERT((tlb.tlb_size & (tlb.tlb_size - 1)) == 0);
236 const uint32_t prot_mask = tlb.tlb_pte & PTE_RWX_MASK;
237 if (__predict_true(tlb.tlb_size == PAGE_SIZE)) {
244 if (tlb.tlb_asid) {
246 | MASX_TID_MAKE(tlb.tlb_asid);
256 if (tlb.tlb_pte & PTE_UNMODIFIED)
258 if (tlb.tlb_pte & PTE_UNSYNCED)
261 KASSERT(tlb.tlb_asid == 0);
262 KASSERT((tlb.tlb_size & 0xaaaaa7ff) == 0);
263 u_int cntlz = __builtin_clz(tlb.tlb_size);
278 hwtlb.hwtlb_mas2 = tlb.tlb_va | (tlb.tlb_pte & PTE_WIMGE_MASK);
279 hwtlb.hwtlb_mas3 |= tlb.tlb_pte & PTE_RPN_MASK;
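
The count-leading-zeros on line 263 is how the line-115 decode gets inverted for TLB1's variable-size entries; the KASSERTs at lines 235 and 262 guarantee the size is a supported power of two first. A sketch of the arithmetic (hypothetical name):

    /* Invert size = 1 KB * 4^TSIZE: log2(size) = 31 - clz(size),
     * so TSIZE = ((31 - clz) - 10) / 2.  E.g. 16 MB: clz = 7, TSIZE = 7. */
    static inline u_int
    bytes_to_tsize(uint32_t size)
    {
    	return ((31 - __builtin_clz(size)) - 10) / 2;
    }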
404 * We have a valid kernel TLB entry. But if it matches
530 * the updated value in any TLB entries affected.
581 e500_tlb_write_entry(size_t index, const struct tlbmask *tlb)
586 e500_tlb_read_entry(size_t index, struct tlbmask *tlb)
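
Lines 581 and 586 are the accessors that talk to the MMU. As a rough sketch of what an e500 TLB read involves (the mtspr/mfspr wrappers and MAS0 macro names here are assumptions, not this file's code): point MAS0 at the entry, execute tlbre, then collect the MAS registers.

    /* Hedged sketch of a TLB1 read on e500; macro names are illustrative. */
    static void
    tlb1_read_sketch(size_t index, uint32_t mas[4])
    {
    	mtspr(SPR_MAS0, MAS0_TLBSEL1 | MAS0_ESEL_MAKE(index));
    	__asm volatile("tlbre");
    	mas[1] = mfspr(SPR_MAS1);
    	mas[2] = mfspr(SPR_MAS2);
    	mas[3] = mfspr(SPR_MAS3);
    }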
619 struct e500_tlb tlb = hwtlb_to_tlb(hwtlb);
626 tlb.tlb_va, tlb.tlb_asid, tlb.tlb_pte);
628 tlb.tlb_pte & PTE_RPN_MASK,
629 tlb.tlb_pte & PTE_xR ? "R" : "",
630 tlb.tlb_pte & PTE_xW ? "W" : "",
631 tlb.tlb_pte & PTE_UNMODIFIED ? "*" : "",
632 tlb.tlb_pte & PTE_xX ? "X" : "",
633 tlb.tlb_pte & PTE_UNSYNCED ? "*" : "",
634 tlb.tlb_pte & PTE_W ? "W" : "",
635 tlb.tlb_pte & PTE_I ? "I" : "",
636 tlb.tlb_pte & PTE_M ? "M" : "",
637 tlb.tlb_pte & PTE_G ? "G" : "",
638 tlb.tlb_pte & PTE_E ? "E" : "");
668 struct e500_tlb tlb = hwtlb_to_tlb(hwtlb);
669 if (!(*func)(ctx, tlb.tlb_va, tlb.tlb_asid,
670 tlb.tlb_pte))
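
The walk interface at lines 668-670 hands each decoded entry to a caller-supplied function; judging by the negated call, returning false presumably stops the walk. A sketch of a conforming callback (the exact typedef is not among the matched lines, so the types are assumptions):

    /* Count entries seen during a TLB walk. */
    static bool
    count_entries(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
    {
    	size_t * const counter = ctx;

    	(*counter)++;
    	return true;		/* keep walking */
    }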
686 * See if we have a TLB entry for the pa.
708 * See if we have a TLB entry for the va.
730 * See if we have a TLB entry for the pa.
750 * See if we have a TLB entry for the pa. If it falls completely within,
751 * mark the reference and return the pa. But only if the TLB entry
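
A sketch of the "completely falls within" test that comment describes (names hypothetical):

    /* True iff [pa, pa+len) lies entirely inside the entry's range. */
    static bool
    pa_within_entry(paddr_t pa, psize_t len, paddr_t entry_pa, psize_t entry_size)
    {
    	return entry_pa <= pa && pa + len <= entry_pa + entry_size;
    }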
854 "insufficient TLB entries");
992 * Let's see how many TLB entries are needed to map memory.
997 * To map main memory into the TLB, we need to flush any
998 * existing entries from the TLB that overlap the virtual
1003 * switch to it, and clear out the TLB entries from AS 0,
1004 * install the new TLB entries to map memory, and then switch
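
As an outline, the dance those comments describe looks like the structural sketch below; every helper name is hypothetical, and the actual address-space switches are presumably done via rfi so the MSR[IS]/MSR[DS] change and the jump take effect atomically.

    /* Structural sketch only; not this file's code. */
    static void
    remap_memory_sketch(void)
    {
    	install_as1_mapping();		/* kernel + stack visible in AS 1 */
    	switch_to_as1();		/* AS 0 entries are no longer live */
    	flush_overlapping_as0_entries();
    	install_memory_mapping();	/* the new AS 0 TLB entries */
    	switch_to_as0();		/* resume on the new mapping */
    	remove_as1_mapping();
    }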
1036 * Now that we have a TLB mapping in AS1 for the kernel and its
1037 * stack, we switch to AS1 to clean up the TLB mappings for TLB0.
1062 *** Now we can add the TLB entries that will map physical
1063 *** memory. If bit 0 [MSB] in slotmask is set, then tlb
1075 *** Synchronize the TLB and the instruction stream.
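
Bit 0 in that comment is the most-significant bit, PowerPC style, so the walk goes MSB-first. A sketch of the loop (the write helper is hypothetical), ending with the synchronization the last comment refers to:

    /* Walk slotmask MSB-first, writing one TLB1 entry per set bit. */
    for (u_int slot = 0; slotmask != 0; slotmask <<= 1, slot++) {
    	if (slotmask & 0x80000000)
    		write_tlb1_slot(slot);	/* hypothetical helper */
    }
    __asm volatile("isync");	/* e.g. resync instruction fetch */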