arm32_tlb.c revision 1.2 1 1.1 matt /*-
2 1.1 matt * Copyright (c) 2013 The NetBSD Foundation, Inc.
3 1.1 matt * All rights reserved.
4 1.1 matt *
5 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
6 1.1 matt * by Matt Thomas of 3am Software Foundry.
7 1.1 matt *
8 1.1 matt * Redistribution and use in source and binary forms, with or without
9 1.1 matt * modification, are permitted provided that the following conditions
10 1.1 matt * are met:
11 1.1 matt * 1. Redistributions of source code must retain the above copyright
12 1.1 matt * notice, this list of conditions and the following disclaimer.
13 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer in the
15 1.1 matt * documentation and/or other materials provided with the distribution.
16 1.1 matt *
17 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
28 1.1 matt */
29 1.1 matt #include <sys/cdefs.h>
30 1.2 matt __KERNEL_RCSID(1, "$NetBSD: arm32_tlb.c,v 1.2 2014/04/11 02:39:03 matt Exp $");
31 1.1 matt
32 1.1 matt #include <sys/param.h>
33 1.1 matt #include <sys/types.h>
34 1.1 matt
35 1.1 matt #include <uvm/uvm.h>
36 1.1 matt
37 1.1 matt #include <arm/locore.h>
38 1.1 matt
39 1.1 matt bool arm_has_tlbiasid_p; // CPU supports TLBIASID system coprocessor op
40 1.1 matt
41 1.1 matt tlb_asid_t
42 1.1 matt tlb_get_asid(void)
43 1.1 matt {
44 1.1 matt return armreg_contextidr_read() & 0xff;
45 1.1 matt }
46 1.1 matt
/*
 * Install a new ASID in CONTEXTIDR.  The DSB before and ISB after are
 * required so that no outstanding memory accesses are tagged with the
 * wrong ASID and the new value is visible to subsequent instructions.
 */
void
tlb_set_asid(tlb_asid_t asid)
{
	arm_dsb();		/* drain outstanding accesses first */
	if (asid == 0) {
		/*
		 * ASID 0 is the kernel: disable TTBR0 table walks so no
		 * user translations can be fetched under the kernel ASID.
		 * NOTE(review): TTBCR_S_PD0 is never cleared here when a
		 * nonzero ASID is later installed -- presumably a caller
		 * restores TTBCR; verify against the pmap code.
		 */
		armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
	}
	armreg_contextidr_write(asid);
	arm_isb();		/* make the new ASID take effect now */
}
57 1.1 matt
/*
 * Invalidate the entire TLB.  On CPUs with a virtually indexed/tagged
 * instruction cache the icache is flushed as well, since its contents
 * are keyed by the (now stale) virtual mappings.
 */
void
tlb_invalidate_all(void)
{
	const bool vivt_icache_p = arm_pcache.icache_type == CACHE_TYPE_VIVT;
	arm_dsb();			/* complete pending accesses */
	armreg_tlbiall_write(0);	/* invalidate entire unified TLB */
	arm_isb();			/* TLB op done before icache op */
	if (__predict_false(vivt_icache_p)) {
		if (arm_has_tlbiasid_p) {
			/*
			 * NOTE(review): TLBIASID support is used here as a
			 * proxy for the inner-shareable ICIALLUIS op being
			 * implemented -- verify this holds for all CPUs.
			 */
			armreg_icialluis_write(0);
		} else {
			armreg_iciallu_write(0);
		}
	}
	arm_isb();			/* ensure completion before return */
}
74 1.1 matt
/*
 * Invalidate global (non-ASID-tagged) entries.  The ARM TLB maintenance
 * interface has no global-only invalidate, so fall back to flushing
 * everything.
 */
void
tlb_invalidate_globals(void)
{
	tlb_invalidate_all();
}
80 1.1 matt
/*
 * Invalidate all TLB entries tagged with an ASID in [lo, hi], inclusive.
 * Falls back to a full TLB flush when per-ASID invalidation (TLBIASID)
 * is not available.
 *
 * NOTE(review): if tlb_asid_t is an 8-bit type and hi == 255, the
 * "lo <= hi" loop below never terminates because lo wraps to 0 --
 * presumably callers never pass the maximum ASID; verify.
 */
void
tlb_invalidate_asids(tlb_asid_t lo, tlb_asid_t hi)
{
	const bool vivt_icache_p = arm_pcache.icache_type == CACHE_TYPE_VIVT;
	arm_dsb();		/* complete pending accesses first */
	if (arm_has_tlbiasid_p) {
		/* One TLBIASID op per ASID in the range. */
		for (; lo <= hi; lo++) {
			armreg_tlbiasid_write(lo);
		}
		arm_isb();	/* TLB ops done before icache op */
		if (__predict_false(vivt_icache_p)) {
			armreg_icialluis_write(0);
		}
	} else {
		/* No per-ASID op: flush the whole TLB instead. */
		armreg_tlbiall_write(0);
		arm_isb();
		if (__predict_false(vivt_icache_p)) {
			armreg_iciallu_write(0);
		}
	}
	arm_isb();		/* ensure completion before return */
}
103 1.1 matt
104 1.1 matt void
105 1.1 matt tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
106 1.1 matt {
107 1.1 matt arm_dsb();
108 1.1 matt va = trunc_page(va) | asid;
109 1.1 matt for (vaddr_t eva = va + PAGE_SIZE; va < eva; va += L2_S_SIZE) {
110 1.1 matt armreg_tlbimva_write(va);
111 1.1 matt //armreg_tlbiall_write(asid);
112 1.1 matt }
113 1.1 matt arm_isb();
114 1.1 matt }
115 1.1 matt
/*
 * Update the TLB entry for (va, asid) to reflect a new PTE.  Software
 * cannot load an ARM TLB entry directly (presumably -- the hardware
 * walker owns insertion; see tlb_walk below), so just drop any stale
 * entry and let the walker refetch the PTE on next access.  "pte" and
 * "insert_p" are intentionally unused; always reports success.
 */
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
{
	tlb_invalidate_addr(va, asid);
	return true;
}
122 1.1 matt
#if !defined(MULTIPROCESSOR) && defined(CPU_CORTEXA5)
/*
 * Walk the Cortex-A5 main TLB via the implementation-defined TLB data
 * read operation and set a bit in mapp[] for every ASID that has at
 * least one valid non-global entry.  Returns the number of distinct
 * ASIDs found.  UP only: the data-read registers are per-CPU.
 *
 * NOTE(review): the index bound of 63 looks like an off-by-one if the
 * A5 main TLB has 64 sets -- verify against the Cortex-A5 TRM.
 */
static u_int
tlb_cortex_a5_record_asids(u_long *mapp)
{
	u_int nasids = 0;
	for (size_t va_index = 0; va_index < 63; va_index++) {
		for (size_t way = 0; way < 2; way++) {
			/* Select the entry to read by {way, index}. */
			armreg_tlbdataop_write(
			    __SHIFTIN(way, ARM_TLBDATAOP_WAY)
			    | __SHIFTIN(va_index, ARM_A5_TLBDATAOP_INDEX));
			arm_isb();	/* wait for the read op to complete */
			/*
			 * Combine both data registers into one 64-bit view
			 * with TLBDATA1 as the upper word.  The original
			 * OR-ed the two words together unshifted, which made
			 * the uint64_t cast a no-op and merged both words'
			 * bits before the field extraction below (assumes
			 * the ARM_V5_TLBDATA_* fields are defined over the
			 * combined {data1,data0} layout -- verify armreg.h).
			 */
			const uint64_t d =
			    ((uint64_t) armreg_tlbdata1_read() << 32)
			    | armreg_tlbdata0_read();
			/* Skip invalid entries and global mappings. */
			if (!(d & ARM_TLBDATA_VALID)
			    || !(d & ARM_V5_TLBDATA_nG))
				continue;

			const tlb_asid_t asid = __SHIFTOUT(d,
			    ARM_V5_TLBDATA_ASID);
			const u_long mask = 1L << (asid & 31);
			const size_t idx = asid >> 5;
			if (mapp[idx] & mask)
				continue;	/* ASID already recorded */

			mapp[idx] |= mask;
			nasids++;
		}
	}
	return nasids;
}
#endif
154 1.1 matt
#if !defined(MULTIPROCESSOR) && defined(CPU_CORTEXA7)
/*
 * Walk the Cortex-A7 main TLB via the implementation-defined TLB data
 * read operation and set a bit in mapp[] for every ASID that has at
 * least one valid non-global entry.  Returns the number of distinct
 * ASIDs found.  UP only: the data-read registers are per-CPU.
 */
static u_int
tlb_cortex_a7_record_asids(u_long *mapp)
{
	u_int nasids = 0;
	for (size_t va_index = 0; va_index < 128; va_index++) {
		for (size_t way = 0; way < 2; way++) {
			/* Select the entry to read by {way, index}. */
			armreg_tlbdataop_write(
			    __SHIFTIN(way, ARM_TLBDATAOP_WAY)
			    | __SHIFTIN(va_index, ARM_A7_TLBDATAOP_INDEX));
			arm_isb();	/* wait for the read op to complete */
			const uint32_t d0 = armreg_tlbdata0_read();
			const uint32_t d1 = armreg_tlbdata1_read();
			/* Skip invalid entries and global mappings. */
			if (!(d0 & ARM_TLBDATA_VALID)
			    || !(d1 & ARM_A7_TLBDATA1_nG))
				continue;

			/*
			 * The ASID field spans the combined {data1,data0}
			 * pair (per the DATA01 macro name), so data1 must be
			 * shifted into the upper word.  The original OR-ed
			 * d1 into the low word unshifted, so the cast was a
			 * no-op and the extracted ASID was garbage.
			 */
			const uint64_t d01 = ((uint64_t) d1 << 32) | d0;
			const tlb_asid_t asid = __SHIFTOUT(d01,
			    ARM_A7_TLBDATA01_ASID);
			const u_long mask = 1L << (asid & 31);
			const size_t idx = asid >> 5;
			if (mapp[idx] & mask)
				continue;	/* ASID already recorded */

			mapp[idx] |= mask;
			nasids++;
		}
	}
	return nasids;
}
#endif
187 1.1 matt
188 1.1 matt u_int
189 1.1 matt tlb_record_asids(u_long *mapp)
190 1.1 matt {
191 1.1 matt #ifndef MULTIPROCESSOR
192 1.1 matt #ifdef CPU_CORTEXA5
193 1.1 matt if (CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
194 1.1 matt return tlb_cortex_a5_record_asids(mapp);
195 1.1 matt #endif
196 1.1 matt #ifdef CPU_CORTEXA7
197 1.1 matt if (CPU_ID_CORTEX_A7_P(curcpu()->ci_arm_cpuid))
198 1.1 matt return tlb_cortex_a7_record_asids(mapp);
199 1.1 matt #endif
200 1.1 matt #endif /* MULTIPROCESSOR */
201 1.1 matt #ifdef DIAGNOSTIC
202 1.1 matt mapp[0] = 0xfffffffe;
203 1.1 matt mapp[1] = 0xffffffff;
204 1.1 matt mapp[2] = 0xffffffff;
205 1.1 matt mapp[3] = 0xffffffff;
206 1.1 matt mapp[4] = 0xffffffff;
207 1.1 matt mapp[5] = 0xffffffff;
208 1.1 matt mapp[6] = 0xffffffff;
209 1.1 matt mapp[7] = 0xffffffff;
210 1.1 matt #endif
211 1.1 matt return 255;
212 1.1 matt }
213 1.1 matt
214 1.1 matt void
215 1.1 matt tlb_walk(void *ctx, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
216 1.1 matt {
217 1.1 matt /* no way to view the TLB */
218 1.1 matt }
219