/*	$NetBSD: pmap_tlb.c,v 1.5 2014/03/30 15:26:15 matt Exp $	*/
2 1.1 christos
3 1.1 christos /*-
4 1.1 christos * Copyright (c) 2010 The NetBSD Foundation, Inc.
5 1.1 christos * All rights reserved.
6 1.1 christos *
7 1.1 christos * This code is derived from software contributed to The NetBSD Foundation
8 1.1 christos * by Matt Thomas at 3am Software Foundry.
9 1.1 christos *
10 1.1 christos * Redistribution and use in source and binary forms, with or without
11 1.1 christos * modification, are permitted provided that the following conditions
12 1.1 christos * are met:
13 1.1 christos * 1. Redistributions of source code must retain the above copyright
14 1.1 christos * notice, this list of conditions and the following disclaimer.
15 1.1 christos * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 christos * notice, this list of conditions and the following disclaimer in the
17 1.1 christos * documentation and/or other materials provided with the distribution.
18 1.1 christos *
19 1.1 christos * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 christos * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 christos * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 christos * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 christos * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 christos * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 christos * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 christos * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 christos * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 christos * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 christos * POSSIBILITY OF SUCH DAMAGE.
30 1.1 christos */
31 1.1 christos
32 1.1 christos #include <sys/cdefs.h>
33 1.1 christos
34 1.5 matt __KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.5 2014/03/30 15:26:15 matt Exp $");
35 1.1 christos
36 1.1 christos /*
37 1.1 christos * Manages address spaces in a TLB.
38 1.1 christos *
39 1.1 christos * Normally there is a 1:1 mapping between a TLB and a CPU. However, some
40 1.1 christos * implementations may share a TLB between multiple CPUs (really CPU thread
41 1.1 christos * contexts). This requires the TLB abstraction to be separated from the
42 1.1 christos * CPU abstraction. It also requires that the TLB be locked while doing
43 1.1 christos * TLB activities.
44 1.1 christos *
45 1.1 christos * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
46 1.1 christos * that have a valid ASID.
47 1.1 christos *
48 1.1 christos * We allocate ASIDs in increasing order until we have exhausted the supply,
49 1.1 christos * then reinitialize the ASID space, and start allocating again at 1. When
 * allocating from the ASID bitmap, we skip any ASID whose corresponding bit
 * is set in the ASID bitmap. Eventually this causes the ASID bitmap to fill
52 1.1 christos * and, when completely filled, a reinitialization of the ASID space.
53 1.1 christos *
54 1.1 christos * To reinitialize the ASID space, the ASID bitmap is reset and then the ASIDs
55 1.1 christos * of non-kernel TLB entries get recorded in the ASID bitmap. If the entries
 * in the TLB consume more than half of the ASID space, all ASIDs are invalidated,
57 1.1 christos * the ASID bitmap is recleared, and the list of pmaps is emptied. Otherwise,
58 1.1 christos * (the normal case), any ASID present in the TLB (even those which are no
59 1.1 christos * longer used by a pmap) will remain active (allocated) and all other ASIDs
60 1.1 christos * will be freed. If the size of the TLB is much smaller than the ASID space,
61 1.1 christos * this algorithm completely avoids TLB invalidation.
62 1.1 christos *
 * For multiprocessors, we also have to deal with TLB invalidation requests
 * from other CPUs, some of which are handled by reinitializing the ASID
65 1.1 christos * space. Whereas above we keep the ASIDs of those pmaps which have active
66 1.1 christos * TLB entries, this type of reinitialization preserves the ASIDs of any
67 1.1 christos * "onproc" user pmap and all other ASIDs will be freed. We must do this
68 1.1 christos * since we can't change the current ASID.
69 1.1 christos *
70 1.1 christos * Each pmap has two bitmaps: pm_active and pm_onproc. Each bit in pm_active
71 1.1 christos * indicates whether that pmap has an allocated ASID for a CPU. Each bit in
72 1.1 christos * pm_onproc indicates that pmap's ASID is active (equal to the ASID in COP 0
73 1.1 christos * register EntryHi) on a CPU. The bit number comes from the CPU's cpu_index().
74 1.1 christos * Even though these bitmaps contain the bits for all CPUs, the bits that
75 1.1 christos * correspond to the bits belonging to the CPUs sharing a TLB can only be
76 1.1 christos * manipulated while holding that TLB's lock. Atomic ops must be used to
77 1.1 christos * update them since multiple CPUs may be changing different sets of bits at
78 1.1 christos * same time but these sets never overlap.
79 1.1 christos *
80 1.1 christos * When a change to the local TLB may require a change in the TLB's of other
81 1.1 christos * CPUs, we try to avoid sending an IPI if at all possible. For instance, if
82 1.1 christos * we are updating a PTE and that PTE previously was invalid and therefore
83 1.1 christos * couldn't support an active mapping, there's no need for an IPI since there
84 1.1 christos * can't be a TLB entry to invalidate. The other case is when we change a PTE
 * to be modified; we just update the local TLB. If another TLB has a stale
86 1.1 christos * entry, a TLB MOD exception will be raised and that will cause the local TLB
87 1.1 christos * to be updated.
88 1.1 christos *
89 1.1 christos * We never need to update a non-local TLB if the pmap doesn't have a valid
 * ASID for that TLB. If it does have a valid ASID but isn't currently "onproc"
91 1.1 christos * we simply reset its ASID for that TLB and then when it goes "onproc" it
92 1.1 christos * will allocate a new ASID and any existing TLB entries will be orphaned.
93 1.1 christos * Only in the case that pmap has an "onproc" ASID do we actually have to send
94 1.1 christos * an IPI.
95 1.1 christos *
 * Once we've determined we must send an IPI to shoot down a TLB, we need to
 * send it to one of the CPUs that share that TLB. We choose the lowest numbered
 * CPU that has the pmap's ASID "onproc". In reality, any CPU sharing that
99 1.1 christos * TLB would do, but interrupting an active CPU seems best.
100 1.1 christos *
101 1.1 christos * A TLB might have multiple shootdowns active concurrently. The shootdown
102 1.1 christos * logic compresses these into a few cases:
103 1.1 christos * 0) nobody needs to have its TLB entries invalidated
104 1.1 christos * 1) one ASID needs to have its TLB entries invalidated
105 1.1 christos * 2) more than one ASID needs to have its TLB entries invalidated
106 1.1 christos * 3) the kernel needs to have its TLB entries invalidated
 * 4) the kernel and one or more ASIDs need their TLB entries invalidated.
108 1.1 christos *
109 1.1 christos * And for each case we do:
110 1.1 christos * 0) nothing,
111 1.1 christos * 1) if that ASID is still "onproc", we invalidate the TLB entries for
112 1.1 christos * that single ASID. If not, just reset the pmap's ASID to invalidate
113 1.1 christos * and let it allocate a new ASID the next time it goes "onproc",
114 1.1 christos * 2) we reinitialize the ASID space (preserving any "onproc" ASIDs) and
115 1.1 christos * invalidate all non-wired non-global TLB entries,
116 1.1 christos * 3) we invalidate all of the non-wired global TLB entries,
117 1.1 christos * 4) we reinitialize the ASID space (again preserving any "onproc" ASIDs)
 *    and invalidate all non-wired TLB entries.
119 1.1 christos *
120 1.1 christos * As you can see, shootdowns are not concerned with addresses, just address
 * spaces. Since the number of TLB entries is usually quite small, tracking
 * individual addresses would add a lot of overhead for very little gain.
123 1.1 christos */
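/*
 * An illustrative sizing note (hypothetical numbers): with a 256-entry ASID
 * space and a 48-entry TLB, a reinitialization can find at most 48 ASIDs
 * still live in the TLB, well under half of the ASID space, so the
 * "invalidate everything" path described above is never taken and existing
 * TLB entries survive ASID reinitialization.
 */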
124 1.1 christos
125 1.1 christos #define __PMAP_PRIVATE
126 1.1 christos
127 1.1 christos #include "opt_multiprocessor.h"
128 1.1 christos
129 1.1 christos #include <sys/param.h>
130 1.1 christos #include <sys/systm.h>
131 1.1 christos #include <sys/proc.h>
132 1.1 christos #include <sys/mutex.h>
133 1.1 christos #include <sys/atomic.h>
134 1.1 christos #include <sys/kernel.h> /* for cold */
135 1.1 christos #include <sys/cpu.h>
136 1.1 christos
137 1.1 christos #include <uvm/uvm.h>
138 1.1 christos
139 1.5 matt static kmutex_t pmap_tlb0_lock __cacheline_aligned;
140 1.1 christos
141 1.1 christos #define IFCONSTANT(x) (__builtin_constant_p((x)) ? (x) : 0)
142 1.1 christos
143 1.1 christos struct pmap_tlb_info pmap_tlb0_info = {
144 1.1 christos .ti_name = "tlb0",
145 1.1 christos .ti_asid_hint = KERNEL_PID + 1,
146 1.1 christos #ifdef PMAP_TLB_NUM_PIDS
147 1.1 christos .ti_asid_max = IFCONSTANT(PMAP_TLB_NUM_PIDS - 1),
148 1.5 matt .ti_asids_free = IFCONSTANT(PMAP_TLB_NUM_PIDS - (1 + KERNEL_PID)),
149 1.1 christos #endif
150 1.1 christos .ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1,
151 1.1 christos #ifdef PMAP_TLB_WIRED_UPAGES
152 1.1 christos .ti_wired = PMAP_TLB_WIRED_UPAGES,
153 1.1 christos #endif
154 1.5 matt .ti_lock = &pmap_tlb0_lock,
155 1.1 christos .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
156 1.3 matt #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
157 1.1 christos .ti_tlbinvop = TLBINV_NOBODY,
158 1.1 christos #endif
159 1.1 christos };
160 1.1 christos
161 1.1 christos #undef IFCONSTANT
162 1.1 christos
163 1.3 matt #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
164 1.3 matt struct pmap_tlb_info *pmap_tlbs[PMAP_TLB_MAX] = {
165 1.1 christos [0] = &pmap_tlb0_info,
166 1.1 christos };
167 1.1 christos u_int pmap_ntlbs = 1;
168 1.1 christos #endif
169 1.1 christos
170 1.1 christos #define __BITMAP_SET(bm, n) \
171 1.1 christos ((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
172 1.1 christos #define __BITMAP_CLR(bm, n) \
173 1.1 christos ((bm)[(n) / (8*sizeof(bm[0]))] &= ~(1LU << ((n) % (8*sizeof(bm[0])))))
174 1.1 christos #define __BITMAP_ISSET_P(bm, n) \
175 1.1 christos (((bm)[(n) / (8*sizeof(bm[0]))] & (1LU << ((n) % (8*sizeof(bm[0]))))) != 0)
176 1.1 christos
177 1.5 matt #define TLBINFO_ASID_MARK_UNUSED(ti, asid) \
178 1.5 matt __BITMAP_CLR((ti)->ti_asid_bitmap, (asid))
179 1.1 christos #define TLBINFO_ASID_MARK_USED(ti, asid) \
180 1.1 christos __BITMAP_SET((ti)->ti_asid_bitmap, (asid))
181 1.1 christos #define TLBINFO_ASID_INUSE_P(ti, asid) \
182 1.1 christos __BITMAP_ISSET_P((ti)->ti_asid_bitmap, (asid))
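/*
 * The ASID bitmap has one bit per ASID: ASID "n" is tracked by bit (n % W)
 * of word (n / W), where W is the width in bits of one bitmap word.  The
 * expression (2 << KERNEL_PID) - 1 used to (re)initialize word 0 marks
 * ASIDs 0 through KERNEL_PID as permanently in use (e.g. 0x1 when
 * KERNEL_PID is 0), reserving them for the kernel.
 */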
183 1.1 christos
184 1.1 christos static void
185 1.1 christos pmap_pai_check(struct pmap_tlb_info *ti)
186 1.1 christos {
187 1.1 christos #ifdef DIAGNOSTIC
188 1.1 christos struct pmap_asid_info *pai;
189 1.1 christos LIST_FOREACH(pai, &ti->ti_pais, pai_link) {
190 1.1 christos KASSERT(pai != NULL);
191 1.1 christos KASSERT(PAI_PMAP(pai, ti) != pmap_kernel());
192 1.1 christos KASSERT(pai->pai_asid > KERNEL_PID);
193 1.1 christos KASSERT(TLBINFO_ASID_INUSE_P(ti, pai->pai_asid));
194 1.1 christos }
195 1.1 christos #endif
196 1.1 christos }
197 1.1 christos
198 1.3 matt #ifdef MULTIPROCESSOR
199 1.3 matt static inline bool
200 1.3 matt pmap_tlb_intersecting_active_p(pmap_t pm, struct pmap_tlb_info *ti)
201 1.3 matt {
202 1.3 matt #if PMAP_TLB_MAX == 1
203 1.3 matt return !kcpuset_iszero(pm->pm_active);
204 1.3 matt #else
205 1.3 matt return kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset);
206 1.3 matt #endif
207 1.3 matt }
208 1.3 matt
209 1.3 matt static inline bool
210 1.3 matt pmap_tlb_intersecting_onproc_p(pmap_t pm, struct pmap_tlb_info *ti)
211 1.3 matt {
212 1.3 matt #if PMAP_TLB_MAX == 1
213 1.3 matt return !kcpuset_iszero(pm->pm_onproc);
214 1.3 matt #else
215 1.3 matt return kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset);
216 1.3 matt #endif
217 1.3 matt }
218 1.3 matt #endif
219 1.3 matt
220 1.1 christos static inline void
221 1.1 christos pmap_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
222 1.1 christos struct pmap *pm)
223 1.1 christos {
224 1.1 christos /*
225 1.1 christos * We must have an ASID but it must not be onproc (on a processor).
226 1.1 christos */
227 1.1 christos KASSERT(pai->pai_asid > KERNEL_PID);
228 1.1 christos #if defined(MULTIPROCESSOR)
229 1.3 matt KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
230 1.1 christos #endif
231 1.1 christos LIST_REMOVE(pai, pai_link);
232 1.1 christos #ifdef DIAGNOSTIC
233 1.1 christos pai->pai_link.le_prev = NULL; /* tagged as unlinked */
234 1.1 christos #endif
235 1.1 christos /*
236 1.5 matt * If the platform has a cheap way to flush ASIDs then free the ASID
237 1.5 matt * back into the pool. On multiprocessor systems, we will flush the
238 1.5 matt * ASID from the TLB when it's allocated. That way we know the flush
239 1.5 matt * was always done in the correct TLB space. On uniprocessor systems,
240 1.5 matt * just do the flush now since we know that it has been used. This has
241 1.5 matt * a bit less overhead. Either way, this will mean that we will only
242 1.5 matt * need to flush all ASIDs if all ASIDs are in use and we need to
243 1.5 matt * allocate a new one.
244 1.5 matt */
245 1.5 matt if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
246 1.5 matt #ifndef MULTIPROCESSOR
247 1.5 matt tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
248 1.5 matt #endif
249 1.5 matt if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
250 1.5 matt TLBINFO_ASID_MARK_UNUSED(ti, pai->pai_asid);
251 1.5 matt ti->ti_asids_free++;
252 1.5 matt }
253 1.5 matt }
254 1.5 matt /*
255 1.1 christos * Note that we don't mark the ASID as not in use in the TLB's ASID
256 1.1 christos * bitmap (thus it can't be allocated until the ASID space is exhausted
257 1.1 christos * and therefore reinitialized). We don't want to flush the TLB for
258 1.1 christos * entries belonging to this ASID so we will let natural TLB entry
259 1.1 christos * replacement flush them out of the TLB. Any new entries for this
260 1.1 christos * pmap will need a new ASID allocated.
261 1.1 christos */
262 1.1 christos pai->pai_asid = 0;
263 1.1 christos
264 1.1 christos #if defined(MULTIPROCESSOR)
265 1.1 christos /*
266 1.1 christos * The bits in pm_active belonging to this TLB can only be changed
267 1.1 christos * while this TLB's lock is held.
268 1.1 christos */
269 1.3 matt #if PMAP_TLB_MAX == 1
270 1.3 matt kcpuset_zero(pm->pm_active);
271 1.3 matt #else
272 1.3 matt kcpuset_atomicly_remove(pm->pm_active, ti->ti_kcpuset);
273 1.3 matt #endif
274 1.1 christos #endif /* MULTIPROCESSOR */
275 1.1 christos }
276 1.1 christos
277 1.1 christos void
278 1.1 christos pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
279 1.1 christos {
280 1.1 christos #if defined(MULTIPROCESSOR)
281 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_desired,
282 1.1 christos EVCNT_TYPE_MISC, NULL,
283 1.1 christos ti->ti_name, "icache syncs desired");
284 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_asts,
285 1.1 christos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
286 1.1 christos ti->ti_name, "icache sync asts");
287 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_all,
288 1.1 christos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
289 1.1 christos ti->ti_name, "icache full syncs");
290 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_pages,
291 1.1 christos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
292 1.1 christos ti->ti_name, "icache pages synced");
293 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_duplicate,
294 1.1 christos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
295 1.1 christos ti->ti_name, "icache dup pages skipped");
296 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_deferred,
297 1.1 christos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
298 1.1 christos ti->ti_name, "icache pages deferred");
299 1.1 christos #endif /* MULTIPROCESSOR */
300 1.1 christos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_asid_reinits,
301 1.1 christos EVCNT_TYPE_MISC, NULL,
302 1.1 christos ti->ti_name, "asid pool reinit");
303 1.1 christos }
304 1.1 christos
305 1.1 christos void
306 1.1 christos pmap_tlb_info_init(struct pmap_tlb_info *ti)
307 1.1 christos {
308 1.1 christos #if defined(MULTIPROCESSOR)
309 1.3 matt #if PMAP_TLB_MAX == 1
310 1.3 matt KASSERT(ti == &pmap_tlb0_info);
311 1.3 matt #else
312 1.1 christos if (ti != &pmap_tlb0_info) {
313 1.3 matt KASSERT(pmap_ntlbs < PMAP_TLB_MAX);
314 1.1 christos
315 1.1 christos KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
316 1.1 christos
317 1.1 christos ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
318 1.1 christos ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
319 1.1 christos ti->ti_asid_hint = KERNEL_PID + 1;
320 1.1 christos ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
321 1.1 christos ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
		ti->ti_tlbinvop = TLBINV_NOBODY;
323 1.1 christos ti->ti_victim = NULL;
324 1.3 matt kcpuset_create(&ti->ti_kcpuset, true);
325 1.1 christos ti->ti_index = pmap_ntlbs++;
326 1.1 christos ti->ti_wired = 0;
327 1.1 christos pmap_tlbs[ti->ti_index] = ti;
328 1.1 christos snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u",
329 1.1 christos ti->ti_index);
330 1.1 christos pmap_tlb_info_evcnt_attach(ti);
331 1.1 christos return;
332 1.1 christos }
333 1.3 matt #endif
334 1.1 christos #endif /* MULTIPROCESSOR */
335 1.1 christos KASSERT(ti == &pmap_tlb0_info);
336 1.5 matt KASSERT(ti->ti_lock == &pmap_tlb0_lock);
338 1.1 christos mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
339 1.3 matt #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
340 1.3 matt kcpuset_create(&ti->ti_kcpuset, true);
341 1.5 matt kcpuset_set(ti->ti_kcpuset, cpu_index(curcpu()));
342 1.3 matt #endif
344 1.1 christos if (ti->ti_asid_max == 0) {
345 1.1 christos ti->ti_asid_max = pmap_md_tlb_asid_max();
346 1.5 matt ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
347 1.1 christos }
348 1.1 christos
349 1.1 christos KASSERT(ti->ti_asid_max < sizeof(ti->ti_asid_bitmap)*8);
350 1.1 christos }
351 1.1 christos
352 1.1 christos #if defined(MULTIPROCESSOR)
353 1.1 christos void
354 1.1 christos pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
355 1.1 christos {
356 1.1 christos KASSERT(!CPU_IS_PRIMARY(ci));
357 1.1 christos KASSERT(ci->ci_data.cpu_idlelwp != NULL);
358 1.1 christos KASSERT(cold);
359 1.1 christos
360 1.1 christos TLBINFO_LOCK(ti);
361 1.3 matt #if PMAP_TLB_MAX > 1
362 1.3 matt kcpuset_set(ti->ti_kcpuset, cpu_index(ci));
363 1.5 matt cpu_set_tlb_info(ci, ti);
364 1.3 matt #endif
365 1.1 christos
366 1.1 christos /*
367 1.1 christos * Do any MD tlb info init.
368 1.1 christos */
369 1.1 christos pmap_md_tlb_info_attach(ti, ci);
370 1.1 christos
371 1.1 christos /*
372 1.3 matt * The kernel pmap uses the kcpuset_running set so it's always
373 1.3 matt * up-to-date.
374 1.1 christos */
375 1.1 christos TLBINFO_UNLOCK(ti);
376 1.1 christos }
377 1.1 christos #endif /* MULTIPROCESSOR */
378 1.1 christos
379 1.1 christos #ifdef DIAGNOSTIC
380 1.1 christos static size_t
381 1.1 christos pmap_tlb_asid_count(struct pmap_tlb_info *ti)
382 1.1 christos {
383 1.1 christos size_t count = 0;
384 1.1 christos for (tlb_asid_t asid = 1; asid <= ti->ti_asid_max; asid++) {
385 1.1 christos count += TLBINFO_ASID_INUSE_P(ti, asid);
386 1.1 christos }
387 1.1 christos return count;
388 1.1 christos }
389 1.1 christos #endif
390 1.1 christos
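/*
 * Reinitialize the ASID space of a TLB: clear the ASID bitmap and then,
 * depending on "op", either record the ASIDs still present in the TLB
 * (TLBINV_NOBODY), flush all user entries (TLBINV_ALLUSER), or flush
 * everything (TLBINV_ALL).  Finally walk the list of pmaps, keeping the
 * ASID of any pmap that is "onproc" or whose ASID survived in the TLB and
 * resetting the rest so they will allocate a fresh ASID when they next go
 * "onproc".
 */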
391 1.1 christos static void
392 1.1 christos pmap_tlb_asid_reinitialize(struct pmap_tlb_info *ti, enum tlb_invalidate_op op)
393 1.1 christos {
394 1.1 christos const size_t asid_bitmap_words =
395 1.1 christos ti->ti_asid_max / (8 * sizeof(ti->ti_asid_bitmap[0]));
396 1.1 christos
397 1.1 christos pmap_pai_check(ti);
398 1.1 christos
399 1.5 matt ti->ti_evcnt_asid_reinits.ev_count++;
400 1.5 matt
401 1.1 christos /*
402 1.1 christos * First, clear the ASID bitmap (except for ASID 0 which belongs
403 1.1 christos * to the kernel).
404 1.1 christos */
405 1.1 christos ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
406 1.1 christos ti->ti_asid_hint = KERNEL_PID + 1;
407 1.1 christos ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
408 1.1 christos for (size_t word = 1; word <= asid_bitmap_words; word++) {
409 1.1 christos ti->ti_asid_bitmap[word] = 0;
410 1.1 christos }
411 1.1 christos
412 1.1 christos switch (op) {
413 1.1 christos #if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
414 1.1 christos case TLBINV_ALL:
415 1.1 christos tlb_invalidate_all();
416 1.1 christos break;
417 1.1 christos case TLBINV_ALLUSER:
418 1.1 christos tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
419 1.1 christos break;
420 1.1 christos #endif /* MULTIPROCESSOR && PMAP_NEED_TLB_SHOOTDOWN */
421 1.1 christos case TLBINV_NOBODY: {
422 1.1 christos /*
423 1.1 christos * If we are just reclaiming ASIDs in the TLB, let's go find
424 1.1 christos * what ASIDs are in use in the TLB. Since this is a
425 1.1 christos * semi-expensive operation, we don't want to do it too often.
		 * So if more than half of the ASIDs are in use, we don't have
		 * enough free ASIDs, so we invalidate the TLB entries with ASIDs
428 1.1 christos * and clear the ASID bitmap. That will force everyone to
429 1.1 christos * allocate a new ASID.
430 1.1 christos */
431 1.1 christos #if !defined(MULTIPROCESSOR) || defined(PMAP_NEED_TLB_SHOOTDOWN)
432 1.1 christos pmap_tlb_asid_check();
433 1.1 christos const u_int asids_found = tlb_record_asids(ti->ti_asid_bitmap);
434 1.1 christos pmap_tlb_asid_check();
435 1.1 christos KASSERT(asids_found == pmap_tlb_asid_count(ti));
436 1.1 christos if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
437 1.1 christos tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
438 1.1 christos #else /* MULTIPROCESSOR && !PMAP_NEED_TLB_SHOOTDOWN */
439 1.1 christos /*
			 * For those systems (PowerPC) that don't require
			 * cross-CPU TLB shootdowns, we have to invalidate the
			 * entire TLB because we can't record the ASIDs in use
			 * on the other CPUs. This is hopefully cheaper than
			 * trying to use an IPI to record all the ASIDs
445 1.1 christos * on all the CPUs (which would be a synchronization
446 1.1 christos * nightmare).
447 1.1 christos */
448 1.1 christos tlb_invalidate_all();
449 1.1 christos #endif /* MULTIPROCESSOR && !PMAP_NEED_TLB_SHOOTDOWN */
450 1.1 christos ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
451 1.1 christos for (size_t word = 1;
452 1.1 christos word <= asid_bitmap_words;
453 1.1 christos word++) {
454 1.1 christos ti->ti_asid_bitmap[word] = 0;
455 1.1 christos }
456 1.5 matt ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
457 1.1 christos #if !defined(MULTIPROCESSOR) || defined(PMAP_NEED_TLB_SHOOTDOWN)
458 1.1 christos } else {
459 1.1 christos ti->ti_asids_free -= asids_found;
460 1.1 christos }
461 1.1 christos #endif /* !MULTIPROCESSOR || PMAP_NEED_TLB_SHOOTDOWN */
462 1.5 matt KASSERTMSG(ti->ti_asids_free <= ti->ti_asid_max, "%u",
463 1.5 matt ti->ti_asids_free);
464 1.1 christos break;
465 1.1 christos }
466 1.1 christos default:
467 1.1 christos panic("%s: unexpected op %d", __func__, op);
468 1.1 christos }
469 1.1 christos
470 1.1 christos /*
471 1.1 christos * Now go through the active ASIDs. If the ASID is on a processor or
472 1.1 christos * we aren't invalidating all ASIDs and the TLB has an entry owned by
473 1.1 christos * that ASID, mark it as in use. Otherwise release the ASID.
474 1.1 christos */
475 1.1 christos struct pmap_asid_info *pai, *next;
476 1.1 christos for (pai = LIST_FIRST(&ti->ti_pais); pai != NULL; pai = next) {
477 1.1 christos struct pmap * const pm = PAI_PMAP(pai, ti);
478 1.1 christos next = LIST_NEXT(pai, pai_link);
479 1.1 christos KASSERT(pm != pmap_kernel());
480 1.1 christos KASSERT(pai->pai_asid > KERNEL_PID);
481 1.1 christos #if defined(MULTIPROCESSOR)
482 1.3 matt if (pmap_tlb_intersecting_onproc_p(pm, ti)) {
483 1.1 christos if (!TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
484 1.1 christos TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
485 1.1 christos ti->ti_asids_free--;
486 1.1 christos }
487 1.1 christos continue;
488 1.1 christos }
489 1.1 christos #endif /* MULTIPROCESSOR */
490 1.1 christos if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
491 1.1 christos KASSERT(op == TLBINV_NOBODY);
492 1.1 christos } else {
493 1.1 christos pmap_pai_reset(ti, pai, pm);
494 1.1 christos }
495 1.1 christos }
496 1.1 christos #ifdef DIAGNOSTIC
497 1.5 matt size_t free_count __diagused = ti->ti_asid_max - pmap_tlb_asid_count(ti);
498 1.5 matt KASSERTMSG(free_count == ti->ti_asids_free,
499 1.5 matt "bitmap error: %zu != %u", free_count, ti->ti_asids_free);
500 1.1 christos #endif
501 1.1 christos }
502 1.1 christos
503 1.1 christos #if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
#if PMAP_TLB_MAX == 1
505 1.3 matt #error shootdown not required for single TLB systems
506 1.3 matt #endif
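/*
 * IPI handler: process a pending shootdown request for the TLB shared by
 * this CPU.  Perform whichever invalidation the compressed ti_tlbinvop
 * state calls for and then mark the request as completed.
 */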
507 1.1 christos void
508 1.1 christos pmap_tlb_shootdown_process(void)
509 1.1 christos {
510 1.1 christos struct cpu_info * const ci = curcpu();
511 1.2 matt struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
515 1.1 christos
516 1.1 christos KASSERT(cpu_intr_p());
517 1.1 christos KASSERTMSG(ci->ci_cpl >= IPL_SCHED,
518 1.1 christos "%s: cpl (%d) < IPL_SCHED (%d)",
519 1.1 christos __func__, ci->ci_cpl, IPL_SCHED);
520 1.1 christos
521 1.1 christos TLBINFO_LOCK(ti);
522 1.1 christos
523 1.1 christos switch (ti->ti_tlbinvop) {
524 1.1 christos case TLBINV_ONE: {
525 1.1 christos /*
526 1.1 christos * We only need to invalidate one user ASID.
527 1.1 christos */
528 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
529 1.1 christos KASSERT(ti->ti_victim != pmap_kernel());
		if (pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
531 1.1 christos /*
532 1.1 christos * The victim is an active pmap so we will just
533 1.1 christos * invalidate its TLB entries.
534 1.1 christos */
535 1.1 christos KASSERT(pai->pai_asid > KERNEL_PID);
536 1.1 christos pmap_tlb_asid_check();
537 1.1 christos tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
538 1.1 christos pmap_tlb_asid_check();
539 1.1 christos } else if (pai->pai_asid) {
540 1.1 christos /*
541 1.1 christos * The victim is no longer an active pmap for this TLB.
542 1.1 christos * So simply clear its ASID and when pmap_activate is
543 1.1 christos * next called for this pmap, it will allocate a new
544 1.1 christos * ASID.
545 1.1 christos */
			KASSERT(!pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti));
547 1.1 christos pmap_pai_reset(ti, pai, PAI_PMAP(pai, ti));
548 1.1 christos }
549 1.1 christos break;
550 1.1 christos }
551 1.1 christos case TLBINV_ALLUSER:
552 1.1 christos /*
553 1.1 christos * Flush all user TLB entries.
554 1.1 christos */
555 1.1 christos pmap_tlb_asid_reinitialize(ti, TLBINV_ALLUSER);
556 1.1 christos break;
557 1.1 christos case TLBINV_ALLKERNEL:
558 1.1 christos /*
559 1.1 christos * We need to invalidate all global TLB entries.
560 1.1 christos */
561 1.1 christos pmap_tlb_asid_check();
562 1.1 christos tlb_invalidate_globals();
563 1.1 christos pmap_tlb_asid_check();
564 1.1 christos break;
565 1.1 christos case TLBINV_ALL:
566 1.1 christos /*
567 1.1 christos * Flush all the TLB entries (user and kernel).
568 1.1 christos */
569 1.1 christos pmap_tlb_asid_reinitialize(ti, TLBINV_ALL);
570 1.1 christos break;
571 1.1 christos case TLBINV_NOBODY:
572 1.1 christos /*
573 1.1 christos * Might be spurious or another SMT CPU sharing this TLB
574 1.1 christos * could have already done the work.
575 1.1 christos */
576 1.1 christos break;
577 1.1 christos }
578 1.1 christos
579 1.1 christos /*
	 * Indicate we are done with this shootdown event.
581 1.1 christos */
582 1.1 christos ti->ti_victim = NULL;
583 1.1 christos ti->ti_tlbinvop = TLBINV_NOBODY;
584 1.1 christos TLBINFO_UNLOCK(ti);
585 1.1 christos }
586 1.1 christos
587 1.1 christos /*
588 1.1 christos * This state machine could be encoded into an array of integers but since all
589 1.1 christos * the values fit in 3 bits, the 5 entry "table" fits in a 16 bit value which
590 1.1 christos * can be loaded in a single instruction.
591 1.1 christos */
592 1.1 christos #define TLBINV_MAP(op, nobody, one, alluser, allkernel, all) \
593 1.1 christos (((( (nobody) << 3*TLBINV_NOBODY) \
594 1.1 christos | ( (one) << 3*TLBINV_ONE) \
595 1.1 christos | ( (alluser) << 3*TLBINV_ALLUSER) \
596 1.1 christos | ((allkernel) << 3*TLBINV_ALLKERNEL) \
597 1.1 christos | ( (all) << 3*TLBINV_ALL)) >> 3*(op)) & 7)
598 1.1 christos
599 1.1 christos #define TLBINV_USER_MAP(op) \
600 1.1 christos TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER, \
601 1.1 christos TLBINV_ALL, TLBINV_ALL)
602 1.1 christos
603 1.1 christos #define TLBINV_KERNEL_MAP(op) \
604 1.1 christos TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL, \
605 1.1 christos TLBINV_ALLKERNEL, TLBINV_ALL)
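/*
 * For example, a user shootdown arriving while nothing is pending maps
 * TLBINV_NOBODY to TLBINV_ONE; a second user shootdown for a different
 * pmap escalates TLBINV_ONE to TLBINV_ALLUSER; and a kernel shootdown
 * arriving on top of any pending user shootdown escalates to TLBINV_ALL.
 */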
606 1.1 christos
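/*
 * Send a shootdown IPI to, at most, one CPU of each other TLB on which
 * this pmap is currently "onproc".  If the pmap merely has an ASID on a
 * TLB but is not "onproc" there, its ASID is reset instead and no IPI is
 * needed.  Returns true if at least one IPI was sent.
 */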
607 1.1 christos bool
608 1.1 christos pmap_tlb_shootdown_bystanders(pmap_t pm)
609 1.1 christos {
610 1.1 christos /*
	 * We don't need to deal with our own TLB.
612 1.1 christos */
613 1.3 matt kcpuset_t *pm_active;
614 1.3 matt
615 1.3 matt kcpuset_clone(&pm_active, pm->pm_active);
	kcpuset_remove(pm_active, cpu_tlb_info(curcpu())->ti_kcpuset);
618 1.1 christos const bool kernel_p = (pm == pmap_kernel());
619 1.1 christos bool ipi_sent = false;
620 1.1 christos
621 1.1 christos /*
	 * If pm_active gets more bits set, it happens after all our changes
	 * have been made, so those CPUs will already be cognizant of them.
624 1.1 christos */
625 1.1 christos
626 1.3 matt for (size_t i = 0; !kcpuset_iszero(pm_active); i++) {
627 1.1 christos KASSERT(i < pmap_ntlbs);
628 1.1 christos struct pmap_tlb_info * const ti = pmap_tlbs[i];
629 1.1 christos KASSERT(tlbinfo_index(ti) == i);
630 1.1 christos /*
631 1.1 christos * Skip this TLB if there are no active mappings for it.
632 1.1 christos */
633 1.3 matt if (!kcpuset_intersecting_p(pm_active, ti->ti_kcpuset))
634 1.1 christos continue;
635 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
636 1.3 matt kcpuset_remove(pm_active, ti->ti_kcpuset);
637 1.1 christos TLBINFO_LOCK(ti);
638 1.3 matt if (pmap_tlb_intersecting_onproc_p(pm, ti)) {
639 1.3 matt cpuid_t j = kcpuset_ffs_intersecting(pm->pm_onproc,
640 1.3 matt ti->ti_kcpuset);
641 1.1 christos if (kernel_p) {
642 1.1 christos ti->ti_tlbinvop =
643 1.1 christos TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
644 1.1 christos ti->ti_victim = NULL;
645 1.1 christos } else {
646 1.1 christos KASSERT(pai->pai_asid);
647 1.1 christos if (__predict_false(ti->ti_victim == pm)) {
648 1.1 christos KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
649 1.1 christos /*
650 1.1 christos * We still need to invalidate this one
651 1.1 christos * ASID so there's nothing to change.
652 1.1 christos */
653 1.1 christos } else {
654 1.1 christos ti->ti_tlbinvop =
655 1.1 christos TLBINV_USER_MAP(ti->ti_tlbinvop);
656 1.1 christos if (ti->ti_tlbinvop == TLBINV_ONE)
657 1.1 christos ti->ti_victim = pm;
658 1.1 christos else
659 1.1 christos ti->ti_victim = NULL;
660 1.1 christos }
661 1.1 christos }
662 1.1 christos TLBINFO_UNLOCK(ti);
663 1.1 christos /*
664 1.1 christos * Now we can send out the shootdown IPIs to a CPU
665 1.1 christos * that shares this TLB and is currently using this
666 1.1 christos * pmap. That CPU will process the IPI and do the
			 * pmap. That CPU will process the IPI and do
668 1.1 christos * will take advantage of that work. pm_onproc might
669 1.1 christos * change now that we have released the lock but we
670 1.1 christos * can tolerate spurious shootdowns.
671 1.1 christos */
672 1.1 christos cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
673 1.1 christos ipi_sent = true;
674 1.1 christos continue;
675 1.1 christos }
		if (pmap_tlb_intersecting_active_p(pm, ti)) {
677 1.1 christos /*
678 1.1 christos * If this pmap has an ASID assigned but it's not
679 1.1 christos * currently running, nuke its ASID. Next time the
680 1.1 christos * pmap is activated, it will allocate a new ASID.
681 1.1 christos * And best of all, we avoid an IPI.
682 1.1 christos */
683 1.1 christos KASSERT(!kernel_p);
684 1.1 christos pmap_pai_reset(ti, pai, pm);
685 1.1 christos //ti->ti_evcnt_lazy_shots.ev_count++;
686 1.1 christos }
687 1.1 christos TLBINFO_UNLOCK(ti);
688 1.1 christos }
689 1.1 christos
690 1.3 matt kcpuset_destroy(pm_active);
691 1.3 matt
692 1.1 christos return ipi_sent;
693 1.1 christos }
694 1.1 christos #endif /* MULTIPROCESSOR && PMAP_NEED_TLB_SHOOTDOWN */
695 1.1 christos
696 1.2 matt #ifndef PMAP_TLB_HWPAGEWALKER
697 1.1 christos int
698 1.1 christos pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pt_entry, u_int flags)
699 1.1 christos {
700 1.2 matt struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
701 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
702 1.1 christos int rv = -1;
703 1.1 christos
704 1.1 christos KASSERT(kpreempt_disabled());
705 1.1 christos
706 1.1 christos TLBINFO_LOCK(ti);
707 1.1 christos if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
708 1.1 christos pmap_tlb_asid_check();
709 1.1 christos rv = tlb_update_addr(va, pai->pai_asid, pt_entry,
710 1.1 christos (flags & PMAP_TLB_INSERT) != 0);
711 1.1 christos pmap_tlb_asid_check();
712 1.1 christos }
713 1.1 christos #if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
714 1.1 christos pm->pm_shootdown_pending = (flags & PMAP_TLB_NEED_IPI) != 0;
715 1.1 christos #endif
716 1.1 christos TLBINFO_UNLOCK(ti);
717 1.1 christos
718 1.1 christos return rv;
719 1.1 christos }
720 1.2 matt #endif /* !PMAP_TLB_HWPAGEWALKER */
721 1.1 christos
722 1.1 christos void
723 1.1 christos pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
724 1.1 christos {
725 1.2 matt struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
726 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
727 1.1 christos
728 1.5 matt UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
729 1.5 matt
730 1.1 christos KASSERT(kpreempt_disabled());
731 1.1 christos
732 1.5 matt UVMHIST_LOG(maphist, " (pm=%#x va=%#x) ti=%#x asid=%#x",
733 1.5 matt pm, va, ti, pai->pai_asid);
734 1.5 matt
735 1.1 christos TLBINFO_LOCK(ti);
736 1.1 christos if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
737 1.1 christos pmap_tlb_asid_check();
738 1.5 matt UVMHIST_LOG(maphist, " invalidating %#x asid %#x",
739 1.5 matt va, pai->pai_asid, 0, 0);
740 1.1 christos tlb_invalidate_addr(va, pai->pai_asid);
741 1.1 christos pmap_tlb_asid_check();
742 1.1 christos }
743 1.1 christos #if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
744 1.1 christos pm->pm_shootdown_pending = 1;
745 1.1 christos #endif
746 1.1 christos TLBINFO_UNLOCK(ti);
747 1.5 matt UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
748 1.1 christos }
749 1.1 christos
750 1.1 christos static inline void
751 1.1 christos pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
752 1.1 christos struct pmap_asid_info *pai)
753 1.1 christos {
754 1.1 christos /*
	 * We shouldn't have an ASID assigned, and thus must not be onproc
756 1.1 christos * nor active.
757 1.1 christos */
758 1.1 christos KASSERT(pm != pmap_kernel());
759 1.1 christos KASSERT(pai->pai_asid == 0);
760 1.1 christos KASSERT(pai->pai_link.le_prev == NULL);
761 1.1 christos #if defined(MULTIPROCESSOR)
762 1.5 matt KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
763 1.5 matt KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
764 1.1 christos #endif
765 1.1 christos KASSERT(ti->ti_asids_free > 0);
766 1.5 matt KASSERT(ti->ti_asid_hint > KERNEL_PID);
767 1.5 matt
768 1.5 matt /*
769 1.5 matt * If the last ASID allocated was the maximum ASID, then the
770 1.5 matt * hint will be out of range. Reset the hint to first
771 1.5 matt * available ASID.
772 1.5 matt */
773 1.5 matt if (PMAP_TLB_FLUSH_ASID_ON_RESET
774 1.5 matt && ti->ti_asid_hint > ti->ti_asid_max) {
775 1.5 matt ti->ti_asid_hint = KERNEL_PID + 1;
776 1.5 matt }
777 1.5 matt KASSERTMSG(ti->ti_asid_hint <= ti->ti_asid_max, "hint %u",
778 1.5 matt ti->ti_asid_hint);
779 1.1 christos
780 1.1 christos /*
781 1.1 christos * Let's see if the hinted ASID is free. If not search for
782 1.1 christos * a new one.
783 1.1 christos */
784 1.5 matt if (__predict_true(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) {
785 1.5 matt const size_t nbpw __diagused = 8*sizeof(ti->ti_asid_bitmap[0]);
786 1.5 matt size_t i;
787 1.5 matt u_long bits;
788 1.5 matt for (i = 0; (bits = ~ti->ti_asid_bitmap[i]) == 0; i++) {
789 1.5 matt KASSERT(i < __arraycount(ti->ti_asid_bitmap) - 1);
790 1.1 christos }
791 1.5 matt /*
792 1.5 matt * ffs wants to find the first bit set while we want
793 1.5 matt * to find the first bit cleared.
794 1.5 matt */
795 1.5 matt const u_int n = __builtin_ffsl(bits) - 1;
796 1.5 matt KASSERTMSG((bits << (nbpw - (n+1))) == (1ul << (nbpw-1)),
797 1.5 matt "n %u bits %#lx", n, bits);
798 1.5 matt KASSERT(n < nbpw);
799 1.5 matt ti->ti_asid_hint = n + i * nbpw;
800 1.1 christos }
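	/*
	 * Illustrative (hypothetical) example for the search above: if word 0
	 * of the bitmap is 0x07 (ASIDs 0-2 in use), then bits is ~0x07,
	 * __builtin_ffsl(bits) returns 4, n becomes 3, and the hint becomes
	 * ASID 3, the lowest free ASID in that word.
	 */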
801 1.1 christos
802 1.5 matt KASSERT(ti->ti_asid_hint > KERNEL_PID);
803 1.5 matt KASSERT(ti->ti_asid_hint <= ti->ti_asid_max);
804 1.5 matt KASSERTMSG(PMAP_TLB_FLUSH_ASID_ON_RESET
805 1.5 matt || TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint - 1),
806 1.5 matt "hint %u bitmap %p", ti->ti_asid_hint, ti->ti_asid_bitmap);
807 1.5 matt KASSERTMSG(!TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint),
808 1.5 matt "hint %u bitmap %p", ti->ti_asid_hint, ti->ti_asid_bitmap);
809 1.5 matt
810 1.1 christos /*
811 1.1 christos * The hint contains our next ASID so take it and advance the hint.
812 1.1 christos * Mark it as used and insert the pai into the list of active asids.
813 1.1 christos * There is also one less asid free in this TLB.
814 1.1 christos */
815 1.5 matt KASSERT(ti->ti_asid_hint > KERNEL_PID);
816 1.1 christos pai->pai_asid = ti->ti_asid_hint++;
817 1.5 matt #ifdef MULTIPROCESSOR
818 1.5 matt if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
819 1.5 matt /*
820 1.5 matt * Clean the new ASID from the TLB.
821 1.5 matt */
822 1.5 matt tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
823 1.5 matt }
824 1.5 matt #endif
825 1.1 christos TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
826 1.1 christos LIST_INSERT_HEAD(&ti->ti_pais, pai, pai_link);
827 1.1 christos ti->ti_asids_free--;
828 1.1 christos
829 1.1 christos #if defined(MULTIPROCESSOR)
830 1.1 christos /*
831 1.1 christos * Mark that we now have an active ASID for all CPUs sharing this TLB.
832 1.1 christos * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
834 1.1 christos */
835 1.3 matt #if PMAP_TLB_MAX == 1
836 1.3 matt kcpuset_copy(pm->pm_active, kcpuset_running);
837 1.3 matt #else
838 1.3 matt kcpuset_atomicly_merge(pm->pm_active, ti->ti_kcpuset);
839 1.3 matt #endif
840 1.1 christos #endif
841 1.1 christos }
842 1.1 christos
843 1.1 christos /*
844 1.1 christos * Acquire a TLB address space tag (called ASID or TLBPID) and return it.
 * The ASID might already have been acquired.
846 1.1 christos */
847 1.1 christos void
848 1.1 christos pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l)
849 1.1 christos {
850 1.1 christos struct cpu_info * const ci = l->l_cpu;
851 1.2 matt struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
852 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
853 1.1 christos
854 1.1 christos KASSERT(kpreempt_disabled());
855 1.1 christos
856 1.5 matt UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
857 1.5 matt
858 1.1 christos /*
	 * The kernel pmap uses a fixed ASID and thus doesn't need to acquire one.
860 1.1 christos */
861 1.5 matt if (pm == pmap_kernel()) {
862 1.5 matt UVMHIST_LOG(maphist, " <-- done (kernel)", 0, 0, 0, 0);
863 1.1 christos return;
864 1.5 matt }
865 1.1 christos
866 1.5 matt UVMHIST_LOG(maphist, " (pm=%#x, l=%#x, ti=%#x)", pm, l, ti, 0);
867 1.1 christos TLBINFO_LOCK(ti);
868 1.1 christos KASSERT(pai->pai_asid <= KERNEL_PID || pai->pai_link.le_prev != NULL);
869 1.1 christos KASSERT(pai->pai_asid > KERNEL_PID || pai->pai_link.le_prev == NULL);
870 1.1 christos pmap_pai_check(ti);
871 1.1 christos if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) {
872 1.1 christos /*
		 * If we've run out of ASIDs, reinitialize the ASID space.
874 1.1 christos */
875 1.1 christos if (__predict_false(tlbinfo_noasids_p(ti))) {
876 1.1 christos KASSERT(l == curlwp);
877 1.5 matt UVMHIST_LOG(maphist, " asid reinit", 0, 0, 0, 0);
878 1.1 christos pmap_tlb_asid_reinitialize(ti, TLBINV_NOBODY);
879 1.5 matt KASSERT(!tlbinfo_noasids_p(ti));
880 1.1 christos }
881 1.1 christos
882 1.1 christos /*
883 1.1 christos * Get an ASID.
884 1.1 christos */
885 1.1 christos pmap_tlb_asid_alloc(ti, pm, pai);
886 1.5 matt UVMHIST_LOG(maphist, "allocated asid %#x", pai->pai_asid, 0, 0, 0);
887 1.1 christos }
888 1.1 christos
889 1.1 christos if (l == curlwp) {
890 1.1 christos #if defined(MULTIPROCESSOR)
891 1.1 christos /*
892 1.1 christos * The bits in pm_onproc belonging to this TLB can only
		 * be changed while this TLB's lock is held unless atomic
894 1.1 christos * operations are used.
895 1.1 christos */
896 1.5 matt KASSERT(pm != pmap_kernel());
897 1.3 matt kcpuset_atomic_set(pm->pm_onproc, cpu_index(ci));
898 1.1 christos #endif
899 1.1 christos ci->ci_pmap_asid_cur = pai->pai_asid;
900 1.5 matt UVMHIST_LOG(maphist, "setting asid to %#x", pai->pai_asid, 0, 0, 0);
901 1.1 christos tlb_set_asid(pai->pai_asid);
902 1.1 christos pmap_tlb_asid_check();
903 1.1 christos } else {
904 1.1 christos printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp);
905 1.1 christos }
906 1.1 christos TLBINFO_UNLOCK(ti);
907 1.5 matt UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
908 1.1 christos }
909 1.1 christos
910 1.1 christos void
911 1.1 christos pmap_tlb_asid_deactivate(pmap_t pm)
912 1.1 christos {
913 1.5 matt UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
914 1.1 christos KASSERT(kpreempt_disabled());
915 1.1 christos #if defined(MULTIPROCESSOR)
916 1.1 christos /*
	 * The kernel pmap is always onproc and active and must never have
	 * those bits cleared. If pmap_remove_all was called, it has already
	 * deactivated the pmap and thus onproc will be 0 so there's nothing
920 1.1 christos * to do.
921 1.1 christos */
922 1.5 matt if (pm != pmap_kernel() && !kcpuset_iszero(pm->pm_onproc)) {
923 1.1 christos struct cpu_info * const ci = curcpu();
924 1.1 christos KASSERT(!cpu_intr_p());
925 1.3 matt KASSERTMSG(kcpuset_isset(pm->pm_onproc, cpu_index(ci)),
926 1.3 matt "%s: pmap %p onproc %p doesn't include cpu %d (%p)",
927 1.1 christos __func__, pm, pm->pm_onproc, cpu_index(ci), ci);
928 1.1 christos /*
929 1.1 christos * The bits in pm_onproc that belong to this TLB can
		 * be changed while this TLB's lock is not held as long
931 1.1 christos * as we use atomic ops.
932 1.1 christos */
933 1.3 matt kcpuset_atomic_clear(pm->pm_onproc, cpu_index(ci));
934 1.1 christos }
935 1.5 matt #endif
936 1.1 christos curcpu()->ci_pmap_asid_cur = 0;
937 1.5 matt UVMHIST_LOG(maphist, " <-- done (pm=%#x)", pm, 0, 0, 0);
938 1.1 christos tlb_set_asid(0);
939 1.5 matt #if defined(DEBUG)
940 1.1 christos pmap_tlb_asid_check();
941 1.1 christos #endif
942 1.1 christos }
943 1.1 christos
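/*
 * Release this pmap's ASID on every TLB for which it holds one.  If the
 * pmap is not "onproc" on a TLB its ASID is simply reset; if it is in use
 * on the local TLB its entries are invalidated; otherwise a pending
 * shootdown is recorded for the pmap.
 */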
944 1.1 christos void
945 1.1 christos pmap_tlb_asid_release_all(struct pmap *pm)
946 1.1 christos {
947 1.1 christos KASSERT(pm != pmap_kernel());
948 1.1 christos #if defined(MULTIPROCESSOR)
949 1.5 matt //KASSERT(!kcpuset_iszero(pm->pm_onproc)); // XXX
950 1.3 matt #if PMAP_TLB_MAX > 1
951 1.5 matt struct cpu_info * const ci = curcpu();
952 1.3 matt for (u_int i = 0; !kcpuset_iszero(pm->pm_active); i++) {
953 1.1 christos KASSERT(i < pmap_ntlbs);
954 1.1 christos struct pmap_tlb_info * const ti = pmap_tlbs[i];
955 1.3 matt #else
956 1.3 matt struct pmap_tlb_info * const ti = &pmap_tlb0_info;
957 1.3 matt #endif
958 1.5 matt struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
959 1.5 matt TLBINFO_LOCK(ti);
960 1.5 matt if (PMAP_PAI_ASIDVALID_P(pai, ti)) {
961 1.5 matt /*
962 1.5 matt * If this pmap isn't onproc on any of the cpus
963 1.5 matt * belonging to this tlb domain, we can just reset
964 1.5 matt * the ASID and be done.
965 1.5 matt */
966 1.5 matt if (!pmap_tlb_intersecting_onproc_p(pm, ti)) {
967 1.5 matt KASSERT(ti->ti_victim != pm);
968 1.5 matt pmap_pai_reset(ti, pai, pm);
969 1.5 matt #if PMAP_TLB_MAX == 1
970 1.5 matt } else {
971 1.5 matt KASSERT(cpu_tlb_info(ci) == ti);
972 1.5 matt tlb_invalidate_asids(pai->pai_asid,
973 1.5 matt pai->pai_asid);
974 1.5 matt #else
975 1.5 matt } else if (cpu_tlb_info(ci) == ti) {
976 1.5 matt tlb_invalidate_asids(pai->pai_asid,
977 1.5 matt pai->pai_asid);
978 1.5 matt } else {
				pm->pm_shootdown_pending = 1;
980 1.5 matt #endif
981 1.5 matt }
982 1.1 christos }
983 1.5 matt TLBINFO_UNLOCK(ti);
984 1.3 matt #if PMAP_TLB_MAX > 1
985 1.1 christos }
986 1.3 matt #endif
987 1.1 christos #else
988 1.1 christos /*
989 1.1 christos * Handle the case of an UP kernel which only has, at most, one ASID.
990 1.1 christos * If the pmap has an ASID allocated, free it.
991 1.1 christos */
992 1.1 christos struct pmap_tlb_info * const ti = &pmap_tlb0_info;
993 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
994 1.1 christos TLBINFO_LOCK(ti);
995 1.1 christos if (pai->pai_asid > KERNEL_PID) {
996 1.5 matt if (curcpu()->ci_pmap_cur == pm) {
997 1.5 matt tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
998 1.5 matt } else {
999 1.5 matt pmap_pai_reset(ti, pai, pm);
1000 1.5 matt }
1001 1.1 christos }
1002 1.1 christos TLBINFO_UNLOCK(ti);
1003 1.1 christos #endif /* MULTIPROCESSOR */
1004 1.1 christos }
1005 1.1 christos
1006 1.1 christos void
1007 1.1 christos pmap_tlb_asid_check(void)
1008 1.1 christos {
1009 1.1 christos #ifdef DEBUG
1010 1.1 christos kpreempt_disable();
1011 1.5 matt const tlb_asid_t asid __debugused = tlb_get_asid();
1012 1.1 christos KDASSERTMSG(asid == curcpu()->ci_pmap_asid_cur,
1013 1.1 christos "%s: asid (%#x) != current asid (%#x)",
1014 1.1 christos __func__, asid, curcpu()->ci_pmap_asid_cur);
1015 1.1 christos kpreempt_enable();
1016 1.1 christos #endif
1017 1.1 christos }
1018 1.1 christos
1019 1.1 christos #ifdef DEBUG
1020 1.1 christos void
1021 1.1 christos pmap_tlb_check(pmap_t pm, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
1022 1.1 christos {
1023 1.2 matt struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
1024 1.1 christos struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
1025 1.1 christos TLBINFO_LOCK(ti);
1026 1.1 christos if (pm == pmap_kernel() || pai->pai_asid > KERNEL_PID)
1027 1.1 christos tlb_walk(pm, func);
1028 1.1 christos TLBINFO_UNLOCK(ti);
1029 1.1 christos }
1030 1.1 christos #endif /* DEBUG */