1 /* $NetBSD: pmap.c,v 1.236 2012/09/02 14:46:38 matt Exp $ */
2
3 /*
4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
40 * Copyright (c) 2001 Richard Earnshaw
41 * Copyright (c) 2001-2002 Christopher Gilbert
42 * All rights reserved.
43 *
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. The name of the company nor the name of the author may be used to
50 * endorse or promote products derived from this software without specific
51 * prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
54 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
55 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 */
65
66 /*-
67 * Copyright (c) 1999 The NetBSD Foundation, Inc.
68 * All rights reserved.
69 *
70 * This code is derived from software contributed to The NetBSD Foundation
71 * by Charles M. Hannum.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 *
82 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
83 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
84 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
85 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
86 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
87 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
88 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
89 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
90 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
91 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
92 * POSSIBILITY OF SUCH DAMAGE.
93 */
94
95 /*
96 * Copyright (c) 1994-1998 Mark Brinicombe.
97 * Copyright (c) 1994 Brini.
98 * All rights reserved.
99 *
100 * This code is derived from software written for Brini by Mark Brinicombe
101 *
102 * Redistribution and use in source and binary forms, with or without
103 * modification, are permitted provided that the following conditions
104 * are met:
105 * 1. Redistributions of source code must retain the above copyright
106 * notice, this list of conditions and the following disclaimer.
107 * 2. Redistributions in binary form must reproduce the above copyright
108 * notice, this list of conditions and the following disclaimer in the
109 * documentation and/or other materials provided with the distribution.
110 * 3. All advertising materials mentioning features or use of this software
111 * must display the following acknowledgement:
112 * This product includes software developed by Mark Brinicombe.
113 * 4. The name of the author may not be used to endorse or promote products
114 * derived from this software without specific prior written permission.
115 *
116 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
117 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
118 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
119 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
120 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
121 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
122 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
123 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
124 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
125 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
126 * RiscBSD kernel project
127 *
128 * pmap.c
129 *
130 * Machine dependent vm stuff
131 *
132 * Created : 20/09/94
133 */
134
135 /*
136 * armv6 and VIPT cache support by 3am Software Foundry,
137 * Copyright (c) 2007 Microsoft
138 */
139
140 /*
141 * Performance improvements, UVM changes, overhauls and part-rewrites
142 * were contributed by Neil A. Carson <neil (at) causality.com>.
143 */
144
145 /*
146 * Overhauled again to speed up the pmap, use MMU Domains so that L1 tables
147 * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
148 * Systems, Inc.
149 *
150 * There are still a few things outstanding at this time:
151 *
152 * - There are some unresolved issues for MP systems:
153 *
154 * o The L1 metadata needs a lock, or more specifically, some places
155 * need to acquire an exclusive lock when modifying L1 translation
156 * table entries.
157 *
158 * o When one cpu modifies an L1 entry, and that L1 table is also
159 * being used by another cpu, then the latter will need to be told
160 * that a tlb invalidation may be necessary. (But only if the old
161 * domain number in the L1 entry being over-written is currently
162 * the active domain on that cpu). I guess there are lots more tlb
163 * shootdown issues too...
164 *
165 * o If the vector_page is at 0x00000000 instead of 0xffff0000, then
166 * MP systems will lose big-time because of the MMU domain hack.
167 * The only way this can be solved (apart from moving the vector
168 * page to 0xffff0000) is to reserve the first 1MB of user address
169 * space for kernel use only. This would require re-linking all
170 * applications so that the text section starts above this 1MB
171 * boundary.
172 *
173 * o Tracking which VM space is resident in the cache/tlb has not yet
174 * been implemented for MP systems.
175 *
176 * o Finally, there is a pathological condition where two cpus running
177 * two separate processes (not lwps) which happen to share an L1
178 * can get into a fight over one or more L1 entries. This will result
179 * in a significant slow-down if both processes are in tight loops.
180 */
181
182 /*
183 * Special compilation symbols
184 * PMAP_DEBUG - Build in pmap_debug_level code
185 */
186
187 /* Include header files */
188
189 #include "opt_cpuoptions.h"
190 #include "opt_pmap_debug.h"
191 #include "opt_ddb.h"
192 #include "opt_lockdebug.h"
193 #include "opt_multiprocessor.h"
194
195 #include <sys/param.h>
196 #include <sys/types.h>
197 #include <sys/kernel.h>
198 #include <sys/systm.h>
199 #include <sys/proc.h>
200 #include <sys/pool.h>
201 #include <sys/kmem.h>
202 #include <sys/cdefs.h>
203 #include <sys/cpu.h>
204 #include <sys/sysctl.h>
205
206 #include <uvm/uvm.h>
207
208 #include <sys/bus.h>
209 #include <machine/pmap.h>
210 #include <machine/pcb.h>
211 #include <machine/param.h>
212 #include <arm/cpuconf.h>
213 #include <arm/arm32/katelib.h>
214
215 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.236 2012/09/02 14:46:38 matt Exp $");
216
217 #ifdef PMAP_DEBUG
218
219 /* XXX need to get rid of all refs to this */
220 int pmap_debug_level = 0;
221
222 /*
223 * for switching to potentially finer grained debugging
224 */
225 #define PDB_FOLLOW 0x0001
226 #define PDB_INIT 0x0002
227 #define PDB_ENTER 0x0004
228 #define PDB_REMOVE 0x0008
229 #define PDB_CREATE 0x0010
230 #define PDB_PTPAGE 0x0020
231 #define PDB_GROWKERN 0x0040
232 #define PDB_BITS 0x0080
233 #define PDB_COLLECT 0x0100
234 #define PDB_PROTECT 0x0200
235 #define PDB_MAP_L1 0x0400
236 #define PDB_BOOTSTRAP 0x1000
237 #define PDB_PARANOIA 0x2000
238 #define PDB_WIRING 0x4000
239 #define PDB_PVDUMP 0x8000
240 #define PDB_VAC 0x10000
241 #define PDB_KENTER 0x20000
242 #define PDB_KREMOVE 0x40000
243 #define PDB_EXEC 0x80000
244
245 int debugmap = 1;
246 int pmapdebug = 0;
247 #define NPDEBUG(_lev_,_stat_) \
248 if (pmapdebug & (_lev_)) \
249 ((_stat_))
250
251 #else /* PMAP_DEBUG */
252 #define NPDEBUG(_lev_,_stat_) /* Nothing */
253 #endif /* PMAP_DEBUG */
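/*
 * Illustrative usage of NPDEBUG() (a sketch only, mirroring how the macro
 * is used elsewhere in this file; PDB_ENTER is one of the selector bits
 * defined above):
 *
 *	NPDEBUG(PDB_ENTER,
 *	    printf("pmap_enter: pm %p va 0x%lx\n", pm, va));
 *
 * Note that the macro expands to a bare 'if' statement, so wrap it in
 * braces if it forms the body of another conditional.
 */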
254
255 /*
256 * pmap_kernel() points here
257 */
258 static struct pmap kernel_pmap_store;
259 struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
260
261 /*
262 * Which pmap is currently 'live' in the cache
263 *
264 * XXXSCW: Fix for SMP ...
265 */
266 static pmap_t pmap_recent_user;
267
268 /*
269 * Pointer to last active lwp, or NULL if it exited.
270 */
271 struct lwp *pmap_previous_active_lwp;
272
273 /*
274 * Pool and cache that pmap structures are allocated from.
275 * We use a cache to avoid clearing the pm_l2[] array (1KB)
276 * in pmap_create().
277 */
278 static struct pool_cache pmap_cache;
279 static LIST_HEAD(, pmap) pmap_pmaps;
280
281 /*
282 * Pool of PV structures
283 */
284 static struct pool pmap_pv_pool;
285 static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
286 static void pmap_bootstrap_pv_page_free(struct pool *, void *);
287 static struct pool_allocator pmap_bootstrap_pv_allocator = {
288 pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
289 };
290
291 /*
292 * Pool and cache of l2_dtable structures.
293 * We use a cache to avoid clearing the structures when they're
294 * allocated. (196 bytes)
295 */
296 static struct pool_cache pmap_l2dtable_cache;
297 static vaddr_t pmap_kernel_l2dtable_kva;
298
299 /*
300 * Pool and cache of L2 page descriptors.
301 * We use a cache to avoid clearing the descriptor table
302 * when they're allocated. (1KB)
303 */
304 static struct pool_cache pmap_l2ptp_cache;
305 static vaddr_t pmap_kernel_l2ptp_kva;
306 static paddr_t pmap_kernel_l2ptp_phys;
307
308 #ifdef PMAPCOUNTERS
309 #define PMAP_EVCNT_INITIALIZER(name) \
310 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
311
312 #ifdef PMAP_CACHE_VIPT
313 static struct evcnt pmap_ev_vac_clean_one =
314 PMAP_EVCNT_INITIALIZER("clean page (1 color)");
315 static struct evcnt pmap_ev_vac_flush_one =
316 PMAP_EVCNT_INITIALIZER("flush page (1 color)");
317 static struct evcnt pmap_ev_vac_flush_lots =
318 PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
319 static struct evcnt pmap_ev_vac_flush_lots2 =
320 PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
321 EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
322 EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
323 EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
324 EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);
325
326 static struct evcnt pmap_ev_vac_color_new =
327 PMAP_EVCNT_INITIALIZER("new page color");
328 static struct evcnt pmap_ev_vac_color_reuse =
329 PMAP_EVCNT_INITIALIZER("ok first page color");
330 static struct evcnt pmap_ev_vac_color_ok =
331 PMAP_EVCNT_INITIALIZER("ok page color");
332 static struct evcnt pmap_ev_vac_color_blind =
333 PMAP_EVCNT_INITIALIZER("blind page color");
334 static struct evcnt pmap_ev_vac_color_change =
335 PMAP_EVCNT_INITIALIZER("change page color");
336 static struct evcnt pmap_ev_vac_color_erase =
337 PMAP_EVCNT_INITIALIZER("erase page color");
338 static struct evcnt pmap_ev_vac_color_none =
339 PMAP_EVCNT_INITIALIZER("no page color");
340 static struct evcnt pmap_ev_vac_color_restore =
341 PMAP_EVCNT_INITIALIZER("restore page color");
342
343 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
344 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
345 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
346 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
347 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
348 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
349 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
350 EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
351 #endif
352
353 static struct evcnt pmap_ev_mappings =
354 PMAP_EVCNT_INITIALIZER("pages mapped");
355 static struct evcnt pmap_ev_unmappings =
356 PMAP_EVCNT_INITIALIZER("pages unmapped");
357 static struct evcnt pmap_ev_remappings =
358 PMAP_EVCNT_INITIALIZER("pages remapped");
359
360 EVCNT_ATTACH_STATIC(pmap_ev_mappings);
361 EVCNT_ATTACH_STATIC(pmap_ev_unmappings);
362 EVCNT_ATTACH_STATIC(pmap_ev_remappings);
363
364 static struct evcnt pmap_ev_kernel_mappings =
365 PMAP_EVCNT_INITIALIZER("kernel pages mapped");
366 static struct evcnt pmap_ev_kernel_unmappings =
367 PMAP_EVCNT_INITIALIZER("kernel pages unmapped");
368 static struct evcnt pmap_ev_kernel_remappings =
369 PMAP_EVCNT_INITIALIZER("kernel pages remapped");
370
371 EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings);
372 EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings);
373 EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings);
374
375 static struct evcnt pmap_ev_kenter_mappings =
376 PMAP_EVCNT_INITIALIZER("kenter pages mapped");
377 static struct evcnt pmap_ev_kenter_unmappings =
378 PMAP_EVCNT_INITIALIZER("kenter pages unmapped");
379 static struct evcnt pmap_ev_kenter_remappings =
380 PMAP_EVCNT_INITIALIZER("kenter pages remapped");
381 static struct evcnt pmap_ev_pt_mappings =
382 PMAP_EVCNT_INITIALIZER("page table pages mapped");
383
384 EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings);
385 EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings);
386 EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
387 EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);
388
389 #ifdef PMAP_CACHE_VIPT
390 static struct evcnt pmap_ev_exec_mappings =
391 PMAP_EVCNT_INITIALIZER("exec pages mapped");
392 static struct evcnt pmap_ev_exec_cached =
393 PMAP_EVCNT_INITIALIZER("exec pages cached");
394
395 EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings);
396 EVCNT_ATTACH_STATIC(pmap_ev_exec_cached);
397
398 static struct evcnt pmap_ev_exec_synced =
399 PMAP_EVCNT_INITIALIZER("exec pages synced");
400 static struct evcnt pmap_ev_exec_synced_map =
401 PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
402 static struct evcnt pmap_ev_exec_synced_unmap =
403 PMAP_EVCNT_INITIALIZER("exec pages synced (UM)");
404 static struct evcnt pmap_ev_exec_synced_remap =
405 PMAP_EVCNT_INITIALIZER("exec pages synced (RM)");
406 static struct evcnt pmap_ev_exec_synced_clearbit =
407 PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
408 static struct evcnt pmap_ev_exec_synced_kremove =
409 PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");
410
411 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
412 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
413 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
414 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
415 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
416 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);
417
418 static struct evcnt pmap_ev_exec_discarded_unmap =
419 PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
420 static struct evcnt pmap_ev_exec_discarded_zero =
421 PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)");
422 static struct evcnt pmap_ev_exec_discarded_copy =
423 PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)");
424 static struct evcnt pmap_ev_exec_discarded_page_protect =
425 PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)");
426 static struct evcnt pmap_ev_exec_discarded_clearbit =
427 PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
428 static struct evcnt pmap_ev_exec_discarded_kremove =
429 PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");
430
431 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
432 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
433 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy);
434 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
435 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
436 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
437 #endif /* PMAP_CACHE_VIPT */
438
439 static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
440 static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects");
441 static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations");
442
443 EVCNT_ATTACH_STATIC(pmap_ev_updates);
444 EVCNT_ATTACH_STATIC(pmap_ev_collects);
445 EVCNT_ATTACH_STATIC(pmap_ev_activations);
446
447 #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++))
448 #else
449 #define PMAPCOUNT(x) ((void)0)
450 #endif
451
452 /*
453 * pmap copy/zero page, and mem(4) hook point
454 */
455 static pt_entry_t *csrc_pte, *cdst_pte;
456 static vaddr_t csrcp, cdstp;
457 vaddr_t memhook; /* used by mem.c */
458 kmutex_t memlock; /* used by mem.c */
459 void *zeropage; /* used by mem.c */
460 extern void *msgbufaddr;
461 int pmap_kmpages;
462 /*
463 * Flag to indicate if pmap_init() has done its thing
464 */
465 bool pmap_initialized;
466
467 /*
468 * Misc. locking data structures
469 */
470
471 #define pmap_acquire_pmap_lock(pm) \
472 do { \
473 if ((pm) != pmap_kernel()) \
474 mutex_enter((pm)->pm_lock); \
475 } while (/*CONSTCOND*/0)
476
477 #define pmap_release_pmap_lock(pm) \
478 do { \
479 if ((pm) != pmap_kernel()) \
480 mutex_exit((pm)->pm_lock); \
481 } while (/*CONSTCOND*/0)
482
483
484 /*
485 * Metadata for L1 translation tables.
486 */
487 struct l1_ttable {
488 /* Entry on the L1 Table list */
489 SLIST_ENTRY(l1_ttable) l1_link;
490
491 /* Entry on the L1 Least Recently Used list */
492 TAILQ_ENTRY(l1_ttable) l1_lru;
493
494 /* Track how many domains are allocated from this L1 */
495 volatile u_int l1_domain_use_count;
496
497 /*
498 * A free-list of domain numbers for this L1.
499 * We avoid using ffs() and a bitmap to track domains since ffs()
500 * is slow on ARM.
501 */
502 u_int8_t l1_domain_first;
503 u_int8_t l1_domain_free[PMAP_DOMAINS];
504
505 /* Physical address of this L1 page table */
506 paddr_t l1_physaddr;
507
508 /* KVA of this L1 page table */
509 pd_entry_t *l1_kva;
510 };
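/*
 * Illustrative sketch (not compiled) of the embedded domain free-list
 * above: l1_domain_free[] chains each free domain number to the next,
 * with l1_domain_first holding the head, so no bitmap scan (ffs()) is
 * ever needed.  Allocation pops the head and freeing pushes it back,
 * exactly as pmap_alloc_l1() and pmap_free_l1() do below:
 *
 *	// allocate: take the first free domain number
 *	domain = l1->l1_domain_first;
 *	l1->l1_domain_first = l1->l1_domain_free[domain];
 *
 *	// free: push the domain number back onto the list head
 *	l1->l1_domain_free[domain] = l1->l1_domain_first;
 *	l1->l1_domain_first = domain;
 */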
511
512 /*
513 * Convert a virtual address into its L1 table index. That is, the
514 * index used to locate the L2 descriptor table pointer in an L1 table.
515 * This is basically used to index l1->l1_kva[].
516 *
517 * Each L2 descriptor table represents 1MB of VA space.
518 */
519 #define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT)
520
521 /*
522 * L1 Page Tables are tracked using a Least Recently Used list.
523 * - New L1s are allocated from the HEAD.
524 * - Freed L1s are added to the TAIL.
525 * - Recently accessed L1s (where an 'access' is some change to one of
526 * the userland pmaps which owns this L1) are moved to the TAIL.
527 */
528 static TAILQ_HEAD(, l1_ttable) l1_lru_list;
529 static kmutex_t l1_lru_lock __cacheline_aligned;
530
531 /*
532 * A list of all L1 tables
533 */
534 static SLIST_HEAD(, l1_ttable) l1_list;
535
536 /*
537 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
538 *
539 * This is normally 16MB worth of L2 page descriptors for any given pmap.
540 * Reference counts are maintained for L2 descriptors so they can be
541 * freed when empty.
542 */
543 struct l2_dtable {
544 /* The number of L2 page descriptors allocated to this l2_dtable */
545 u_int l2_occupancy;
546
547 /* List of L2 page descriptors */
548 struct l2_bucket {
549 pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */
550 paddr_t l2b_phys; /* Physical address of same */
551 u_short l2b_l1idx; /* This L2 table's L1 index */
552 u_short l2b_occupancy; /* How many active descriptors */
553 } l2_bucket[L2_BUCKET_SIZE];
554 };
555
556 /*
557 * Given an L1 table index, calculate the corresponding l2_dtable index
558 * and bucket index within the l2_dtable.
559 */
560 #define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \
561 (L2_SIZE - 1))
562 #define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1))
563
564 /*
565 * Given a virtual address, this macro returns the
566 * virtual address required to drop into the next L2 bucket.
567 */
568 #define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE)
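/*
 * Worked example (illustrative only, assuming the usual arm32 values
 * L1_S_SHIFT == 20, L2_BUCKET_LOG2 == 4 and L2_BUCKET_SIZE == 16): for
 * va == 0x40312345 the lookup performed by pmap_get_l2_bucket() below
 * decomposes as
 *
 *	l1idx = L1_IDX(va);			   // 0x403 (1MB section)
 *	l2    = pm->pm_l2[L2_IDX(l1idx)];	   // l2_dtable 0x40
 *	l2b   = &l2->l2_bucket[L2_BUCKET(l1idx)];  // bucket 0x3 within it
 *	ptep  = &l2b->l2b_kva[l2pte_index(va)];	   // PTE for the page
 *
 * and L2_NEXT_BUCKET(va) == 0x40400000, the start of the next 1MB
 * section.
 */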
569
570 /*
571 * L2 allocation.
572 */
573 #define pmap_alloc_l2_dtable() \
574 pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
575 #define pmap_free_l2_dtable(l2) \
576 pool_cache_put(&pmap_l2dtable_cache, (l2))
577 #define pmap_alloc_l2_ptp(pap) \
578 ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
579 PR_NOWAIT, (pap)))
580
581 /*
582 * We try to map the page tables write-through, if possible. However, not
583 * all CPUs have a write-through cache mode, so on those we have to sync
584 * the cache when we frob page tables.
585 *
586 * We try to evaluate this at compile time, if possible. However, it's
587 * not always possible to do that, hence this run-time var.
588 */
589 int pmap_needs_pte_sync;
590
591 /*
592 * Real definition of pv_entry.
593 */
594 struct pv_entry {
595 SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */
596 pmap_t pv_pmap; /* pmap where mapping lies */
597 vaddr_t pv_va; /* virtual address for mapping */
598 u_int pv_flags; /* flags */
599 };
600
601 /*
602 * Macro to determine if a mapping might be resident in the
603 * instruction cache and/or TLB
604 */
605 #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
606 #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0)
607
608 /*
609 * Macro to determine if a mapping might be resident in the
610 * data cache and/or TLB
611 */
612 #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
613
614 /*
615 * Local prototypes
616 */
617 static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
618 static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
619 pt_entry_t **);
620 static bool pmap_is_current(pmap_t);
621 static bool pmap_is_cached(pmap_t);
622 static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
623 pmap_t, vaddr_t, u_int);
624 static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
625 static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
626 static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
627 u_int, u_int);
628
629 static void pmap_pinit(pmap_t);
630 static int pmap_pmap_ctor(void *, void *, int);
631
632 static void pmap_alloc_l1(pmap_t);
633 static void pmap_free_l1(pmap_t);
634 static void pmap_use_l1(pmap_t);
635
636 static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
637 static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
638 static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
639 static int pmap_l2ptp_ctor(void *, void *, int);
640 static int pmap_l2dtable_ctor(void *, void *, int);
641
642 static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
643 #ifdef PMAP_CACHE_VIVT
644 static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
645 static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
646 #endif
647
648 static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
649 #ifdef PMAP_CACHE_VIVT
650 static int pmap_clean_page(struct pv_entry *, bool);
651 #endif
652 #ifdef PMAP_CACHE_VIPT
653 static void pmap_syncicache_page(struct vm_page_md *, paddr_t);
654 enum pmap_flush_op {
655 PMAP_FLUSH_PRIMARY,
656 PMAP_FLUSH_SECONDARY,
657 PMAP_CLEAN_PRIMARY
658 };
659 static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
660 #endif
661 static void pmap_page_remove(struct vm_page_md *, paddr_t);
662
663 static void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
664 static vaddr_t kernel_pt_lookup(paddr_t);
665
666
667 /*
668 * External function prototypes
669 */
670 extern void bzero_page(vaddr_t);
671 extern void bcopy_page(vaddr_t, vaddr_t);
672
673 /*
674 * Misc variables
675 */
676 vaddr_t virtual_avail;
677 vaddr_t virtual_end;
678 vaddr_t pmap_curmaxkvaddr;
679
680 paddr_t avail_start;
681 paddr_t avail_end;
682
683 pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
684 pv_addr_t kernelpages;
685 pv_addr_t kernel_l1pt;
686 pv_addr_t systempage;
687
688 /* Function to set the debug level of the pmap code */
689
690 #ifdef PMAP_DEBUG
691 void
692 pmap_debug(int level)
693 {
694 pmap_debug_level = level;
695 printf("pmap_debug: level=%d\n", pmap_debug_level);
696 }
697 #endif /* PMAP_DEBUG */
698
699 /*
700 * A bunch of routines to conditionally flush the caches/TLB depending
701 * on whether the specified pmap actually needs to be flushed at any
702 * given time.
703 */
704 static inline void
705 pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va)
706 {
707
708 if (pm->pm_cstate.cs_tlb_id)
709 cpu_tlb_flushID_SE(va);
710 }
711
712 static inline void
713 pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va)
714 {
715
716 if (pm->pm_cstate.cs_tlb_d)
717 cpu_tlb_flushD_SE(va);
718 }
719
720 static inline void
721 pmap_tlb_flushID(pmap_t pm)
722 {
723
724 if (pm->pm_cstate.cs_tlb_id) {
725 cpu_tlb_flushID();
726 pm->pm_cstate.cs_tlb = 0;
727 }
728 }
729
730 static inline void
731 pmap_tlb_flushD(pmap_t pm)
732 {
733
734 if (pm->pm_cstate.cs_tlb_d) {
735 cpu_tlb_flushD();
736 pm->pm_cstate.cs_tlb_d = 0;
737 }
738 }
739
740 #ifdef PMAP_CACHE_VIVT
741 static inline void
742 pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len)
743 {
744 if (pm->pm_cstate.cs_cache_id) {
745 cpu_idcache_wbinv_range(va, len);
746 }
747 }
748
749 static inline void
750 pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len,
751 bool do_inv, bool rd_only)
752 {
753
754 if (pm->pm_cstate.cs_cache_d) {
755 if (do_inv) {
756 if (rd_only)
757 cpu_dcache_inv_range(va, len);
758 else
759 cpu_dcache_wbinv_range(va, len);
760 } else
761 if (!rd_only)
762 cpu_dcache_wb_range(va, len);
763 }
764 }
765
766 static inline void
767 pmap_idcache_wbinv_all(pmap_t pm)
768 {
769 if (pm->pm_cstate.cs_cache_id) {
770 cpu_idcache_wbinv_all();
771 pm->pm_cstate.cs_cache = 0;
772 }
773 }
774
775 static inline void
776 pmap_dcache_wbinv_all(pmap_t pm)
777 {
778 if (pm->pm_cstate.cs_cache_d) {
779 cpu_dcache_wbinv_all();
780 pm->pm_cstate.cs_cache_d = 0;
781 }
782 }
783 #endif /* PMAP_CACHE_VIVT */
784
785 static inline bool
786 pmap_is_current(pmap_t pm)
787 {
788
789 if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
790 return true;
791
792 return false;
793 }
794
795 static inline bool
796 pmap_is_cached(pmap_t pm)
797 {
798
799 if (pm == pmap_kernel() || pmap_recent_user == NULL ||
800 pmap_recent_user == pm)
801 return (true);
802
803 return false;
804 }
805
806 /*
807 * PTE_SYNC_CURRENT:
808 *
809 * Make sure the pte is written out to RAM.
810 * We need to do this in one of the following cases:
811 * - We're dealing with the kernel pmap
812 * - There is no pmap active in the cache/tlb.
813 * - The specified pmap is 'active' in the cache/tlb.
814 */
815 #ifdef PMAP_INCLUDE_PTE_SYNC
816 #define PTE_SYNC_CURRENT(pm, ptep) \
817 do { \
818 if (PMAP_NEEDS_PTE_SYNC && \
819 pmap_is_cached(pm)) \
820 PTE_SYNC(ptep); \
821 } while (/*CONSTCOND*/0)
822 #else
823 #define PTE_SYNC_CURRENT(pm, ptep) /* nothing */
824 #endif
825
826 /*
827 * main pv_entry manipulation functions:
828 * pmap_enter_pv: enter a mapping onto a vm_page list
829 * pmap_remove_pv: remove a mapping from a vm_page list
830 *
831 * NOTE: pmap_enter_pv expects to lock the pvh itself
832 * pmap_remove_pv expects the caller to lock the pvh before calling
833 */
834
835 /*
836 * pmap_enter_pv: enter a mapping onto a vm_page list
837 *
838 * => caller should hold the proper lock on pmap_main_lock
839 * => caller should have pmap locked
840 * => we will gain the lock on the vm_page and allocate the new pv_entry
841 * => caller should adjust ptp's wire_count before calling
842 * => caller should not adjust pmap's wire_count
843 */
844 static void
845 pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
846 vaddr_t va, u_int flags)
847 {
848 struct pv_entry **pvp;
849
850 NPDEBUG(PDB_PVDUMP,
851 printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));
852
853 pv->pv_pmap = pm;
854 pv->pv_va = va;
855 pv->pv_flags = flags;
856
857 pvp = &SLIST_FIRST(&md->pvh_list);
858 #ifdef PMAP_CACHE_VIPT
859 /*
860 * Insert unmanaged entries, writeable first, at the head of
861 * the pv list.
862 */
863 if (__predict_true((flags & PVF_KENTRY) == 0)) {
864 while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
865 pvp = &SLIST_NEXT(*pvp, pv_link);
866 } else if ((flags & PVF_WRITE) == 0) {
867 while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE)
868 pvp = &SLIST_NEXT(*pvp, pv_link);
869 }
870 #endif
871 SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */
872 *pvp = pv; /* ... locked list */
873 md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
874 #ifdef PMAP_CACHE_VIPT
875 if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
876 md->pvh_attrs |= PVF_KMOD;
877 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
878 md->pvh_attrs |= PVF_DIRTY;
879 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
880 #endif
881 if (pm == pmap_kernel()) {
882 PMAPCOUNT(kernel_mappings);
883 if (flags & PVF_WRITE)
884 md->krw_mappings++;
885 else
886 md->kro_mappings++;
887 } else {
888 if (flags & PVF_WRITE)
889 md->urw_mappings++;
890 else
891 md->uro_mappings++;
892 }
893
894 #ifdef PMAP_CACHE_VIPT
895 /*
896 * If this is an exec mapping and its the first exec mapping
897 * for this page, make sure to sync the I-cache.
898 */
899 if (PV_IS_EXEC_P(flags)) {
900 if (!PV_IS_EXEC_P(md->pvh_attrs)) {
901 pmap_syncicache_page(md, pa);
902 PMAPCOUNT(exec_synced_map);
903 }
904 PMAPCOUNT(exec_mappings);
905 }
906 #endif
907
908 PMAPCOUNT(mappings);
909
910 if (pv->pv_flags & PVF_WIRED)
911 ++pm->pm_stats.wired_count;
912 }
913
914 /*
915 *
916 * pmap_find_pv: Find a pv entry
917 *
918 * => caller should hold lock on vm_page
919 */
920 static inline struct pv_entry *
921 pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
922 {
923 struct pv_entry *pv;
924
925 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
926 if (pm == pv->pv_pmap && va == pv->pv_va)
927 break;
928 }
929
930 return (pv);
931 }
932
933 /*
934 * pmap_remove_pv: try to remove a mapping from a pv_list
935 *
936 * => caller should hold proper lock on pmap_main_lock
937 * => pmap should be locked
938 * => caller should hold lock on vm_page [so that attrs can be adjusted]
939 * => caller should adjust ptp's wire_count and free PTP if needed
940 * => caller should NOT adjust pmap's wire_count
941 * => we return the removed pv
942 */
943 static struct pv_entry *
944 pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
945 {
946 struct pv_entry *pv, **prevptr;
947
948 NPDEBUG(PDB_PVDUMP,
949 printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va));
950
951 prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
952 pv = *prevptr;
953
954 while (pv) {
955 if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */
956 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md "
957 "%p, flags 0x%x\n", pm, md, pv->pv_flags));
958 if (pv->pv_flags & PVF_WIRED) {
959 --pm->pm_stats.wired_count;
960 }
961 *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */
962 if (pm == pmap_kernel()) {
963 PMAPCOUNT(kernel_unmappings);
964 if (pv->pv_flags & PVF_WRITE)
965 md->krw_mappings--;
966 else
967 md->kro_mappings--;
968 } else {
969 if (pv->pv_flags & PVF_WRITE)
970 md->urw_mappings--;
971 else
972 md->uro_mappings--;
973 }
974
975 PMAPCOUNT(unmappings);
976 #ifdef PMAP_CACHE_VIPT
977 if (!(pv->pv_flags & PVF_WRITE))
978 break;
979 /*
980 * If this page has had an exec mapping, then if
981 * this was the last mapping, discard the contents,
982 * otherwise sync the i-cache for this page.
983 */
984 if (PV_IS_EXEC_P(md->pvh_attrs)) {
985 if (SLIST_EMPTY(&md->pvh_list)) {
986 md->pvh_attrs &= ~PVF_EXEC;
987 PMAPCOUNT(exec_discarded_unmap);
988 } else {
989 pmap_syncicache_page(md, pa);
990 PMAPCOUNT(exec_synced_unmap);
991 }
992 }
993 #endif /* PMAP_CACHE_VIPT */
994 break;
995 }
996 prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */
997 pv = *prevptr; /* advance */
998 }
999
1000 #ifdef PMAP_CACHE_VIPT
1001 /*
1002 * If we no longer have a WRITEABLE KENTRY at the head of list,
1003 * clear the KMOD attribute from the page.
1004 */
1005 if (SLIST_FIRST(&md->pvh_list) == NULL
1006 || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
1007 md->pvh_attrs &= ~PVF_KMOD;
1008
1009 /*
1010 * If this was a writeable page and there are no more writeable
1011 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
1012 * the contents to memory.
1013 */
1014 if (md->krw_mappings + md->urw_mappings == 0)
1015 md->pvh_attrs &= ~PVF_WRITE;
1016 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1017 #endif /* PMAP_CACHE_VIPT */
1018
1019 return(pv); /* return removed pv */
1020 }
1021
1022 /*
1023 *
1024 * pmap_modify_pv: Update pv flags
1025 *
1026 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1027 * => caller should NOT adjust pmap's wire_count
1028 * => caller must call pmap_vac_me_harder() if writable status of a page
1029 * may have changed.
1030 * => we return the old flags
1031 *
1032 * Modify a physical-virtual mapping in the pv table
1033 */
1034 static u_int
1035 pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
1036 u_int clr_mask, u_int set_mask)
1037 {
1038 struct pv_entry *npv;
1039 u_int flags, oflags;
1040
1041 KASSERT((clr_mask & PVF_KENTRY) == 0);
1042 KASSERT((set_mask & PVF_KENTRY) == 0);
1043
1044 if ((npv = pmap_find_pv(md, pm, va)) == NULL)
1045 return (0);
1046
1047 NPDEBUG(PDB_PVDUMP,
1048 printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags));
1049
1050 /*
1051 * There is at least one VA mapping this page.
1052 */
1053
1054 if (clr_mask & (PVF_REF | PVF_MOD)) {
1055 md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1056 #ifdef PMAP_CACHE_VIPT
1057 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
1058 md->pvh_attrs |= PVF_DIRTY;
1059 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1060 #endif
1061 }
1062
1063 oflags = npv->pv_flags;
1064 npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
1065
1066 if ((flags ^ oflags) & PVF_WIRED) {
1067 if (flags & PVF_WIRED)
1068 ++pm->pm_stats.wired_count;
1069 else
1070 --pm->pm_stats.wired_count;
1071 }
1072
1073 if ((flags ^ oflags) & PVF_WRITE) {
1074 if (pm == pmap_kernel()) {
1075 if (flags & PVF_WRITE) {
1076 md->krw_mappings++;
1077 md->kro_mappings--;
1078 } else {
1079 md->kro_mappings++;
1080 md->krw_mappings--;
1081 }
1082 } else {
1083 if (flags & PVF_WRITE) {
1084 md->urw_mappings++;
1085 md->uro_mappings--;
1086 } else {
1087 md->uro_mappings++;
1088 md->urw_mappings--;
1089 }
1090 }
1091 }
1092 #ifdef PMAP_CACHE_VIPT
1093 if (md->urw_mappings + md->krw_mappings == 0)
1094 md->pvh_attrs &= ~PVF_WRITE;
1095 /*
1096 * We have two cases here: the first is from enter_pv (new exec
1097 * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
1098 * Since in the latter, pmap_enter_pv won't do anything, we just have
1099 * to do what pmap_remove_pv would do.
1100 */
1101 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs))
1102 || (PV_IS_EXEC_P(md->pvh_attrs)
1103 && (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
1104 pmap_syncicache_page(md, pa);
1105 PMAPCOUNT(exec_synced_remap);
1106 }
1107 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1108 #endif
1109
1110 PMAPCOUNT(remappings);
1111
1112 return (oflags);
1113 }
1114
1115 /*
1116 * Allocate an L1 translation table for the specified pmap.
1117 * This is called at pmap creation time.
1118 */
1119 static void
1120 pmap_alloc_l1(pmap_t pm)
1121 {
1122 struct l1_ttable *l1;
1123 u_int8_t domain;
1124
1125 /*
1126 * Remove the L1 at the head of the LRU list
1127 */
1128 mutex_spin_enter(&l1_lru_lock);
1129 l1 = TAILQ_FIRST(&l1_lru_list);
1130 KDASSERT(l1 != NULL);
1131 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1132
1133 /*
1134 * Pick the first available domain number, and update
1135 * the link to the next number.
1136 */
1137 domain = l1->l1_domain_first;
1138 l1->l1_domain_first = l1->l1_domain_free[domain];
1139
1140 /*
1141 * If there are still free domain numbers in this L1,
1142 * put it back on the TAIL of the LRU list.
1143 */
1144 if (++l1->l1_domain_use_count < PMAP_DOMAINS)
1145 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1146
1147 mutex_spin_exit(&l1_lru_lock);
1148
1149 /*
1150 * Fix up the relevant bits in the pmap structure
1151 */
1152 pm->pm_l1 = l1;
1153 pm->pm_domain = domain + 1;
1154 }
1155
1156 /*
1157 * Free an L1 translation table.
1158 * This is called at pmap destruction time.
1159 */
1160 static void
1161 pmap_free_l1(pmap_t pm)
1162 {
1163 struct l1_ttable *l1 = pm->pm_l1;
1164
1165 mutex_spin_enter(&l1_lru_lock);
1166
1167 /*
1168 * If this L1 is currently on the LRU list, remove it.
1169 */
1170 if (l1->l1_domain_use_count < PMAP_DOMAINS)
1171 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1172
1173 /*
1174 * Free up the domain number which was allocated to the pmap
1175 */
1176 l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
1177 l1->l1_domain_first = pm->pm_domain - 1;
1178 l1->l1_domain_use_count--;
1179
1180 /*
1181 * The L1 now must have at least 1 free domain, so add
1182 * it back to the LRU list. If the use count is zero,
1183 * put it at the head of the list, otherwise it goes
1184 * to the tail.
1185 */
1186 if (l1->l1_domain_use_count == 0)
1187 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
1188 else
1189 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1190
1191 mutex_spin_exit(&l1_lru_lock);
1192 }
1193
1194 static inline void
1195 pmap_use_l1(pmap_t pm)
1196 {
1197 struct l1_ttable *l1;
1198
1199 /*
1200 * Do nothing if we're in interrupt context.
1201 * Access to an L1 by the kernel pmap must not affect
1202 * the LRU list.
1203 */
1204 if (cpu_intr_p() || pm == pmap_kernel())
1205 return;
1206
1207 l1 = pm->pm_l1;
1208
1209 /*
1210 * If the L1 is not currently on the LRU list, just return
1211 */
1212 if (l1->l1_domain_use_count == PMAP_DOMAINS)
1213 return;
1214
1215 mutex_spin_enter(&l1_lru_lock);
1216
1217 /*
1218 * Check the use count again, now that we've acquired the lock
1219 */
1220 if (l1->l1_domain_use_count == PMAP_DOMAINS) {
1221 mutex_spin_exit(&l1_lru_lock);
1222 return;
1223 }
1224
1225 /*
1226 * Move the L1 to the back of the LRU list
1227 */
1228 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1229 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1230
1231 mutex_spin_exit(&l1_lru_lock);
1232 }
1233
1234 /*
1235 * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
1236 *
1237 * Free an L2 descriptor table.
1238 */
1239 static inline void
1240 #ifndef PMAP_INCLUDE_PTE_SYNC
1241 pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
1242 #else
1243 pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa)
1244 #endif
1245 {
1246 #ifdef PMAP_INCLUDE_PTE_SYNC
1247 #ifdef PMAP_CACHE_VIVT
1248 /*
1249 * Note: With a write-back cache, we may need to sync this
1250 * L2 table before re-using it.
1251 * This is because it may have belonged to a non-current
1252 * pmap, in which case the cache syncs would have been
1253 * skipped for the pages that were being unmapped. If the
1254 * L2 table were then to be immediately re-allocated to
1255 * the *current* pmap, it may well contain stale mappings
1256 * which have not yet been cleared by a cache write-back
1257 * and so would still be visible to the mmu.
1258 */
1259 if (need_sync)
1260 PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1261 #endif /* PMAP_CACHE_VIVT */
1262 #endif /* PMAP_INCLUDE_PTE_SYNC */
1263 pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
1264 }
1265
1266 /*
1267 * Returns a pointer to the L2 bucket associated with the specified pmap
1268 * and VA, or NULL if no L2 bucket exists for the address.
1269 */
1270 static inline struct l2_bucket *
1271 pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
1272 {
1273 struct l2_dtable *l2;
1274 struct l2_bucket *l2b;
1275 u_short l1idx;
1276
1277 l1idx = L1_IDX(va);
1278
1279 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
1280 (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
1281 return (NULL);
1282
1283 return (l2b);
1284 }
1285
1286 /*
1287 * Returns a pointer to the L2 bucket associated with the specified pmap
1288 * and VA.
1289 *
1290 * If no L2 bucket exists, perform the necessary allocations to put an L2
1291 * bucket/page table in place.
1292 *
1293 * Note that if a new L2 bucket/page was allocated, the caller *must*
1294 * increment the bucket occupancy counter appropriately *before*
1295 * releasing the pmap's lock to ensure no other thread or cpu deallocates
1296 * the bucket/page in the meantime.
1297 */
1298 static struct l2_bucket *
1299 pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
1300 {
1301 struct l2_dtable *l2;
1302 struct l2_bucket *l2b;
1303 u_short l1idx;
1304
1305 l1idx = L1_IDX(va);
1306
1307 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
1308 /*
1309 * No mapping at this address, as there is
1310 * no entry in the L1 table.
1311 * Need to allocate a new l2_dtable.
1312 */
1313 if ((l2 = pmap_alloc_l2_dtable()) == NULL)
1314 return (NULL);
1315
1316 /*
1317 * Link it into the parent pmap
1318 */
1319 pm->pm_l2[L2_IDX(l1idx)] = l2;
1320 }
1321
1322 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1323
1324 /*
1325 * Fetch pointer to the L2 page table associated with the address.
1326 */
1327 if (l2b->l2b_kva == NULL) {
1328 pt_entry_t *ptep;
1329
1330 /*
1331 * No L2 page table has been allocated. Chances are, this
1332 * is because we just allocated the l2_dtable, above.
1333 */
1334 if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) {
1335 /*
1336 * Oops, no more L2 page tables available at this
1337 * time. We may need to deallocate the l2_dtable
1338 * if we allocated a new one above.
1339 */
1340 if (l2->l2_occupancy == 0) {
1341 pm->pm_l2[L2_IDX(l1idx)] = NULL;
1342 pmap_free_l2_dtable(l2);
1343 }
1344 return (NULL);
1345 }
1346
1347 l2->l2_occupancy++;
1348 l2b->l2b_kva = ptep;
1349 l2b->l2b_l1idx = l1idx;
1350 }
1351
1352 return (l2b);
1353 }
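/*
 * Illustrative caller pattern (a sketch only; 'npte' stands for the new
 * PTE value the caller has built).  The key point is that the bucket's
 * occupancy count is bumped before the pmap lock is dropped, so a
 * concurrent pmap_free_l2_bucket() cannot reclaim the page table:
 *
 *	pmap_acquire_pmap_lock(pm);
 *	l2b = pmap_alloc_l2_bucket(pm, va);
 *	if (l2b != NULL) {
 *		ptep = &l2b->l2b_kva[l2pte_index(va)];
 *		if (*ptep == 0)
 *			l2b->l2b_occupancy++;	// claim the new PTE slot
 *		*ptep = npte;
 *		PTE_SYNC(ptep);
 *	}
 *	pmap_release_pmap_lock(pm);
 *
 * pmap_enter() follows a similar sequence.
 */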
1354
1355 /*
1356 * One or more mappings in the specified L2 descriptor table have just been
1357 * invalidated.
1358 *
1359 * Garbage collect the metadata and descriptor table itself if necessary.
1360 *
1361 * The pmap lock must be acquired when this is called (not necessary
1362 * for the kernel pmap).
1363 */
1364 static void
1365 pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
1366 {
1367 struct l2_dtable *l2;
1368 pd_entry_t *pl1pd, l1pd;
1369 pt_entry_t *ptep;
1370 u_short l1idx;
1371
1372 KDASSERT(count <= l2b->l2b_occupancy);
1373
1374 /*
1375 * Update the bucket's reference count according to how many
1376 * PTEs the caller has just invalidated.
1377 */
1378 l2b->l2b_occupancy -= count;
1379
1380 /*
1381 * Note:
1382 *
1383 * Level 2 page tables allocated to the kernel pmap are never freed
1384 * as that would require checking all Level 1 page tables and
1385 * removing any references to the Level 2 page table. See also the
1386 * comment elsewhere about never freeing bootstrap L2 descriptors.
1387 *
1388 * We make do with just invalidating the mapping in the L2 table.
1389 *
1390 * This isn't really a big deal in practice and, in fact, leads
1391 * to a performance win over time as we don't need to continually
1392 * alloc/free.
1393 */
1394 if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
1395 return;
1396
1397 /*
1398 * There are no more valid mappings in this level 2 page table.
1399 * Go ahead and NULL-out the pointer in the bucket, then
1400 * free the page table.
1401 */
1402 l1idx = l2b->l2b_l1idx;
1403 ptep = l2b->l2b_kva;
1404 l2b->l2b_kva = NULL;
1405
1406 pl1pd = &pm->pm_l1->l1_kva[l1idx];
1407
1408 /*
1409 * If the L1 slot matches the pmap's domain
1410 * number, then invalidate it.
1411 */
1412 l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
1413 if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
1414 *pl1pd = 0;
1415 PTE_SYNC(pl1pd);
1416 }
1417
1418 /*
1419 * Release the L2 descriptor table back to the pool cache.
1420 */
1421 #ifndef PMAP_INCLUDE_PTE_SYNC
1422 pmap_free_l2_ptp(ptep, l2b->l2b_phys);
1423 #else
1424 pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys);
1425 #endif
1426
1427 /*
1428 * Update the reference count in the associated l2_dtable
1429 */
1430 l2 = pm->pm_l2[L2_IDX(l1idx)];
1431 if (--l2->l2_occupancy > 0)
1432 return;
1433
1434 /*
1435 * There are no more valid mappings in any of the Level 1
1436 * slots managed by this l2_dtable. Go ahead and NULL-out
1437 * the pointer in the parent pmap and free the l2_dtable.
1438 */
1439 pm->pm_l2[L2_IDX(l1idx)] = NULL;
1440 pmap_free_l2_dtable(l2);
1441 }
1442
1443 /*
1444 * Pool cache constructors for L2 descriptor tables, metadata and pmap
1445 * structures.
1446 */
1447 static int
1448 pmap_l2ptp_ctor(void *arg, void *v, int flags)
1449 {
1450 #ifndef PMAP_INCLUDE_PTE_SYNC
1451 struct l2_bucket *l2b;
1452 pt_entry_t *ptep, pte;
1453 vaddr_t va = (vaddr_t)v & ~PGOFSET;
1454
1455 /*
1456 * The mappings for these page tables were initially made using
1457 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
1458 * mode will not be right for page table mappings. To avoid
1459 * polluting the pmap_kenter_pa() code with a special case for
1460 * page tables, we simply fix up the cache-mode here if it's not
1461 * correct.
1462 */
1463 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1464 KDASSERT(l2b != NULL);
1465 ptep = &l2b->l2b_kva[l2pte_index(va)];
1466 pte = *ptep;
1467
1468 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
1469 /*
1470 * Page tables must have the cache-mode set to Write-Thru.
1471 */
1472 *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
1473 PTE_SYNC(ptep);
1474 cpu_tlb_flushD_SE(va);
1475 cpu_cpwait();
1476 }
1477 #endif
1478
1479 memset(v, 0, L2_TABLE_SIZE_REAL);
1480 PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1481 return (0);
1482 }
1483
1484 static int
1485 pmap_l2dtable_ctor(void *arg, void *v, int flags)
1486 {
1487
1488 memset(v, 0, sizeof(struct l2_dtable));
1489 return (0);
1490 }
1491
1492 static int
1493 pmap_pmap_ctor(void *arg, void *v, int flags)
1494 {
1495
1496 memset(v, 0, sizeof(struct pmap));
1497 return (0);
1498 }
1499
1500 static void
1501 pmap_pinit(pmap_t pm)
1502 {
1503 struct l2_bucket *l2b;
1504
1505 if (vector_page < KERNEL_BASE) {
1506 /*
1507 * Map the vector page.
1508 */
1509 pmap_enter(pm, vector_page, systempage.pv_pa,
1510 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1511 pmap_update(pm);
1512
1513 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
1514 l2b = pmap_get_l2_bucket(pm, vector_page);
1515 KDASSERT(l2b != NULL);
1516 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
1517 L1_C_DOM(pm->pm_domain);
1518 } else
1519 pm->pm_pl1vec = NULL;
1520 }
1521
1522 #ifdef PMAP_CACHE_VIVT
1523 /*
1524 * Since we have a virtually indexed cache, we may need to inhibit caching if
1525 * there is more than one mapping and at least one of them is writable.
1526 * Since we purge the cache on every context switch, we only need to check for
1527 * other mappings within the same pmap, or kernel_pmap.
1528 * This function is also called when a page is unmapped, to possibly reenable
1529 * caching on any remaining mappings.
1530 *
1531 * The code implements the following logic, where:
1532 *
1533 * KW = # of kernel read/write pages
1534 * KR = # of kernel read only pages
1535 * UW = # of user read/write pages
1536 * UR = # of user read only pages
1537 *
1538 * KC = kernel mapping is cacheable
1539 * UC = user mapping is cacheable
1540 *
1541 * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
1542 * +---------------------------------------------
1543 * UW=0,UR=0 | --- KC=1 KC=1 KC=0
1544 * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
1545 * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
1546 * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
1547 */
1548
1549 static const int pmap_vac_flags[4][4] = {
1550 {-1, 0, 0, PVF_KNC},
1551 {0, 0, PVF_NC, PVF_NC},
1552 {0, PVF_NC, PVF_NC, PVF_NC},
1553 {PVF_UNC, PVF_NC, PVF_NC, PVF_NC}
1554 };
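/*
 * Worked example of the encoding (illustrative): with one kernel
 * read/write mapping and one user read-only mapping (KW=1, KR=0,
 * UW=0, UR=1), pmap_get_vac_flags() below computes kidx = 2 (bit 1
 * from krw_mappings) and uidx = 1 (bit 0 from uro_mappings), so
 * pmap_vac_flags[1][2] == PVF_NC, matching the KC=0,UC=0 entry in the
 * table above.
 */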
1555
1556 static inline int
1557 pmap_get_vac_flags(const struct vm_page_md *md)
1558 {
1559 int kidx, uidx;
1560
1561 kidx = 0;
1562 if (md->kro_mappings || md->krw_mappings > 1)
1563 kidx |= 1;
1564 if (md->krw_mappings)
1565 kidx |= 2;
1566
1567 uidx = 0;
1568 if (md->uro_mappings || md->urw_mappings > 1)
1569 uidx |= 1;
1570 if (md->urw_mappings)
1571 uidx |= 2;
1572
1573 return (pmap_vac_flags[uidx][kidx]);
1574 }
1575
1576 static inline void
1577 pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1578 {
1579 int nattr;
1580
1581 nattr = pmap_get_vac_flags(md);
1582
1583 if (nattr < 0) {
1584 md->pvh_attrs &= ~PVF_NC;
1585 return;
1586 }
1587
1588 if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0)
1589 return;
1590
1591 if (pm == pmap_kernel())
1592 pmap_vac_me_kpmap(md, pa, pm, va);
1593 else
1594 pmap_vac_me_user(md, pa, pm, va);
1595
1596 md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr;
1597 }
1598
1599 static void
1600 pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1601 {
1602 u_int u_cacheable, u_entries;
1603 struct pv_entry *pv;
1604 pmap_t last_pmap = pm;
1605
1606 /*
1607 * Pass one, see if there are both kernel and user pmaps for
1608 * this page. Calculate whether there are user-writable or
1609 * kernel-writable pages.
1610 */
1611 u_cacheable = 0;
1612 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1613 if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
1614 u_cacheable++;
1615 }
1616
1617 u_entries = md->urw_mappings + md->uro_mappings;
1618
1619 /*
1620 * We know we have just been updating a kernel entry, so if
1621 * all user pages are already cacheable, then there is nothing
1622 * further to do.
1623 */
1624 if (md->k_mappings == 0 && u_cacheable == u_entries)
1625 return;
1626
1627 if (u_entries) {
1628 /*
1629 * Scan over the list again, for each entry, if it
1630 * might not be set correctly, call pmap_vac_me_user
1631 * to recalculate the settings.
1632 */
1633 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1634 /*
1635 * We know kernel mappings will get set
1636 * correctly in other calls. We also know
1637 * that if the pmap is the same as last_pmap
1638 * then we've just handled this entry.
1639 */
1640 if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
1641 continue;
1642
1643 /*
1644 * If there are kernel entries and this page
1645 * is writable but non-cacheable, then we can
1646 * skip this entry also.
1647 */
1648 if (md->k_mappings &&
1649 (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
1650 (PVF_NC | PVF_WRITE))
1651 continue;
1652
1653 /*
1654 * Similarly if there are no kernel-writable
1655 * entries and the page is already
1656 * read-only/cacheable.
1657 */
1658 if (md->krw_mappings == 0 &&
1659 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
1660 continue;
1661
1662 /*
1663 * For some of the remaining cases, we know
1664 * that we must recalculate, but for others we
1665 * can't tell if they are correct or not, so
1666 * we recalculate anyway.
1667 */
1668 pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0);
1669 }
1670
1671 if (md->k_mappings == 0)
1672 return;
1673 }
1674
1675 pmap_vac_me_user(md, pa, pm, va);
1676 }
1677
1678 static void
1679 pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1680 {
1681 pmap_t kpmap = pmap_kernel();
1682 struct pv_entry *pv, *npv = NULL;
1683 struct l2_bucket *l2b;
1684 pt_entry_t *ptep, pte;
1685 u_int entries = 0;
1686 u_int writable = 0;
1687 u_int cacheable_entries = 0;
1688 u_int kern_cacheable = 0;
1689 u_int other_writable = 0;
1690
1691 /*
1692 * Count mappings and writable mappings in this pmap.
1693 * Include kernel mappings as part of our own.
1694 * Keep a pointer to the first one.
1695 */
1696 npv = NULL;
1697 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1698 /* Count mappings in the same pmap */
1699 if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
1700 if (entries++ == 0)
1701 npv = pv;
1702
1703 /* Cacheable mappings */
1704 if ((pv->pv_flags & PVF_NC) == 0) {
1705 cacheable_entries++;
1706 if (kpmap == pv->pv_pmap)
1707 kern_cacheable++;
1708 }
1709
1710 /* Writable mappings */
1711 if (pv->pv_flags & PVF_WRITE)
1712 ++writable;
1713 } else
1714 if (pv->pv_flags & PVF_WRITE)
1715 other_writable = 1;
1716 }
1717
1718 /*
1719 * Enable or disable caching as necessary.
1720 * Note: the first entry might be part of the kernel pmap,
1721 * so we can't assume this is indicative of the state of the
1722 * other (maybe non-kpmap) entries.
1723 */
1724 if ((entries > 1 && writable) ||
1725 (entries > 0 && pm == kpmap && other_writable)) {
1726 if (cacheable_entries == 0)
1727 return;
1728
1729 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1730 if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
1731 (pv->pv_flags & PVF_NC))
1732 continue;
1733
1734 pv->pv_flags |= PVF_NC;
1735
1736 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1737 KDASSERT(l2b != NULL);
1738 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1739 pte = *ptep & ~L2_S_CACHE_MASK;
1740
1741 if ((va != pv->pv_va || pm != pv->pv_pmap) &&
1742 l2pte_valid(pte)) {
1743 if (PV_BEEN_EXECD(pv->pv_flags)) {
1744 #ifdef PMAP_CACHE_VIVT
1745 pmap_idcache_wbinv_range(pv->pv_pmap,
1746 pv->pv_va, PAGE_SIZE);
1747 #endif
1748 pmap_tlb_flushID_SE(pv->pv_pmap,
1749 pv->pv_va);
1750 } else
1751 if (PV_BEEN_REFD(pv->pv_flags)) {
1752 #ifdef PMAP_CACHE_VIVT
1753 pmap_dcache_wb_range(pv->pv_pmap,
1754 pv->pv_va, PAGE_SIZE, true,
1755 (pv->pv_flags & PVF_WRITE) == 0);
1756 #endif
1757 pmap_tlb_flushD_SE(pv->pv_pmap,
1758 pv->pv_va);
1759 }
1760 }
1761
1762 *ptep = pte;
1763 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
1764 }
1765 cpu_cpwait();
1766 } else
1767 if (entries > cacheable_entries) {
1768 /*
1769 * Turn caching back on for some pages. If it is a kernel
1770 * page, only do so if there are no other writable pages.
1771 */
1772 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1773 if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
1774 (kpmap != pv->pv_pmap || other_writable)))
1775 continue;
1776
1777 pv->pv_flags &= ~PVF_NC;
1778
1779 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1780 KDASSERT(l2b != NULL);
1781 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1782 pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
1783
1784 if (l2pte_valid(pte)) {
1785 if (PV_BEEN_EXECD(pv->pv_flags)) {
1786 pmap_tlb_flushID_SE(pv->pv_pmap,
1787 pv->pv_va);
1788 } else
1789 if (PV_BEEN_REFD(pv->pv_flags)) {
1790 pmap_tlb_flushD_SE(pv->pv_pmap,
1791 pv->pv_va);
1792 }
1793 }
1794
1795 *ptep = pte;
1796 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
1797 }
1798 }
1799 }
1800 #endif
1801
1802 #ifdef PMAP_CACHE_VIPT
1803 static void
1804 pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1805 {
1806 struct pv_entry *pv;
1807 vaddr_t tst_mask;
1808 bool bad_alias;
1809 struct l2_bucket *l2b;
1810 pt_entry_t *ptep, pte, opte;
1811 const u_int
1812 rw_mappings = md->urw_mappings + md->krw_mappings,
1813 ro_mappings = md->uro_mappings + md->kro_mappings;
1814
1815 /* do we need to do anything? */
1816 if (arm_cache_prefer_mask == 0)
1817 return;
1818
1819 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n",
1820 md, pm, va));
1821
1822 KASSERT(!va || pm);
1823 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1824
1825 /* Already a conflict? */
1826 if (__predict_false(md->pvh_attrs & PVF_NC)) {
1827 /* just an add, things are already non-cached */
1828 KASSERT(!(md->pvh_attrs & PVF_DIRTY));
1829 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1830 bad_alias = false;
1831 if (va) {
1832 PMAPCOUNT(vac_color_none);
1833 bad_alias = true;
1834 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1835 goto fixup;
1836 }
1837 pv = SLIST_FIRST(&md->pvh_list);
1838 	/* the list can't be empty, otherwise the page would be cacheable */
1839 if (md->pvh_attrs & PVF_KMPAGE) {
1840 tst_mask = md->pvh_attrs;
1841 } else {
1842 KASSERT(pv);
1843 tst_mask = pv->pv_va;
1844 pv = SLIST_NEXT(pv, pv_link);
1845 }
1846 /*
1847 * Only check for a bad alias if we have writable mappings.
1848 */
1849 tst_mask &= arm_cache_prefer_mask;
1850 if (rw_mappings > 0 && arm_cache_prefer_mask) {
1851 for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
1852 /* if there's a bad alias, stop checking. */
1853 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
1854 bad_alias = true;
1855 }
1856 md->pvh_attrs |= PVF_WRITE;
1857 if (!bad_alias)
1858 md->pvh_attrs |= PVF_DIRTY;
1859 } else {
1860 /*
1861 * We have only read-only mappings. Let's see if there
1862 * are multiple colors in use or if we mapped a KMPAGE.
1863 * If the latter, we have a bad alias. If the former,
1864 * we need to remember that.
1865 */
1866 for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
1867 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
1868 if (md->pvh_attrs & PVF_KMPAGE)
1869 bad_alias = true;
1870 break;
1871 }
1872 }
1873 md->pvh_attrs &= ~PVF_WRITE;
1874 /*
1875 * No KMPAGE and we exited early, so we must have
1876 * multiple color mappings.
1877 */
1878 if (!bad_alias && pv != NULL)
1879 md->pvh_attrs |= PVF_MULTCLR;
1880 }
1881
1882 /* If no conflicting colors, set everything back to cached */
1883 if (!bad_alias) {
1884 #ifdef DEBUG
1885 if ((md->pvh_attrs & PVF_WRITE)
1886 || ro_mappings < 2) {
1887 SLIST_FOREACH(pv, &md->pvh_list, pv_link)
1888 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
1889 }
1890 #endif
1891 md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
1892 md->pvh_attrs |= tst_mask | PVF_COLORED;
1893 /*
1894 * Restore DIRTY bit if page is modified
1895 */
1896 if (md->pvh_attrs & PVF_DMOD)
1897 md->pvh_attrs |= PVF_DIRTY;
1898 PMAPCOUNT(vac_color_restore);
1899 } else {
1900 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
1901 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
1902 }
1903 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1904 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1905 } else if (!va) {
1906 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
1907 KASSERT(!(md->pvh_attrs & PVF_WRITE)
1908 || (md->pvh_attrs & PVF_DIRTY));
1909 if (rw_mappings == 0) {
1910 md->pvh_attrs &= ~PVF_WRITE;
1911 if (ro_mappings == 1
1912 && (md->pvh_attrs & PVF_MULTCLR)) {
1913 /*
1914 * If this is the last readonly mapping
1915 * but it doesn't match the current color
1916 * for the page, change the current color
1917 * to match this last readonly mapping.
1918 */
1919 pv = SLIST_FIRST(&md->pvh_list);
1920 tst_mask = (md->pvh_attrs ^ pv->pv_va)
1921 & arm_cache_prefer_mask;
1922 if (tst_mask) {
1923 md->pvh_attrs ^= tst_mask;
1924 PMAPCOUNT(vac_color_change);
1925 }
1926 }
1927 }
1928 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1929 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1930 return;
1931 } else if (!pmap_is_page_colored_p(md)) {
1932 /* not colored so we just use its color */
1933 KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY));
1934 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1935 PMAPCOUNT(vac_color_new);
1936 md->pvh_attrs &= PAGE_SIZE - 1;
1937 md->pvh_attrs |= PVF_COLORED
1938 | (va & arm_cache_prefer_mask)
1939 | (rw_mappings > 0 ? PVF_WRITE : 0);
1940 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1941 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1942 return;
1943 } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
1944 bad_alias = false;
1945 if (rw_mappings > 0) {
1946 /*
1947 			 * We now have writable mappings and if we have
1948 			 * readonly mappings in more than one color, we have
1949 			 * an aliasing problem. Regardless, mark the page as
1950 			 * writable.
1951 */
1952 if (md->pvh_attrs & PVF_MULTCLR) {
1953 if (ro_mappings < 2) {
1954 /*
1955 					 * If we have fewer than two
1956 * read-only mappings, just flush the
1957 * non-primary colors from the cache.
1958 */
1959 pmap_flush_page(md, pa,
1960 PMAP_FLUSH_SECONDARY);
1961 } else {
1962 bad_alias = true;
1963 }
1964 }
1965 md->pvh_attrs |= PVF_WRITE;
1966 }
1967 /* If no conflicting colors, set everything back to cached */
1968 if (!bad_alias) {
1969 #ifdef DEBUG
1970 if (rw_mappings > 0
1971 || (md->pvh_attrs & PMAP_KMPAGE)) {
1972 tst_mask = md->pvh_attrs & arm_cache_prefer_mask;
1973 SLIST_FOREACH(pv, &md->pvh_list, pv_link)
1974 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
1975 }
1976 #endif
1977 if (SLIST_EMPTY(&md->pvh_list))
1978 PMAPCOUNT(vac_color_reuse);
1979 else
1980 PMAPCOUNT(vac_color_ok);
1981
1982 /* matching color, just return */
1983 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1984 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1985 return;
1986 }
1987 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
1988 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
1989
1990 /* color conflict. evict from cache. */
1991
1992 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
1993 md->pvh_attrs &= ~PVF_COLORED;
1994 md->pvh_attrs |= PVF_NC;
1995 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1996 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1997 PMAPCOUNT(vac_color_erase);
1998 } else if (rw_mappings == 0
1999 && (md->pvh_attrs & PVF_KMPAGE) == 0) {
2000 KASSERT((md->pvh_attrs & PVF_WRITE) == 0);
2001
2002 /*
2003 * If the page has dirty cache lines, clean it.
2004 */
2005 if (md->pvh_attrs & PVF_DIRTY)
2006 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
2007
2008 /*
2009 * If this is the first remapping (we know that there are no
2010 * writeable mappings), then this is a simple color change.
2011 		 * Otherwise this is a secondary r/o mapping, which means
2012 * we don't have to do anything.
2013 */
2014 if (ro_mappings == 1) {
2015 KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
2016 md->pvh_attrs &= PAGE_SIZE - 1;
2017 md->pvh_attrs |= (va & arm_cache_prefer_mask);
2018 PMAPCOUNT(vac_color_change);
2019 } else {
2020 PMAPCOUNT(vac_color_blind);
2021 }
2022 md->pvh_attrs |= PVF_MULTCLR;
2023 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2024 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2025 return;
2026 } else {
2027 if (rw_mappings > 0)
2028 md->pvh_attrs |= PVF_WRITE;
2029
2030 /* color conflict. evict from cache. */
2031 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2032
2033 		/* the list can't be empty because this was an enter/modify */
2034 pv = SLIST_FIRST(&md->pvh_list);
2035 if ((md->pvh_attrs & PVF_KMPAGE) == 0) {
2036 KASSERT(pv);
2037 /*
2038 * If there's only one mapped page, change color to the
2039 * page's new color and return. Restore the DIRTY bit
2040 * that was erased by pmap_flush_page.
2041 */
2042 if (SLIST_NEXT(pv, pv_link) == NULL) {
2043 md->pvh_attrs &= PAGE_SIZE - 1;
2044 md->pvh_attrs |= (va & arm_cache_prefer_mask);
2045 if (md->pvh_attrs & PVF_DMOD)
2046 md->pvh_attrs |= PVF_DIRTY;
2047 PMAPCOUNT(vac_color_change);
2048 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2049 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2050 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2051 return;
2052 }
2053 }
2054 bad_alias = true;
2055 md->pvh_attrs &= ~PVF_COLORED;
2056 md->pvh_attrs |= PVF_NC;
2057 PMAPCOUNT(vac_color_erase);
2058 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2059 }
2060
2061 fixup:
2062 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2063
2064 /*
2065 	 * Turn caching on/off for all pages.
2066 */
2067 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2068 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
2069 KDASSERT(l2b != NULL);
2070 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2071 opte = *ptep;
2072 pte = opte & ~L2_S_CACHE_MASK;
2073 if (bad_alias) {
2074 pv->pv_flags |= PVF_NC;
2075 } else {
2076 pv->pv_flags &= ~PVF_NC;
2077 pte |= pte_l2_s_cache_mode;
2078 }
2079
2080 		if (opte == pte)	/* only update if there's a change */
2081 continue;
2082
2083 if (l2pte_valid(pte)) {
2084 if (PV_BEEN_EXECD(pv->pv_flags)) {
2085 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
2086 } else if (PV_BEEN_REFD(pv->pv_flags)) {
2087 pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
2088 }
2089 }
2090
2091 *ptep = pte;
2092 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
2093 }
2094 }
2095 #endif /* PMAP_CACHE_VIPT */
2096
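#if 0
/*
 * Editor's illustrative sketch (not part of this pmap): the "color" of
 * a virtual mapping on a VIPT cache is the set of index bits above the
 * page offset, i.e. va & arm_cache_prefer_mask.  Two mappings of the
 * same physical page alias badly when their colors differ, which is
 * the essence of the tests pmap_vac_me_harder() performs above.  The
 * helper names below are hypothetical.
 */
static inline vaddr_t
vipt_color(vaddr_t va, vaddr_t prefer_mask)
{
	/* The cache index bits above the page offset pick the alias. */
	return va & prefer_mask;
}

static inline bool
vipt_bad_alias(vaddr_t va1, vaddr_t va2, vaddr_t prefer_mask)
{
	/* Two mappings of one physical page conflict iff colors differ. */
	return vipt_color(va1, prefer_mask) != vipt_color(va2, prefer_mask);
}
#endif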
2097
2098 /*
2099 * Modify pte bits for all ptes corresponding to the given physical address.
2100 * We use `maskbits' rather than `clearbits' because we're always passing
2101 * constants and the latter would require an extra inversion at run-time.
2102 */
2103 static void
2104 pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
2105 {
2106 struct l2_bucket *l2b;
2107 struct pv_entry *pv;
2108 pt_entry_t *ptep, npte, opte;
2109 pmap_t pm;
2110 vaddr_t va;
2111 u_int oflags;
2112 #ifdef PMAP_CACHE_VIPT
2113 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
2114 bool need_syncicache = false;
2115 bool did_syncicache = false;
2116 bool need_vac_me_harder = false;
2117 #endif
2118
2119 NPDEBUG(PDB_BITS,
2120 printf("pmap_clearbit: md %p mask 0x%x\n",
2121 md, maskbits));
2122
2123 #ifdef PMAP_CACHE_VIPT
2124 /*
2125 * If we might want to sync the I-cache and we've modified it,
2126 * then we know we definitely need to sync or discard it.
2127 */
2128 if (want_syncicache)
2129 need_syncicache = md->pvh_attrs & PVF_MOD;
2130 #endif
2131 /*
2132 * Clear saved attributes (modify, reference)
2133 */
2134 md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
2135
2136 if (SLIST_EMPTY(&md->pvh_list)) {
2137 #ifdef PMAP_CACHE_VIPT
2138 if (need_syncicache) {
2139 /*
2140 * No one has it mapped, so just discard it. The next
2141 * exec remapping will cause it to be synced.
2142 */
2143 md->pvh_attrs &= ~PVF_EXEC;
2144 PMAPCOUNT(exec_discarded_clearbit);
2145 }
2146 #endif
2147 return;
2148 }
2149
2150 /*
2151 	 * Loop over all current mappings setting/clearing as appropriate
2152 */
2153 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2154 va = pv->pv_va;
2155 pm = pv->pv_pmap;
2156 oflags = pv->pv_flags;
2157 /*
2158 * Kernel entries are unmanaged and as such not to be changed.
2159 */
2160 if (oflags & PVF_KENTRY)
2161 continue;
2162 pv->pv_flags &= ~maskbits;
2163
2164 pmap_acquire_pmap_lock(pm);
2165
2166 l2b = pmap_get_l2_bucket(pm, va);
2167 KDASSERT(l2b != NULL);
2168
2169 ptep = &l2b->l2b_kva[l2pte_index(va)];
2170 npte = opte = *ptep;
2171
2172 NPDEBUG(PDB_BITS,
2173 printf(
2174 "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
2175 pv, pv->pv_pmap, pv->pv_va, oflags));
2176
2177 if (maskbits & (PVF_WRITE|PVF_MOD)) {
2178 #ifdef PMAP_CACHE_VIVT
2179 if ((pv->pv_flags & PVF_NC)) {
2180 /*
2181 * Entry is not cacheable:
2182 *
2183 * Don't turn caching on again if this is a
2184 * modified emulation. This would be
2185 			 * inconsistent with the settings created by
2186 			 * pmap_vac_me_harder(). Otherwise, it's safe
2187 			 * to re-enable caching.
2188 *
2189 * There's no need to call pmap_vac_me_harder()
2190 * here: all pages are losing their write
2191 * permission.
2192 */
2193 if (maskbits & PVF_WRITE) {
2194 npte |= pte_l2_s_cache_mode;
2195 pv->pv_flags &= ~PVF_NC;
2196 }
2197 } else
2198 if (l2pte_writable_p(opte)) {
2199 /*
2200 			 * Entry is writable/cacheable: if the pmap
2201 			 * is current, flush the page from the cache;
2202 			 * otherwise it won't be in the cache anyway.
2203 */
2204 if (PV_BEEN_EXECD(oflags))
2205 pmap_idcache_wbinv_range(pm, pv->pv_va,
2206 PAGE_SIZE);
2207 else
2208 if (PV_BEEN_REFD(oflags))
2209 pmap_dcache_wb_range(pm, pv->pv_va,
2210 PAGE_SIZE,
2211 (maskbits & PVF_REF) != 0, false);
2212 }
2213 #endif
2214
2215 /* make the pte read only */
2216 npte = l2pte_set_readonly(npte);
2217
2218 if (maskbits & oflags & PVF_WRITE) {
2219 /*
2220 * Keep alias accounting up to date
2221 */
2222 if (pv->pv_pmap == pmap_kernel()) {
2223 md->krw_mappings--;
2224 md->kro_mappings++;
2225 } else {
2226 md->urw_mappings--;
2227 md->uro_mappings++;
2228 }
2229 #ifdef PMAP_CACHE_VIPT
2230 if (md->urw_mappings + md->krw_mappings == 0)
2231 md->pvh_attrs &= ~PVF_WRITE;
2232 if (want_syncicache)
2233 need_syncicache = true;
2234 need_vac_me_harder = true;
2235 #endif
2236 }
2237 }
2238
2239 if (maskbits & PVF_REF) {
2240 if ((pv->pv_flags & PVF_NC) == 0 &&
2241 (maskbits & (PVF_WRITE|PVF_MOD)) == 0 &&
2242 l2pte_valid(npte)) {
2243 #ifdef PMAP_CACHE_VIVT
2244 /*
2245 * Check npte here; we may have already
2246 * done the wbinv above, and the validity
2247 * of the PTE is the same for opte and
2248 * npte.
2249 */
2250 /* XXXJRT need idcache_inv_range */
2251 if (PV_BEEN_EXECD(oflags))
2252 pmap_idcache_wbinv_range(pm,
2253 pv->pv_va, PAGE_SIZE);
2254 else
2255 if (PV_BEEN_REFD(oflags))
2256 pmap_dcache_wb_range(pm,
2257 pv->pv_va, PAGE_SIZE,
2258 true, true);
2259 #endif
2260 }
2261
2262 /*
2263 * Make the PTE invalid so that we will take a
2264 * page fault the next time the mapping is
2265 * referenced.
2266 */
2267 npte &= ~L2_TYPE_MASK;
2268 npte |= L2_TYPE_INV;
2269 }
2270
2271 if (npte != opte) {
2272 *ptep = npte;
2273 PTE_SYNC(ptep);
2274 /* Flush the TLB entry if a current pmap. */
2275 if (PV_BEEN_EXECD(oflags))
2276 pmap_tlb_flushID_SE(pm, pv->pv_va);
2277 else
2278 if (PV_BEEN_REFD(oflags))
2279 pmap_tlb_flushD_SE(pm, pv->pv_va);
2280 }
2281
2282 pmap_release_pmap_lock(pm);
2283
2284 NPDEBUG(PDB_BITS,
2285 printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
2286 pm, va, opte, npte));
2287 }
2288
2289 #ifdef PMAP_CACHE_VIPT
2290 /*
2291 * If we need to sync the I-cache and we haven't done it yet, do it.
2292 */
2293 if (need_syncicache && !did_syncicache) {
2294 pmap_syncicache_page(md, pa);
2295 PMAPCOUNT(exec_synced_clearbit);
2296 }
2297 /*
2298 * If we are changing this to read-only, we need to call vac_me_harder
2299 * so we can change all the read-only pages to cacheable. We pretend
2300 	 * this is a page deletion.
2301 */
2302 if (need_vac_me_harder) {
2303 if (md->pvh_attrs & PVF_NC)
2304 pmap_vac_me_harder(md, pa, NULL, 0);
2305 }
2306 #endif
2307 }
2308
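#if 0
/*
 * Editor's illustrative sketch (not part of this pmap): the
 * referenced-bit emulation cycle behind the L2_TYPE_INV trick above.
 * pmap_clearbit(..., PVF_REF) makes the PTE invalid; the next access
 * faults, and the fixup path re-validates the small-page PTE much as
 * pmap_fault_fixup() does later in this file.  emulate_referenced()
 * is a hypothetical stand-in for that path, with `proto' standing in
 * for an L2_S_PROTO-style small-page prototype.
 */
static int
emulate_referenced(pt_entry_t *ptep, pt_entry_t proto)
{
	pt_entry_t pte = *ptep;

	/* Only an invalidated (ref-emulation) PTE is ours to fix up. */
	if ((pte & L2_TYPE_MASK) != L2_TYPE_INV)
		return 0;		/* genuine fault, not emulation */

	/* Record the reference by re-validating the small-page PTE. */
	*ptep = (pte & ~L2_TYPE_MASK) | proto;
	return 1;			/* fault handled */
}
#endif
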
2309 /*
2310 * pmap_clean_page()
2311 *
2312 * This is a local function used to work out the best strategy to clean
2313 * a single page referenced by its entry in the PV table. It's used by
2314  * pmap_copy_page, pmap_zero_page and maybe some others later on.
2315 *
2316 * Its policy is effectively:
2317 * o If there are no mappings, we don't bother doing anything with the cache.
2318 * o If there is one mapping, we clean just that page.
2319 * o If there are multiple mappings, we clean the entire cache.
2320 *
2321 * So that some functions can be further optimised, it returns 0 if it didn't
2322 * clean the entire cache, or 1 if it did.
2323 *
2324 * XXX One bug in this routine is that if the pv_entry has a single page
2325 * mapped at 0x00000000 a whole cache clean will be performed rather than
2326  * just the 1 page. This should not occur in everyday use, and if it does
2327  * the result is merely a less efficient clean for the page.
2328 */
2329 #ifdef PMAP_CACHE_VIVT
2330 static int
2331 pmap_clean_page(struct pv_entry *pv, bool is_src)
2332 {
2333 pmap_t pm_to_clean = NULL;
2334 struct pv_entry *npv;
2335 u_int cache_needs_cleaning = 0;
2336 u_int flags = 0;
2337 vaddr_t page_to_clean = 0;
2338
2339 if (pv == NULL) {
2340 /* nothing mapped in so nothing to flush */
2341 return (0);
2342 }
2343
2344 /*
2345 * Since we flush the cache each time we change to a different
2346 * user vmspace, we only need to flush the page if it is in the
2347 * current pmap.
2348 */
2349
2350 for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) {
2351 if (pmap_is_current(npv->pv_pmap)) {
2352 flags |= npv->pv_flags;
2353 /*
2354 * The page is mapped non-cacheable in
2355 * this map. No need to flush the cache.
2356 */
2357 if (npv->pv_flags & PVF_NC) {
2358 #ifdef DIAGNOSTIC
2359 if (cache_needs_cleaning)
2360 panic("pmap_clean_page: "
2361 "cache inconsistency");
2362 #endif
2363 break;
2364 } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
2365 continue;
2366 if (cache_needs_cleaning) {
2367 page_to_clean = 0;
2368 break;
2369 } else {
2370 page_to_clean = npv->pv_va;
2371 pm_to_clean = npv->pv_pmap;
2372 }
2373 cache_needs_cleaning = 1;
2374 }
2375 }
2376
2377 if (page_to_clean) {
2378 if (PV_BEEN_EXECD(flags))
2379 pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
2380 PAGE_SIZE);
2381 else
2382 pmap_dcache_wb_range(pm_to_clean, page_to_clean,
2383 PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
2384 } else if (cache_needs_cleaning) {
2385 pmap_t const pm = curproc->p_vmspace->vm_map.pmap;
2386
2387 if (PV_BEEN_EXECD(flags))
2388 pmap_idcache_wbinv_all(pm);
2389 else
2390 pmap_dcache_wbinv_all(pm);
2391 return (1);
2392 }
2393 return (0);
2394 }
2395 #endif
2396
2397 #ifdef PMAP_CACHE_VIPT
2398 /*
2399  * Sync a page with the I-cache. Since this is a VIPT cache, we must pick
2400  * the right cache alias to make sure we flush the correct lines.
2401 */
2402 void
2403 pmap_syncicache_page(struct vm_page_md *md, paddr_t pa)
2404 {
2405 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2406 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
2407
2408 NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n",
2409 md, md->pvh_attrs));
2410 /*
2411 * No need to clean the page if it's non-cached.
2412 */
2413 if (md->pvh_attrs & PVF_NC)
2414 return;
2415 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
2416
2417 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2418 /*
2419 * Set up a PTE with the right coloring to flush existing cache lines.
2420 */
2421 *ptep = L2_S_PROTO |
2422 pa
2423 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
2424 | pte_l2_s_cache_mode;
2425 PTE_SYNC(ptep);
2426
2427 /*
2428 * Flush it.
2429 */
2430 cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE);
2431 /*
2432 * Unmap the page.
2433 */
2434 *ptep = 0;
2435 PTE_SYNC(ptep);
2436 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2437
2438 md->pvh_attrs |= PVF_EXEC;
2439 PMAPCOUNT(exec_synced);
2440 }
2441
2442 void
2443 pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush)
2444 {
2445 vsize_t va_offset, end_va;
2446 void (*cf)(vaddr_t, vsize_t);
2447
2448 if (arm_cache_prefer_mask == 0)
2449 return;
2450
2451 switch (flush) {
2452 case PMAP_FLUSH_PRIMARY:
2453 if (md->pvh_attrs & PVF_MULTCLR) {
2454 va_offset = 0;
2455 end_va = arm_cache_prefer_mask;
2456 md->pvh_attrs &= ~PVF_MULTCLR;
2457 PMAPCOUNT(vac_flush_lots);
2458 } else {
2459 va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2460 end_va = va_offset;
2461 PMAPCOUNT(vac_flush_one);
2462 }
2463 /*
2464 * Mark that the page is no longer dirty.
2465 */
2466 md->pvh_attrs &= ~PVF_DIRTY;
2467 cf = cpufuncs.cf_idcache_wbinv_range;
2468 break;
2469 case PMAP_FLUSH_SECONDARY:
2470 va_offset = 0;
2471 end_va = arm_cache_prefer_mask;
2472 cf = cpufuncs.cf_idcache_wbinv_range;
2473 md->pvh_attrs &= ~PVF_MULTCLR;
2474 PMAPCOUNT(vac_flush_lots);
2475 break;
2476 case PMAP_CLEAN_PRIMARY:
2477 va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2478 end_va = va_offset;
2479 cf = cpufuncs.cf_dcache_wb_range;
2480 /*
2481 * Mark that the page is no longer dirty.
2482 */
2483 if ((md->pvh_attrs & PVF_DMOD) == 0)
2484 md->pvh_attrs &= ~PVF_DIRTY;
2485 PMAPCOUNT(vac_clean_one);
2486 break;
2487 default:
2488 return;
2489 }
2490
2491 KASSERT(!(md->pvh_attrs & PVF_NC));
2492
2493 NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n",
2494 md, md->pvh_attrs));
2495
2496 for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
2497 const size_t pte_offset = va_offset >> PGSHIFT;
2498 pt_entry_t * const ptep = &cdst_pte[pte_offset];
2499 const pt_entry_t oldpte = *ptep;
2500
2501 if (flush == PMAP_FLUSH_SECONDARY
2502 && va_offset == (md->pvh_attrs & arm_cache_prefer_mask))
2503 continue;
2504
2505 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2506 /*
2507 * Set up a PTE with the right coloring to flush
2508 * existing cache entries.
2509 */
2510 *ptep = L2_S_PROTO
2511 | pa
2512 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
2513 | pte_l2_s_cache_mode;
2514 PTE_SYNC(ptep);
2515
2516 /*
2517 * Flush it.
2518 */
2519 (*cf)(cdstp + va_offset, PAGE_SIZE);
2520
2521 /*
2522 * Restore the page table entry since we might have interrupted
2523 * pmap_zero_page or pmap_copy_page which was already using
2524 * this pte.
2525 */
2526 *ptep = oldpte;
2527 PTE_SYNC(ptep);
2528 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2529 }
2530 }
2531 #endif /* PMAP_CACHE_VIPT */
2532
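#if 0
/*
 * Editor's illustrative sketch (not part of this pmap): the alias walk
 * performed by pmap_flush_page() above.  Every PAGE_SIZE step within
 * arm_cache_prefer_mask addresses a distinct cache color of the same
 * physical page.  flush_alias() is a hypothetical stand-in for the
 * map/flush/unmap sequence done through cdst_pte.
 */
static void
flush_all_colors(paddr_t pa, vsize_t prefer_mask,
    void (*flush_alias)(paddr_t, vsize_t))
{
	vsize_t va_offset;

	/* Visit each color's alias window, one page at a time. */
	for (va_offset = 0; va_offset <= prefer_mask; va_offset += PAGE_SIZE)
		(*flush_alias)(pa, va_offset);
}
#endif
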
2533 /*
2534 * Routine: pmap_page_remove
2535 * Function:
2536 * Removes this physical page from
2537 * all physical maps in which it resides.
2538 * Reflects back modify bits to the pager.
2539 */
2540 static void
2541 pmap_page_remove(struct vm_page_md *md, paddr_t pa)
2542 {
2543 struct l2_bucket *l2b;
2544 struct pv_entry *pv, *npv, **pvp;
2545 pmap_t pm;
2546 pt_entry_t *ptep;
2547 bool flush;
2548 u_int flags;
2549
2550 NPDEBUG(PDB_FOLLOW,
2551 printf("pmap_page_remove: md %p (0x%08lx)\n", md,
2552 pa));
2553
2554 pv = SLIST_FIRST(&md->pvh_list);
2555 if (pv == NULL) {
2556 #ifdef PMAP_CACHE_VIPT
2557 /*
2558 * We *know* the page contents are about to be replaced.
2559 * Discard the exec contents
2560 */
2561 if (PV_IS_EXEC_P(md->pvh_attrs))
2562 PMAPCOUNT(exec_discarded_page_protect);
2563 md->pvh_attrs &= ~PVF_EXEC;
2564 KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2565 #endif
2566 return;
2567 }
2568 #ifdef PMAP_CACHE_VIPT
2569 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
2570 #endif
2571
2572 /*
2573 * Clear alias counts
2574 */
2575 #ifdef PMAP_CACHE_VIVT
2576 md->k_mappings = 0;
2577 #endif
2578 md->urw_mappings = md->uro_mappings = 0;
2579
2580 flush = false;
2581 flags = 0;
2582
2583 #ifdef PMAP_CACHE_VIVT
2584 pmap_clean_page(pv, false);
2585 #endif
2586
2587 pvp = &SLIST_FIRST(&md->pvh_list);
2588 while (pv) {
2589 pm = pv->pv_pmap;
2590 npv = SLIST_NEXT(pv, pv_link);
2591 if (flush == false && pmap_is_current(pm))
2592 flush = true;
2593
2594 if (pm == pmap_kernel()) {
2595 #ifdef PMAP_CACHE_VIPT
2596 /*
2597 			 * If this was an unmanaged mapping, it must be preserved.
2598 * Move it back on the list and advance the end-of-list
2599 * pointer.
2600 */
2601 if (pv->pv_flags & PVF_KENTRY) {
2602 *pvp = pv;
2603 pvp = &SLIST_NEXT(pv, pv_link);
2604 pv = npv;
2605 continue;
2606 }
2607 if (pv->pv_flags & PVF_WRITE)
2608 md->krw_mappings--;
2609 else
2610 md->kro_mappings--;
2611 #endif
2612 PMAPCOUNT(kernel_unmappings);
2613 }
2614 PMAPCOUNT(unmappings);
2615
2616 pmap_acquire_pmap_lock(pm);
2617
2618 l2b = pmap_get_l2_bucket(pm, pv->pv_va);
2619 KDASSERT(l2b != NULL);
2620
2621 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2622
2623 /*
2624 * Update statistics
2625 */
2626 --pm->pm_stats.resident_count;
2627
2628 /* Wired bit */
2629 if (pv->pv_flags & PVF_WIRED)
2630 --pm->pm_stats.wired_count;
2631
2632 flags |= pv->pv_flags;
2633
2634 /*
2635 * Invalidate the PTEs.
2636 */
2637 *ptep = 0;
2638 PTE_SYNC_CURRENT(pm, ptep);
2639 pmap_free_l2_bucket(pm, l2b, 1);
2640
2641 pool_put(&pmap_pv_pool, pv);
2642 pv = npv;
2643 /*
2644 * if we reach the end of the list and there are still
2645 		 * mappings, they may now be cacheable.
2646 */
2647 if (pv == NULL) {
2648 *pvp = NULL;
2649 if (!SLIST_EMPTY(&md->pvh_list))
2650 pmap_vac_me_harder(md, pa, pm, 0);
2651 }
2652 pmap_release_pmap_lock(pm);
2653 }
2654 #ifdef PMAP_CACHE_VIPT
2655 /*
2656 * Its EXEC cache is now gone.
2657 */
2658 if (PV_IS_EXEC_P(md->pvh_attrs))
2659 PMAPCOUNT(exec_discarded_page_protect);
2660 md->pvh_attrs &= ~PVF_EXEC;
2661 KASSERT(md->urw_mappings == 0);
2662 KASSERT(md->uro_mappings == 0);
2663 if (md->krw_mappings == 0)
2664 md->pvh_attrs &= ~PVF_WRITE;
2665 KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2666 #endif
2667
2668 if (flush) {
2669 /*
2670 * Note: We can't use pmap_tlb_flush{I,D}() here since that
2671 * would need a subsequent call to pmap_update() to ensure
2672 * curpm->pm_cstate.cs_all is reset. Our callers are not
2673 * required to do that (see pmap(9)), so we can't modify
2674 * the current pmap's state.
2675 */
2676 if (PV_BEEN_EXECD(flags))
2677 cpu_tlb_flushID();
2678 else
2679 cpu_tlb_flushD();
2680 }
2681 cpu_cpwait();
2682 }
2683
2684 /*
2685 * pmap_t pmap_create(void)
2686 *
2687 * Create a new pmap structure from scratch.
2688 */
2689 pmap_t
2690 pmap_create(void)
2691 {
2692 pmap_t pm;
2693
2694 pm = pool_cache_get(&pmap_cache, PR_WAITOK);
2695
2696 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
2697 uvm_obj_init(&pm->pm_obj, NULL, false, 1);
2698 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
2699
2700 pm->pm_stats.wired_count = 0;
2701 pm->pm_stats.resident_count = 1;
2702 pm->pm_cstate.cs_all = 0;
2703 pmap_alloc_l1(pm);
2704
2705 /*
2706 * Note: The pool cache ensures that the pm_l2[] array is already
2707 * initialised to zero.
2708 */
2709
2710 pmap_pinit(pm);
2711
2712 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
2713
2714 return (pm);
2715 }
2716
2717 u_int
2718 arm32_mmap_flags(paddr_t pa)
2719 {
2720 /*
2721 	 * The upper 8 bits in pmap_enter()'s flags are reserved for MD stuff,
2722 	 * and we're using the upper bits in page numbers to pass flags around,
2723 	 * so we might as well use the same bits.
2724 */
2725 return (u_int)pa & PMAP_MD_MASK;
2726 }
2727 /*
2728 * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
2729 * u_int flags)
2730 *
2731 * Insert the given physical page (p) at
2732 * the specified virtual address (v) in the
2733 * target physical map with the protection requested.
2734 *
2735 * NB: This is the only routine which MAY NOT lazy-evaluate
2736 * or lose information. That is, this routine must actually
2737 * insert this page into the given map NOW.
2738 */
2739 int
2740 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2741 {
2742 struct l2_bucket *l2b;
2743 struct vm_page *pg, *opg;
2744 struct pv_entry *pv;
2745 pt_entry_t *ptep, npte, opte;
2746 u_int nflags;
2747 u_int oflags;
2748
2749 NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
2750
2751 KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
2752 KDASSERT(((va | pa) & PGOFSET) == 0);
2753
2754 /*
2755 * Get a pointer to the page. Later on in this function, we
2756 * test for a managed page by checking pg != NULL.
2757 */
2758 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2759
2760 nflags = 0;
2761 if (prot & VM_PROT_WRITE)
2762 nflags |= PVF_WRITE;
2763 if (prot & VM_PROT_EXECUTE)
2764 nflags |= PVF_EXEC;
2765 if (flags & PMAP_WIRED)
2766 nflags |= PVF_WIRED;
2767
2768 pmap_acquire_pmap_lock(pm);
2769
2770 /*
2771 * Fetch the L2 bucket which maps this page, allocating one if
2772 * necessary for user pmaps.
2773 */
2774 if (pm == pmap_kernel())
2775 l2b = pmap_get_l2_bucket(pm, va);
2776 else
2777 l2b = pmap_alloc_l2_bucket(pm, va);
2778 if (l2b == NULL) {
2779 if (flags & PMAP_CANFAIL) {
2780 pmap_release_pmap_lock(pm);
2781 return (ENOMEM);
2782 }
2783 panic("pmap_enter: failed to allocate L2 bucket");
2784 }
2785 ptep = &l2b->l2b_kva[l2pte_index(va)];
2786 opte = *ptep;
2787 npte = pa;
2788 oflags = 0;
2789
2790 if (opte) {
2791 /*
2792 * There is already a mapping at this address.
2793 * If the physical address is different, lookup the
2794 * vm_page.
2795 */
2796 if (l2pte_pa(opte) != pa)
2797 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
2798 else
2799 opg = pg;
2800 } else
2801 opg = NULL;
2802
2803 if (pg) {
2804 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
2805
2806 /*
2807 * This is to be a managed mapping.
2808 */
2809 if ((flags & VM_PROT_ALL) ||
2810 (md->pvh_attrs & PVF_REF)) {
2811 /*
2812 * - The access type indicates that we don't need
2813 * to do referenced emulation.
2814 * OR
2815 * - The physical page has already been referenced
2816 * so no need to re-do referenced emulation here.
2817 */
2818 npte |= l2pte_set_readonly(L2_S_PROTO);
2819
2820 nflags |= PVF_REF;
2821
2822 if ((prot & VM_PROT_WRITE) != 0 &&
2823 ((flags & VM_PROT_WRITE) != 0 ||
2824 (md->pvh_attrs & PVF_MOD) != 0)) {
2825 /*
2826 * This is a writable mapping, and the
2827 * page's mod state indicates it has
2828 * already been modified. Make it
2829 * writable from the outset.
2830 */
2831 npte = l2pte_set_writable(npte);
2832 nflags |= PVF_MOD;
2833 }
2834 } else {
2835 /*
2836 * Need to do page referenced emulation.
2837 */
2838 npte |= L2_TYPE_INV;
2839 }
2840
2841 npte |= pte_l2_s_cache_mode;
2842
2843 if (pg == opg) {
2844 /*
2845 * We're changing the attrs of an existing mapping.
2846 */
2847 #ifdef MULTIPROCESSOR
2848 KASSERT(uvm_page_locked_p(pg));
2849 #endif
2850 oflags = pmap_modify_pv(md, pa, pm, va,
2851 PVF_WRITE | PVF_EXEC | PVF_WIRED |
2852 PVF_MOD | PVF_REF, nflags);
2853
2854 #ifdef PMAP_CACHE_VIVT
2855 /*
2856 * We may need to flush the cache if we're
2857 * doing rw-ro...
2858 */
2859 if (pm->pm_cstate.cs_cache_d &&
2860 (oflags & PVF_NC) == 0 &&
2861 l2pte_writable_p(opte) &&
2862 (prot & VM_PROT_WRITE) == 0)
2863 cpu_dcache_wb_range(va, PAGE_SIZE);
2864 #endif
2865 } else {
2866 /*
2867 * New mapping, or changing the backing page
2868 * of an existing mapping.
2869 */
2870 if (opg) {
2871 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
2872 paddr_t opa = VM_PAGE_TO_PHYS(opg);
2873
2874 /*
2875 * Replacing an existing mapping with a new one.
2876 * It is part of our managed memory so we
2877 * must remove it from the PV list
2878 */
2879 #ifdef MULTIPROCESSOR
2880 KASSERT(uvm_page_locked_p(opg));
2881 #endif
2882 pv = pmap_remove_pv(omd, opa, pm, va);
2883 pmap_vac_me_harder(omd, opa, pm, 0);
2884 oflags = pv->pv_flags;
2885
2886 #ifdef PMAP_CACHE_VIVT
2887 /*
2888 * If the old mapping was valid (ref/mod
2889 * emulation creates 'invalid' mappings
2890 * initially) then make sure to frob
2891 * the cache.
2892 */
2893 if ((oflags & PVF_NC) == 0 &&
2894 l2pte_valid(opte)) {
2895 if (PV_BEEN_EXECD(oflags)) {
2896 pmap_idcache_wbinv_range(pm, va,
2897 PAGE_SIZE);
2898 } else
2899 if (PV_BEEN_REFD(oflags)) {
2900 pmap_dcache_wb_range(pm, va,
2901 PAGE_SIZE, true,
2902 (oflags & PVF_WRITE) == 0);
2903 }
2904 }
2905 #endif
2906 } else
2907 if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
2908 if ((flags & PMAP_CANFAIL) == 0)
2909 panic("pmap_enter: no pv entries");
2910
2911 if (pm != pmap_kernel())
2912 pmap_free_l2_bucket(pm, l2b, 0);
2913 pmap_release_pmap_lock(pm);
2914 NPDEBUG(PDB_ENTER,
2915 printf("pmap_enter: ENOMEM\n"));
2916 return (ENOMEM);
2917 }
2918
2919 #ifdef MULTIPROCESSOR
2920 KASSERT(uvm_page_locked_p(pg));
2921 #endif
2922 pmap_enter_pv(md, pa, pv, pm, va, nflags);
2923 }
2924 } else {
2925 /*
2926 * We're mapping an unmanaged page.
2927 * These are always readable, and possibly writable, from
2928 * the get go as we don't need to track ref/mod status.
2929 */
2930 npte |= l2pte_set_readonly(L2_S_PROTO);
2931 if (prot & VM_PROT_WRITE)
2932 npte = l2pte_set_writable(npte);
2933
2934 /*
2935 * Make sure the vector table is mapped cacheable
2936 */
2937 if ((pm != pmap_kernel() && va == vector_page) ||
2938 (flags & ARM32_MMAP_CACHEABLE)) {
2939 npte |= pte_l2_s_cache_mode;
2940 } else if (flags & ARM32_MMAP_WRITECOMBINE) {
2941 npte |= pte_l2_s_wc_mode;
2942 }
2943 if (opg) {
2944 /*
2945 * Looks like there's an existing 'managed' mapping
2946 * at this address.
2947 */
2948 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
2949 paddr_t opa = VM_PAGE_TO_PHYS(opg);
2950
2951 #ifdef MULTIPROCESSOR
2952 KASSERT(uvm_page_locked_p(opg));
2953 #endif
2954 pv = pmap_remove_pv(omd, opa, pm, va);
2955 pmap_vac_me_harder(omd, opa, pm, 0);
2956 oflags = pv->pv_flags;
2957
2958 #ifdef PMAP_CACHE_VIVT
2959 if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
2960 if (PV_BEEN_EXECD(oflags))
2961 pmap_idcache_wbinv_range(pm, va,
2962 PAGE_SIZE);
2963 else
2964 if (PV_BEEN_REFD(oflags))
2965 pmap_dcache_wb_range(pm, va, PAGE_SIZE,
2966 true, (oflags & PVF_WRITE) == 0);
2967 }
2968 #endif
2969 pool_put(&pmap_pv_pool, pv);
2970 }
2971 }
2972
2973 /*
2974 * Make sure userland mappings get the right permissions
2975 */
2976 if (pm != pmap_kernel() && va != vector_page)
2977 npte |= L2_S_PROT_U;
2978
2979 /*
2980 * Keep the stats up to date
2981 */
2982 if (opte == 0) {
2983 l2b->l2b_occupancy++;
2984 pm->pm_stats.resident_count++;
2985 }
2986
2987 NPDEBUG(PDB_ENTER,
2988 printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));
2989
2990 /*
2991 * If this is just a wiring change, the two PTEs will be
2992 * identical, so there's no need to update the page table.
2993 */
2994 if (npte != opte) {
2995 bool is_cached = pmap_is_cached(pm);
2996
2997 *ptep = npte;
2998 if (is_cached) {
2999 /*
3000 * We only need to frob the cache/tlb if this pmap
3001 * is current
3002 */
3003 PTE_SYNC(ptep);
3004 if (va != vector_page && l2pte_valid(npte)) {
3005 /*
3006 * This mapping is likely to be accessed as
3007 * soon as we return to userland. Fix up the
3008 * L1 entry to avoid taking another
3009 * page/domain fault.
3010 */
3011 pd_entry_t *pl1pd, l1pd;
3012
3013 pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
3014 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) |
3015 L1_C_PROTO;
3016 if (*pl1pd != l1pd) {
3017 *pl1pd = l1pd;
3018 PTE_SYNC(pl1pd);
3019 }
3020 }
3021 }
3022
3023 if (PV_BEEN_EXECD(oflags))
3024 pmap_tlb_flushID_SE(pm, va);
3025 else
3026 if (PV_BEEN_REFD(oflags))
3027 pmap_tlb_flushD_SE(pm, va);
3028
3029 NPDEBUG(PDB_ENTER,
3030 printf("pmap_enter: is_cached %d cs 0x%08x\n",
3031 is_cached, pm->pm_cstate.cs_all));
3032
3033 if (pg != NULL) {
3034 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3035
3036 #ifdef MULTIPROCESSOR
3037 KASSERT(uvm_page_locked_p(pg));
3038 #endif
3039 pmap_vac_me_harder(md, pa, pm, va);
3040 }
3041 }
3042 #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
3043 if (pg) {
3044 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3045
3046 #ifdef MULTIPROCESSOR
3047 KASSERT(uvm_page_locked_p(pg));
3048 #endif
3049 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3050 KASSERT(arm_cache_prefer_mask == 0 || ((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0));
3051 }
3052 #endif
3053
3054 pmap_release_pmap_lock(pm);
3055
3056 return (0);
3057 }
3058
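#if 0
/*
 * Editor's usage sketch (illustrative, not part of this file): a
 * typical managed pmap_enter() call followed by pmap_update(), as the
 * pmap(9) interface expects.  The wrapper name and its callers are
 * hypothetical.
 */
static int
enter_one_wired(pmap_t pm, vaddr_t va, paddr_t pa)
{
	int error;

	/* Access type (VM_PROT_WRITE) plus wiring/canfail hints in flags. */
	error = pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
	if (error == 0)
		pmap_update(pm);
	return error;
}
#endif
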
3059 /*
3060 * pmap_remove()
3061 *
3062 * pmap_remove is responsible for nuking a number of mappings for a range
3063 * of virtual address space in the current pmap. To do this efficiently
3064 * is interesting, because in a number of cases a wide virtual address
3065 * range may be supplied that contains few actual mappings. So, the
3066 * optimisations are:
3067 * 1. Skip over hunks of address space for which no L1 or L2 entry exists.
3068 * 2. Build up a list of pages we've hit, up to a maximum, so we can
3069 * maybe do just a partial cache clean. This path of execution is
3070 * complicated by the fact that the cache must be flushed _before_
3071 * the PTE is nuked, being a VAC :-)
3072 * 3. If we're called after UVM calls pmap_remove_all(), we can defer
3073 * all invalidations until pmap_update(), since pmap_remove_all() has
3074 * already flushed the cache.
3075 * 4. Maybe later fast-case a single page, but I don't think this is
3076 * going to make _that_ much difference overall.
3077 */
3078
3079 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
3080
3081 void
3082 pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
3083 {
3084 struct l2_bucket *l2b;
3085 vaddr_t next_bucket;
3086 pt_entry_t *ptep;
3087 u_int cleanlist_idx, total, cnt;
3088 struct {
3089 vaddr_t va;
3090 pt_entry_t *ptep;
3091 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
3092 u_int mappings, is_exec, is_refd;
3093
3094 	NPDEBUG(PDB_REMOVE, printf("pmap_remove: pmap=%p sva=%08lx "
3095 "eva=%08lx\n", pm, sva, eva));
3096
3097 /*
3098 * we lock in the pmap => pv_head direction
3099 */
3100 pmap_acquire_pmap_lock(pm);
3101
3102 if (pm->pm_remove_all || !pmap_is_cached(pm)) {
3103 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3104 if (pm->pm_cstate.cs_tlb == 0)
3105 pm->pm_remove_all = true;
3106 } else
3107 cleanlist_idx = 0;
3108
3109 total = 0;
3110
3111 while (sva < eva) {
3112 /*
3113 * Do one L2 bucket's worth at a time.
3114 */
3115 next_bucket = L2_NEXT_BUCKET(sva);
3116 if (next_bucket > eva)
3117 next_bucket = eva;
3118
3119 l2b = pmap_get_l2_bucket(pm, sva);
3120 if (l2b == NULL) {
3121 sva = next_bucket;
3122 continue;
3123 }
3124
3125 ptep = &l2b->l2b_kva[l2pte_index(sva)];
3126
3127 for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){
3128 struct vm_page *pg;
3129 pt_entry_t pte;
3130 paddr_t pa;
3131
3132 pte = *ptep;
3133
3134 if (pte == 0) {
3135 /* Nothing here, move along */
3136 continue;
3137 }
3138
3139 pa = l2pte_pa(pte);
3140 is_exec = 0;
3141 is_refd = 1;
3142
3143 /*
3144 * Update flags. In a number of circumstances,
3145 * we could cluster a lot of these and do a
3146 * number of sequential pages in one go.
3147 */
3148 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
3149 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3150 struct pv_entry *pv;
3151
3152 #ifdef MULTIPROCESSOR
3153 KASSERT(uvm_page_locked_p(pg));
3154 #endif
3155 pv = pmap_remove_pv(md, pa, pm, sva);
3156 pmap_vac_me_harder(md, pa, pm, 0);
3157 if (pv != NULL) {
3158 if (pm->pm_remove_all == false) {
3159 is_exec =
3160 PV_BEEN_EXECD(pv->pv_flags);
3161 is_refd =
3162 PV_BEEN_REFD(pv->pv_flags);
3163 }
3164 pool_put(&pmap_pv_pool, pv);
3165 }
3166 }
3167 mappings++;
3168
3169 if (!l2pte_valid(pte)) {
3170 /*
3171 * Ref/Mod emulation is still active for this
3172 				 * mapping, therefore it has not yet been
3173 * accessed. No need to frob the cache/tlb.
3174 */
3175 *ptep = 0;
3176 PTE_SYNC_CURRENT(pm, ptep);
3177 continue;
3178 }
3179
3180 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3181 /* Add to the clean list. */
3182 cleanlist[cleanlist_idx].ptep = ptep;
3183 cleanlist[cleanlist_idx].va =
3184 sva | (is_exec & 1);
3185 cleanlist_idx++;
3186 } else
3187 if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3188 /* Nuke everything if needed. */
3189 #ifdef PMAP_CACHE_VIVT
3190 pmap_idcache_wbinv_all(pm);
3191 #endif
3192 pmap_tlb_flushID(pm);
3193
3194 /*
3195 * Roll back the previous PTE list,
3196 * and zero out the current PTE.
3197 */
3198 for (cnt = 0;
3199 cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
3200 *cleanlist[cnt].ptep = 0;
3201 PTE_SYNC(cleanlist[cnt].ptep);
3202 }
3203 *ptep = 0;
3204 PTE_SYNC(ptep);
3205 cleanlist_idx++;
3206 pm->pm_remove_all = true;
3207 } else {
3208 *ptep = 0;
3209 PTE_SYNC(ptep);
3210 if (pm->pm_remove_all == false) {
3211 if (is_exec)
3212 pmap_tlb_flushID_SE(pm, sva);
3213 else
3214 if (is_refd)
3215 pmap_tlb_flushD_SE(pm, sva);
3216 }
3217 }
3218 }
3219
3220 /*
3221 * Deal with any left overs
3222 */
3223 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
3224 total += cleanlist_idx;
3225 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
3226 if (pm->pm_cstate.cs_all != 0) {
3227 vaddr_t clva = cleanlist[cnt].va & ~1;
3228 if (cleanlist[cnt].va & 1) {
3229 #ifdef PMAP_CACHE_VIVT
3230 pmap_idcache_wbinv_range(pm,
3231 clva, PAGE_SIZE);
3232 #endif
3233 pmap_tlb_flushID_SE(pm, clva);
3234 } else {
3235 #ifdef PMAP_CACHE_VIVT
3236 pmap_dcache_wb_range(pm,
3237 clva, PAGE_SIZE, true,
3238 false);
3239 #endif
3240 pmap_tlb_flushD_SE(pm, clva);
3241 }
3242 }
3243 *cleanlist[cnt].ptep = 0;
3244 PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
3245 }
3246
3247 /*
3248 * If it looks like we're removing a whole bunch
3249 * of mappings, it's faster to just write-back
3250 * the whole cache now and defer TLB flushes until
3251 * pmap_update() is called.
3252 */
3253 if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
3254 cleanlist_idx = 0;
3255 else {
3256 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3257 #ifdef PMAP_CACHE_VIVT
3258 pmap_idcache_wbinv_all(pm);
3259 #endif
3260 pm->pm_remove_all = true;
3261 }
3262 }
3263
3264 pmap_free_l2_bucket(pm, l2b, mappings);
3265 pm->pm_stats.resident_count -= mappings;
3266 }
3267
3268 pmap_release_pmap_lock(pm);
3269 }
3270
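#if 0
/*
 * Editor's illustrative sketch (not part of this pmap): the clean-list
 * batching strategy used by pmap_remove() above.  Mappings are cleaned
 * individually until PMAP_REMOVE_CLEAN_LIST_SIZE is exceeded, after
 * which a single whole-cache write-back is cheaper than per-page
 * cleans.  clean_one()/clean_all() are hypothetical stand-ins for the
 * pmap_*cache_wbinv_*() calls.
 */
static void
batch_clean(const vaddr_t *vas, u_int n,
    void (*clean_one)(vaddr_t), void (*clean_all)(void))
{
	u_int i;

	if (n <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
		/* Few mappings: clean each page individually. */
		for (i = 0; i < n; i++)
			(*clean_one)(vas[i]);
	} else {
		/* Many mappings: one whole-cache clean wins. */
		(*clean_all)();
	}
}
#endif
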
3271 #ifdef PMAP_CACHE_VIPT
3272 static struct pv_entry *
3273 pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
3274 {
3275 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3276 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3277 struct pv_entry *pv;
3278
3279 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
3280 KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
3281
3282 pv = pmap_remove_pv(md, pa, pmap_kernel(), va);
3283 KASSERT(pv);
3284 KASSERT(pv->pv_flags & PVF_KENTRY);
3285
3286 /*
3287 	 * If we are removing a writable mapping to a cached exec page:
3288 	 * if it's the last mapping then clear its exec status, otherwise
3289 	 * sync the page to the icache.
3290 */
3291 if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
3292 && (pv->pv_flags & PVF_WRITE) != 0) {
3293 if (SLIST_EMPTY(&md->pvh_list)) {
3294 md->pvh_attrs &= ~PVF_EXEC;
3295 PMAPCOUNT(exec_discarded_kremove);
3296 } else {
3297 pmap_syncicache_page(md, pa);
3298 PMAPCOUNT(exec_synced_kremove);
3299 }
3300 }
3301 pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
3302
3303 return pv;
3304 }
3305 #endif /* PMAP_CACHE_VIPT */
3306
3307 /*
3308 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
3309 *
3310 * We assume there is already sufficient KVM space available
3311 * to do this, as we can't allocate L2 descriptor tables/metadata
3312 * from here.
3313 */
3314 void
3315 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
3316 {
3317 struct l2_bucket *l2b;
3318 pt_entry_t *ptep, opte;
3319 #ifdef PMAP_CACHE_VIVT
3320 struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
3321 #endif
3322 #ifdef PMAP_CACHE_VIPT
3323 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3324 struct vm_page *opg;
3325 struct pv_entry *pv = NULL;
3326 #endif
3327 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3328
3329 NPDEBUG(PDB_KENTER,
3330 printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
3331 va, pa, prot));
3332
3333 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
3334 KDASSERT(l2b != NULL);
3335
3336 ptep = &l2b->l2b_kva[l2pte_index(va)];
3337 opte = *ptep;
3338
3339 if (opte == 0) {
3340 PMAPCOUNT(kenter_mappings);
3341 l2b->l2b_occupancy++;
3342 } else {
3343 PMAPCOUNT(kenter_remappings);
3344 #ifdef PMAP_CACHE_VIPT
3345 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3346 #ifdef DIAGNOSTIC
3347 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3348 #endif
3349 if (opg) {
3350 KASSERT(opg != pg);
3351 KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
3352 KASSERT((flags & PMAP_KMPAGE) == 0);
3353 pv = pmap_kremove_pg(opg, va);
3354 }
3355 #endif
3356 if (l2pte_valid(opte)) {
3357 #ifdef PMAP_CACHE_VIVT
3358 cpu_dcache_wbinv_range(va, PAGE_SIZE);
3359 #endif
3360 cpu_tlb_flushD_SE(va);
3361 cpu_cpwait();
3362 }
3363 }
3364
3365 *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot)
3366 | ((flags & PMAP_NOCACHE) ? 0 : pte_l2_s_cache_mode);
3367 PTE_SYNC(ptep);
3368
3369 if (pg) {
3370 #ifdef MULTIPROCESSOR
3371 KASSERT(uvm_page_locked_p(pg));
3372 #endif
3373 if (flags & PMAP_KMPAGE) {
3374 KASSERT(md->urw_mappings == 0);
3375 KASSERT(md->uro_mappings == 0);
3376 KASSERT(md->krw_mappings == 0);
3377 KASSERT(md->kro_mappings == 0);
3378 #ifdef PMAP_CACHE_VIPT
3379 KASSERT(pv == NULL);
3380 KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0);
3381 KASSERT((md->pvh_attrs & PVF_NC) == 0);
3382 /* if there is a color conflict, evict from cache. */
3383 if (pmap_is_page_colored_p(md)
3384 && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) {
3385 PMAPCOUNT(vac_color_change);
3386 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
3387 } else if (md->pvh_attrs & PVF_MULTCLR) {
3388 /*
3389 * If this page has multiple colors, expunge
3390 * them.
3391 */
3392 PMAPCOUNT(vac_flush_lots2);
3393 pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY);
3394 }
3395 md->pvh_attrs &= PAGE_SIZE - 1;
3396 md->pvh_attrs |= PVF_KMPAGE
3397 | PVF_COLORED | PVF_DIRTY
3398 | (va & arm_cache_prefer_mask);
3399 #endif
3400 #ifdef PMAP_CACHE_VIVT
3401 md->pvh_attrs |= PVF_KMPAGE;
3402 #endif
3403 pmap_kmpages++;
3404 #ifdef PMAP_CACHE_VIPT
3405 } else {
3406 if (pv == NULL) {
3407 pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
3408 KASSERT(pv != NULL);
3409 }
3410 pmap_enter_pv(md, pa, pv, pmap_kernel(), va,
3411 PVF_WIRED | PVF_KENTRY
3412 | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
3413 if ((prot & VM_PROT_WRITE)
3414 && !(md->pvh_attrs & PVF_NC))
3415 md->pvh_attrs |= PVF_DIRTY;
3416 KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3417 pmap_vac_me_harder(md, pa, pmap_kernel(), va);
3418 #endif
3419 }
3420 #ifdef PMAP_CACHE_VIPT
3421 } else {
3422 if (pv != NULL)
3423 pool_put(&pmap_pv_pool, pv);
3424 #endif
3425 }
3426 }
3427
3428 void
3429 pmap_kremove(vaddr_t va, vsize_t len)
3430 {
3431 struct l2_bucket *l2b;
3432 pt_entry_t *ptep, *sptep, opte;
3433 vaddr_t next_bucket, eva;
3434 u_int mappings;
3435 struct vm_page *opg;
3436
3437 PMAPCOUNT(kenter_unmappings);
3438
3439 NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n",
3440 va, len));
3441
3442 eva = va + len;
3443
3444 while (va < eva) {
3445 next_bucket = L2_NEXT_BUCKET(va);
3446 if (next_bucket > eva)
3447 next_bucket = eva;
3448
3449 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
3450 KDASSERT(l2b != NULL);
3451
3452 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
3453 mappings = 0;
3454
3455 while (va < next_bucket) {
3456 opte = *ptep;
3457 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3458 if (opg) {
3459 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3460
3461 if (omd->pvh_attrs & PVF_KMPAGE) {
3462 KASSERT(omd->urw_mappings == 0);
3463 KASSERT(omd->uro_mappings == 0);
3464 KASSERT(omd->krw_mappings == 0);
3465 KASSERT(omd->kro_mappings == 0);
3466 omd->pvh_attrs &= ~PVF_KMPAGE;
3467 #ifdef PMAP_CACHE_VIPT
3468 omd->pvh_attrs &= ~PVF_WRITE;
3469 #endif
3470 pmap_kmpages--;
3471 #ifdef PMAP_CACHE_VIPT
3472 } else {
3473 pool_put(&pmap_pv_pool,
3474 pmap_kremove_pg(opg, va));
3475 #endif
3476 }
3477 }
3478 if (l2pte_valid(opte)) {
3479 #ifdef PMAP_CACHE_VIVT
3480 cpu_dcache_wbinv_range(va, PAGE_SIZE);
3481 #endif
3482 cpu_tlb_flushD_SE(va);
3483 }
3484 if (opte) {
3485 *ptep = 0;
3486 mappings++;
3487 }
3488 va += PAGE_SIZE;
3489 ptep++;
3490 }
3491 KDASSERT(mappings <= l2b->l2b_occupancy);
3492 l2b->l2b_occupancy -= mappings;
3493 PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
3494 }
3495 cpu_cpwait();
3496 }
3497
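#if 0
/*
 * Editor's usage sketch (illustrative, not part of this file): a
 * transient wired kernel mapping made with pmap_kenter_pa() and torn
 * down with pmap_kremove().  `kva' is assumed to come from a KVA
 * allocator; the wrapper name is hypothetical.
 */
static void
map_then_unmap(vaddr_t kva, paddr_t pa)
{
	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	/* ... access the page through kva ... */
	pmap_kremove(kva, PAGE_SIZE);
	pmap_update(pmap_kernel());
}
#endif
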
3498 bool
3499 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
3500 {
3501 struct l2_dtable *l2;
3502 pd_entry_t *pl1pd, l1pd;
3503 pt_entry_t *ptep, pte;
3504 paddr_t pa;
3505 u_int l1idx;
3506
3507 pmap_acquire_pmap_lock(pm);
3508
3509 l1idx = L1_IDX(va);
3510 pl1pd = &pm->pm_l1->l1_kva[l1idx];
3511 l1pd = *pl1pd;
3512
3513 if (l1pte_section_p(l1pd)) {
3514 /*
3515 * These should only happen for pmap_kernel()
3516 */
3517 KDASSERT(pm == pmap_kernel());
3518 pmap_release_pmap_lock(pm);
3519 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
3520 if (l1pte_supersection_p(l1pd)) {
3521 pa = (l1pd & L1_SS_FRAME) | (va & L1_SS_OFFSET);
3522 } else
3523 #endif
3524 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3525 } else {
3526 /*
3527 * Note that we can't rely on the validity of the L1
3528 * descriptor as an indication that a mapping exists.
3529 * We have to look it up in the L2 dtable.
3530 */
3531 l2 = pm->pm_l2[L2_IDX(l1idx)];
3532
3533 if (l2 == NULL ||
3534 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3535 pmap_release_pmap_lock(pm);
3536 return false;
3537 }
3538
3539 ptep = &ptep[l2pte_index(va)];
3540 pte = *ptep;
3541 pmap_release_pmap_lock(pm);
3542
3543 if (pte == 0)
3544 return false;
3545
3546 switch (pte & L2_TYPE_MASK) {
3547 case L2_TYPE_L:
3548 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3549 break;
3550
3551 default:
3552 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3553 break;
3554 }
3555 }
3556
3557 if (pap != NULL)
3558 *pap = pa;
3559
3560 return true;
3561 }
3562
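#if 0
/*
 * Editor's usage sketch (illustrative, not part of this file): a
 * virtual-to-physical lookup in the kernel pmap using pmap_extract()
 * above.  The wrapper name is hypothetical.
 */
static bool
kva_to_pa(vaddr_t va, paddr_t *pap)
{
	/* Returns false if `va' is not currently mapped by the kernel. */
	return pmap_extract(pmap_kernel(), va, pap);
}
#endif
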
3563 void
3564 pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
3565 {
3566 struct l2_bucket *l2b;
3567 pt_entry_t *ptep, pte;
3568 vaddr_t next_bucket;
3569 u_int flags;
3570 u_int clr_mask;
3571 int flush;
3572
3573 NPDEBUG(PDB_PROTECT,
3574 printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
3575 pm, sva, eva, prot));
3576
3577 if ((prot & VM_PROT_READ) == 0) {
3578 pmap_remove(pm, sva, eva);
3579 return;
3580 }
3581
3582 if (prot & VM_PROT_WRITE) {
3583 /*
3584 * If this is a read->write transition, just ignore it and let
3585 * uvm_fault() take care of it later.
3586 */
3587 return;
3588 }
3589
3590 pmap_acquire_pmap_lock(pm);
3591
3592 flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
3593 flags = 0;
3594 clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
3595
3596 while (sva < eva) {
3597 next_bucket = L2_NEXT_BUCKET(sva);
3598 if (next_bucket > eva)
3599 next_bucket = eva;
3600
3601 l2b = pmap_get_l2_bucket(pm, sva);
3602 if (l2b == NULL) {
3603 sva = next_bucket;
3604 continue;
3605 }
3606
3607 ptep = &l2b->l2b_kva[l2pte_index(sva)];
3608
3609 while (sva < next_bucket) {
3610 pte = *ptep;
3611 if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) {
3612 struct vm_page *pg;
3613 u_int f;
3614
3615 #ifdef PMAP_CACHE_VIVT
3616 /*
3617 				 * OK, at this point, we know we're doing a
3618 * write-protect operation. If the pmap is
3619 * active, write-back the page.
3620 */
3621 pmap_dcache_wb_range(pm, sva, PAGE_SIZE,
3622 false, false);
3623 #endif
3624
3625 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
3626 pte = l2pte_set_readonly(pte);
3627 *ptep = pte;
3628 PTE_SYNC(ptep);
3629
3630 if (pg != NULL) {
3631 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3632 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3633
3634 #ifdef MULTIPROCESSOR
3635 KASSERT(uvm_page_locked_p(pg));
3636 #endif
3637 f = pmap_modify_pv(md, pa, pm, sva,
3638 clr_mask, 0);
3639 pmap_vac_me_harder(md, pa, pm, sva);
3640 } else {
3641 f = PVF_REF | PVF_EXEC;
3642 }
3643
3644 if (flush >= 0) {
3645 flush++;
3646 flags |= f;
3647 } else
3648 if (PV_BEEN_EXECD(f))
3649 pmap_tlb_flushID_SE(pm, sva);
3650 else
3651 if (PV_BEEN_REFD(f))
3652 pmap_tlb_flushD_SE(pm, sva);
3653 }
3654
3655 sva += PAGE_SIZE;
3656 ptep++;
3657 }
3658 }
3659
3660 pmap_release_pmap_lock(pm);
3661
3662 if (flush) {
3663 if (PV_BEEN_EXECD(flags))
3664 pmap_tlb_flushID(pm);
3665 else
3666 if (PV_BEEN_REFD(flags))
3667 pmap_tlb_flushD(pm);
3668 }
3669 }
3670
3671 void
3672 pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
3673 {
3674 struct l2_bucket *l2b;
3675 pt_entry_t *ptep;
3676 vaddr_t next_bucket;
3677 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;
3678
3679 NPDEBUG(PDB_EXEC,
3680 printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n",
3681 pm, sva, eva));
3682
3683 pmap_acquire_pmap_lock(pm);
3684
3685 while (sva < eva) {
3686 next_bucket = L2_NEXT_BUCKET(sva);
3687 if (next_bucket > eva)
3688 next_bucket = eva;
3689
3690 l2b = pmap_get_l2_bucket(pm, sva);
3691 if (l2b == NULL) {
3692 sva = next_bucket;
3693 continue;
3694 }
3695
3696 for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
3697 sva < next_bucket;
3698 sva += page_size, ptep++, page_size = PAGE_SIZE) {
3699 if (l2pte_valid(*ptep)) {
3700 cpu_icache_sync_range(sva,
3701 min(page_size, eva - sva));
3702 }
3703 }
3704 }
3705
3706 pmap_release_pmap_lock(pm);
3707 }
3708
3709 void
3710 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
3711 {
3712 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3713 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3714
3715 NPDEBUG(PDB_PROTECT,
3716 printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n",
3717 md, pa, prot));
3718
3719 #ifdef MULTIPROCESSOR
3720 KASSERT(uvm_page_locked_p(pg));
3721 #endif
3722
3723 switch(prot) {
3724 case VM_PROT_READ|VM_PROT_WRITE:
3725 #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
3726 pmap_clearbit(md, pa, PVF_EXEC);
3727 break;
3728 #endif
3729 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
3730 break;
3731
3732 case VM_PROT_READ:
3733 #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
3734 pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC);
3735 break;
3736 #endif
3737 case VM_PROT_READ|VM_PROT_EXECUTE:
3738 pmap_clearbit(md, pa, PVF_WRITE);
3739 break;
3740
3741 default:
3742 pmap_page_remove(md, pa);
3743 break;
3744 }
3745 }
3746
3747 /*
3748 * pmap_clear_modify:
3749 *
3750 * Clear the "modified" attribute for a page.
3751 */
3752 bool
3753 pmap_clear_modify(struct vm_page *pg)
3754 {
3755 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3756 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3757 bool rv;
3758
3759 #ifdef MULTIPROCESSOR
3760 KASSERT(uvm_page_locked_p(pg));
3761 #endif
3762
3763 if (md->pvh_attrs & PVF_MOD) {
3764 rv = true;
3765 #ifdef PMAP_CACHE_VIPT
3766 /*
3767 * If we are going to clear the modified bit and there are
3768 * no other modified bits set, flush the page to memory and
3769 * mark it clean.
3770 */
3771 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
3772 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
3773 #endif
3774 pmap_clearbit(md, pa, PVF_MOD);
3775 } else
3776 rv = false;
3777
3778 return (rv);
3779 }
3780
3781 /*
3782 * pmap_clear_reference:
3783 *
3784 * Clear the "referenced" attribute for a page.
3785 */
3786 bool
3787 pmap_clear_reference(struct vm_page *pg)
3788 {
3789 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3790 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3791 bool rv;
3792
3793 #ifdef MULTIPROCESSOR
3794 KASSERT(uvm_page_locked_p(pg));
3795 #endif
3796
3797 if (md->pvh_attrs & PVF_REF) {
3798 rv = true;
3799 pmap_clearbit(md, pa, PVF_REF);
3800 } else
3801 rv = false;
3802
3803 return (rv);
3804 }
3805
3806 /*
3807 * pmap_is_modified:
3808 *
3809 * Test if a page has the "modified" attribute.
3810 */
3811 /* See <arm/arm32/pmap.h> */
3812
3813 /*
3814 * pmap_is_referenced:
3815 *
3816 * Test if a page has the "referenced" attribute.
3817 */
3818 /* See <arm/arm32/pmap.h> */
3819
3820 int
3821 pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
3822 {
3823 struct l2_dtable *l2;
3824 struct l2_bucket *l2b;
3825 pd_entry_t *pl1pd, l1pd;
3826 pt_entry_t *ptep, pte;
3827 paddr_t pa;
3828 u_int l1idx;
3829 int rv = 0;
3830
3831 pmap_acquire_pmap_lock(pm);
3832
3833 l1idx = L1_IDX(va);
3834
3835 /*
3836 * If there is no l2_dtable for this address, then the process
3837 * has no business accessing it.
3838 *
3839 * Note: This will catch userland processes trying to access
3840 * kernel addresses.
3841 */
3842 l2 = pm->pm_l2[L2_IDX(l1idx)];
3843 if (l2 == NULL)
3844 goto out;
3845
3846 /*
3847 * Likewise if there is no L2 descriptor table
3848 */
3849 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
3850 if (l2b->l2b_kva == NULL)
3851 goto out;
3852
3853 /*
3854 * Check the PTE itself.
3855 */
3856 ptep = &l2b->l2b_kva[l2pte_index(va)];
3857 pte = *ptep;
3858 if (pte == 0)
3859 goto out;
3860
3861 /*
3862 * Catch a userland access to the vector page mapped at 0x0
3863 */
3864 if (user && (pte & L2_S_PROT_U) == 0)
3865 goto out;
3866
3867 pa = l2pte_pa(pte);
3868
3869 if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) {
3870 /*
3871 * This looks like a good candidate for "page modified"
3872 * emulation...
3873 */
3874 struct pv_entry *pv;
3875 struct vm_page *pg;
3876
3877 /* Extract the physical address of the page */
3878 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3879 goto out;
3880
3881 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3882
3883 /* Get the current flags for this page. */
3884 #ifdef MULTIPROCESSOR
3885 KASSERT(uvm_page_locked_p(pg));
3886 #endif
3887
3888 pv = pmap_find_pv(md, pm, va);
3889 if (pv == NULL) {
3890 goto out;
3891 }
3892
3893 /*
3894 		 * Do the flags say this page is writable? If not then it
3895 		 * is a genuine write fault. If yes then the write fault is
3896 		 * our fault, as we did not reflect the write access in the
3897 		 * PTE. Now that we know a write has occurred, we can correct
3898 		 * this and also set the modified bit.
3899 */
3900 if ((pv->pv_flags & PVF_WRITE) == 0) {
3901 goto out;
3902 }
3903
3904 NPDEBUG(PDB_FOLLOW,
3905 printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
3906 pm, va, pa));
3907
3908 md->pvh_attrs |= PVF_REF | PVF_MOD;
3909 pv->pv_flags |= PVF_REF | PVF_MOD;
3910 #ifdef PMAP_CACHE_VIPT
3911 /*
3912 * If there are cacheable mappings for this page, mark it dirty.
3913 */
3914 if ((md->pvh_attrs & PVF_NC) == 0)
3915 md->pvh_attrs |= PVF_DIRTY;
3916 #endif
3917
3918 /*
3919 * Re-enable write permissions for the page. No need to call
3920 * pmap_vac_me_harder(), since this is just a
3921 * modified-emulation fault, and the PVF_WRITE bit isn't
3922 * changing. We've already set the cacheable bits based on
3923 * the assumption that we can write to this page.
3924 */
3925 *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
3926 PTE_SYNC(ptep);
3927 rv = 1;
3928 } else
3929 if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
3930 /*
3931 * This looks like a good candidate for "page referenced"
3932 * emulation.
3933 */
3934 struct pv_entry *pv;
3935 struct vm_page *pg;
3936
3937 /* Extract the physical address of the page */
3938 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3939 goto out;
3940
3941 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3942
3943 /* Get the current flags for this page. */
3944 #ifdef MULTIPROCESSOR
3945 KASSERT(uvm_page_locked_p(pg));
3946 #endif
3947
3948 pv = pmap_find_pv(md, pm, va);
3949 if (pv == NULL) {
3950 goto out;
3951 }
3952
3953 md->pvh_attrs |= PVF_REF;
3954 pv->pv_flags |= PVF_REF;
3955
3956 NPDEBUG(PDB_FOLLOW,
3957 printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
3958 pm, va, pa));
3959
3960 *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
3961 PTE_SYNC(ptep);
3962 rv = 1;
3963 }
3964
3965 /*
3966 * We know there is a valid mapping here, so simply
3967 * fix up the L1 if necessary.
3968 */
3969 pl1pd = &pm->pm_l1->l1_kva[l1idx];
3970 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
3971 if (*pl1pd != l1pd) {
3972 *pl1pd = l1pd;
3973 PTE_SYNC(pl1pd);
3974 rv = 1;
3975 }
3976
3977 #ifdef CPU_SA110
3978 /*
3979 * There are bugs in the rev K SA110. This is a check for one
3980 * of them.
3981 */
3982 if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
3983 curcpu()->ci_arm_cpurev < 3) {
3984 /* Always current pmap */
3985 if (l2pte_valid(pte)) {
3986 extern int kernel_debug;
3987 if (kernel_debug & 1) {
3988 struct proc *p = curlwp->l_proc;
3989 printf("prefetch_abort: page is already "
3990 "mapped - pte=%p *pte=%08x\n", ptep, pte);
3991 printf("prefetch_abort: pc=%08lx proc=%p "
3992 "process=%s\n", va, p, p->p_comm);
3993 printf("prefetch_abort: far=%08x fs=%x\n",
3994 cpu_faultaddress(), cpu_faultstatus());
3995 }
3996 #ifdef DDB
3997 if (kernel_debug & 2)
3998 Debugger();
3999 #endif
4000 rv = 1;
4001 }
4002 }
4003 #endif /* CPU_SA110 */
4004
4005 #ifdef DEBUG
4006 /*
4007 * If 'rv == 0' at this point, it generally indicates that there is a
4008 * stale TLB entry for the faulting address. This happens when two or
4009 * more processes are sharing an L1. Since we don't flush the TLB on
4010 * a context switch between such processes, we can take domain faults
4011 * for mappings which exist at the same VA in both processes. EVEN IF
4012 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
4013 * example.
4014 *
4015 * This is extremely likely to happen if pmap_enter() updated the L1
4016 * entry for a recently entered mapping. In this case, the TLB is
4017 * flushed for the new mapping, but there may still be TLB entries for
4018 * other mappings belonging to other processes in the 1MB range
4019 * covered by the L1 entry.
4020 *
4021 * Since 'rv == 0', we know that the L1 already contains the correct
4022 * value, so the fault must be due to a stale TLB entry.
4023 *
4024 * Since we always need to flush the TLB anyway in the case where we
4025 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
4026 * stale TLB entries dynamically.
4027 *
4028 * However, the above condition can ONLY happen if the current L1 is
4029 * being shared. If it happens when the L1 is unshared, it indicates
4030 * that other parts of the pmap are not doing their job WRT managing
4031 * the TLB.
4032 */
4033 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
4034 extern int last_fault_code;
4035 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
4036 pm, va, ftype);
4037 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
4038 l2, l2b, ptep, pl1pd);
4039 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
4040 pte, l1pd, last_fault_code);
4041 #ifdef DDB
4042 Debugger();
4043 #endif
4044 }
4045 #endif
4046
4047 cpu_tlb_flushID_SE(va);
4048 cpu_cpwait();
4049
4050 rv = 1;
4051
4052 out:
4053 pmap_release_pmap_lock(pm);
4054
4055 return (rv);
4056 }
4057
4058 /*
4059 * Routine: pmap_procwr
4060 *
4061 * Function:
4062  *	Synchronize caches corresponding to [va, va+len) in process p.
4063 *
4064 */
4065 void
4066 pmap_procwr(struct proc *p, vaddr_t va, int len)
4067 {
4068 /* We only need to do anything if it is the current process. */
4069 if (p == curproc)
4070 cpu_icache_sync_range(va, len);
4071 }
4072
4073 /*
4074 * Routine: pmap_unwire
4075 * Function: Clear the wired attribute for a map/virtual-address pair.
4076 *
4077 * In/out conditions:
4078 * The mapping must already exist in the pmap.
4079 */
4080 void
4081 pmap_unwire(pmap_t pm, vaddr_t va)
4082 {
4083 struct l2_bucket *l2b;
4084 pt_entry_t *ptep, pte;
4085 struct vm_page *pg;
4086 paddr_t pa;
4087
4088 NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
4089
4090 pmap_acquire_pmap_lock(pm);
4091
4092 l2b = pmap_get_l2_bucket(pm, va);
4093 KDASSERT(l2b != NULL);
4094
4095 ptep = &l2b->l2b_kva[l2pte_index(va)];
4096 pte = *ptep;
4097
4098 /* Extract the physical address of the page */
4099 pa = l2pte_pa(pte);
4100
4101 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
4102 /* Update the wired bit in the pv entry for this page. */
4103 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4104
4105 #ifdef MULTIPROCESSOR
4106 KASSERT(uvm_page_locked_p(pg));
4107 #endif
4108 (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
4109 }
4110
4111 pmap_release_pmap_lock(pm);
4112 }
4113
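/*
 * pmap_activate:
 *
 *	Make the pmap of the given lwp the current translation context:
 *	load its L1 table into the TTB, program the DACR for its domain,
 *	and perform whatever cache/TLB maintenance is required.  The
 *	switch is short-circuited when the TTB and DACR would not change.
 */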
4114 void
4115 pmap_activate(struct lwp *l)
4116 {
4117 extern int block_userspace_access;
4118 pmap_t opm, npm, rpm;
4119 uint32_t odacr, ndacr;
4120 int oldirqstate;
4121
4122 /*
4123 * If activating a non-current lwp or the current lwp is
4124 * already active, just return.
4125 */
4126 if (l != curlwp ||
4127 l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true)
4128 return;
4129
4130 npm = l->l_proc->p_vmspace->vm_map.pmap;
4131 ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
4132 (DOMAIN_CLIENT << (npm->pm_domain * 2));
4133
4134 /*
4135 * If TTB and DACR are unchanged, short-circuit all the
4136 * TLB/cache management stuff.
4137 */
4138 if (pmap_previous_active_lwp != NULL) {
4139 opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap;
4140 odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
4141 (DOMAIN_CLIENT << (opm->pm_domain * 2));
4142
4143 if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr)
4144 goto all_done;
4145 } else
4146 opm = NULL;
4147
4148 PMAPCOUNT(activations);
4149 block_userspace_access = 1;
4150
4151 /*
4152 * If switching to a user vmspace which is different to the
4153 * most recent one, and the most recent one is potentially
4154 * live in the cache, we must write-back and invalidate the
4155 * entire cache.
4156 */
4157 rpm = pmap_recent_user;
4158
4159 /*
4160 * XXXSCW: There's a corner case here which can leave turds in the cache as
4161 * reported in kern/41058. They're probably left over during tear-down and
4162 * switching away from an exiting process. Until the root cause is identified
4163 * and fixed, zap the cache when switching pmaps. This will result in a few
4164 * unnecessary cache flushes, but that's better than silently corrupting data.
4165 */
4166 #if 0
4167 if (npm != pmap_kernel() && rpm && npm != rpm &&
4168 rpm->pm_cstate.cs_cache) {
4169 rpm->pm_cstate.cs_cache = 0;
4170 #ifdef PMAP_CACHE_VIVT
4171 cpu_idcache_wbinv_all();
4172 #endif
4173 }
4174 #else
4175 if (rpm) {
4176 rpm->pm_cstate.cs_cache = 0;
4177 if (npm == pmap_kernel())
4178 pmap_recent_user = NULL;
4179 #ifdef PMAP_CACHE_VIVT
4180 cpu_idcache_wbinv_all();
4181 #endif
4182 }
4183 #endif
4184
4185 /* No interrupts while we frob the TTB/DACR */
4186 oldirqstate = disable_interrupts(IF32_bits);
4187
4188 /*
4189 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
4190 * entry corresponding to 'vector_page' in the incoming L1 table
4191 * before switching to it otherwise subsequent interrupts/exceptions
4192 * (including domain faults!) will jump into hyperspace.
4193 */
4194 if (npm->pm_pl1vec != NULL) {
4195 cpu_tlb_flushID_SE((u_int)vector_page);
4196 cpu_cpwait();
4197 *npm->pm_pl1vec = npm->pm_l1vec;
4198 PTE_SYNC(npm->pm_pl1vec);
4199 }
4200
4201 cpu_domains(ndacr);
4202
4203 if (npm == pmap_kernel() || npm == rpm) {
4204 /*
4205 * Switching to a kernel thread, or back to the
4206 * same user vmspace as before... Simply update
4207 * the TTB (no TLB flush required)
4208 */
4209 __asm volatile("mcr p15, 0, %0, c2, c0, 0" ::
4210 "r"(npm->pm_l1->l1_physaddr));
4211 cpu_cpwait();
4212 } else {
4213 /*
4214 * Otherwise, update TTB and flush TLB
4215 */
4216 cpu_context_switch(npm->pm_l1->l1_physaddr);
4217 if (rpm != NULL)
4218 rpm->pm_cstate.cs_tlb = 0;
4219 }
4220
4221 restore_interrupts(oldirqstate);
4222
4223 block_userspace_access = 0;
4224
4225 all_done:
4226 /*
4227 * The new pmap is resident. Make sure it's marked
4228 * as resident in the cache/TLB.
4229 */
4230 npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4231 if (npm != pmap_kernel())
4232 pmap_recent_user = npm;
4233
4234 	/* The old pmap is no longer active */
4235 if (opm != NULL)
4236 opm->pm_activated = false;
4237
4238 /* But the new one is */
4239 npm->pm_activated = true;
4240 }
4241
4242 void
4243 pmap_deactivate(struct lwp *l)
4244 {
4245
4246 /*
4247 * If the process is exiting, make sure pmap_activate() does
4248 * a full MMU context-switch and cache flush, which we might
4249 * otherwise skip. See PR port-arm/38950.
4250 */
4251 if (l->l_proc->p_sflag & PS_WEXIT)
4252 pmap_previous_active_lwp = NULL;
4253
4254 l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false;
4255 }
4256
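/*
 * pmap_update:
 *
 *	Called by the VM system when a batch of pmap operations has
 *	completed.  Finish the deferred TLB flush requested by
 *	pmap_remove_all(), and re-arm the cache/TLB state tracking so
 *	that future pmap operations do not skip flushes.
 */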
4257 void
4258 pmap_update(pmap_t pm)
4259 {
4260
4261 if (pm->pm_remove_all) {
4262 /*
4263 * Finish up the pmap_remove_all() optimisation by flushing
4264 * the TLB.
4265 */
4266 pmap_tlb_flushID(pm);
4267 pm->pm_remove_all = false;
4268 }
4269
4270 if (pmap_is_current(pm)) {
4271 /*
4272 * If we're dealing with a current userland pmap, move its L1
4273 * to the end of the LRU.
4274 */
4275 if (pm != pmap_kernel())
4276 pmap_use_l1(pm);
4277
4278 /*
4279 * We can assume we're done with frobbing the cache/tlb for
4280 * now. Make sure any future pmap ops don't skip cache/tlb
4281 * flushes.
4282 */
4283 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4284 }
4285
4286 PMAPCOUNT(updates);
4287
4288 /*
4289 * make sure TLB/cache operations have completed.
4290 */
4291 cpu_cpwait();
4292 }
4293
4294 void
4295 pmap_remove_all(pmap_t pm)
4296 {
4297
4298 /*
4299 * The vmspace described by this pmap is about to be torn down.
4300 * Until pmap_update() is called, UVM will only make calls
4301 * to pmap_remove(). We can make life much simpler by flushing
4302 * the cache now, and deferring TLB invalidation to pmap_update().
4303 */
4304 #ifdef PMAP_CACHE_VIVT
4305 pmap_idcache_wbinv_all(pm);
4306 #endif
4307 pm->pm_remove_all = true;
4308 }
4309
4310 /*
4311 * Retire the given physical map from service.
4312 * Should only be called if the map contains no valid mappings.
4313 */
4314 void
4315 pmap_destroy(pmap_t pm)
4316 {
4317 u_int count;
4318
4319 if (pm == NULL)
4320 return;
4321
4322 if (pm->pm_remove_all) {
4323 pmap_tlb_flushID(pm);
4324 pm->pm_remove_all = false;
4325 }
4326
4327 /*
4328 * Drop reference count
4329 */
4330 mutex_enter(pm->pm_lock);
4331 count = --pm->pm_obj.uo_refs;
4332 mutex_exit(pm->pm_lock);
4333 if (count > 0) {
4334 if (pmap_is_current(pm)) {
4335 if (pm != pmap_kernel())
4336 pmap_use_l1(pm);
4337 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4338 }
4339 return;
4340 }
4341
4342 /*
4343 * reference count is zero, free pmap resources and then free pmap.
4344 */
4345
4346 if (vector_page < KERNEL_BASE) {
4347 KDASSERT(!pmap_is_current(pm));
4348
4349 /* Remove the vector page mapping */
4350 pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
4351 pmap_update(pm);
4352 }
4353
4354 LIST_REMOVE(pm, pm_list);
4355
4356 pmap_free_l1(pm);
4357
4358 if (pmap_recent_user == pm)
4359 pmap_recent_user = NULL;
4360
4361 uvm_obj_destroy(&pm->pm_obj, false);
4362 mutex_destroy(&pm->pm_obj_lock);
4363 pool_cache_put(&pmap_cache, pm);
4364 }
4365
4366
4367 /*
4368 * void pmap_reference(pmap_t pm)
4369 *
4370 * Add a reference to the specified pmap.
4371 */
4372 void
4373 pmap_reference(pmap_t pm)
4374 {
4375
4376 if (pm == NULL)
4377 return;
4378
4379 pmap_use_l1(pm);
4380
4381 mutex_enter(pm->pm_lock);
4382 pm->pm_obj.uo_refs++;
4383 mutex_exit(pm->pm_lock);
4384 }
4385
4386 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
4387
4388 static struct evcnt pmap_prefer_nochange_ev =
4389 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange");
4390 static struct evcnt pmap_prefer_change_ev =
4391 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change");
4392
4393 EVCNT_ATTACH_STATIC(pmap_prefer_change_ev);
4394 EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev);
4395
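/*
 * pmap_prefer:
 *
 *	Nudge the proposed mapping address '*vap' so that it has the same
 *	cache color as 'hint', i.e. the same offset modulo
 *	(arm_cache_prefer_mask + 1).  This avoids VIPT cache aliasing
 *	between the two addresses.  If 'td' (top-down) is set, the
 *	adjustment is made downwards rather than upwards.
 *
 *	Worked example (hypothetical mask of 0x7fff, i.e. a 32KB way
 *	size with 4KB pages): with hint = 0x00803000 and *vap = 0x00c10000,
 *	diff = (hint - va) & 0x7fff = 0x3000, so *vap becomes 0x00c13000,
 *	which shares hint's color.
 */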
4396 void
4397 pmap_prefer(vaddr_t hint, vaddr_t *vap, int td)
4398 {
4399 vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1);
4400 vaddr_t va = *vap;
4401 vaddr_t diff = (hint - va) & mask;
4402 if (diff == 0) {
4403 pmap_prefer_nochange_ev.ev_count++;
4404 } else {
4405 pmap_prefer_change_ev.ev_count++;
4406 if (__predict_false(td))
4407 va -= mask + 1;
4408 *vap = va + diff;
4409 }
4410 }
4411 #endif /* ARM_MMU_V6 | ARM_MMU_V7 */
4412
4413 /*
4414 * pmap_zero_page()
4415 *
4416 * Zero a given physical page by mapping it at a page hook point.
4417  * In doing the zero page op, the page we zero is mapped cacheable, since
4418  * on StrongARM accesses to non-cached pages are non-burst, making writes
4419  * of _any_ bulk data very slow.
4420 */
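/*
 * On VIPT caches the hook mapping is additionally placed at the page's
 * last known cache color (va_offset below), so the cache lines left
 * behind by the zeroing stay consistent with the color recorded in the
 * page's pvh_attrs.
 */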
4421 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
4422 void
4423 pmap_zero_page_generic(paddr_t phys)
4424 {
4425 #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
4426 struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
4427 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4428 #endif
4429 #ifdef PMAP_CACHE_VIPT
4430 /* Choose the last page color it had, if any */
4431 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
4432 #else
4433 const vsize_t va_offset = 0;
4434 #endif
4435 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
4436
4437 #ifdef DEBUG
4438 if (!SLIST_EMPTY(&md->pvh_list))
4439 panic("pmap_zero_page: page has mappings");
4440 #endif
4441
4442 KDASSERT((phys & PGOFSET) == 0);
4443
4444 /*
4445 * Hook in the page, zero it, and purge the cache for that
4446 * zeroed page. Invalidate the TLB as needed.
4447 */
4448 *ptep = L2_S_PROTO | phys |
4449 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4450 PTE_SYNC(ptep);
4451 cpu_tlb_flushD_SE(cdstp + va_offset);
4452 cpu_cpwait();
4453 bzero_page(cdstp + va_offset);
4454 /*
4455 * Unmap the page.
4456 */
4457 *ptep = 0;
4458 PTE_SYNC(ptep);
4459 cpu_tlb_flushD_SE(cdstp + va_offset);
4460 #ifdef PMAP_CACHE_VIVT
4461 cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
4462 #endif
4463 #ifdef PMAP_CACHE_VIPT
4464 /*
4465 * This page is now cache resident so it now has a page color.
4466 * Any contents have been obliterated so clear the EXEC flag.
4467 */
4468 if (!pmap_is_page_colored_p(md)) {
4469 PMAPCOUNT(vac_color_new);
4470 md->pvh_attrs |= PVF_COLORED;
4471 }
4472 if (PV_IS_EXEC_P(md->pvh_attrs)) {
4473 md->pvh_attrs &= ~PVF_EXEC;
4474 PMAPCOUNT(exec_discarded_zero);
4475 }
4476 md->pvh_attrs |= PVF_DIRTY;
4477 #endif
4478 }
4479 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
4480
4481 #if ARM_MMU_XSCALE == 1
4482 void
4483 pmap_zero_page_xscale(paddr_t phys)
4484 {
4485 #ifdef DEBUG
4486 struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
4487 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4488
4489 if (!SLIST_EMPTY(&md->pvh_list))
4490 panic("pmap_zero_page: page has mappings");
4491 #endif
4492
4493 KDASSERT((phys & PGOFSET) == 0);
4494
4495 /*
4496 * Hook in the page, zero it, and purge the cache for that
4497 * zeroed page. Invalidate the TLB as needed.
4498 */
4499 *cdst_pte = L2_S_PROTO | phys |
4500 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4501 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */
4502 PTE_SYNC(cdst_pte);
4503 cpu_tlb_flushD_SE(cdstp);
4504 cpu_cpwait();
4505 bzero_page(cdstp);
4506 xscale_cache_clean_minidata();
4507 }
4508 #endif /* ARM_MMU_XSCALE == 1 */
4509
4510 /* pmap_pageidlezero()
4511 *
4512 * The same as above, except that we assume that the page is not
4513 * mapped. This means we never have to flush the cache first. Called
4514 * from the idle loop.
4515 */
4516 bool
4517 pmap_pageidlezero(paddr_t phys)
4518 {
4519 unsigned int i;
4520 int *ptr;
4521 bool rv = true;
4522 #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
4523 struct vm_page * const pg = PHYS_TO_VM_PAGE(phys);
4524 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4525 #endif
4526 #ifdef PMAP_CACHE_VIPT
4527 /* Choose the last page color it had, if any */
4528 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
4529 #else
4530 const vsize_t va_offset = 0;
4531 #endif
4532 	pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
4533
4534
4535 #ifdef DEBUG
4536 if (!SLIST_EMPTY(&md->pvh_list))
4537 panic("pmap_pageidlezero: page has mappings");
4538 #endif
4539
4540 KDASSERT((phys & PGOFSET) == 0);
4541
4542 /*
4543 * Hook in the page, zero it, and purge the cache for that
4544 * zeroed page. Invalidate the TLB as needed.
4545 */
4546 *ptep = L2_S_PROTO | phys |
4547 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4548 PTE_SYNC(ptep);
4549 cpu_tlb_flushD_SE(cdstp + va_offset);
4550 cpu_cpwait();
4551
4552 for (i = 0, ptr = (int *)(cdstp + va_offset);
4553 i < (PAGE_SIZE / sizeof(int)); i++) {
4554 if (sched_curcpu_runnable_p() != 0) {
4555 /*
4556 * A process has become ready. Abort now,
4557 * so we don't keep it waiting while we
4558 * do slow memory access to finish this
4559 * page.
4560 */
4561 rv = false;
4562 break;
4563 }
4564 *ptr++ = 0;
4565 }
4566
4567 #ifdef PMAP_CACHE_VIVT
4568 if (rv)
4569 /*
4570 * if we aborted we'll rezero this page again later so don't
4571 * purge it unless we finished it
4572 */
4573 cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
4574 #elif defined(PMAP_CACHE_VIPT)
4575 /*
4576 * This page is now cache resident so it now has a page color.
4577 * Any contents have been obliterated so clear the EXEC flag.
4578 */
4579 if (!pmap_is_page_colored_p(md)) {
4580 PMAPCOUNT(vac_color_new);
4581 md->pvh_attrs |= PVF_COLORED;
4582 }
4583 if (PV_IS_EXEC_P(md->pvh_attrs)) {
4584 md->pvh_attrs &= ~PVF_EXEC;
4585 PMAPCOUNT(exec_discarded_zero);
4586 }
4587 #endif
4588 /*
4589 * Unmap the page.
4590 */
4591 *ptep = 0;
4592 PTE_SYNC(ptep);
4593 cpu_tlb_flushD_SE(cdstp + va_offset);
4594
4595 return (rv);
4596 }
4597
4598 /*
4599 * pmap_copy_page()
4600 *
4601 * Copy one physical page into another, by mapping the pages into
4602 * hook points. The same comment regarding cachability as in
4603 * pmap_zero_page also applies here.
4604 */
4605 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
4606 void
4607 pmap_copy_page_generic(paddr_t src, paddr_t dst)
4608 {
4609 struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src);
4610 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
4611 #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
4612 struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst);
4613 struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg);
4614 #endif
4615 #ifdef PMAP_CACHE_VIPT
4616 const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask;
4617 const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask;
4618 #else
4619 const vsize_t src_va_offset = 0;
4620 const vsize_t dst_va_offset = 0;
4621 #endif
4622 pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT];
4623 pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
4624
4625 #ifdef DEBUG
4626 if (!SLIST_EMPTY(&dst_md->pvh_list))
4627 panic("pmap_copy_page: dst page has mappings");
4628 #endif
4629
4630 #ifdef PMAP_CACHE_VIPT
4631 KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC));
4632 #endif
4633 KDASSERT((src & PGOFSET) == 0);
4634 KDASSERT((dst & PGOFSET) == 0);
4635
4636 /*
4637 * Clean the source page. Hold the source page's lock for
4638 * the duration of the copy so that no other mappings can
4639 * be created while we have a potentially aliased mapping.
4640 */
4641 #ifdef MULTIPROCESSOR
4642 KASSERT(uvm_page_locked_p(src_pg));
4643 #endif
4644 #ifdef PMAP_CACHE_VIVT
4645 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true);
4646 #endif
4647
4648 /*
4649 * Map the pages into the page hook points, copy them, and purge
4650 * the cache for the appropriate page. Invalidate the TLB
4651 * as required.
4652 */
4653 *src_ptep = L2_S_PROTO
4654 | src
4655 #ifdef PMAP_CACHE_VIPT
4656 | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
4657 #endif
4658 #ifdef PMAP_CACHE_VIVT
4659 | pte_l2_s_cache_mode
4660 #endif
4661 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
4662 *dst_ptep = L2_S_PROTO | dst |
4663 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4664 PTE_SYNC(src_ptep);
4665 PTE_SYNC(dst_ptep);
4666 cpu_tlb_flushD_SE(csrcp + src_va_offset);
4667 cpu_tlb_flushD_SE(cdstp + dst_va_offset);
4668 cpu_cpwait();
4669 bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset);
4670 #ifdef PMAP_CACHE_VIVT
4671 	cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE);
4674 	cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE);
4675 #endif
4676 /*
4677 * Unmap the pages.
4678 */
4679 *src_ptep = 0;
4680 *dst_ptep = 0;
4681 PTE_SYNC(src_ptep);
4682 PTE_SYNC(dst_ptep);
4683 cpu_tlb_flushD_SE(csrcp + src_va_offset);
4684 cpu_tlb_flushD_SE(cdstp + dst_va_offset);
4685 #ifdef PMAP_CACHE_VIPT
4686 /*
4687 * Now that the destination page is in the cache, mark it as colored.
4688 * If this was an exec page, discard it.
4689 */
4690 if (!pmap_is_page_colored_p(dst_md)) {
4691 PMAPCOUNT(vac_color_new);
4692 dst_md->pvh_attrs |= PVF_COLORED;
4693 }
4694 if (PV_IS_EXEC_P(dst_md->pvh_attrs)) {
4695 dst_md->pvh_attrs &= ~PVF_EXEC;
4696 PMAPCOUNT(exec_discarded_copy);
4697 }
4698 dst_md->pvh_attrs |= PVF_DIRTY;
4699 #endif
4700 }
4701 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
4702
4703 #if ARM_MMU_XSCALE == 1
4704 void
4705 pmap_copy_page_xscale(paddr_t src, paddr_t dst)
4706 {
4707 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
4708 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
4709 #ifdef DEBUG
4710 struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst));
4711
4712 if (!SLIST_EMPTY(&dst_md->pvh_list))
4713 panic("pmap_copy_page: dst page has mappings");
4714 #endif
4715
4716 KDASSERT((src & PGOFSET) == 0);
4717 KDASSERT((dst & PGOFSET) == 0);
4718
4719 /*
4720 * Clean the source page. Hold the source page's lock for
4721 * the duration of the copy so that no other mappings can
4722 * be created while we have a potentially aliased mapping.
4723 */
4724 #ifdef MULTIPROCESSOR
4725 KASSERT(uvm_page_locked_p(src_pg));
4726 #endif
4727 #ifdef PMAP_CACHE_VIVT
4728 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true);
4729 #endif
4730
4731 /*
4732 * Map the pages into the page hook points, copy them, and purge
4733 * the cache for the appropriate page. Invalidate the TLB
4734 * as required.
4735 */
4736 *csrc_pte = L2_S_PROTO | src |
4737 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4738 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */
4739 PTE_SYNC(csrc_pte);
4740 *cdst_pte = L2_S_PROTO | dst |
4741 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4742 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */
4743 PTE_SYNC(cdst_pte);
4744 cpu_tlb_flushD_SE(csrcp);
4745 cpu_tlb_flushD_SE(cdstp);
4746 cpu_cpwait();
4747 bcopy_page(csrcp, cdstp);
4748 xscale_cache_clean_minidata();
4749 }
4750 #endif /* ARM_MMU_XSCALE == 1 */
4751
4752 /*
4753 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
4754 *
4755 * Return the start and end addresses of the kernel's virtual space.
4756  * These values are set up in pmap_bootstrap and are updated as pages
4757 * are allocated.
4758 */
4759 void
4760 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
4761 {
4762 *start = virtual_avail;
4763 *end = virtual_end;
4764 }
4765
4766 /*
4767 * Helper function for pmap_grow_l2_bucket()
4768 */
4769 static inline int
4770 pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
4771 {
4772 struct l2_bucket *l2b;
4773 pt_entry_t *ptep;
4774 paddr_t pa;
4775
4776 if (uvm.page_init_done == false) {
4777 #ifdef PMAP_STEAL_MEMORY
4778 pv_addr_t pv;
4779 pmap_boot_pagealloc(PAGE_SIZE,
4780 #ifdef PMAP_CACHE_VIPT
4781 arm_cache_prefer_mask,
4782 va & arm_cache_prefer_mask,
4783 #else
4784 0, 0,
4785 #endif
4786 &pv);
4787 pa = pv.pv_pa;
4788 #else
4789 if (uvm_page_physget(&pa) == false)
4790 return (1);
4791 #endif /* PMAP_STEAL_MEMORY */
4792 } else {
4793 struct vm_page *pg;
4794 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
4795 if (pg == NULL)
4796 return (1);
4797 pa = VM_PAGE_TO_PHYS(pg);
4798 #ifdef PMAP_CACHE_VIPT
4799 #ifdef DIAGNOSTIC
4800 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4801 #endif
4802 /*
4803 * This new page must not have any mappings. Enter it via
4804 * pmap_kenter_pa and let that routine do the hard work.
4805 */
4806 KASSERT(SLIST_EMPTY(&md->pvh_list));
4807 pmap_kenter_pa(va, pa,
4808 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
4809 #endif
4810 }
4811
4812 if (pap)
4813 *pap = pa;
4814
4815 PMAPCOUNT(pt_mappings);
4816 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
4817 KDASSERT(l2b != NULL);
4818
4819 ptep = &l2b->l2b_kva[l2pte_index(va)];
4820 *ptep = L2_S_PROTO | pa | cache_mode |
4821 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
4822 PTE_SYNC(ptep);
4823 memset((void *)va, 0, PAGE_SIZE);
4824 return (0);
4825 }
4826
4827 /*
4828 * This is the same as pmap_alloc_l2_bucket(), except that it is only
4829 * used by pmap_growkernel().
4830 */
4831 static inline struct l2_bucket *
4832 pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
4833 {
4834 struct l2_dtable *l2;
4835 struct l2_bucket *l2b;
4836 u_short l1idx;
4837 vaddr_t nva;
4838
4839 l1idx = L1_IDX(va);
4840
4841 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
4842 /*
4843 * No mapping at this address, as there is
4844 * no entry in the L1 table.
4845 * Need to allocate a new l2_dtable.
4846 */
4847 nva = pmap_kernel_l2dtable_kva;
4848 if ((nva & PGOFSET) == 0) {
4849 /*
4850 * Need to allocate a backing page
4851 */
4852 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
4853 return (NULL);
4854 }
4855
4856 l2 = (struct l2_dtable *)nva;
4857 nva += sizeof(struct l2_dtable);
4858
4859 if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
4860 /*
4861 * The new l2_dtable straddles a page boundary.
4862 * Map in another page to cover it.
4863 */
4864 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
4865 return (NULL);
4866 }
4867
4868 pmap_kernel_l2dtable_kva = nva;
4869
4870 /*
4871 * Link it into the parent pmap
4872 */
4873 pm->pm_l2[L2_IDX(l1idx)] = l2;
4874 }
4875
4876 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
4877
4878 /*
4879 * Fetch pointer to the L2 page table associated with the address.
4880 */
4881 if (l2b->l2b_kva == NULL) {
4882 pt_entry_t *ptep;
4883
4884 /*
4885 * No L2 page table has been allocated. Chances are, this
4886 * is because we just allocated the l2_dtable, above.
4887 */
4888 nva = pmap_kernel_l2ptp_kva;
4889 ptep = (pt_entry_t *)nva;
4890 if ((nva & PGOFSET) == 0) {
4891 /*
4892 * Need to allocate a backing page
4893 */
4894 if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
4895 &pmap_kernel_l2ptp_phys))
4896 return (NULL);
4897 PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
4898 }
4899
4900 l2->l2_occupancy++;
4901 l2b->l2b_kva = ptep;
4902 l2b->l2b_l1idx = l1idx;
4903 l2b->l2b_phys = pmap_kernel_l2ptp_phys;
4904
4905 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
4906 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
4907 }
4908
4909 return (l2b);
4910 }
4911
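/*
 * pmap_growkernel:
 *
 *	Grow the kernel virtual address space to cover 'maxkvaddr'.
 *	L2 tables are added one L1 section (1MB) at a time via
 *	pmap_grow_l2_bucket(), and each new L1 entry is copied into
 *	every L1 table in the system so that all pmaps see the new
 *	kernel mappings.
 */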
4912 vaddr_t
4913 pmap_growkernel(vaddr_t maxkvaddr)
4914 {
4915 pmap_t kpm = pmap_kernel();
4916 struct l1_ttable *l1;
4917 struct l2_bucket *l2b;
4918 pd_entry_t *pl1pd;
4919 int s;
4920
4921 if (maxkvaddr <= pmap_curmaxkvaddr)
4922 goto out; /* we are OK */
4923
4924 NPDEBUG(PDB_GROWKERN,
4925 printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
4926 pmap_curmaxkvaddr, maxkvaddr));
4927
4928 KDASSERT(maxkvaddr <= virtual_end);
4929
4930 /*
4931 * whoops! we need to add kernel PTPs
4932 */
4933
4934 s = splhigh(); /* to be safe */
4935 mutex_enter(kpm->pm_lock);
4936
4937 /* Map 1MB at a time */
4938 for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {
4939
4940 l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
4941 KDASSERT(l2b != NULL);
4942
4943 /* Distribute new L1 entry to all other L1s */
4944 SLIST_FOREACH(l1, &l1_list, l1_link) {
4945 pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)];
4946 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
4947 L1_C_PROTO;
4948 PTE_SYNC(pl1pd);
4949 }
4950 }
4951
4952 /*
4953 * flush out the cache, expensive but growkernel will happen so
4954 * rarely
4955 */
4956 cpu_dcache_wbinv_all();
4957 cpu_tlb_flushD();
4958 cpu_cpwait();
4959
4960 mutex_exit(kpm->pm_lock);
4961 splx(s);
4962
4963 out:
4964 return (pmap_curmaxkvaddr);
4965 }
4966
4967 /************************ Utility routines ****************************/
4968
4969 /*
4970 * vector_page_setprot:
4971 *
4972 * Manipulate the protection of the vector page.
4973 */
4974 void
4975 vector_page_setprot(int prot)
4976 {
4977 struct l2_bucket *l2b;
4978 pt_entry_t *ptep;
4979
4980 l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
4981 KDASSERT(l2b != NULL);
4982
4983 ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
4984
4985 *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
4986 PTE_SYNC(ptep);
4987 cpu_tlb_flushD_SE(vector_page);
4988 cpu_cpwait();
4989 }
4990
4991 /*
4992 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
4993 * Returns true if the mapping exists, else false.
4994 *
4995 * NOTE: This function is only used by a couple of arm-specific modules.
4996 * It is not safe to take any pmap locks here, since we could be right
4997 * in the middle of debugging the pmap anyway...
4998 *
4999 * It is possible for this routine to return false even though a valid
5000 * mapping does exist. This is because we don't lock, so the metadata
5001 * state may be inconsistent.
5002 *
5003 * NOTE: We can return a NULL *ptp in the case where the L1 pde is
5004 * a "section" mapping.
5005 */
5006 bool
5007 pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
5008 {
5009 struct l2_dtable *l2;
5010 pd_entry_t *pl1pd, l1pd;
5011 pt_entry_t *ptep;
5012 u_short l1idx;
5013
5014 if (pm->pm_l1 == NULL)
5015 return false;
5016
5017 l1idx = L1_IDX(va);
5018 *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
5019 l1pd = *pl1pd;
5020
5021 if (l1pte_section_p(l1pd)) {
5022 *ptp = NULL;
5023 return true;
5024 }
5025
5026 if (pm->pm_l2 == NULL)
5027 return false;
5028
5029 l2 = pm->pm_l2[L2_IDX(l1idx)];
5030
5031 if (l2 == NULL ||
5032 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
5033 return false;
5034 }
5035
5036 *ptp = &ptep[l2pte_index(va)];
5037 return true;
5038 }
5039
5040 bool
5041 pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
5042 {
5043 u_short l1idx;
5044
5045 if (pm->pm_l1 == NULL)
5046 return false;
5047
5048 l1idx = L1_IDX(va);
5049 *pdp = &pm->pm_l1->l1_kva[l1idx];
5050
5051 return true;
5052 }
5053
5054 /************************ Bootstrapping routines ****************************/
5055
5056 static void
5057 pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
5058 {
5059 int i;
5060
5061 l1->l1_kva = l1pt;
5062 l1->l1_domain_use_count = 0;
5063 l1->l1_domain_first = 0;
5064
5065 for (i = 0; i < PMAP_DOMAINS; i++)
5066 l1->l1_domain_free[i] = i + 1;
5067
5068 /*
5069 * Copy the kernel's L1 entries to each new L1.
5070 */
5071 if (pmap_initialized)
5072 memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
5073
5074 if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
5075 &l1->l1_physaddr) == false)
5076 panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
5077
5078 SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
5079 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
5080 }
5081
5082 /*
5083 * pmap_bootstrap() is called from the board-specific initarm() routine
5084 * once the kernel L1/L2 descriptors tables have been set up.
5085 *
5086 * This is a somewhat convoluted process since pmap bootstrap is, effectively,
5087 * spread over a number of disparate files/functions.
5088 *
5089  * We make use of the following (kernel_l1pt is a global set up by the
5090  * board's initarm(); vstart and vend are passed in as parameters):
5091  *  - kernel_l1pt: the base of the kernel's L1 translation table.
5092 * - vstart
5093 * 1MB-aligned start of managed kernel virtual memory.
5094 * - vend
5095 * 1MB-aligned end of managed kernel virtual memory.
5096 *
5097  * We use kernel_l1pt to build the metadata (struct l1_ttable and
5098 * struct l2_dtable) necessary to track kernel mappings.
5099 */
5100 #define PMAP_STATIC_L2_SIZE 16
5101 void
5102 pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
5103 {
5104 static struct l1_ttable static_l1;
5105 static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
5106 struct l1_ttable *l1 = &static_l1;
5107 struct l2_dtable *l2;
5108 struct l2_bucket *l2b;
5109 pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va;
5110 pmap_t pm = pmap_kernel();
5111 pd_entry_t pde;
5112 pt_entry_t *ptep;
5113 paddr_t pa;
5114 vaddr_t va;
5115 vsize_t size;
5116 int nptes, l1idx, l2idx, l2next = 0;
5117
5118 /*
5119 * Initialise the kernel pmap object
5120 */
5121 pm->pm_l1 = l1;
5122 pm->pm_domain = PMAP_DOMAIN_KERNEL;
5123 pm->pm_activated = true;
5124 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
5125
5126 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
5127 uvm_obj_init(&pm->pm_obj, NULL, false, 1);
5128 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
5129
5130 /*
5131 * Scan the L1 translation table created by initarm() and create
5132 * the required metadata for all valid mappings found in it.
5133 */
5134 for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
5135 pde = l1pt[l1idx];
5136
5137 /*
5138 * We're only interested in Coarse mappings.
5139 * pmap_extract() can deal with section mappings without
5140 * recourse to checking L2 metadata.
5141 */
5142 if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
5143 continue;
5144
5145 /*
5146 * Lookup the KVA of this L2 descriptor table
5147 */
5148 pa = (paddr_t)(pde & L1_C_ADDR_MASK);
5149 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
5150 if (ptep == NULL) {
5151 panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
5152 (u_int)l1idx << L1_S_SHIFT, pa);
5153 }
5154
5155 /*
5156 * Fetch the associated L2 metadata structure.
5157 * Allocate a new one if necessary.
5158 */
5159 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
5160 if (l2next == PMAP_STATIC_L2_SIZE)
5161 panic("pmap_bootstrap: out of static L2s");
5162 pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++];
5163 }
5164
5165 /*
5166 * One more L1 slot tracked...
5167 */
5168 l2->l2_occupancy++;
5169
5170 /*
5171 * Fill in the details of the L2 descriptor in the
5172 * appropriate bucket.
5173 */
5174 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
5175 l2b->l2b_kva = ptep;
5176 l2b->l2b_phys = pa;
5177 l2b->l2b_l1idx = l1idx;
5178
5179 /*
5180 * Establish an initial occupancy count for this descriptor
5181 */
5182 for (l2idx = 0;
5183 l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
5184 l2idx++) {
5185 if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
5186 l2b->l2b_occupancy++;
5187 }
5188 }
5189
5190 /*
5191 * Make sure the descriptor itself has the correct cache mode.
5192 * If not, fix it, but whine about the problem. Port-meisters
5193 * should consider this a clue to fix up their initarm()
5194 * function. :)
5195 */
5196 if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) {
5197 printf("pmap_bootstrap: WARNING! wrong cache mode for "
5198 "L2 pte @ %p\n", ptep);
5199 }
5200 }
5201
5202 /*
5203 * Ensure the primary (kernel) L1 has the correct cache mode for
5204 * a page table. Bitch if it is not correctly set.
5205 */
5206 for (va = (vaddr_t)l1pt;
5207 va < ((vaddr_t)l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
5208 if (pmap_set_pt_cache_mode(l1pt, va))
5209 printf("pmap_bootstrap: WARNING! wrong cache mode for "
5210 "primary L1 @ 0x%lx\n", va);
5211 }
5212
5213 cpu_dcache_wbinv_all();
5214 cpu_tlb_flushID();
5215 cpu_cpwait();
5216
5217 /*
5218 * now we allocate the "special" VAs which are used for tmp mappings
5219 * by the pmap (and other modules). we allocate the VAs by advancing
5220 * virtual_avail (note that there are no pages mapped at these VAs).
5221 *
5222 	 * Managed KVM space starts from wherever initarm() tells us.
5223 */
5224 virtual_avail = vstart;
5225 virtual_end = vend;
5226
5227 #ifdef PMAP_CACHE_VIPT
5228 /*
5229 * If we have a VIPT cache, we need one page/pte per possible alias
5230 * page so we won't violate cache aliasing rules.
5231 */
5232 virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask;
5233 nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1;
5234 #else
5235 nptes = 1;
5236 #endif
5237 pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte);
5238 pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte);
5239 pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
5240 pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte);
5241 pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
5242 pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
5243 (void *)&msgbufaddr, NULL);
5244
5245 /*
5246 * Allocate a range of kernel virtual address space to be used
5247 * for L2 descriptor tables and metadata allocation in
5248 * pmap_growkernel().
5249 */
5250 size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
5251 pmap_alloc_specials(&virtual_avail,
5252 round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
5253 &pmap_kernel_l2ptp_kva, NULL);
5254
5255 size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
5256 pmap_alloc_specials(&virtual_avail,
5257 round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
5258 &pmap_kernel_l2dtable_kva, NULL);
5259
5260 /*
5261 * init the static-global locks and global pmap list.
5262 */
5263 mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM);
5264
5265 /*
5266 * We can now initialise the first L1's metadata.
5267 */
5268 SLIST_INIT(&l1_list);
5269 TAILQ_INIT(&l1_lru_list);
5270 pmap_init_l1(l1, l1pt);
5271
5272 /* Set up vector page L1 details, if necessary */
5273 if (vector_page < KERNEL_BASE) {
5274 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
5275 l2b = pmap_get_l2_bucket(pm, vector_page);
5276 KDASSERT(l2b != NULL);
5277 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
5278 L1_C_DOM(pm->pm_domain);
5279 } else
5280 pm->pm_pl1vec = NULL;
5281
5282 /*
5283 * Initialize the pmap cache
5284 */
5285 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
5286 "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);
5287 LIST_INIT(&pmap_pmaps);
5288 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
5289
5290 /*
5291 * Initialize the pv pool.
5292 */
5293 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
5294 &pmap_bootstrap_pv_allocator, IPL_NONE);
5295
5296 /*
5297 * Initialize the L2 dtable pool and cache.
5298 */
5299 pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0,
5300 0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL);
5301
5302 /*
5303 * Initialise the L2 descriptor table pool and cache
5304 */
5305 pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0,
5306 L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE,
5307 pmap_l2ptp_ctor, NULL, NULL);
5308
5309 cpu_dcache_wbinv_all();
5310 }
5311
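/*
 * pmap_set_pt_cache_mode:
 *
 *	Ensure that the descriptor mapping 'va' in the kernel L1 table
 *	'kl1' uses the page-table cache mode (pte_l1_s_cache_mode_pt or
 *	pte_l2_s_cache_mode_pt).  Returns non-zero if the descriptor had
 *	to be corrected.
 */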
5312 static int
5313 pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va)
5314 {
5315 pd_entry_t *pdep, pde;
5316 pt_entry_t *ptep, pte;
5317 vaddr_t pa;
5318 int rv = 0;
5319
5320 /*
5321 * Make sure the descriptor itself has the correct cache mode
5322 */
5323 pdep = &kl1[L1_IDX(va)];
5324 pde = *pdep;
5325
5326 if (l1pte_section_p(pde)) {
5327 __CTASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0);
5328 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
5329 *pdep = (pde & ~L1_S_CACHE_MASK) |
5330 pte_l1_s_cache_mode_pt;
5331 PTE_SYNC(pdep);
5332 cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep));
5333 rv = 1;
5334 }
5335 } else {
5336 pa = (paddr_t)(pde & L1_C_ADDR_MASK);
5337 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
5338 if (ptep == NULL)
5339 			panic("pmap_set_pt_cache_mode: no L2 table for va 0x%lx", va);
5340
5341 ptep = &ptep[l2pte_index(va)];
5342 pte = *ptep;
5343 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
5344 *ptep = (pte & ~L2_S_CACHE_MASK) |
5345 pte_l2_s_cache_mode_pt;
5346 PTE_SYNC(ptep);
5347 cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep));
5348 rv = 1;
5349 }
5350 }
5351
5352 return (rv);
5353 }
5354
5355 static void
5356 pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
5357 {
5358 vaddr_t va = *availp;
5359 struct l2_bucket *l2b;
5360
5361 if (ptep) {
5362 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
5363 if (l2b == NULL)
5364 panic("pmap_alloc_specials: no l2b for 0x%lx", va);
5365
5367 		*ptep = &l2b->l2b_kva[l2pte_index(va)];
5368 }
5369
5370 *vap = va;
5371 *availp = va + (PAGE_SIZE * pages);
5372 }
5373
5374 void
5375 pmap_init(void)
5376 {
5377
5378 /*
5379 * Set the available memory vars - These do not map to real memory
5380 	 * addresses and cannot, as the physical memory is fragmented.
5381 	 * They are used by ps for %mem calculations.
5382 	 * One could argue whether this should be the entire memory or just
5383 	 * the memory that is usable in a user process.
5384 */
5385 avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
5386 avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
5387
5388 /*
5389 	 * Now we need to have enough free pv_entry structures to allow us to
5390 	 * get the kmem_map/kmem_object allocated and initialised (done after
5391 	 * this function is finished).  To do this we allocate one bootstrap
5392 	 * page out of kernel_map and use it to provide an initial pool of
5393 	 * pv_entry structures.  We never free this page.
5394 */
5395 pool_setlowat(&pmap_pv_pool,
5396 (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
5397
5398 mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE);
5399 zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
5400 UVM_KMF_WIRED|UVM_KMF_ZERO);
5401
5402 pmap_initialized = true;
5403 }
5404
5405 static vaddr_t last_bootstrap_page = 0;
5406 static void *free_bootstrap_pages = NULL;
5407
5408 static void *
5409 pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
5410 {
5411 extern void *pool_page_alloc(struct pool *, int);
5412 vaddr_t new_page;
5413 void *rv;
5414
5415 if (pmap_initialized)
5416 return (pool_page_alloc(pp, flags));
5417
5418 if (free_bootstrap_pages) {
5419 rv = free_bootstrap_pages;
5420 free_bootstrap_pages = *((void **)rv);
5421 return (rv);
5422 }
5423
5424 new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
5425 UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT));
5426
5427 KASSERT(new_page > last_bootstrap_page);
5428 last_bootstrap_page = new_page;
5429 return ((void *)new_page);
5430 }
5431
5432 static void
5433 pmap_bootstrap_pv_page_free(struct pool *pp, void *v)
5434 {
5435 extern void pool_page_free(struct pool *, void *);
5436
5437 if ((vaddr_t)v <= last_bootstrap_page) {
5438 *((void **)v) = free_bootstrap_pages;
5439 free_bootstrap_pages = v;
5440 return;
5441 }
5442
5443 if (pmap_initialized) {
5444 pool_page_free(pp, v);
5445 return;
5446 }
5447 }
5448
5449 /*
5450 * pmap_postinit()
5451 *
5452 * This routine is called after the vm and kmem subsystems have been
5453 * initialised. This allows the pmap code to perform any initialisation
5454  * that can only be done once the memory allocation is in place.
5455 */
5456 void
5457 pmap_postinit(void)
5458 {
5459 extern paddr_t physical_start, physical_end;
5460 struct l2_bucket *l2b;
5461 struct l1_ttable *l1;
5462 struct pglist plist;
5463 struct vm_page *m;
5464 pd_entry_t *pl1pt;
5465 pt_entry_t *ptep, pte;
5466 vaddr_t va, eva;
5467 u_int loop, needed;
5468 int error;
5469
5470 pool_cache_setlowat(&pmap_l2ptp_cache,
5471 (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
5472 pool_cache_setlowat(&pmap_l2dtable_cache,
5473 (PAGE_SIZE / sizeof(struct l2_dtable)) * 2);
5474
5475 needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
5476 needed -= 1;
5477
5478 l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP);
5479
5480 for (loop = 0; loop < needed; loop++, l1++) {
5481 /* Allocate a L1 page table */
5482 va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY);
5483 if (va == 0)
5484 panic("Cannot allocate L1 KVM");
5485
5486 error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
5487 physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1);
5488 if (error)
5489 panic("Cannot allocate L1 physical pages");
5490
5491 m = TAILQ_FIRST(&plist);
5492 eva = va + L1_TABLE_SIZE;
5493 pl1pt = (pd_entry_t *)va;
5494
5495 while (m && va < eva) {
5496 paddr_t pa = VM_PAGE_TO_PHYS(m);
5497
5498 pmap_kenter_pa(va, pa,
5499 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
5500
5501 /*
5502 * Make sure the L1 descriptor table is mapped
5503 * with the cache-mode set to write-through.
5504 */
5505 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
5506 KDASSERT(l2b != NULL);
5507 ptep = &l2b->l2b_kva[l2pte_index(va)];
5508 pte = *ptep;
5509 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
5510 *ptep = pte;
5511 PTE_SYNC(ptep);
5512 cpu_tlb_flushD_SE(va);
5513
5514 va += PAGE_SIZE;
5515 m = TAILQ_NEXT(m, pageq.queue);
5516 }
5517
5518 #ifdef DIAGNOSTIC
5519 if (m)
5520 panic("pmap_alloc_l1pt: pglist not empty");
5521 #endif /* DIAGNOSTIC */
5522
5523 pmap_init_l1(l1, pl1pt);
5524 }
5525
5526 #ifdef DEBUG
5527 printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
5528 needed);
5529 #endif
5530 }
5531
5532 /*
5533 * Note that the following routines are used by board-specific initialisation
5534 * code to configure the initial kernel page tables.
5535 *
5536 * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that
5537 * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the
5538 * behaviour of the old pmap, and provides an easy migration path for
5539 * initial bring-up of the new pmap on existing ports. Fortunately,
5540 * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and
5541 * will be deprecated.
5542 *
5543 * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page
5544 * tables.
5545 */
5546
5547 /*
5548 * This list exists for the benefit of pmap_map_chunk(). It keeps track
5549 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
5550 * find them as necessary.
5551 *
5552 * Note that the data on this list MUST remain valid after initarm() returns,
5553  * as pmap_bootstrap() uses it to construct L2 table metadata.
5554 */
5555 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
5556
5557 static vaddr_t
5558 kernel_pt_lookup(paddr_t pa)
5559 {
5560 pv_addr_t *pv;
5561
5562 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
5563 #ifndef ARM32_NEW_VM_LAYOUT
5564 if (pv->pv_pa == (pa & ~PGOFSET))
5565 return (pv->pv_va | (pa & PGOFSET));
5566 #else
5567 if (pv->pv_pa == pa)
5568 return (pv->pv_va);
5569 #endif
5570 }
5571 return (0);
5572 }
5573
5574 /*
5575 * pmap_map_section:
5576 *
5577 * Create a single section mapping.
5578 */
5579 void
5580 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
5581 {
5582 pd_entry_t *pde = (pd_entry_t *) l1pt;
5583 pd_entry_t fl;
5584
5585 KASSERT(((va | pa) & L1_S_OFFSET) == 0);
5586
5587 switch (cache) {
5588 case PTE_NOCACHE:
5589 default:
5590 fl = 0;
5591 break;
5592
5593 case PTE_CACHE:
5594 fl = pte_l1_s_cache_mode;
5595 break;
5596
5597 case PTE_PAGETABLE:
5598 fl = pte_l1_s_cache_mode_pt;
5599 break;
5600 }
5601
5602 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
5603 L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
5604 PTE_SYNC(&pde[va >> L1_S_SHIFT]);
5605 }
5606
5607 /*
5608 * pmap_map_entry:
5609 *
5610 * Create a single page mapping.
5611 */
5612 void
5613 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
5614 {
5615 pd_entry_t *pde = (pd_entry_t *) l1pt;
5616 pt_entry_t fl;
5617 pt_entry_t *pte;
5618
5619 KASSERT(((va | pa) & PGOFSET) == 0);
5620
5621 switch (cache) {
5622 case PTE_NOCACHE:
5623 default:
5624 fl = 0;
5625 break;
5626
5627 case PTE_CACHE:
5628 fl = pte_l2_s_cache_mode;
5629 break;
5630
5631 case PTE_PAGETABLE:
5632 fl = pte_l2_s_cache_mode_pt;
5633 break;
5634 }
5635
5636 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
5637 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
5638
5639 #ifndef ARM32_NEW_VM_LAYOUT
5640 pte = (pt_entry_t *)
5641 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
5642 #else
5643 pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
5644 #endif
5645 if (pte == NULL)
5646 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
5647
5648 fl |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
5649 #ifndef ARM32_NEW_VM_LAYOUT
5650 pte += (va >> PGSHIFT) & 0x3ff;
5651 #else
5652 pte += l2pte_index(va);
5654 #endif
5655 *pte = fl;
5656 PTE_SYNC(pte);
5657 }
5658
5659 /*
5660 * pmap_link_l2pt:
5661 *
5662 * Link the L2 page table specified by "l2pv" into the L1
5663 * page table at the slot for "va".
5664 */
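/*
 * For example, board initarm() code that has allocated its kernel L2
 * page tables into an array of pv_addr_t structures might, in the
 * !ARM32_NEW_VM_LAYOUT case, link one in for every 4MB of kernel VA
 * (the names used here are illustrative only):
 *
 *	pmap_link_l2pt(l1pt_va, kernel_vm_base + i * 0x00400000,
 *	    &kernel_pt_table[i]);
 *
 * As a side effect the entry is placed on kernel_pt_list, where
 * pmap_map_chunk() and pmap_bootstrap() can later find it.
 */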
5665 void
5666 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
5667 {
5668 pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
5669 u_int slot = va >> L1_S_SHIFT;
5670
5671 #ifndef ARM32_NEW_VM_LAYOUT
5672 KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0);
5673 KASSERT((l2pv->pv_pa & PGOFSET) == 0);
5674 #endif
5675
5676 proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
5677
5678 pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
5679 #ifdef ARM32_NEW_VM_LAYOUT
5680 PTE_SYNC(&pde[slot]);
5681 #else
5682 pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
5683 pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
5684 pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
5685 PTE_SYNC_RANGE(&pde[slot + 0], 4);
5686 #endif
5687
5688 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
5689 }
5690
5691 /*
5692 * pmap_map_chunk:
5693 *
5694 * Map a chunk of memory using the most efficient mappings
5695 * possible (section, large page, small page) into the
5696 * provided L1 and L2 tables at the specified virtual address.
5697 */
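/*
 * For example (hypothetical addresses, size, and L1 table name),
 * bootstrap code might map a 4MB block of RAM cacheably with:
 *
 *	pmap_map_chunk(l1pt_va, 0xc1000000, 0x11000000, 0x00400000,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 *
 * The return value is the size actually mapped, i.e. the requested
 * size rounded up to a whole number of pages.
 */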
5698 vsize_t
5699 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
5700 int prot, int cache)
5701 {
5702 pd_entry_t *pdep = (pd_entry_t *) l1pt;
5703 pt_entry_t *pte, f1, f2s, f2l;
5704 vsize_t resid;
5705 int i;
5706
5707 resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5708
5709 if (l1pt == 0)
5710 panic("pmap_map_chunk: no L1 table provided");
5711
5712 #ifdef VERBOSE_INIT_ARM
5713 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
5714 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
5715 #endif
5716
5717 switch (cache) {
5718 case PTE_NOCACHE:
5719 default:
5720 f1 = 0;
5721 f2l = 0;
5722 f2s = 0;
5723 break;
5724
5725 case PTE_CACHE:
5726 f1 = pte_l1_s_cache_mode;
5727 f2l = pte_l2_l_cache_mode;
5728 f2s = pte_l2_s_cache_mode;
5729 break;
5730
5731 case PTE_PAGETABLE:
5732 f1 = pte_l1_s_cache_mode_pt;
5733 f2l = pte_l2_l_cache_mode_pt;
5734 f2s = pte_l2_s_cache_mode_pt;
5735 break;
5736 }
5737
5738 size = resid;
5739
5740 while (resid > 0) {
5741 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
5742 /* See if we can use a supersection mapping. */
5743 if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) {
5744 			/* Supersections are always domain 0 */
5745 pd_entry_t pde = L1_SS_PROTO | pa |
5746 L1_S_PROT(PTE_KERNEL, prot) | f1;
5747 #ifdef VERBOSE_INIT_ARM
5748 printf("sS");
5749 #endif
5750 for (size_t s = va >> L1_S_SHIFT,
5751 e = s + L1_SS_SIZE / L1_S_SIZE;
5752 s < e;
5753 s++) {
5754 pdep[s] = pde;
5755 PTE_SYNC(&pdep[s]);
5756 }
5757 va += L1_SS_SIZE;
5758 pa += L1_SS_SIZE;
5759 resid -= L1_SS_SIZE;
5760 continue;
5761 }
5762 #endif
5763 /* See if we can use a section mapping. */
5764 if (L1_S_MAPPABLE_P(va, pa, resid)) {
5765 #ifdef VERBOSE_INIT_ARM
5766 printf("S");
5767 #endif
5768 pdep[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
5769 L1_S_PROT(PTE_KERNEL, prot) | f1 |
5770 L1_S_DOM(PMAP_DOMAIN_KERNEL);
5771 PTE_SYNC(&pdep[va >> L1_S_SHIFT]);
5772 va += L1_S_SIZE;
5773 pa += L1_S_SIZE;
5774 resid -= L1_S_SIZE;
5775 continue;
5776 }
5777
5778 /*
5779 * Ok, we're going to use an L2 table. Make sure
5780 * one is actually in the corresponding L1 slot
5781 * for the current VA.
5782 */
5783 if ((pdep[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
5784 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
5785
5786 #ifndef ARM32_NEW_VM_LAYOUT
5787 pte = (pt_entry_t *)
5788 kernel_pt_lookup(pdep[va >> L1_S_SHIFT] & L2_S_FRAME);
5789 #else
5790 pte = (pt_entry_t *) kernel_pt_lookup(
5791 pdep[L1_IDX(va)] & L1_C_ADDR_MASK);
5792 #endif
5793 if (pte == NULL)
5794 panic("pmap_map_chunk: can't find L2 table for VA"
5795 "0x%08lx", va);
5796
5797 /* See if we can use a L2 large page mapping. */
5798 if (L2_L_MAPPABLE_P(va, pa, resid)) {
5799 #ifdef VERBOSE_INIT_ARM
5800 printf("L");
5801 #endif
5802 for (i = 0; i < 16; i++) {
5803 #ifndef ARM32_NEW_VM_LAYOUT
5804 pte[((va >> PGSHIFT) & 0x3f0) + i] =
5805 L2_L_PROTO | pa |
5806 L2_L_PROT(PTE_KERNEL, prot) | f2l;
5807 PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]);
5808 #else
5809 pte[l2pte_index(va) + i] =
5810 L2_L_PROTO | pa |
5811 L2_L_PROT(PTE_KERNEL, prot) | f2l;
5812 PTE_SYNC(&pte[l2pte_index(va) + i]);
5813 #endif
5814 }
5815 va += L2_L_SIZE;
5816 pa += L2_L_SIZE;
5817 resid -= L2_L_SIZE;
5818 continue;
5819 }
5820
5821 /* Use a small page mapping. */
5822 #ifdef VERBOSE_INIT_ARM
5823 printf("P");
5824 #endif
5825 #ifndef ARM32_NEW_VM_LAYOUT
5826 pte[(va >> PGSHIFT) & 0x3ff] =
5827 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
5828 PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
5829 #else
5830 pte[l2pte_index(va)] =
5831 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
5832 PTE_SYNC(&pte[l2pte_index(va)]);
5833 #endif
5834 va += PAGE_SIZE;
5835 pa += PAGE_SIZE;
5836 resid -= PAGE_SIZE;
5837 }
5838 #ifdef VERBOSE_INIT_ARM
5839 printf("\n");
5840 #endif
5841 return (size);
5842 }
5843
5844 /********************** Static device map routines ***************************/
5845
5846 static const struct pmap_devmap *pmap_devmap_table;
5847
5848 /*
5849 * Register the devmap table. This is provided in case early console
5850 * initialization needs to register mappings created by bootstrap code
5851 * before pmap_devmap_bootstrap() is called.
5852 */
5853 void
5854 pmap_devmap_register(const struct pmap_devmap *table)
5855 {
5856
5857 pmap_devmap_table = table;
5858 }
5859
5860 /*
5861 * Map all of the static regions in the devmap table, and remember
5862 * the devmap table so other parts of the kernel can look up entries
5863 * later.
5864 */
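/*
 * A devmap table is an array of struct pmap_devmap terminated by an
 * entry with pd_size == 0.  A board might describe a static device
 * window like this (the addresses are purely illustrative):
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{
 *			.pd_va    = 0xfd000000,
 *			.pd_pa    = 0x48000000,
 *			.pd_size  = 0x00100000,
 *			.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
 *			.pd_cache = PTE_NOCACHE,
 *		},
 *		{ 0 }
 *	};
 *
 *	pmap_devmap_bootstrap(l1pt_va, mydev_devmap);
 */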
5865 void
5866 pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
5867 {
5868 int i;
5869
5870 pmap_devmap_table = table;
5871
5872 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
5873 #ifdef VERBOSE_INIT_ARM
5874 printf("devmap: %08lx -> %08lx @ %08lx\n",
5875 pmap_devmap_table[i].pd_pa,
5876 pmap_devmap_table[i].pd_pa +
5877 pmap_devmap_table[i].pd_size - 1,
5878 pmap_devmap_table[i].pd_va);
5879 #endif
5880 pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
5881 pmap_devmap_table[i].pd_pa,
5882 pmap_devmap_table[i].pd_size,
5883 pmap_devmap_table[i].pd_prot,
5884 pmap_devmap_table[i].pd_cache);
5885 }
5886 }
5887
5888 const struct pmap_devmap *
5889 pmap_devmap_find_pa(paddr_t pa, psize_t size)
5890 {
5891 uint64_t endpa;
5892 int i;
5893
5894 if (pmap_devmap_table == NULL)
5895 return (NULL);
5896
5897 endpa = (uint64_t)pa + (uint64_t)(size - 1);
5898
5899 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
5900 if (pa >= pmap_devmap_table[i].pd_pa &&
5901 endpa <= (uint64_t)pmap_devmap_table[i].pd_pa +
5902 (uint64_t)(pmap_devmap_table[i].pd_size - 1))
5903 return (&pmap_devmap_table[i]);
5904 }
5905
5906 return (NULL);
5907 }
5908
5909 const struct pmap_devmap *
5910 pmap_devmap_find_va(vaddr_t va, vsize_t size)
5911 {
5912 int i;
5913
5914 if (pmap_devmap_table == NULL)
5915 return (NULL);
5916
5917 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
5918 if (va >= pmap_devmap_table[i].pd_va &&
5919 va + size - 1 <= pmap_devmap_table[i].pd_va +
5920 pmap_devmap_table[i].pd_size - 1)
5921 return (&pmap_devmap_table[i]);
5922 }
5923
5924 return (NULL);
5925 }
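
/*
 * These lookups let later kernel code reuse a mapping made at
 * bootstrap time instead of creating a new one.  A minimal sketch of
 * a caller (hypothetical, not from this file): before mapping a
 * device register range, see whether it already lies inside a devmap
 * entry and, if so, derive the virtual address from the entry;
 * otherwise fall back to mapping it normally.
 *
 *	const struct pmap_devmap *pd;
 *
 *	if ((pd = pmap_devmap_find_pa(pa, size)) != NULL)
 *		va = pd->pd_va + (pa - pd->pd_pa);
 */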
5926
5927 /********************** PTE initialization routines **************************/
5928
5929 /*
5930 * These routines are called when the CPU type is identified to set up
5931 * the PTE prototypes, cache modes, etc.
5932 *
5933 * The variables are always here, just in case modules need to reference
5934 * them (though, they shouldn't).
5935 */
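
/*
 * When more than one MMU class is configured, the L1_S_PROT(),
 * L2_S_PROT(), *_PROTO and *_CACHE_MASK macros used throughout this
 * file generally resolve to these variables, so a kernel small-page
 * mapping ends up being assembled roughly as below (a sketch with
 * illustrative locals, not a literal excerpt):
 *
 *	npte = pte_l2_s_proto | pa
 *	    | (write ? pte_l2_s_prot_w : pte_l2_s_prot_ro)
 *	    | pte_l2_s_cache_mode;
 */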
5936
5937 pt_entry_t pte_l1_s_cache_mode;
5938 pt_entry_t pte_l1_s_wc_mode;
5939 pt_entry_t pte_l1_s_cache_mode_pt;
5940 pt_entry_t pte_l1_s_cache_mask;
5941
5942 pt_entry_t pte_l2_l_cache_mode;
5943 pt_entry_t pte_l2_l_wc_mode;
5944 pt_entry_t pte_l2_l_cache_mode_pt;
5945 pt_entry_t pte_l2_l_cache_mask;
5946
5947 pt_entry_t pte_l2_s_cache_mode;
5948 pt_entry_t pte_l2_s_wc_mode;
5949 pt_entry_t pte_l2_s_cache_mode_pt;
5950 pt_entry_t pte_l2_s_cache_mask;
5951
5952 pt_entry_t pte_l1_s_prot_u;
5953 pt_entry_t pte_l1_s_prot_w;
5954 pt_entry_t pte_l1_s_prot_ro;
5955 pt_entry_t pte_l1_s_prot_mask;
5956
5957 pt_entry_t pte_l2_s_prot_u;
5958 pt_entry_t pte_l2_s_prot_w;
5959 pt_entry_t pte_l2_s_prot_ro;
5960 pt_entry_t pte_l2_s_prot_mask;
5961
5962 pt_entry_t pte_l2_l_prot_u;
5963 pt_entry_t pte_l2_l_prot_w;
5964 pt_entry_t pte_l2_l_prot_ro;
5965 pt_entry_t pte_l2_l_prot_mask;
5966
5967 pt_entry_t pte_l1_ss_proto;
5968 pt_entry_t pte_l1_s_proto;
5969 pt_entry_t pte_l1_c_proto;
5970 pt_entry_t pte_l2_s_proto;
5971
5972 void (*pmap_copy_page_func)(paddr_t, paddr_t);
5973 void (*pmap_zero_page_func)(paddr_t);
5974
5975 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
5976 void
5977 pmap_pte_init_generic(void)
5978 {
5979
5980 pte_l1_s_cache_mode = L1_S_B|L1_S_C;
5981 pte_l1_s_wc_mode = L1_S_B;
5982 pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
5983
5984 pte_l2_l_cache_mode = L2_B|L2_C;
5985 pte_l2_l_wc_mode = L2_B;
5986 pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
5987
5988 pte_l2_s_cache_mode = L2_B|L2_C;
5989 pte_l2_s_wc_mode = L2_B;
5990 pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
5991
5992 /*
5993 * If we have a write-through cache, set B and C. If
5994 * we have a write-back cache, then we assume setting
5995 * only C will make those pages write-through (except for those
 5996 	 * Cortex CPUs whose table walks can read from the L1 caches).
5997 */
5998 if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop
5999 #if ARM_MMU_V7 > 0
6000 || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)
6001 #endif
6002 #if ARM_MMU_V6 > 0
 6003 	    || CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid) /* ARM1136 erratum 399234 */
6004 #endif
6005 || false) {
6006 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
6007 pte_l2_l_cache_mode_pt = L2_B|L2_C;
6008 pte_l2_s_cache_mode_pt = L2_B|L2_C;
6009 } else {
6010 pte_l1_s_cache_mode_pt = L1_S_C; /* write through */
6011 pte_l2_l_cache_mode_pt = L2_C; /* write through */
6012 pte_l2_s_cache_mode_pt = L2_C; /* write through */
6013 }
6014
6015 pte_l1_s_prot_u = L1_S_PROT_U_generic;
6016 pte_l1_s_prot_w = L1_S_PROT_W_generic;
6017 pte_l1_s_prot_ro = L1_S_PROT_RO_generic;
6018 pte_l1_s_prot_mask = L1_S_PROT_MASK_generic;
6019
6020 pte_l2_s_prot_u = L2_S_PROT_U_generic;
6021 pte_l2_s_prot_w = L2_S_PROT_W_generic;
6022 pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
6023 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
6024
6025 pte_l2_l_prot_u = L2_L_PROT_U_generic;
6026 pte_l2_l_prot_w = L2_L_PROT_W_generic;
6027 pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
6028 pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
6029
6030 pte_l1_ss_proto = L1_SS_PROTO_generic;
6031 pte_l1_s_proto = L1_S_PROTO_generic;
6032 pte_l1_c_proto = L1_C_PROTO_generic;
6033 pte_l2_s_proto = L2_S_PROTO_generic;
6034
6035 pmap_copy_page_func = pmap_copy_page_generic;
6036 pmap_zero_page_func = pmap_zero_page_generic;
6037 }
6038
6039 #if defined(CPU_ARM8)
6040 void
6041 pmap_pte_init_arm8(void)
6042 {
6043
6044 /*
6045 * ARM8 is compatible with generic, but we need to use
6046 * the page tables uncached.
6047 */
6048 pmap_pte_init_generic();
6049
6050 pte_l1_s_cache_mode_pt = 0;
6051 pte_l2_l_cache_mode_pt = 0;
6052 pte_l2_s_cache_mode_pt = 0;
6053 }
6054 #endif /* CPU_ARM8 */
6055
6056 #if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
6057 void
6058 pmap_pte_init_arm9(void)
6059 {
6060
6061 /*
6062 * ARM9 is compatible with generic, but we want to use
6063 * write-through caching for now.
6064 */
6065 pmap_pte_init_generic();
6066
6067 pte_l1_s_cache_mode = L1_S_C;
6068 pte_l2_l_cache_mode = L2_C;
6069 pte_l2_s_cache_mode = L2_C;
6070
6071 pte_l1_s_wc_mode = L1_S_B;
6072 pte_l2_l_wc_mode = L2_B;
6073 pte_l2_s_wc_mode = L2_B;
6074
6075 pte_l1_s_cache_mode_pt = L1_S_C;
6076 pte_l2_l_cache_mode_pt = L2_C;
6077 pte_l2_s_cache_mode_pt = L2_C;
6078 }
6079 #endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */
 6080 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
6081
6082 #if defined(CPU_ARM10)
6083 void
6084 pmap_pte_init_arm10(void)
6085 {
6086
6087 /*
6088 * ARM10 is compatible with generic, but we want to use
6089 * write-through caching for now.
6090 */
6091 pmap_pte_init_generic();
6092
6093 pte_l1_s_cache_mode = L1_S_B | L1_S_C;
6094 pte_l2_l_cache_mode = L2_B | L2_C;
6095 pte_l2_s_cache_mode = L2_B | L2_C;
6096
 6097 	pte_l1_s_wc_mode = L1_S_B;
 6098 	pte_l2_l_wc_mode = L2_B;
 6099 	pte_l2_s_wc_mode = L2_B;
6100
6101 pte_l1_s_cache_mode_pt = L1_S_C;
6102 pte_l2_l_cache_mode_pt = L2_C;
6103 pte_l2_s_cache_mode_pt = L2_C;
6104
6105 }
6106 #endif /* CPU_ARM10 */
6107
6108 #if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH)
6109 void
6110 pmap_pte_init_arm11(void)
6111 {
6112
6113 /*
6114 * ARM11 is compatible with generic, but we want to use
6115 * write-through caching for now.
6116 */
6117 pmap_pte_init_generic();
6118
6119 pte_l1_s_cache_mode = L1_S_C;
6120 pte_l2_l_cache_mode = L2_C;
6121 pte_l2_s_cache_mode = L2_C;
6122
6123 pte_l1_s_wc_mode = L1_S_B;
6124 pte_l2_l_wc_mode = L2_B;
6125 pte_l2_s_wc_mode = L2_B;
6126
6127 pte_l1_s_cache_mode_pt = L1_S_C;
6128 pte_l2_l_cache_mode_pt = L2_C;
6129 pte_l2_s_cache_mode_pt = L2_C;
6130 }
6131 #endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */
6132
6133 #if ARM_MMU_SA1 == 1
6134 void
6135 pmap_pte_init_sa1(void)
6136 {
6137
6138 /*
6139 * The StrongARM SA-1 cache does not have a write-through
6140 * mode. So, do the generic initialization, then reset
6141 * the page table cache mode to B=1,C=1, and note that
6142 * the PTEs need to be sync'd.
6143 */
6144 pmap_pte_init_generic();
6145
6146 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
6147 pte_l2_l_cache_mode_pt = L2_B|L2_C;
6148 pte_l2_s_cache_mode_pt = L2_B|L2_C;
6149
6150 pmap_needs_pte_sync = 1;
6151 }
6152 #endif /* ARM_MMU_SA1 == 1*/
6153
6154 #if ARM_MMU_XSCALE == 1
6155 #if (ARM_NMMUS > 1)
6156 static u_int xscale_use_minidata;
6157 #endif
6158
6159 void
6160 pmap_pte_init_xscale(void)
6161 {
6162 uint32_t auxctl;
6163 int write_through = 0;
6164
6165 pte_l1_s_cache_mode = L1_S_B|L1_S_C;
6166 pte_l1_s_wc_mode = L1_S_B;
6167 pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
6168
6169 pte_l2_l_cache_mode = L2_B|L2_C;
6170 pte_l2_l_wc_mode = L2_B;
6171 pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
6172
6173 pte_l2_s_cache_mode = L2_B|L2_C;
6174 pte_l2_s_wc_mode = L2_B;
6175 pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
6176
6177 pte_l1_s_cache_mode_pt = L1_S_C;
6178 pte_l2_l_cache_mode_pt = L2_C;
6179 pte_l2_s_cache_mode_pt = L2_C;
6180
6181 #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
6182 /*
6183 * The XScale core has an enhanced mode where writes that
6184 * miss the cache cause a cache line to be allocated. This
6185 * is significantly faster than the traditional, write-through
 6186 	 * behavior in this case.
6187 */
6188 pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X);
6189 pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X);
6190 pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X);
6191 #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
6192
6193 #ifdef XSCALE_CACHE_WRITE_THROUGH
6194 /*
6195 * Some versions of the XScale core have various bugs in
6196 * their cache units, the work-around for which is to run
6197 * the cache in write-through mode. Unfortunately, this
6198 * has a major (negative) impact on performance. So, we
6199 * go ahead and run fast-and-loose, in the hopes that we
6200 * don't line up the planets in a way that will trip the
6201 * bugs.
6202 *
6203 * However, we give you the option to be slow-but-correct.
6204 */
6205 write_through = 1;
6206 #elif defined(XSCALE_CACHE_WRITE_BACK)
6207 /* force write back cache mode */
6208 write_through = 0;
6209 #elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
6210 /*
6211 * Intel PXA2[15]0 processors are known to have a bug in
6212 * write-back cache on revision 4 and earlier (stepping
6213 * A[01] and B[012]). Fixed for C0 and later.
6214 */
6215 {
6216 uint32_t id, type;
6217
6218 id = cpufunc_id();
6219 type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
6220
6221 if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
6222 if ((id & CPU_ID_REVISION_MASK) < 5) {
6223 /* write through for stepping A0-1 and B0-2 */
6224 write_through = 1;
6225 }
6226 }
6227 }
6228 #endif /* XSCALE_CACHE_WRITE_THROUGH */
6229
6230 if (write_through) {
6231 pte_l1_s_cache_mode = L1_S_C;
6232 pte_l2_l_cache_mode = L2_C;
6233 pte_l2_s_cache_mode = L2_C;
6234 }
6235
6236 #if (ARM_NMMUS > 1)
6237 xscale_use_minidata = 1;
6238 #endif
6239
6240 pte_l1_s_prot_u = L1_S_PROT_U_xscale;
6241 pte_l1_s_prot_w = L1_S_PROT_W_xscale;
6242 pte_l1_s_prot_ro = L1_S_PROT_RO_xscale;
6243 pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale;
6244
6245 pte_l2_s_prot_u = L2_S_PROT_U_xscale;
6246 pte_l2_s_prot_w = L2_S_PROT_W_xscale;
6247 pte_l2_s_prot_ro = L2_S_PROT_RO_xscale;
6248 pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
6249
6250 pte_l2_l_prot_u = L2_L_PROT_U_xscale;
6251 pte_l2_l_prot_w = L2_L_PROT_W_xscale;
6252 pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
6253 pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;
6254
6255 pte_l1_ss_proto = L1_SS_PROTO_xscale;
6256 pte_l1_s_proto = L1_S_PROTO_xscale;
6257 pte_l1_c_proto = L1_C_PROTO_xscale;
6258 pte_l2_s_proto = L2_S_PROTO_xscale;
6259
6260 pmap_copy_page_func = pmap_copy_page_xscale;
6261 pmap_zero_page_func = pmap_zero_page_xscale;
6262
6263 /*
6264 * Disable ECC protection of page table access, for now.
6265 */
6266 __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
6267 auxctl &= ~XSCALE_AUXCTL_P;
6268 __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
6269 }
6270
6271 /*
6272 * xscale_setup_minidata:
6273 *
6274 * Set up the mini-data cache clean area. We require the
6275 * caller to allocate the right amount of physically and
6276 * virtually contiguous space.
6277 */
6278 void
6279 xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
6280 {
6281 extern vaddr_t xscale_minidata_clean_addr;
6282 extern vsize_t xscale_minidata_clean_size; /* already initialized */
6283 pd_entry_t *pde = (pd_entry_t *) l1pt;
6284 pt_entry_t *pte;
6285 vsize_t size;
6286 uint32_t auxctl;
6287
6288 xscale_minidata_clean_addr = va;
6289
6290 /* Round it to page size. */
6291 size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
6292
6293 for (; size != 0;
6294 va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
6295 #ifndef ARM32_NEW_VM_LAYOUT
6296 pte = (pt_entry_t *)
6297 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
6298 #else
6299 pte = (pt_entry_t *) kernel_pt_lookup(
6300 pde[L1_IDX(va)] & L1_C_ADDR_MASK);
6301 #endif
6302 if (pte == NULL)
6303 panic("xscale_setup_minidata: can't find L2 table for "
6304 "VA 0x%08lx", va);
6305 #ifndef ARM32_NEW_VM_LAYOUT
6306 pte[(va >> PGSHIFT) & 0x3ff] =
6307 #else
6308 pte[l2pte_index(va)] =
6309 #endif
6310 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
6311 L2_C | L2_XS_T_TEX(TEX_XSCALE_X);
6312 }
6313
6314 /*
6315 * Configure the mini-data cache for write-back with
6316 * read/write-allocate.
6317 *
6318 * NOTE: In order to reconfigure the mini-data cache, we must
6319 * make sure it contains no valid data! In order to do that,
6320 * we must issue a global data cache invalidate command!
6321 *
6322 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
6323 * THIS IS VERY IMPORTANT!
6324 */
6325
6326 /* Invalidate data and mini-data. */
6327 __asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
6328 __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
6329 auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
6330 __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
6331 }
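
/*
 * A sketch of the expected caller (hypothetical names, not from this
 * file): a board's initarm() carves out a physically and virtually
 * contiguous chunk for the clean area (one page is typically enough
 * for the 2KB mini-data cache) and passes it in together with the
 * bootstrap L1 table:
 *
 *	valloc_pages(minidataclean, 1);
 *	...
 *	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
 *	    minidataclean.pv_pa);
 */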
6332
6333 /*
6334 * Change the PTEs for the specified kernel mappings such that they
6335 * will use the mini data cache instead of the main data cache.
6336 */
6337 void
6338 pmap_uarea(vaddr_t va)
6339 {
6340 struct l2_bucket *l2b;
6341 pt_entry_t *ptep, *sptep, pte;
6342 vaddr_t next_bucket, eva;
6343
6344 #if (ARM_NMMUS > 1)
6345 if (xscale_use_minidata == 0)
6346 return;
6347 #endif
6348
6349 eva = va + USPACE;
6350
6351 while (va < eva) {
6352 next_bucket = L2_NEXT_BUCKET(va);
6353 if (next_bucket > eva)
6354 next_bucket = eva;
6355
6356 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
6357 KDASSERT(l2b != NULL);
6358
6359 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
6360
6361 while (va < next_bucket) {
6362 pte = *ptep;
6363 if (!l2pte_minidata(pte)) {
6364 cpu_dcache_wbinv_range(va, PAGE_SIZE);
6365 cpu_tlb_flushD_SE(va);
6366 *ptep = pte & ~L2_B;
6367 }
6368 ptep++;
6369 va += PAGE_SIZE;
6370 }
6371 PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
6372 }
6373 cpu_cpwait();
6374 }
6375 #endif /* ARM_MMU_XSCALE == 1 */
6376
6377
6378 #if defined(CPU_ARM11MPCORE)
6379
6380 void
6381 pmap_pte_init_arm11mpcore(void)
6382 {
6383
6384 /* cache mode is controlled by 5 bits (B, C, TEX) */
6385 pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6;
6386 pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6;
6387 #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
6388 /* use extended small page (without APn, with TEX) */
6389 pte_l2_s_cache_mask = L2_XS_CACHE_MASK_armv6;
6390 #else
6391 pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6c;
6392 #endif
6393
6394 /* write-back, write-allocate */
6395 pte_l1_s_cache_mode = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
6396 pte_l2_l_cache_mode = L2_C | L2_B | L2_V6_L_TEX(0x01);
6397 #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
6398 pte_l2_s_cache_mode = L2_C | L2_B | L2_V6_XS_TEX(0x01);
6399 #else
6400 /* no TEX. read-allocate */
6401 pte_l2_s_cache_mode = L2_C | L2_B;
6402 #endif
6403 /*
6404 * write-back, write-allocate for page tables.
6405 */
6406 pte_l1_s_cache_mode_pt = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
6407 pte_l2_l_cache_mode_pt = L2_C | L2_B | L2_V6_L_TEX(0x01);
6408 #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
6409 pte_l2_s_cache_mode_pt = L2_C | L2_B | L2_V6_XS_TEX(0x01);
6410 #else
6411 pte_l2_s_cache_mode_pt = L2_C | L2_B;
6412 #endif
6413
6414 pte_l1_s_prot_u = L1_S_PROT_U_armv6;
6415 pte_l1_s_prot_w = L1_S_PROT_W_armv6;
6416 pte_l1_s_prot_ro = L1_S_PROT_RO_armv6;
6417 pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6;
6418
6419 #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
6420 pte_l2_s_prot_u = L2_S_PROT_U_armv6n;
6421 pte_l2_s_prot_w = L2_S_PROT_W_armv6n;
6422 pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n;
6423 pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n;
6424
6425 #else
6426 /* with AP[0..3] */
6427 pte_l2_s_prot_u = L2_S_PROT_U_generic;
6428 pte_l2_s_prot_w = L2_S_PROT_W_generic;
6429 pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
6430 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
6431 #endif
6432
6433 #ifdef ARM11MPCORE_COMPAT_MMU
6434 /* with AP[0..3] */
6435 pte_l2_l_prot_u = L2_L_PROT_U_generic;
6436 pte_l2_l_prot_w = L2_L_PROT_W_generic;
6437 pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
6438 pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
6439
6440 pte_l1_ss_proto = L1_SS_PROTO_armv6;
6441 pte_l1_s_proto = L1_S_PROTO_armv6;
6442 pte_l1_c_proto = L1_C_PROTO_armv6;
6443 pte_l2_s_proto = L2_S_PROTO_armv6c;
6444 #else
6445 pte_l2_l_prot_u = L2_L_PROT_U_armv6n;
6446 pte_l2_l_prot_w = L2_L_PROT_W_armv6n;
6447 pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
6448 pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;
6449
6450 pte_l1_ss_proto = L1_SS_PROTO_armv6;
6451 pte_l1_s_proto = L1_S_PROTO_armv6;
6452 pte_l1_c_proto = L1_C_PROTO_armv6;
6453 pte_l2_s_proto = L2_S_PROTO_armv6n;
6454 #endif
6455
6456 pmap_copy_page_func = pmap_copy_page_generic;
6457 pmap_zero_page_func = pmap_zero_page_generic;
6458 pmap_needs_pte_sync = 1;
6459 }
6460 #endif /* CPU_ARM11MPCORE */
6461
6462
6463 #if ARM_MMU_V7 == 1
6464 void
6465 pmap_pte_init_armv7(void)
6466 {
6467 /*
6468 * The ARMv7-A MMU is mostly compatible with generic. If the
6469 * AP field is zero, that now means "no access" rather than
6470 * read-only. The prototypes are a little different because of
6471 * the XN bit.
6472 */
6473 pmap_pte_init_generic();
6474
6475 pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv7;
6476 pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv7;
6477 pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv7;
6478
6479 pte_l1_s_prot_u = L1_S_PROT_U_armv7;
6480 pte_l1_s_prot_w = L1_S_PROT_W_armv7;
6481 pte_l1_s_prot_ro = L1_S_PROT_RO_armv7;
6482 pte_l1_s_prot_mask = L1_S_PROT_MASK_armv7;
6483
6484 pte_l2_s_prot_u = L2_S_PROT_U_armv7;
6485 pte_l2_s_prot_w = L2_S_PROT_W_armv7;
6486 pte_l2_s_prot_ro = L2_S_PROT_RO_armv7;
6487 pte_l2_s_prot_mask = L2_S_PROT_MASK_armv7;
6488
6489 pte_l2_l_prot_u = L2_L_PROT_U_armv7;
6490 pte_l2_l_prot_w = L2_L_PROT_W_armv7;
6491 pte_l2_l_prot_ro = L2_L_PROT_RO_armv7;
6492 pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7;
6493
6494 pte_l1_ss_proto = L1_SS_PROTO_armv7;
6495 pte_l1_s_proto = L1_S_PROTO_armv7;
6496 pte_l1_c_proto = L1_C_PROTO_armv7;
6497 pte_l2_s_proto = L2_S_PROTO_armv7;
6498 }
6499 #endif /* ARM_MMU_V7 */
6500
6501 /*
6502 * return the PA of the current L1 table, for use when handling a crash dump
6503 */
6504 uint32_t pmap_kernel_L1_addr(void)
6505 {
6506 return pmap_kernel()->pm_l1->l1_physaddr;
6507 }
6508
6509 #if defined(DDB)
6510 /*
6511 * A couple of ddb-callable functions for dumping pmaps
6512 */
6513 void pmap_dump_all(void);
6514 void pmap_dump(pmap_t);
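
/*
 * From the ddb prompt these can be called directly; the pmap address
 * in the second form is whatever a previous dump printed (shown here
 * purely as an illustration):
 *
 *	db> call pmap_dump_all()
 *	db> call pmap_dump(0xc1234560)
 */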
6515
6516 void
6517 pmap_dump_all(void)
6518 {
6519 pmap_t pm;
6520
6521 LIST_FOREACH(pm, &pmap_pmaps, pm_list) {
6522 if (pm == pmap_kernel())
6523 continue;
6524 pmap_dump(pm);
6525 printf("\n");
6526 }
6527 }
6528
6529 static pt_entry_t ncptes[64];
6530 static void pmap_dump_ncpg(pmap_t);
6531
6532 void
6533 pmap_dump(pmap_t pm)
6534 {
6535 struct l2_dtable *l2;
6536 struct l2_bucket *l2b;
6537 pt_entry_t *ptep, pte;
6538 vaddr_t l2_va, l2b_va, va;
6539 int i, j, k, occ, rows = 0;
6540
6541 if (pm == pmap_kernel())
6542 printf("pmap_kernel (%p): ", pm);
6543 else
6544 printf("user pmap (%p): ", pm);
6545
6546 printf("domain %d, l1 at %p\n", pm->pm_domain, pm->pm_l1->l1_kva);
6547
6548 l2_va = 0;
6549 for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) {
6550 l2 = pm->pm_l2[i];
6551
6552 if (l2 == NULL || l2->l2_occupancy == 0)
6553 continue;
6554
6555 l2b_va = l2_va;
6556 for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) {
6557 l2b = &l2->l2_bucket[j];
6558
6559 if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL)
6560 continue;
6561
6562 ptep = l2b->l2b_kva;
6563
6564 for (k = 0; k < 256 && ptep[k] == 0; k++)
6565 ;
6566
6567 k &= ~63;
6568 occ = l2b->l2b_occupancy;
6569 va = l2b_va + (k * 4096);
6570 for (; k < 256; k++, va += 0x1000) {
6571 char ch = ' ';
6572 if ((k % 64) == 0) {
6573 if ((rows % 8) == 0) {
6574 printf(
6575 " |0000 |8000 |10000 |18000 |20000 |28000 |30000 |38000\n");
6576 }
6577 printf("%08lx: ", va);
6578 }
6579
6580 ncptes[k & 63] = 0;
6581 pte = ptep[k];
6582 if (pte == 0) {
6583 ch = '.';
6584 } else {
6585 occ--;
6586 switch (pte & 0x0c) {
6587 case 0x00:
6588 ch = 'D'; /* No cache No buff */
6589 break;
6590 case 0x04:
6591 ch = 'B'; /* No cache buff */
6592 break;
6593 case 0x08:
6594 if (pte & 0x40)
6595 ch = 'm';
6596 else
6597 ch = 'C'; /* Cache No buff */
6598 break;
6599 case 0x0c:
6600 ch = 'F'; /* Cache Buff */
6601 break;
6602 }
6603
6604 if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
6605 ch += 0x20;
6606
6607 if ((pte & 0xc) == 0)
6608 ncptes[k & 63] = pte;
6609 }
6610
6611 if ((k % 64) == 63) {
6612 rows++;
6613 printf("%c\n", ch);
6614 pmap_dump_ncpg(pm);
6615 if (occ == 0)
6616 break;
6617 } else
6618 printf("%c", ch);
6619 }
6620 }
6621 }
6622 }
6623
6624 static void
6625 pmap_dump_ncpg(pmap_t pm)
6626 {
6627 struct vm_page *pg;
6628 struct vm_page_md *md;
6629 struct pv_entry *pv;
6630 int i;
6631
 6632 	for (i = 0; i < 64; i++) {
6633 if (ncptes[i] == 0)
6634 continue;
6635
6636 pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i]));
6637 if (pg == NULL)
6638 continue;
6639 md = VM_PAGE_TO_MD(pg);
6640
6641 printf(" pa 0x%08lx: krw %d kro %d urw %d uro %d\n",
6642 VM_PAGE_TO_PHYS(pg),
6643 md->krw_mappings, md->kro_mappings,
6644 md->urw_mappings, md->uro_mappings);
6645
6646 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
6647 printf(" %c va 0x%08lx, flags 0x%x\n",
6648 (pm == pv->pv_pmap) ? '*' : ' ',
6649 pv->pv_va, pv->pv_flags);
6650 }
6651 }
6652 }
6653 #endif
6654
6655 #ifdef PMAP_STEAL_MEMORY
6656 void
6657 pmap_boot_pageadd(pv_addr_t *newpv)
6658 {
6659 pv_addr_t *pv, *npv;
6660
6661 if ((pv = SLIST_FIRST(&pmap_boot_freeq)) != NULL) {
 6662 		if (newpv->pv_pa < pv->pv_pa) {
6663 KASSERT(newpv->pv_pa + newpv->pv_size <= pv->pv_pa);
6664 if (newpv->pv_pa + newpv->pv_size == pv->pv_pa) {
6665 newpv->pv_size += pv->pv_size;
6666 SLIST_REMOVE_HEAD(&pmap_boot_freeq, pv_list);
6667 }
6668 pv = NULL;
6669 } else {
6670 for (; (npv = SLIST_NEXT(pv, pv_list)) != NULL;
6671 pv = npv) {
6672 KASSERT(pv->pv_pa + pv->pv_size < npv->pv_pa);
6673 KASSERT(pv->pv_pa < newpv->pv_pa);
6674 if (newpv->pv_pa > npv->pv_pa)
6675 continue;
6676 if (pv->pv_pa + pv->pv_size == newpv->pv_pa) {
6677 pv->pv_size += newpv->pv_size;
6678 return;
6679 }
6680 if (newpv->pv_pa + newpv->pv_size < npv->pv_pa)
6681 break;
6682 newpv->pv_size += npv->pv_size;
6683 SLIST_INSERT_AFTER(pv, newpv, pv_list);
6684 SLIST_REMOVE_AFTER(newpv, pv_list);
6685 return;
6686 }
6687 }
6688 }
6689
6690 if (pv) {
6691 SLIST_INSERT_AFTER(pv, newpv, pv_list);
6692 } else {
6693 SLIST_INSERT_HEAD(&pmap_boot_freeq, newpv, pv_list);
6694 }
6695 }
6696
6697 void
6698 pmap_boot_pagealloc(psize_t amount, psize_t mask, psize_t match,
6699 pv_addr_t *rpv)
6700 {
6701 pv_addr_t *pv, **pvp;
6702 struct vm_physseg *ps;
6703 size_t i;
6704
 6705 	KASSERT((amount & PGOFSET) == 0);
6706 KASSERT((mask & PGOFSET) == 0);
6707 KASSERT((match & PGOFSET) == 0);
6708 KASSERT(amount != 0);
6709
6710 for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
6711 (pv = *pvp) != NULL;
6712 pvp = &SLIST_NEXT(pv, pv_list)) {
6713 pv_addr_t *newpv;
6714 psize_t off;
6715 /*
 6716 		 * If this entry is too small to satisfy the request...
6717 */
6718 KASSERT(pv->pv_size > 0);
6719 if (pv->pv_size < amount)
6720 continue;
6721
6722 for (off = 0; off <= mask; off += PAGE_SIZE) {
6723 if (((pv->pv_pa + off) & mask) == match
6724 && off + amount <= pv->pv_size)
6725 break;
6726 }
6727 if (off > mask)
6728 continue;
6729
6730 rpv->pv_va = pv->pv_va + off;
6731 rpv->pv_pa = pv->pv_pa + off;
6732 rpv->pv_size = amount;
6733 pv->pv_size -= amount;
6734 if (pv->pv_size == 0) {
6735 KASSERT(off == 0);
6736 KASSERT((vaddr_t) pv == rpv->pv_va);
6737 *pvp = SLIST_NEXT(pv, pv_list);
6738 } else if (off == 0) {
6739 KASSERT((vaddr_t) pv == rpv->pv_va);
6740 newpv = (pv_addr_t *) (rpv->pv_va + amount);
6741 *newpv = *pv;
6742 newpv->pv_pa += amount;
6743 newpv->pv_va += amount;
6744 *pvp = newpv;
6745 } else if (off < pv->pv_size) {
6746 newpv = (pv_addr_t *) (rpv->pv_va + amount);
6747 *newpv = *pv;
6748 newpv->pv_size -= off;
6749 newpv->pv_pa += off + amount;
6750 newpv->pv_va += off + amount;
6751
6752 SLIST_NEXT(pv, pv_list) = newpv;
6753 pv->pv_size = off;
6754 } else {
6755 KASSERT((vaddr_t) pv != rpv->pv_va);
6756 }
6757 memset((void *)rpv->pv_va, 0, amount);
6758 return;
6759 }
6760
6761 if (vm_nphysseg == 0)
6762 panic("pmap_boot_pagealloc: couldn't allocate memory");
6763
6764 for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
6765 (pv = *pvp) != NULL;
6766 pvp = &SLIST_NEXT(pv, pv_list)) {
6767 if (SLIST_NEXT(pv, pv_list) == NULL)
6768 break;
6769 }
6770 KASSERT(mask == 0);
6771 for (i = 0; i < vm_nphysseg; i++) {
6772 ps = VM_PHYSMEM_PTR(i);
6773 if (ps->avail_start == atop(pv->pv_pa + pv->pv_size)
6774 && pv->pv_va + pv->pv_size <= ptoa(ps->avail_end)) {
6775 rpv->pv_va = pv->pv_va;
6776 rpv->pv_pa = pv->pv_pa;
6777 rpv->pv_size = amount;
6778 *pvp = NULL;
6779 pmap_map_chunk(kernel_l1pt.pv_va,
6780 ptoa(ps->avail_start) + (pv->pv_va - pv->pv_pa),
6781 ptoa(ps->avail_start),
6782 amount - pv->pv_size,
6783 VM_PROT_READ|VM_PROT_WRITE,
6784 PTE_CACHE);
6785 ps->avail_start += atop(amount - pv->pv_size);
6786 /*
6787 * If we consumed the entire physseg, remove it.
6788 */
6789 if (ps->avail_start == ps->avail_end) {
6790 for (--vm_nphysseg; i < vm_nphysseg; i++)
6791 VM_PHYSMEM_PTR_SWAP(i, i + 1);
6792 }
6793 memset((void *)rpv->pv_va, 0, rpv->pv_size);
6794 return;
6795 }
6796 }
6797
6798 panic("pmap_boot_pagealloc: couldn't allocate memory");
6799 }
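
/*
 * The mask/match pair selects the physical alignment of the result:
 * the search above skips forward a page at a time until
 * ((pv_pa + off) & mask) == match.  For example, a bootstrap caller
 * wanting an L1 translation table (16KB, 16KB-aligned, assuming the
 * usual L1_TABLE_SIZE constant) out of the boot free list might do
 * (a sketch only; the surrounding bootstrap code is not shown):
 *
 *	pv_addr_t l1pt;
 *
 *	pmap_boot_pagealloc(L1_TABLE_SIZE,
 *	    L1_TABLE_SIZE - PAGE_SIZE, 0, &l1pt);
 */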
6800
6801 vaddr_t
6802 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
6803 {
6804 pv_addr_t pv;
6805
6806 pmap_boot_pagealloc(size, 0, 0, &pv);
6807
6808 return pv.pv_va;
6809 }
6810 #endif /* PMAP_STEAL_MEMORY */
6811
6812 SYSCTL_SETUP(sysctl_machdep_pmap_setup, "sysctl machdep.kmpages setup")
6813 {
6814 sysctl_createv(clog, 0, NULL, NULL,
6815 CTLFLAG_PERMANENT,
6816 CTLTYPE_NODE, "machdep", NULL,
6817 NULL, 0, NULL, 0,
6818 CTL_MACHDEP, CTL_EOL);
6819
6820 sysctl_createv(clog, 0, NULL, NULL,
6821 CTLFLAG_PERMANENT,
6822 CTLTYPE_INT, "kmpages",
6823 SYSCTL_DESCR("count of pages allocated to kernel memory allocators"),
6824 NULL, 0, &pmap_kmpages, 0,
6825 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
6826 }
6827