/*	$NetBSD: pmap.h,v 1.154.2.1 2018/04/07 04:12:12 pgoyette Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#endif
#include <arm/cpufunc.h>
#include <arm/locore.h>
#include <uvm/uvm_object.h>
#include <uvm/pmap/pmap_pvt.h>
#endif

#ifdef ARM_MMU_EXTENDED
#define	PMAP_HWPAGEWALKER		1
#define	PMAP_TLB_MAX			1
#if PMAP_TLB_MAX > 1
#define	PMAP_TLB_NEED_SHOOTDOWN		1
#endif
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
#define	PMAP_TLB_NUM_PIDS		256
#define	cpu_set_tlb_info(ci, ti)	((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define	cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define	cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define	pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use TTBR0/TTBR1 to have separate L1 tables for
 * user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
#endif /* ARM_MMU_EXTENDED */

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define	L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
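
/*
 * Worked example (assuming the usual 1MB L1 sections, i.e.
 * L1_S_SHIFT == 20): each bucket spans L2_BUCKET_XSIZE = 1MB, so one
 * l2_dtable covers L2_BUCKET_SIZE * 1MB = 16MB, and
 * L2_LOG2 = 32 - (20 + 4) = 8 gives L2_SIZE = 256 l2_dtables to cover
 * the whole 4GB address space (256 * 16MB = 4GB).
 */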

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 and ARMv7 caches are physically tagged (VIPT); all earlier
 * ones are virtually tagged (VIVT).
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

#ifndef ARM_MMU_EXTENDED
struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;

		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
#endif /* !ARM_MMU_EXTENDED */

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

#define	DEVMAP_ALIGN(a)	((a) & ~L1_S_OFFSET)
#define	DEVMAP_SIZE(s)	roundup2((s), L1_S_SIZE)
#define	DEVMAP_ENTRY(va, pa, sz)			\
	{						\
		.pd_va = DEVMAP_ALIGN(va),		\
		.pd_pa = DEVMAP_ALIGN(pa),		\
		.pd_size = DEVMAP_SIZE(sz),		\
		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,	\
		.pd_cache = PTE_NOCACHE		\
	}
#define	DEVMAP_ENTRY_END	{ 0 }
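
/*
 * Example (a sketch; the device names and addresses are invented for
 * illustration): board code typically declares a statically
 * initialized, DEVMAP_ENTRY_END-terminated table and registers it
 * early in initarm(), so the console and other critical devices are
 * mapped uncached with 1MB sections before the VM system is up:
 *
 *	static const struct pmap_devmap mydevmap[] = {
 *		DEVMAP_ENTRY(MYBOARD_UART_VBASE,   // virtual address
 *		    MYBOARD_UART_PBASE,            // physical address
 *		    MYBOARD_UART_SIZE),            // rounded up to 1MB
 *		DEVMAP_ENTRY_END
 *	};
 *
 *	pmap_devmap_register(mydevmap);
 */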

/*
 * The pmap structure itself
 */
struct pmap {
	struct uvm_object	pm_obj;
	kmutex_t		pm_obj_lock;
#define	pm_lock pm_obj.vmobjlock
#ifndef ARM_HAS_VBAR
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
#endif
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
#ifdef ARM_MMU_EXTENDED
	pd_entry_t		*pm_l1;
	paddr_t			pm_l1_pa;
	bool			pm_remove_all;
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	uint8_t			pm_domain;
	bool			pm_activated;
	bool			pm_remove_all;
#endif
};

struct pmap_kernel {
	struct pmap		kernel_pmap;
};

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
	uint8_t pv_cache;
	uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)

/*
 * Commonly referenced structures
 */
extern int	pmap_debug_level;	/* Only exists if PMAP_DEBUG */
extern int	arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define	ARM32_MMAP_WRITECOMBINE	0x40000000
#define	ARM32_MMAP_CACHEABLE	0x20000000
#define	ARM_MMAP_WRITECOMBINE	ARM32_MMAP_WRITECOMBINE
#define	ARM_MMAP_CACHEABLE	ARM32_MMAP_CACHEABLE
#define	pmap_mmap_flags(ppn)	arm32_mmap_flags(ppn)
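
/*
 * Illustrative sketch (the driver and register names are invented): a
 * device driver's d_mmap routine can OR one of the flags above into
 * the page number it returns, and pmap_mmap_flags()/arm32_mmap_flags()
 * translate that into the matching PTE cache attributes when the page
 * is actually entered:
 *
 *	paddr_t
 *	mydev_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		// map the framebuffer write-combined
 *		return arm_btop(MYDEV_FB_PBASE + off) |
 *		    ARM32_MMAP_WRITECOMBINE;
 *	}
 */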

#define	PMAP_PTE		0x10000000 /* kenter_pa */

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
bool	pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * For the pmap, this is a more useful way to map a direct mapped page.
 * It returns either the direct-mapped VA or the VA supplied if it can't
 * be direct mapped.
 */
vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
#endif

/*
 * Used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Ending VA of direct mapped memory (usually KERNEL_VM_BASE).
 */
extern vaddr_t pmap_directlimit;
#endif

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * Perform compile-time evaluation of PMAP_NEEDS_PTE_SYNC when only a
 * single MMU type is selected.
 *
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  Additionally, V6 MMUs also need PTE syncs.
 * Finally, MEMC, GENERIC and XSCALE MMUs do not need PTE syncs.
 *
 * Use run-time evaluation for all other cases.
 */
#if (ARM_NMMUS == 1)
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
#define	PMAP_INCLUDE_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC) {
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#ifdef SHEEVA_L2_CACHE
		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
		    cnt * sizeof(pt_entry_t));
#endif
	}
	arm_dsb();
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))

#define	l1pte_valid_p(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define	l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define	l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
#define	l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))

static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}

#define	l2pte_index(v)	((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define	l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define	l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	if (l1pte_lpage_p(pte)) {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (L2_L_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (size_t k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
			*ptep++ = pte;
		}
	} else {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (size_t k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
			*ptep++ = pte;
			pte += L2_S_SIZE;
			if (opte)
				opte += L2_S_SIZE;
		}
	}
}
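
/*
 * Typical update idiom (a sketch; "npte" and "opte" stand for the new
 * and old PTE values the caller has computed): modify the PTEs through
 * l2pte_set()/l2pte_reset() and then push the change to memory with
 * PTE_SYNC() so that a hardware table walk sees it:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 */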

static inline void
l2pte_reset(pt_entry_t *ptep)
{
	KASSERTMSG((((uintptr_t)ptep / sizeof(*ptep)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
	*ptep = 0;
	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
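
/*
 * Worked example (assuming the common KERNEL_BASE of 0x80000000 and a
 * 16KB L1 table of 4096 4-byte entries): KERNEL_BASE >> L1_S_SHIFT is
 * 0x800 = 2048 user slots, so KERNEL_PD_SIZE = 16KB - 2048 * 4 = 8KB,
 * i.e. the upper half of the L1 table belongs to the kernel.
 */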

void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID		0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */
#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#endif

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)


#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)	/* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define	l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define	l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)

#define	l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define	l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define	l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				 ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
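
/*
 * Example idiom (a sketch, roughly mirroring what the pmap does when it
 * write-protects an existing small-page mapping):
 *
 *	const pt_entry_t opte = *ptep;
 *	const pt_entry_t npte = l2pte_set_readonly(opte);
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 */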

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */

#define	L1_S_PROT(ku, pr) (						   \
	(((ku) == PTE_USER) ?						   \
	    L1_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)	   \
	:								   \
	    (((L1_S_PROT_RO &&						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		L1_S_PROT_RO : L1_S_PROT_W)))				   \
    )

#define	L2_L_PROT(ku, pr) (						   \
	(((ku) == PTE_USER) ?						   \
	    L2_L_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)	   \
	:								   \
	    (((L2_L_PROT_RO &&						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		L2_L_PROT_RO : L2_L_PROT_W)))				   \
    )

#define	L2_S_PROT(ku, pr) (						   \
	(((ku) == PTE_USER) ?						   \
	    L2_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)	   \
	:								   \
	    (((L2_S_PROT_RO &&						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		L2_S_PROT_RO : L2_S_PROT_W)))				   \
    )
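
/*
 * For instance (a sketch of the usual construction; "pa" stands for the
 * physical address being mapped), a cacheable, writable kernel
 * small-page PTE is built as
 *
 *	pt_entry_t npte = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 */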

/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)	\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)	\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)	\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
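
/*
 * Bootstrap code such as pmap_map_chunk() can use these to pick the
 * largest mapping that fits; a rough sketch:
 *
 *	if (L1_SS_MAPPABLE_P(va, pa, resid))
 *		... use a 16MB supersection ...
 *	else if (L1_S_MAPPABLE_P(va, pa, resid))
 *		... use a 1MB section ...
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		... use a 64KB large page ...
 *	else
 *		... fall back to 4KB small pages ...
 */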

#define	PMAP_MAPSIZE1	L2_L_SIZE
#define	PMAP_MAPSIZE2	L1_S_SIZE
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_MAPSIZE3	L1_SS_SIZE
#endif

#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
vaddr_t	pmap_map_poolpage(paddr_t);
paddr_t	pmap_unmap_poolpage(vaddr_t);
#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
#endif

#define	__HAVE_PMAP_PV_TRACK	1

void pmap_pv_protect(paddr_t, vm_prot_t);

struct pmap_page {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
};

/*
 * pmap-specific data store in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	struct pmap_page pp;
#define	pvh_list	pp.pvh_list
#define	pvh_attrs	pp.pvh_attrs
#define	uro_mappings	pp.uro_mappings
#define	urw_mappings	pp.urw_mappings
#define	kro_mappings	pp.k_u.s_mappings[0]
#define	krw_mappings	pp.k_u.s_mappings[1]
#define	k_mappings	pp.k_u.i_mappings
};

#define	PMAP_PAGE_TO_MD(ppage)	container_of((ppage), struct vm_page_md, pp)

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */