/*	$NetBSD: pmap.h,v 1.135.2.2.4.1 2017/03/13 07:41:26 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#endif
#include <arm/cpufunc.h>
#include <arm/locore.h>
#include <uvm/uvm_object.h>
#endif

#ifdef ARM_MMU_EXTENDED
#define	PMAP_TLB_MAX			1
#define	PMAP_TLB_HWPAGEWALKER		1
#if PMAP_TLB_MAX > 1
#define	PMAP_NEED_TLB_SHOOTDOWN		1
#endif
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
#define	PMAP_TLB_NUM_PIDS		256
#define	cpu_set_tlb_info(ci, ti)	((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define	cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define	cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define	pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use the TTBR0/TTBR1 to have separate L1 tables for
 * user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
#endif /* ARM_MMU_EXTENDED */
/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define	L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
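
/*
 * Worked example (illustrative): with 1MB L1 sections, L1_S_SHIFT is 20,
 * so L2_BUCKET_XSIZE is 1MB and each l2_dtable tracks L2_BUCKET_SIZE
 * (16) L2 tables, i.e. 16 * 1MB = 16MB of contiguous VA.
 */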

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
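
/*
 * Continuing the example above: L2_LOG2 = 32 - (20 + 4) = 8, so
 * L2_SIZE = 256 l2_dtables suffice to cover the whole 4GB address
 * space (256 * 16MB = 4GB).
 */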

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 and ARMv7 are physically-tagged, but all others are
 * virtually-tagged.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

#ifndef ARM_MMU_EXTENDED
struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;

		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
#endif /* !ARM_MMU_EXTENDED */

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
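
/*
 * Example (illustrative; the MYDEV_* names are hypothetical): board
 * code typically declares a statically-initialized table of these,
 * terminated by an all-zero entry, and hands it to
 * pmap_devmap_bootstrap():
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{ MYDEV_VBASE, MYDEV_PBASE, MYDEV_SIZE,
 *		  VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }		// terminator
 *	};
 */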

/*
 * The pmap structure itself
 */
struct pmap {
	struct uvm_object	pm_obj;
	kmutex_t		pm_obj_lock;
#define	pm_lock pm_obj.vmobjlock
#ifndef ARM_HAS_VBAR
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
#endif
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
#ifdef ARM_MMU_EXTENDED
	pd_entry_t		*pm_l1;
	paddr_t			pm_l1_pa;
	bool			pm_remove_all;
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	uint8_t			pm_domain;
	bool			pm_activated;
	bool			pm_remove_all;
#endif
};

struct pmap_kernel {
	struct pmap		kernel_pmap;
};

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
	uint8_t pv_cache;
	uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)

/*
 * Commonly referenced structures
 */
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */
extern int		arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define	ARM32_MMAP_WRITECOMBINE	0x40000000
#define	ARM32_MMAP_CACHEABLE	0x20000000
#define	pmap_mmap_flags(ppn)	arm32_mmap_flags(ppn)

#define	PMAP_PTE		0x10000000 /* kenter_pa */

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
struct pcb;
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * For the pmap, this is a more useful way to map a direct mapped page.
 * It returns either the direct-mapped VA or the VA supplied if it can't
 * be direct mapped.
 */
vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
#endif

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Starting VA of direct mapped memory (usually KERNEL_BASE).
 */
extern vaddr_t pmap_directbase;
#endif

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}
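
/*
 * Example (illustrative): look up the kernel PTE for a page-aligned
 * VA and, if the mapping is valid, recover its physical address:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid_p(*ptep))
 *		pa = l2pte_pa(*ptep);
 */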

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * Perform compile time evaluation of PMAP_NEEDS_PTE_SYNC when only a
 * single MMU type is selected.
 *
 * StrongARM SA-1 caches do not have a write-through mode, so on these
 * we need to do PTE syncs.  V6 MMUs need PTE syncs as well.  The MEMC,
 * GENERIC and XSCALE MMUs do not need PTE syncs.
 *
 * Use run time evaluation for all other cases.
 */
#if (ARM_NMMUS == 1)
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
#define	PMAP_INCLUDE_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC) {
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#ifdef SHEEVA_L2_CACHE
		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
		    cnt * sizeof(pt_entry_t));
#endif
	}
	arm_dsb();
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
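
/*
 * Typical usage (illustrative): update a PTE, then push the change out
 * to where the hardware table walker can see it:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 */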

#define	l1pte_valid_p(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define	l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define	l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
#define	l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))
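
/*
 * Example (illustrative): with 1MB sections (L1_S_SHIFT == 20),
 * l1pte_index(0xc0100000) == 0xc01, i.e. VA 0xc0100000 is covered by
 * slot 0xc01 of the 4096-entry L1 table.
 */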

static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}
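
/*
 * Why l1pte_set() replicates entries (assuming 4KB pages and 1KB
 * coarse L2 tables): a page-table L1 entry points at a 1KB L2 table,
 * so one 4KB page holds PAGE_SIZE / L2_T_SIZE == 4 consecutive L2
 * tables and the 4 corresponding L1 slots are written together.
 * Likewise a supersection must appear in all L1_SS_SIZE / L1_S_SIZE
 * == 16 L1 slots that its 16MB range spans.
 */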

#define	l2pte_index(v)	((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define	l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define	l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	if (l1pte_lpage_p(pte)) {
		for (size_t k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
			*ptep++ = pte;
		}
	} else {
		for (size_t k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
			*ptep++ = pte;
			pte += L2_S_SIZE;
			if (opte)
				opte += L2_S_SIZE;
		}
	}
}
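
/*
 * Replication in l2pte_set() (illustrative): an ARM large page (64KB)
 * must be duplicated in all L2_L_SIZE / L2_S_SIZE == 16 consecutive
 * L2 slots it spans.  For small pages, when PAGE_SIZE exceeds the 4KB
 * hardware page, one pmap page is backed by PAGE_SIZE / L2_S_SIZE
 * consecutive hardware PTEs, each stepped by L2_S_SIZE; the KASSERTMSG
 * checks the old value so a stale entry is never silently overwritten.
 */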

static inline void
l2pte_reset(pt_entry_t *ptep)
{
	*ptep = 0;
	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
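
/*
 * Worked example (illustrative): with KERNEL_BASE at 0x80000000, the
 * usual 16KB L1 table, and 4-byte L1 entries, the user half uses
 * 0x800 * 4 = 8KB, leaving KERNEL_PD_SIZE = 16KB - 8KB = 8KB for the
 * kernel's L1 entries.
 */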

void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID		0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */
#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#endif

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#define	L2_S_CACHE_MASK_armv6n	L2_XS_CACHE_MASK_armv6
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)	/* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define	l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define	l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
#define	l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define	l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define	l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
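
/*
 * Example (illustrative): a writable user mapping uses
 * L2_S_PROT(PTE_USER, VM_PROT_READ|VM_PROT_WRITE), which folds to
 * L2_S_PROT_U | L2_S_PROT_W; a read-only kernel mapping uses
 * L2_S_PROT(PTE_KERNEL, VM_PROT_READ), which folds to L2_S_PROT_RO.
 */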

/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
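
/*
 * Example (illustrative): a 32MB region whose VA and PA are both
 * 16MB-aligned satisfies L1_SS_MAPPABLE_P (supersections are 16MB);
 * 1MB-aligned chunks satisfy L1_S_MAPPABLE_P, and 64KB-aligned chunks
 * L2_L_MAPPABLE_P.  pmap_map_chunk() uses these tests to pick the
 * largest mapping size that fits each chunk.
 */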

#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
vaddr_t	pmap_map_poolpage(paddr_t);
paddr_t	pmap_unmap_poolpage(vaddr_t);
#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
#endif

/*
 * pmap-specific data store in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
};

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */