/*	$NetBSD: pmap.h,v 1.87 2008/07/21 07:33:54 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#endif
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * a pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
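
/*
 * Worked example (assuming the usual 1MB L1 sections and 4KB pages):
 * each coarse L2 table maps 256 * 4KB = 1MB of virtual address space,
 * so one bucket of L2_BUCKET_SIZE (16) tables covers 16 * 1MB = 16MB.
 */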

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
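
/*
 * Worked example: with L1_S_SHIFT == 20 (1MB sections),
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256 l2_dtable slots
 * suffice to cover the full 4GB (256 * 16MB) address space.
 */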

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
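
/*
 * Illustrative use (a sketch; the authoritative logic lives in the
 * pmap implementation): marking a pmap as occupying all cache/TLB
 * state forces subsequent cache operations to be performed for it:
 *
 *	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
 */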

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
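
/*
 * Example (a hypothetical board's table; the FOO_* constants are
 * illustrative only).  By convention the table is terminated by an
 * all-zero entry and handed to pmap_devmap_bootstrap(), declared
 * below:
 *
 *	static const struct pmap_devmap foo_devmap[] = {
 *		{ FOO_UART_VBASE, FOO_UART_PBASE, FOO_UART_SIZE,
 *		  VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 */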

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;	/* MMU domain number */
	bool			pm_remove_all;	/* pmap_remove_all() active */
	bool			pm_activated;	/* pmap is in use on a CPU */
	struct l1_ttable	*pm_l1;		/* L1 translation table */
	pd_entry_t		*pm_pl1vec;	/* L1 slot for vector page */
	pd_entry_t		pm_l1vec;	/* value for that L1 slot */
	union pmap_cache_state	pm_cstate;	/* cache/TLB occupancy */
	struct uvm_object	pm_obj;		/* refcount and lock */
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];/* L2 descriptor tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* list of all pmaps */
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelpages;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
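
/*
 * Because these flags share one namespace, several attributes can be
 * cleared in a single operation.  A sketch (pmap_clearbit() is internal
 * to the pmap implementation and named here only for illustration):
 *
 *	pmap_clearbit(pg, PVF_MOD|PVF_REF);
 */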

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_remove(pmap, sva, eva)	pmap_do_remove((pmap), (sva), (eva), 0)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_KMPAGE	0x00000040	/* Make uvm tell us when it allocates
					   a page to be used for kernel
					   memory */

#if ARM_MMU_V6 > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}
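
/*
 * Illustrative use (a sketch): look up the kernel PTE for a VA and
 * revoke write permission on the mapping.  l2pte_valid() and
 * PTE_SYNC() are defined later in this file.
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep)) {
 *		*ptep &= ~L2_S_PROT_W;
 *		PTE_SYNC(ptep);
 *		cpu_tlb_flushD_SE(va);
 *	}
 */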

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page-tables are always mapped Write-Thru.
 * Thus, on some platforms we can avoid having to sync PTEs on every
 * change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance that PTE syncs are needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode, so on those
 * we must sync PTEs; the ARMv6 configuration is treated the same way.
 * If only such CPUs are configured, evaluate this at compile time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
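
/*
 * Typical usage pattern (a sketch): a PTE store is followed by a sync
 * so that hardware table walks see the update even on CPUs whose
 * caches are not write-through:
 *
 *	*ptep = npte;
 *	PTE_SYNC(ptep);
 */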

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
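
/*
 * Worked example (assuming KERNEL_BASE is 0xc0000000 and the usual
 * 16KB L1 table): 0xc0000000 >> 20 = 3072 user slots, each holding a
 * 4-byte pd_entry_t, so KERNEL_PD_SIZE = 16384 - 3072 * 4 = 4096 bytes.
 */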

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 caches are virtually-indexed but physically-tagged (VIPT);
 * all earlier CPUs are both virtually-indexed and virtually-tagged (VIVT).
 */
#if ARM_MMU_V6 > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
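
/*
 * For example, L2_S_PROT(PTE_USER, VM_PROT_READ|VM_PROT_WRITE) folds
 * to (L2_S_PROT_U | L2_S_PROT_W): a user-accessible, writable
 * small-page mapping.
 */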

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
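
/*
 * For example, a chunk whose va and pa are both 1MB-aligned and whose
 * size is at least 1MB satisfies L1_S_MAPPABLE_P().  pmap_map_chunk()
 * uses these tests to pick the largest mapping size (section, large
 * page, or small page) for each piece of a region.
 */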

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */