/*	$NetBSD: pmap.h,v 1.81.24.2 2007/11/06 23:15:05 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#endif
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
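
/*
 * Illustrative sketch (the helper below is hypothetical, not part of
 * the API): the L1 slot for a virtual address is just the address
 * shifted by L1_S_SHIFT, so the kernel/user split above falls out of
 * a simple index comparison.
 *
 *	static inline bool
 *	va_is_kernel_slot(vaddr_t va)
 *	{
 *		return (va >> L1_S_SHIFT) >= (KERNEL_BASE >> L1_S_SHIFT);
 *	}
 */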

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
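
/*
 * Worked numbers: L2_BUCKET_SIZE = 1 << 4 = 16, and each L2 table
 * covers one 1MB L1 section, so a single l2_dtable spans
 * 16 * 1MB = 16MB of contiguous virtual address space.
 */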

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
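
/*
 * Worked numbers (assuming the usual 1MB sections, L1_S_SHIFT == 20):
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256 l2_dtable slots, and
 * 256 * 16MB covers the whole 4GB address space.
 */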

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
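
/*
 * A sketch of intended use: writing cs_all marks every cache/TLB byte
 * at once, forcing subsequent cache operations to be performed for
 * this pmap.
 *
 *	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
 *	if (pm->pm_cstate.cs_cache_d)
 *		... the pmap may own dirty data cache lines ...
 */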

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
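
/*
 * Hypothetical example of use from a port's initarm() (the addresses
 * and names are made up): a statically initialized table, terminated
 * by a zero-size entry, is handed to pmap_devmap_bootstrap() (declared
 * below) along with the L1 table's virtual address.
 *
 *	static const struct pmap_devmap mybrd_devmap[] = {
 *		{ MYBRD_UART_VBASE, MYBRD_UART_PBASE, L1_S_SIZE,
 *		  VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_bootstrap((vaddr_t)kernel_l1pt.pv_va, mybrd_devmap);
 */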

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	bool			pm_remove_all;
	bool			pm_activated;
	struct l1_ttable	*pm_l1;
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
	union pmap_cache_state	pm_cstate;
	struct uvm_object	pm_obj;
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelpages;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
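
/*
 * Because the flags share one namespace, several attributes can be
 * cleared with a single mask.  A simplified sketch (the real
 * attribute-clearing logic, which also fixes up PTEs, lives in
 * pmap.c):
 *
 *	pg->mdpage.pvh_attrs &= ~(PVF_MOD | PVF_REF);
 */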

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_remove(pmap, sva, eva)	pmap_do_remove((pmap), (sva), (eva), 0)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

#if ARM_MMU_V6 > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
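
/*
 * Typical usage pattern (a sketch; "newpte" is a made-up value): after
 * storing a new PTE, write back the cache line covering it so the
 * table walk sees the update on CPUs whose page tables are not mapped
 * write-through.
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	*ptep = newpte;
 *	PTE_SYNC(ptep);
 */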

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
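
/*
 * Worked example (assuming KERNEL_BASE == 0xc0000000): the L1 table is
 * 16KB (4096 four-byte entries).  The user portion accounts for
 * 0xc0000000 >> 20 = 3072 entries (12KB), leaving KERNEL_PD_SIZE =
 * 4KB, i.e. 1024 section entries mapping the top 1GB of kernel VA.
 */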

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 is physically-tagged but all others are virtually-tagged.
 */
#if ARM_MMU_V6 > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
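
/*
 * Expansion example: L2_S_PROT(PTE_USER, VM_PROT_READ|VM_PROT_WRITE)
 * folds at compile time to (L2_S_PROT_U | L2_S_PROT_W), while a kernel
 * read-only mapping, L2_S_PROT(PTE_KERNEL, VM_PROT_READ), folds to 0.
 */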

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
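
/*
 * Example with illustrative figures: a 4MB region with va == pa ==
 * 0xc1000000 is 1MB-aligned on both sides and at least L1_S_SIZE
 * bytes, so L1_S_MAPPABLE_P() is true and the region can be mapped
 * with four L1 section entries (cf. pmap_map_chunk() above).
 */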

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */