/*	$NetBSD: pmap.h,v 1.74 2003/06/15 18:18:17 thorpej Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/arm32/pte.h>
#include <uvm/uvm_object.h>
/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
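
/*
 * Worked example: L1 sections are 1MB, so L1_S_SHIFT is 20.  Each
 * l2_dtable thus covers 16 * 1MB == 16MB of virtual address space,
 * and a full 4GB address space needs (1 << ((32 - 20) - 4)) == 256
 * l2_dtable slots, i.e. L2_SIZE == 256.
 */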

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
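
/*
 * Illustrative use (a sketch, not code lifted from pmap.c): marking a
 * pmap's cache and TLB state fully resident in one store, so that
 * subsequent cache operations on it are not skipped:
 *
 *	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
 */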

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
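
/*
 * Example (a sketch with made-up addresses, not taken from any real
 * port): a board's static devmap table, conventionally terminated by
 * an all-zero entry and registered early during bootstrap:
 *
 *	static const struct pmap_devmap foo_devmap[] = {
 *		{ 0xfd000000, 0x80000000, 0x00100000,
 *		  VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_register(foo_devmap);
 */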

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;	/* MMU domain number */
	boolean_t		pm_remove_all;	/* full removal in progress */
	struct l1_ttable	*pm_l1;		/* L1 translation table */
	union pmap_cache_state	pm_cstate;	/* cache/TLB occupancy */
	struct uvm_object	pm_obj;		/* lock and reference count */
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];/* L2 descriptor tables */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap)	pm_list;	/* entry on list of pmaps */
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure.  In a number of places
 * (particularly during bootstrapping) we need to keep track of the
 * physical and virtual addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
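
/*
 * Because the flags share one namespace, several attributes can be
 * cleared in a single operation.  A sketch, using the pvh_attrs field
 * referenced by pmap_is_modified()/pmap_is_referenced() below:
 *
 *	pg->mdpage.pvh_attrs &= ~(PVF_MOD|PVF_REF);
 */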

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
boolean_t pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
boolean_t pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
boolean_t pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static __inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2);	/* times sizeof(pt_entry_t) */	\
	}								\
} while (/*CONSTCOND*/0)
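
/*
 * Illustrative use of PTE_SYNC() (a sketch; "ptep" and "pa" are
 * assumed to be a valid L2 PTE pointer and a page-aligned physical
 * address).  Every store to a PTE is followed by a sync so that CPUs
 * without a write-through page-table mapping see the update:
 *
 *	*ptep = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 */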

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))
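
/*
 * A sketch of how these macros combine to translate a kernel VA by
 * hand (illustrative only; example_va_to_pa() is a hypothetical
 * helper, and the L1_S_FRAME/L1_S_OFFSET/L2_S_OFFSET masks are
 * assumed from <arm/arm32/pte.h>).  For a section mapping the PA
 * comes from the L1 entry; otherwise it comes from the L2 PTE:
 *
 *	paddr_t
 *	example_va_to_pa(vaddr_t va)
 *	{
 *		pd_entry_t *pdep;
 *		pt_entry_t *ptep;
 *
 *		if (pmap_get_pde_pte(pmap_kernel(), va,
 *		    &pdep, &ptep) == FALSE)
 *			return (0);
 *		if (pmap_pde_section(pdep))
 *			return ((*pdep & L1_S_FRAME) | (va & L1_S_OFFSET));
 *		if (!pmap_pte_v(ptep))
 *			return (0);
 *		return (pmap_pte_pa(ptep) | (va & L2_S_OFFSET));
 *	}
 */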

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
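
/*
 * Worked example (assuming the usual KERNEL_BASE of 0xc0000000 and a
 * 16KB L1 table): 0xc0000000 >> 20 == 3072 user slots, each holding a
 * 4-byte pd_entry_t, so KERNEL_PD_SIZE == 16384 - 3072 * 4 == 4KB.
 */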

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define	PMAP_CACHE_VIVT

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
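
/*
 * For example (assuming a single generic MMU class, so everything is
 * a compile-time constant):
 *
 *	L2_S_PROT(PTE_USER, VM_PROT_READ|VM_PROT_WRITE)
 *
 * folds to (L2_S_PROT_U|L2_S_PROT_W), while a kernel read-only
 * mapping, L2_S_PROT(PTE_KERNEL, VM_PROT_READ), folds to 0.
 */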

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
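
/*
 * For example, a 2MB region with va == 0xc0100000 and pa == 0x80100000
 * satisfies L1_S_MAPPABLE_P (both addresses are 1MB-aligned and the
 * size is at least L1_S_SIZE), so pmap_map_chunk() can use two 1MB
 * section mappings instead of 512 individual small-page mappings.
 */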

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */