/*	$NetBSD: pmap.h,v 1.16.2.9 2002/12/11 05:53:07 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/arm32/pte.h>
#include <uvm/uvm_object.h>

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The L2 tables are mapped linearly starting at PTE_BASE.  PTE_BASE
 * is below KERNEL_BASE, which means that the current process's PTEs
 * are always available starting at PTE_BASE.  Another region of KVA
 * above KERNEL_BASE, APTE_BASE, is reserved for mapping in the PTEs
 * of another process, should we need to manipulate them.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	PTE_BASE
 *	.
 *	.
 *	.
 *	0x00000000
 */
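
/*
 * Illustrative sketch (not part of the interface): with 1MB sections,
 * i.e. L1_S_SHIFT == 20, a virtual address selects one of the 4096 L1
 * slots by its top bits:
 *
 *	u_int l1slot = va >> L1_S_SHIFT;	(0 .. 4095)
 *
 * and, because the current context's L2 tables are mapped linearly at
 * PTE_BASE, its PTE sits at a fixed offset in that window:
 *
 *	pt_entry_t *pte = (pt_entry_t *)PTE_BASE + arm_btop(va);
 *
 * which is exactly what the vtopte() macro below computes.
 */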

/*
 * The pmap structure itself.
 */
struct pmap {
	struct uvm_object	pm_obj;		/* uvm_object */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap)	pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	struct l1pt		*pm_l1pt;	/* L1 table metadata */
	paddr_t			pm_pptpt;	/* PA of pt's page table */
	vaddr_t			pm_vptpt;	/* VA of pt's page table */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* recently used PT */
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time (see the sketch
 * following these definitions).
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_NC		0x20		/* mapping is non-cacheable */
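
/*
 * A minimal sketch of clearing several attributes in one mask
 * operation (names follow pmap_is_modified() below; the real logic
 * lives in pmap.c):
 *
 *	u_int attrs = pg->mdpage.pvh_attrs;
 *	if (attrs & (PVF_MOD|PVF_REF))
 *		pg->mdpage.pvh_attrs = attrs & ~(PVF_MOD|PVF_REF);
 */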

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

#define	pmap_phys_address(ppn)	(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
vaddr_t	pmap_map(vaddr_t, vaddr_t, vaddr_t, int);
void	pmap_procwr(struct proc *, vaddr_t, int);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, pv_addr_t);
void	pmap_debug(int);
int	pmap_handled_emulation(struct pmap *, vaddr_t);
int	pmap_modified_emulation(struct pmap *, vaddr_t);
void	pmap_postinit(void);

void	vector_page_setprot(int);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/*
 * While the ARM MMU's L1 descriptors describe a 1M "section", each
 * one pointing to a 1K L2 table, NetBSD's VM system allocates the
 * page tables in 4K chunks, and thus we describe 4M "super sections".
 *
 * We'll lift terminology from another architecture and refer to this as
 * the "page directory" size.
 */
#define	PD_SIZE		(L1_S_SIZE * 4)	/* 4M */
#define	PD_OFFSET	(PD_SIZE - 1)
#define	PD_FRAME	(~PD_OFFSET)
#define	PD_SHIFT	22
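
/*
 * Worked example of the arithmetic above: for va == 0xc0123456,
 *
 *	va >> PD_SHIFT == 0x300		(page directory index)
 *	va & PD_FRAME  == 0xc0000000	(base of its 4M region)
 *	va & PD_OFFSET == 0x00123456	(offset within the region)
 */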

/* Virtual address to page table entry */
#define	vtopte(va)	\
	(((pt_entry_t *)PTE_BASE) + arm_btop((vaddr_t) (va)))

/* Virtual address to physical address */
#define	vtophys(va)	\
	((*vtopte(va) & L2_S_FRAME) | ((vaddr_t) (va) & L2_S_OFFSET))
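
/*
 * Minimal usage sketch: vtopte() performs no validity check itself, so
 * callers that cannot assume a resident mapping should test the PTE
 * first (l2pte_valid() is defined below):
 *
 *	pt_entry_t *pte = vtopte(va);
 *	if (l2pte_valid(*pte))
 *		pa = vtophys(va);
 */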

#define	PTE_SYNC(pte)	\
	cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t))
#define	PTE_FLUSH(pte)	\
	cpu_dcache_wbinv_range((vaddr_t)(pte), sizeof(pt_entry_t))

#define	PTE_SYNC_RANGE(pte, cnt)	\
	cpu_dcache_wb_range((vaddr_t)(pte), (cnt) << 2) /* (cnt) * sizeof(pt_entry_t) */
#define	PTE_FLUSH_RANGE(pte, cnt)	\
	cpu_dcache_wbinv_range((vaddr_t)(pte), (cnt) << 2) /* (cnt) * sizeof(pt_entry_t) */
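
/*
 * These are used after a PTE store: the MMU's table walker fetches
 * descriptors from memory, not from the (possibly write-back) data
 * cache, so updated PTEs must be cleaned out to memory.  A minimal
 * sketch:
 *
 *	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
 *	PTE_SYNC(pte);
 *
 * Note the "<< 2" in the _RANGE forms assumes sizeof(pt_entry_t) == 4.
 */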

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pdei(v)		((((v)) & L1_S_FRAME) >> L1_S_SHIFT)
#define	pmap_pde(m, v)		(&((m)->pm_pdir[pmap_pdei(v)]))

#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))


/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
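
/*
 * Worked example (assuming the usual arm32 KERNEL_BASE of 0xc0000000
 * and a 16K L1 table): KERNEL_BASE >> L1_S_SHIFT == 0xc00 slots lie
 * below the kernel, each a 4-byte pd_entry_t, so
 *
 *	KERNEL_PD_SIZE == 16384 - 0xc00 * 4 == 4096 bytes
 *
 * i.e. the kernel owns the top quarter of the L1 table.
 */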

/************************* ARM MMU configuration *****************************/

#if ARM_MMU_GENERIC == 1
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define	PMAP_CACHE_VIVT

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif ARM_MMU_GENERIC == 1
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
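
/*
 * Putting the pieces together: a PTE is composed from a PROTO
 * template, the physical address, protection bits, and a cache mode.
 * A minimal sketch for a cacheable, writable kernel small-page
 * mapping (illustrative only; the authoritative composition is in
 * pmap.c):
 *
 *	*pte = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(pte);
 */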

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */