/*	$NetBSD: pmap.h,v 1.16.2.4 2002/04/01 07:39:11 nathanw Exp $	*/

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpufunc.h>
#include <arm/arm32/pte.h>
#include <uvm/uvm_object.h>

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The L2 tables are mapped linearly starting at PTE_BASE.  PTE_BASE
 * is below KERNEL_BASE, which means that the current process's PTEs
 * are always available starting at PTE_BASE.  Another region of KVA
 * above KERNEL_BASE, APTE_BASE, is reserved for mapping in the PTEs
 * of another process, should we need to manipulate them.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	PTE_BASE
 *	.
 *	.
 *	.
 *	0x00000000
 */
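
/*
 * Worked example of the linear L2 mapping (illustrative only; the actual
 * PTE_BASE value is machine-dependent): with 4KB pages and 4-byte L2 PTEs,
 * the PTE that maps virtual address 0xc0123000 in the current context
 * lives at
 *
 *	PTE_BASE + (0xc0123000 >> 12) * sizeof(pt_entry_t)
 *	    == PTE_BASE + 0xc0123 * 4
 *	    == PTE_BASE + 0x30048c
 *
 * which is exactly what the vtopte() macro below computes.
 */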

/*
 * The pmap structure itself.
 */
struct pmap {
	struct uvm_object	pm_obj;		/* uvm_object */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap)	pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	struct l1pt		*pm_l1pt;	/* L1 table metadata */
	paddr_t			pm_pptpt;	/* PA of pt's page table */
	vaddr_t			pm_vptpt;	/* VA of pt's page table */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* recently used PT */
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	(((pg)->mdpage.pvh_attrs & PT_M) != 0)
#define	pmap_is_referenced(pg)	(((pg)->mdpage.pvh_attrs & PT_H) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
vaddr_t	pmap_map(vaddr_t, vaddr_t, vaddr_t, int);
void	pmap_procwr(struct proc *, vaddr_t, int);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, pv_addr_t);
void	pmap_debug(int);
int	pmap_handled_emulation(struct pmap *, vaddr_t);
int	pmap_modified_emulation(struct pmap *, vaddr_t);
void	pmap_postinit(void);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
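
/*
 * Illustrative sketch of how a port's early bootstrap code might use the
 * routines above; the names and addresses here are examples only, not part
 * of this interface:
 *
 *	pv_addr_t kernel_l1pt, kernel_pt;
 *
 *	// Hang an L2 table off the L1 slot that covers some_va.
 *	pmap_link_l2pt(kernel_l1pt.pv_va, some_va, &kernel_pt);
 *
 *	// Map a device register page, uncached.
 *	pmap_map_entry(kernel_l1pt.pv_va, regs_va, regs_pa,
 *	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
 *
 *	// Map the kernel image with cacheable mappings, using 1MB
 *	// sections where alignment allows.
 *	pmap_map_chunk(kernel_l1pt.pv_va, kernel_va, kernel_pa,
 *	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 */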

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
#define	vtopte(va) \
	(((pt_entry_t *)PTE_BASE) + arm_btop((vaddr_t) (va)))

/* Virtual address to physical address */
#define	vtophys(va) \
	((*vtopte(va) & PG_FRAME) | ((vaddr_t) (va) & ~PG_FRAME))

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_MASK) == L1_SECTION)
#define	l1pte_page_p(pde)	(((pde) & L1_MASK) == L1_PAGE)
#define	l1pte_fpage_p(pde)	(((pde) & L1_MASK) == L1_FPAGE)

#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & PG_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pdei(v)		((v & PD_MASK) >> PDSHIFT)
#define	pmap_pde(m, v)		(&((m)->pm_pdir[pmap_pdei(v)]))

#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))
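
/*
 * Example (a sketch, not part of the pmap interface): looking up the
 * physical address behind a mapped kernel virtual address with the
 * macros above.
 *
 *	pd_entry_t *pde = pmap_pde(pmap_kernel(), va);
 *
 *	if (pmap_pde_page(pde) && pmap_pte_v(vtopte(va))) {
 *		// L1 slot points at an L2 table and the L2 PTE is valid.
 *		paddr_t pa = pmap_pte_pa(vtopte(va)) | (va & ~PG_FRAME);
 *		...
 *	}
 *
 * Since vtopte() goes through PTE_BASE, a lookup below KERNEL_BASE
 * reflects whichever VM context is currently running.
 */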


/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(PD_SIZE - (KERNEL_BASE >> PDSHIFT) * sizeof(pd_entry_t))
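
/*
 * For instance (values here are the common arm32 ones, shown only for
 * illustration): with 4-byte L1 entries, PDSHIFT == 20, PD_SIZE == 16KB
 * (4096 entries) and KERNEL_BASE == 0xf0000000, this works out to
 *
 *	16384 - (0xf0000000 >> 20) * 4 == 16384 - 3840 * 4 == 1024 bytes,
 *
 * i.e. the last 256 L1 entries, covering the top 256MB of KVA.
 */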

/*
 * tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */

#define	PMAP_CACHE_VIVT

#endif	/* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */