/*	$NetBSD: pmap.h,v 1.16.2.3 2002/02/28 04:07:35 nathanw Exp $	*/

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpufunc.h>
#include <arm/arm32/pte.h>
#include <uvm/uvm_object.h>

/*
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xf0000000)
 *
 * PDE#s	VA range		usage
 * 0->3835	0x0 -> 0xefc00000	user address space
 * 3836->3839	0xefc00000->		recursive mapping of PDP (used for
 *		0xf0000000		linear mapping of PTPs)
 * 3840->3851	0xf0000000->		kernel text address space (constant
 *		0xf0c00000		across all pmaps/processes)
 * 3852->3855	0xf0c00000->		"alternate" recursive PDP mapping
 *		0xf1000000		(for other pmaps)
 * 3856->4095	0xf1000000->		KVM and device mappings, constant
 *		0x00000000		across all pmaps
 *
 * the maths works out such that mapping each 1MB block into 4KB pages
 * requires 256 entries, of 4 bytes each, totaling 1KB per 1MB.  however,
 * as we use 4KB pages, we allocate 4 PDEs at a time, giving them all the
 * same access permissions.  this means we only need 1024 entries in the
 * page table page table, i.e. we use one 4KB page to linearly map all
 * the other page tables used.
 */
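
/*
 * Worked example of the arithmetic above (illustration only; the
 * concrete numbers assume 1MB sections, i.e. PDSHIFT == 20, and 4KB
 * pages):
 *
 *	4096 L2 tables x 1KB each	= 4MB of L2 tables in total
 *	4MB / 4KB per page		= 1024 pages of L2 tables
 *	1024 PTEs x 4 bytes		= 4KB, i.e. one page of PTEs
 *
 * so a single 4KB "page table page table" suffices to map every L2
 * table linearly, which is exactly what the 4MB recursive window at
 * PDE#s 3836->3839 provides.
 */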

/*
 * Data structures used by pmap
 */

/*
 * Structure that describes a Level 1 page table and the flags
 * associated with it.
 */
struct l1pt {
	SIMPLEQ_ENTRY(l1pt)	pt_queue;	/* Queue pointers */
	struct pglist		pt_plist;	/* Allocated page list */
	vaddr_t			pt_va;		/* Allocated virtual address */
	int			pt_flags;	/* Flags */
};
#define	PTFLAG_STATIC	1	/* Statically allocated */
#define	PTFLAG_KPT	2	/* Kernel pt's are mapped */
#define	PTFLAG_CLEAN	4	/* L1 is clean */

/*
 * The pmap structure itself.
 */
struct pmap {
	struct uvm_object	pm_obj;		/* uvm_object */
#define	pm_lock	pm_obj.vmobjlock
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	struct l1pt		*pm_l1pt;	/* L1 descriptor */
	paddr_t			pm_pptpt;	/* PA of pt's page table */
	vaddr_t			pm_vptpt;	/* VA of pt's page table */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap *pmap_t;

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * at which it is mapped.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 *
 * pv_entry's are only visible within pmap.c, so only provide a
 * placeholder here.
 */

struct pv_entry;

struct pv_head {
	struct simplelock	pvh_lock;	/* locks every pv on this list */
	struct pv_entry		*pvh_list;	/* head of list (locked by pvh_lock) */
};
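
/*
 * A sketch of how a pv list is walked (illustration only: pv_entry's
 * layout is private to pmap.c, so the pv_next, pv_pmap and pv_va
 * fields named below are hypothetical):
 *
 *	struct pv_entry *pv;
 *
 *	simple_lock(&pvh->pvh_lock);
 *	for (pv = pvh->pvh_list; pv != NULL; pv = pv->pv_next)
 *		... operate on the <pv->pv_pmap, pv->pv_va> mapping ...
 *	simple_unlock(&pvh->pvh_lock);
 */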

/*
 * Page hooks.  I'll eliminate these sometime soon :-)
 *
 * For speed we store both the virtual address and the page table
 * entry address for each page hook.
 */
typedef struct {
	vaddr_t		va;
	pt_entry_t	*pte;
} pagehook_t;

/*
 * Physical / virtual address structure.  In a number of places
 * (particularly during bootstrapping) we need to keep track of the
 * physical and virtual addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1

/*
 * _KERNEL specific macros, functions and prototypes
 */

#ifdef _KERNEL

/*
 * Commonly referenced structures
 */
extern struct pv_entry	*pv_table;	/* Phys to virt mappings, per page. */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_phys_address(ppn)		(arm_page_to_byte((ppn)))

/*
 * Functions that we need to export
 */
extern vaddr_t	pmap_map __P((vaddr_t, vaddr_t, vaddr_t, int));
extern void	pmap_procwr __P((struct proc *, vaddr_t, int));
#define	PMAP_NEED_PROCWR

/*
 * Functions we use internally
 */
void	pmap_bootstrap __P((pd_entry_t *, pv_addr_t));
void	pmap_debug __P((int));
int	pmap_handled_emulation __P((struct pmap *, vaddr_t));
int	pmap_modified_emulation __P((struct pmap *, vaddr_t));
void	pmap_postinit __P((void));
pt_entry_t *pmap_pte __P((struct pmap *, vaddr_t));

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
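
/*
 * Typical bootstrap usage of the routines above (a sketch; the l1pt,
 * va, pa and size values are placeholders): map one device register
 * page uncached, then a chunk of RAM cached, using the PTE_* modes
 * defined earlier.
 *
 *	pmap_map_entry(l1pt, va, pa,
 *	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
 *	pmap_map_chunk(l1pt, va, pa, size,
 *	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 */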

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

#endif /* _KERNEL */

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
#define	vtopte(va) \
	((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE + \
	(arm_byte_to_page((unsigned int)(va)) << 2)))

/* Virtual address to physical address */
#define	vtophys(va) \
	((*vtopte(va) & PG_FRAME) | ((unsigned int)(va) & ~PG_FRAME))
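
/*
 * Worked example for vtophys() (values for illustration only; 4KB
 * pages assumed): for va = 0xf1234567 the page number is 0xf1234 and
 * the page offset is 0x567.  If the PTE found via vtopte(va) names
 * physical frame 0x01860000, then
 *
 *	vtophys(0xf1234567) == 0x01860000 | 0x567 == 0x01860567
 */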

/* L1 and L2 page table macros */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[((vaddr_t)(v) >> PDSHIFT) & 4095]))
#define	pmap_pte_pa(pte)	(*(pte) & PG_FRAME)
#define	pmap_pde_v(pde)		(*(pde) != 0)
#define	pmap_pde_section(pde)	((*(pde) & L1_MASK) == L1_SECTION)
#define	pmap_pde_page(pde)	((*(pde) & L1_MASK) == L1_PAGE)
#define	pmap_pde_fpage(pde)	((*(pde) & L1_MASK) == L1_FPAGE)

#define	pmap_pte_v(pte)		(*(pte) != 0)

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(PD_SIZE - (KERNEL_SPACE_START >> PDSHIFT) * sizeof(pd_entry_t))
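
/*
 * Worked example for KERNEL_PD_SIZE (illustration only, assuming
 * PD_SIZE is 16KB -- 4096 4-byte PDEs -- PDSHIFT is 20, and
 * KERNEL_SPACE_START is 0xf0000000 as in the layout comment above):
 *
 *	KERNEL_PD_SIZE = 16384 - (0xf0000000 >> 20) * 4
 *	               = 16384 - 3840 * 4
 *	               = 1024 bytes
 *
 * i.e. 256 PDEs covering the top 256MB of the address space.
 */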

/*
 * tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */

#define	PMAP_CACHE_VIVT

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */