/*	$NetBSD: pmap.h,v 1.2.2.2 2007/10/23 20:36:41 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
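
/*
 * Worked example (illustrative only, assuming the usual amd64
 * constants L1_SHIFT=12, L2_SHIFT=21, L3_SHIFT=30, L4_SHIFT=39):
 * for va = 0xffff800000001000, VA_SIGN_POS() strips the sign
 * extension, leaving 0x0000800000001000, so
 *
 *	pl4_i(va) == 0x0000800000001000 >> 39 == 256
 *
 * i.e. the PML4 slot, while pl1_i(va) indexes the flat virtual
 * array of every PTE: PTE_BASE[pl1_i(va)] is the PTE mapping va.
 */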

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
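
/*
 * Sketch of how this is typically used (example_ptp_lookup is a
 * hypothetical helper, not part of this header): a level-`lvl' PTP
 * lives in the per-level uvm_object at the byte offset computed by
 * ptp_va2o(), so it can be found with uvm_pagelookup().
 */
#if 0	/* illustrative only, not compiled */
static __inline struct vm_page *
example_ptp_lookup(struct pmap *pmap, vaddr_t va, int lvl)
{

	return (uvm_pagelookup(&pmap->pm_obj[lvl - 1], ptp_va2o(va, lvl)));
}
#endif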

#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */
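
/*
 * Sketch (assumption: each CPU's bit in pm_cpus is 1 << cpu_number(),
 * consistent with the 32-CPU limit noted in the XXX comment above):
 */
#if 0	/* illustrative only, not compiled */
	if (pmap->pm_cpus & (1U << cpu_number()))
		/* pmap is live on this CPU; any remote CPU with its
		   bit set must be included in a TLB shootdown */ ;
#endif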

/*
 * for each managed physical page we maintain a list of <PMAP,VA>
 * pairs at which it is mapped.  the list is headed by a pv_head
 * structure.  there is one pv_head per managed phys page (allocated
 * at boot time).  the pv_head structure points to a list of pv_entry
 * structures (each describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	SPLAY_ENTRY(pv_entry) pv_node;	/* splay-tree node */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
	struct pmap_cpu *pv_alloc_cpu;	/* CPU allocated from */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
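
/*
 * Worked example (illustrative LP64 sizes only): with PAGE_SIZE ==
 * 4096, sizeof(struct pv_entry) == 48 (two splay-tree pointers plus
 * four pointer-sized members) and sizeof(struct pv_page_info) == 32,
 * PVE_PER_PVPAGE works out to (4096 - 32) / 48 == 84 pv_entrys per
 * pv_page.
 */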

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */
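
/*
 * Sketch: pmap_valid_entry() applies to PDEs and PTEs alike, so a
 * manual two-level walk can bail out at either level.
 */
#if 0	/* illustrative only, not compiled */
	if (!pmap_valid_entry(L2_BASE[pl2_i(va)]))
		return (false);		/* no PTP -> va not mapped */
	if (!pmap_valid_entry(PTE_BASE[pl1_i(va)]))
		return (false);		/* PTP present, PTE invalid */
#endif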


/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);
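
/*
 * Typical call pattern (sketch): queue invalidations for a VA range
 * along with the old PTE bits, then block until the other CPUs have
 * acknowledged them before treating the old mapping as dead.
 */
#if 0	/* illustrative only, not compiled */
	pmap_tlb_shootdown(pmap, sva, eva, opte);
	pmap_tlb_shootwait();
#endif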

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __attribute__((__unused__))
pmap_update_pg(vaddr_t va)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
		invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __attribute__((__unused__))
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
	{
		invlpg(va);
		invlpg(vb);
	}
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}
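
/*
 * Usage sketch: pmap_page_protect(pg, VM_PROT_READ) write-protects
 * every recorded mapping of pg by clearing PG_RW, while
 * pmap_page_protect(pg, VM_PROT_NONE) removes the mappings outright
 * via pmap_page_remove().  A request that would add permission is a
 * no-op here.
 */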

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}
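
/*
 * Usage sketch: downgrading a range to read-only goes through
 * pmap_write_protect(); revoking all access tears the range down.
 */
#if 0	/* illustrative only, not compiled */
	pmap_protect(pmap, sva, eva, VM_PROT_READ);	/* clear PG_RW */
	pmap_protect(pmap, sva, eva, VM_PROT_NONE);	/* remove range */
#endif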

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __attribute__((__unused__))
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __attribute__((__unused__))
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
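
/*
 * Sketch (assumes a normal 4KB mapping; for a PG_PS superpage
 * kvtopte() returns the PDE itself and the offset math differs):
 * recovering the physical address behind a kernel VA by hand.
 * pmap_extract()/vtophys() is the supported interface for this.
 */
#if 0	/* illustrative only, not compiled */
	pt_entry_t *pte = kvtopte(va);

	if (pmap_valid_entry(*pte))
		pa = (*pte & PG_FRAME) | (va & PAGE_MASK);
#endif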

paddr_t	vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */