/*	$NetBSD: pmap.h,v 1.6 2007/12/09 20:27:48 jmcneill Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))

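/*
 * Illustrative sketch (not part of the API): on a 4-level pmap,
 * pl_i(va, 4) == pl4_i(va) selects the top-level PDE slot for a VA,
 * and pl_i(va, 1) == pl1_i(va) is that VA's index into the linear
 * PTE array, so for example:
 *
 *	pt_entry_t *pte = PTE_BASE + pl1_i(va);	(cf. vtopte() below)
 */
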
/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)

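/*
 * Illustrative sketch: for a level-1 PTP the macro reduces to
 * ptp_va2o(va, 1) == pl2_i(va) * PAGE_SIZE, i.e. the PTP holding the
 * PTEs for a VA sits at the byte offset selected by that VA's
 * level-2 (PDE) index.
 */
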
#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
};

/* pm_flags */
#define PMF_USER_LDT	0x01	/* pmap has user-set LDT */
#define PMF_USER_XPIN	0x02	/* pmap pdirpa is pinned (Xen) */
#define PMF_USER_RELOAD	0x04	/* reload user pmap on PTE unmap (Xen) */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	SPLAY_ENTRY(pv_entry) pv_node;	/* splay-tree node */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entrys in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

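/*
 * Worked example (the sizes are illustrative assumptions, not fixed
 * by this header): with PAGE_SIZE == 4096, and on LP64 say
 * sizeof(struct pv_page_info) == 32 and sizeof(struct pv_entry) == 40,
 * PVE_PER_PVPAGE is (4096 - 32) / 40 == 101 pv_entrys per pv_page.
 */
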
/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define pmap_kernel()			(&kernel_pmap_store)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */
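
/*
 * Illustrative sketch (hypothetical caller): MI code typically
 * consumes the attribute macros like this when cleaning a managed
 * page:
 *
 *	if (pmap_is_modified(pg)) {
 *		... flush the page to backing store ...
 *		pmap_clear_modify(pg);
 *	}
 */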

/*
 * prototypes
 */

void	pmap_activate(struct lwp *);
void	pmap_bootstrap(vaddr_t);
bool	pmap_clear_attrs(struct vm_page *, unsigned);
void	pmap_deactivate(struct lwp *);
void	pmap_page_remove(struct vm_page *);
void	pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool	pmap_test_attrs(struct vm_page *, unsigned);
void	pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void	pmap_load(void);
paddr_t	pmap_init_tmp_pgtbl(paddr_t);

vaddr_t	reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __attribute__((__unused__))
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __attribute__((__unused__))
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __attribute__((__unused__))
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __attribute__((__unused__))
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}

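/*
 * Illustrative sketch: translating a kernel VA by hand with the
 * helpers above (4 KB page case; when kvtopte() returns a PG_PS
 * superpage PDE the offset arithmetic differs):
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	if (pmap_valid_entry(*pte))
 *		pa = (*pte & PG_FRAME) | (va & (PAGE_SIZE - 1));
 */
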
paddr_t	vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);

#ifdef XEN

#define XPTE_MASK	L1_FRAME
#define XPTE_SHIFT	9

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to.  We go through the
 * recursive PTE mappings (i.e. the hardware MMU), so this works only
 * for PTEs at levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	/*
	 * Push the PTE's own VA one level up through the recursive
	 * mapping; the result is the VA of the PTE that maps the page
	 * 'pte' lives in.
	 */
	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	/* machine frame of that page, plus the offset of 'pte' in it */
	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/*
 * xpmap_update()
 * Update an active pt entry with Xen
 * Equivalent to *pte = npte
 */

static __inline void
xpmap_update(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();

	xpq_queue_pte_update((pt_entry_t *) xpmap_ptetomach(pte), npte);
	xpq_flush_queue();
	splx(s);
}

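/*
 * Illustrative sketch: under Xen a direct PTE store such as
 * "*vtopte(va) = npte" must instead be queued through the hypervisor:
 *
 *	xpmap_update(vtopte(va), npte);
 */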

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
paddr_t	vtomach(vaddr_t);

#endif	/* XEN */

/*
 * Hooks for the pool allocator.
 */
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */