/*	$NetBSD: pmap.h,v 1.1.4.2 2007/10/25 23:59:23 bouyer Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
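
/*
 * Illustrative sketch (assuming the usual amd64 constants, e.g.
 * L1_SHIFT == 12 and 512 entries per page): pl1_pi(va) keeps only the
 * nine L1 index bits, i.e. the PTE slot inside a single PT page, while
 * pl1_i(va) keeps every index bit above the page offset and so indexes
 * the flat virtual array of all L1 PTEs mapped at PTE_BASE.  pl_i() is
 * the same lookup with the level chosen at run time:
 *
 *	pt_entry_t *pte = PTE_BASE + pl1_i(va);
 *	KASSERT(pl_i(va, 1) == pl1_i(va));
 */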

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
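
/*
 * Illustrative sketch: the PTP that maps "va" at level "lvl" is selected
 * by the level-(lvl+1) index, so its byte offset in the level-"lvl" PTE
 * space is that index times PAGE_SIZE; e.g. for the L1 PTP covering va,
 *
 *	off = ptp_va2o(va, 1);
 *
 * expands to pl_i(va, 2) * PAGE_SIZE.
 */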

#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
};
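
/*
 * Locking sketch (illustrative only; pmap.c is authoritative): pm_lock
 * aliases the lock of vm object 0, so code touching the page tables or
 * statistics of "pmap" would bracket the access roughly as:
 *
 *	simple_lock(&pmap->pm_lock);
 *	(use pmap->pm_pdir, update pmap->pm_stats, ...)
 *	simple_unlock(&pmap->pm_lock);
 *
 * The remaining object locks (pm_obj[1] and up) are taken only around
 * uvm_pagealloc() for the higher-level PTPs, and only with pm_lock held.
 */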

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */
#define	PMF_USER_XPIN	0x02	/* pmap pdirpa is pinned (Xen) */
#define	PMF_USER_RELOAD	0x04	/* reload user pmap on PTE unmap (Xen) */


/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	SPLAY_ENTRY(pv_entry) pv_node;	/* splay-tree node */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
	struct pmap_cpu *pv_alloc_cpu;	/* CPU allocated from */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
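
/*
 * Sizing sketch (illustrative, assuming an LP64 port with 4 KB pages):
 * struct pv_entry is 48 bytes (two splay pointers plus four pointer-
 * sized fields) and struct pv_page_info pads out to 32 bytes, so
 * PVE_PER_PVPAGE evaluates to (4096 - 32) / 48 == 84; one pv_page thus
 * hands out 84 pv_entrys, with free entries headed by pvpi_pvfree and
 * counted by pvpi_nfree, before another page must be allocated.
 */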

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);

vaddr_t		reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void		pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void		pmap_tlb_shootwait(void);
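
/*
 * Usage sketch (illustrative; the real callers are in pmap.c): after
 * changing a PTE that other CPUs may hold in their TLBs, the caller
 * queues a remote flush for the affected range and waits for all CPUs
 * to process it before the old page may be reused ("opte" stands for
 * the previous PTE value, which lets the shootdown code skip mappings
 * that were never valid or used):
 *
 *	pmap_tlb_shootdown(pmap, va, va + PAGE_SIZE, opte);
 *	pmap_tlb_shootwait();
 */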

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __attribute__((__unused__))
pmap_update_pg(vaddr_t va)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
		invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __attribute__((__unused__))
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
	{
		invlpg(va);
		invlpg(vb);
	}
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}
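
/*
 * Illustrative example: pmap_protect(pmap, sva, eva, VM_PROT_READ)
 * write-protects the range via pmap_write_protect(), while
 * pmap_protect(pmap, sva, eva, VM_PROT_NONE) tears the mappings down
 * via pmap_remove().  A call that keeps VM_PROT_WRITE is a no-op here,
 * because regaining access is handled lazily at fault time.
 */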

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __attribute__((__unused__))
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __attribute__((__unused__))
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
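
/*
 * Usage sketch (illustrative): translating a normal (non-superpage)
 * kernel VA by hand with the inlines above:
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	if (pmap_valid_entry(*pte))
 *		pa = (*pte & PG_FRAME) + (va & PAGE_MASK);
 *
 * For a PG_PS superpage kvtopte() returns the PDE itself cast to a
 * pt_entry_t pointer, so callers that can encounter large pages (as
 * vtophys() below must) need to check PG_PS before doing PTE-style
 * arithmetic.
 */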

paddr_t	vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);


#ifdef XEN

#define XPTE_MASK	L1_FRAME
#define XPTE_SHIFT	9

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to by "pte".
 * The lookup goes through the recursive mapping in the hardware MMU,
 * so it works only for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}
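
/*
 * Worked example (illustrative, assuming 512 8-byte PTEs per page, so
 * XPTE_SHIFT == 9): masking a pte pointer with XPTE_MASK and shifting
 * right by 9 divides its offset within the recursive PTE area by the
 * number of PTEs per page while keeping pt_entry_t granularity; rebased
 * at PTE_BASE, this lands on the L1 PTE that maps the page table page
 * containing "pte".  Its PG_FRAME bits give the machine frame, and the
 * byte offset of "pte" within its page is added back to form the full
 * machine address.
 */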

/*
 * xpmap_update()
 * Update an active pt entry with Xen
 * Equivalent to *pte = npte
 */

static __inline void
xpmap_update(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();

	xpq_queue_pte_update((pt_entry_t *)xpmap_ptetomach(pte), npte);
	xpq_flush_queue();
	splx(s);
}
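
/*
 * Usage sketch (illustrative): under Xen the page tables are mapped
 * read-only, so a direct "*pte = npte" store would fault; instead the
 * write is queued to the hypervisor at the PTE's machine address:
 *
 *	xpmap_update(pte, npte);
 *
 * which also flushes the request queue before returning.
 */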


/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
paddr_t	vtomach(vaddr_t);

#endif	/* XEN */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif	/* _KERNEL */

#endif	/* _X86_PMAP_H_ */