/*	$NetBSD: pmap.h,v 1.76 2018/03/04 10:13:08 jdolecek Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
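
/*
 * Usage sketch (illustrative only, not part of the interface): with the
 * recursive PDE mappings, the level-N entry covering a VA is found by
 * indexing the corresponding virtual PDE array with pl<N>_i(), e.g.
 *
 *	pd_entry_t pde = L2_BASE[pl2_i(va)];	-- L2 PDE mapping va
 *	pt_entry_t pte = PTE_BASE[pl1_i(va)];	-- PTE mapping va
 *
 * which is exactly what kvtopte()/vtopte() below do.  pl_i_roundup() gives
 * pl_i() of the first level-lvl boundary at or above va (unchanged when va
 * is already aligned), for callers that need to round a range up to whole
 * entries.
 */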

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
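
/*
 * Illustrative example (not part of the interface): the level-1 PTP that
 * holds the PTE for va is the page the L2 PDE points to, so its index is
 * pl_i(va, 2) and
 *
 *	ptp_va2o(va, 1) == pl_i(va, 2) * PAGE_SIZE
 *
 * is its byte offset within the level-1 PTE space, i.e. the PTP starts at
 * (char *)PTE_BASE + ptp_va2o(va, 1) in the recursive mapping.
 */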

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>
#include <uvm/pmap/pmap_pvt.h>

#define BTSEG_NONE	0
#define BTSEG_TEXT	1
#define BTSEG_RODATA	2
#define BTSEG_DATA	3
#define BTSPACE_NSEGS	64

struct bootspace {
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} head;

	/* Kernel segments. */
	struct {
		int type;
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} segs[BTSPACE_NSEGS];

	/*
	 * The area used by the early kernel bootstrap. It contains the kernel
	 * symbols, the preloaded modules, the bootstrap tables, and the ISA
	 * I/O mem.
	 */
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} boot;

	/* A magic VA usable by the bootstrap code. */
	vaddr_t spareva;

	/* Virtual address of the page directory. */
	vaddr_t pdir;

	/* End of the area dedicated to kernel modules (amd64 only). */
	vaddr_t emodule;
};

#ifndef MAXGDTSIZ
#define MAXGDTSIZ 65536 /* XXX */
#endif

struct pcpu_entry {
	uint8_t gdt[MAXGDTSIZ];
	uint8_t tss[PAGE_SIZE];
	uint8_t ist0[PAGE_SIZE];
	uint8_t ist1[PAGE_SIZE];
	uint8_t ist2[PAGE_SIZE];
	uint8_t rsp0[2 * PAGE_SIZE];
} __packed;

struct pcpu_area {
#ifdef SVS
	uint8_t utls[PAGE_SIZE];
#endif
	uint8_t idt[PAGE_SIZE];
	uint8_t ldt[PAGE_SIZE];
	struct pcpu_entry ent[MAXCPUS];
} __packed;

extern struct pcpu_area *pcpuarea;

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;		/* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define	pm_lock pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					   ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
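
/*
 * Illustrative sketch (not compiled): pmap_pdirpa() yields the physical
 * address of PDE slot `index' in a pmap's page directory.  Without PAE the
 * directory is a single page, so the slot lives at
 *
 *	pm_pdirpa[0] + index * sizeof(pd_entry_t)
 *
 * With PAE the directory spans PDP_SIZE (4) pages, so l2tol3(index) picks
 * which of the four pages holds the slot and l2tol2(index) is the slot's
 * position within that page.
 */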

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 *   value associated to the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
 *   the L3 PD, it cannot be considered as the equivalent of a %cr3 any more.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;

extern pd_entry_t pmap_pg_g;		/* do we support PG_G? */
extern pd_entry_t pmap_pg_nx;		/* do we support PG_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)		__USE(L)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1
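
/*
 * Sketch of the encoding (an assumption drawn from the definitions above,
 * not taken from pmap.c): the mmap cookie is a page number whose bits at
 * and above X86_MMAP_FLAG_SHIFT are unused by any valid physical address,
 * so up to four MD flag bits (X86_MMAP_FLAG_MASK) can be carried there,
 * roughly
 *
 *	flags = (cookie >> X86_MMAP_FLAG_SHIFT) & X86_MMAP_FLAG_MASK;
 *
 * X86_MMAP_FLAG_PREFETCH requests a prefetchable (write-combining)
 * mapping.  pmap_phys_address() converts the cookie back to a physical
 * address with the flag bits stripped off.
 */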

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
bool		pmap_pv_clear_attrs(paddr_t, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_pv_remove(paddr_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_cleanup(struct lwp *);
void		pmap_ldt_sync(struct pmap *);
void		pmap_kremove_local(vaddr_t, vsize_t);

void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void		pmap_emap_remove(vaddr_t, vsize_t);
void		pmap_emap_sync(bool);

#define	__HAVE_PMAP_PV_TRACK	1
void		pmap_pv_init(void);
void		pmap_pv_track(paddr_t, psize_t);
void		pmap_pv_untrack(paddr_t, psize_t);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

#ifndef __HAVE_DIRECT_MAP
void		pmap_vpage_cpu_init(struct cpu_info *);
#endif

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);
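
/*
 * Usage sketch (illustrative, hypothetical caller; see pmap.c for the real
 * call sites): after changing or removing a PTE, the caller queues a
 * shootdown with the previous PTE value and a TLBSHOOT_* tag recording the
 * reason, then flushes the queue once the batch is done:
 *
 *	pmap_tlb_shootdown(pmap, va, opte, TLBSHOOT_REMOVE_PTE);
 *	...
 *	pmap_tlb_shootnow();
 */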

#define	__HAVE_PMAP_EMAP

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */
#define PMAP_FORK		/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_pv_protect: change the protection of all recorded mappings
 *	of an unmanaged page
 */

__inline static void __unused
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_pv_clear_attrs(pa, PG_RW);
		} else {
			pmap_pv_remove(pa);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
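
/*
 * Usage sketch (illustrative, hypothetical caller): translating a kernel
 * VA by hand through the recursive mapping.
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	if (pmap_valid_entry(*pte))
 *		pa = (*pte & PG_FRAME) + (va & PAGE_MASK);
 *
 * Beware that for a large-page mapping kvtopte() returns a pointer to the
 * PDE itself (the PG_PS case above), so a caller that cares about that
 * case must mask with the large-page frame mask instead of PG_FRAME.
 */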

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to.
 * We use the hardware MMU to look up the value, so this works only for
 * levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}
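
/*
 * How the lookup above works (explanatory note): `pte' itself lives in the
 * recursive (A)PTE window, so shifting its VA right by XPTE_SHIFT turns
 * the index bits into a byte offset within PTE_BASE; the entry found there
 * (up_pte) is the one the MMU used to map the page containing `pte', and
 * its PG_FRAME bits are that page's machine frame.  Adding back the offset
 * of `pte' within its page gives the machine address of the PTE.
 */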

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif	/* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
void	pmap_free_ptps(struct vm_page *);

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_PCPU_AREA
extern struct pcpu_area *pcpuarea;
#define PDIR_SLOT_PCPU		384
#define PMAP_PCPU_BASE		(VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
#endif

#ifdef __HAVE_DIRECT_MAP

extern vaddr_t pmap_direct_base;
extern vaddr_t pmap_direct_end;

#define L4_SLOT_DIRECT		456
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define NL4_SLOT_DIRECT		32

#define PMAP_DIRECT_DEFAULT_BASE (VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))

#define PMAP_DIRECT_BASE	pmap_direct_base
#define PMAP_DIRECT_END		pmap_direct_end

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
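
/*
 * Illustrative sketch (not compiled): with the direct map, any physical
 * address covered by the window can be accessed through a simple offset,
 * with no pmap_kenter_pa() needed, e.g.
 *
 *	void *p = (void *)PMAP_DIRECT_MAP(pa);
 *	memset(p, 0, PAGE_SIZE);
 *	pa = PMAP_DIRECT_UNMAP((vaddr_t)p);
 *
 * This is exactly how PMAP_MAP_POOLPAGE()/PMAP_UNMAP_POOLPAGE() below give
 * the pool allocator zero-cost page mappings.
 */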

/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif	/* _KERNEL */

#endif	/* _X86_PMAP_H_ */