/*	$NetBSD: pmap.h,v 1.76.2.3 2018/07/28 04:37:42 pgoyette Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)
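
/*
 * Illustrative example, not part of the original header: on amd64
 * (L1_SHIFT == 12, 9 index bits per level), the VA 0x0000000040201000
 * decomposes as pl4_pi(VA) == 0, pl3_pi(VA) == 1, pl2_pi(VA) == 1 and
 * pl1_pi(VA) == 1, i.e. each macro extracts one 9-bit slice of the VA.
 */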

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
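
/*
 * Illustrative example (amd64 assumed, as above): for
 * va == 0x0000000040201000, pl1_i(va) == 0x40201 -- an index into the
 * whole virtual PTE array, not just one ptp page -- while
 * pl_i(va, 2) == pl2_i(va) == 0x201.  pl_i_roundup() gives the index
 * of the next level boundary at or above va, here
 * pl_i_roundup(va, 2) == 0x202.
 */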

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
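
/*
 * Illustrative example (amd64 assumed): ptp_va2o(va, 1) is the byte
 * offset, within the virtual PTE space, of the L1 PTP that maps va;
 * for va == 0x0000000040201000 it is pl2_i(va) * PAGE_SIZE == 0x201000.
 */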

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>
#include <uvm/pmap/pmap_pvt.h>

#define BTSEG_NONE	0
#define BTSEG_TEXT	1
#define BTSEG_RODATA	2
#define BTSEG_DATA	3
#define BTSPACE_NSEGS	64

struct bootspace {
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} head;

	/* Kernel segments. */
	struct {
		int type;
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} segs[BTSPACE_NSEGS];

	/*
	 * The area used by the early kernel bootstrap. It contains the kernel
	 * symbols, the preloaded modules, the bootstrap tables, and the ISA
	 * I/O mem.
	 */
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} boot;

	/* A magic VA usable by the bootstrap code. */
	vaddr_t spareva;

	/* Virtual address of the page directory. */
	vaddr_t pdir;

	/* Area dedicated to kernel modules (amd64 only). */
	vaddr_t smodule;
	vaddr_t emodule;
};

#define SLSPACE_NONE	0
#define SLAREA_USER	1
#define SLAREA_PTE	2
#define SLAREA_MAIN	3
#define SLAREA_PCPU	4
#define SLAREA_DMAP	5
#define SLAREA_KERN	6
#define SLSPACE_NAREAS	7

struct slotspace {
	struct {
		size_t sslot;	/* start slot */
		size_t nslot;	/* # of slots */
		size_t mslot;	/* max # of slots */
		bool active;	/* area is active */
		bool dropmax;	/* !resizable */
	} area[SLSPACE_NAREAS];
};

#ifndef MAXGDTSIZ
#define MAXGDTSIZ 65536 /* XXX */
#endif

struct pcpu_entry {
	uint8_t gdt[MAXGDTSIZ];
	uint8_t tss[PAGE_SIZE];
	uint8_t ist0[PAGE_SIZE];
	uint8_t ist1[PAGE_SIZE];
	uint8_t ist2[PAGE_SIZE];
	uint8_t ist3[PAGE_SIZE];
	uint8_t rsp0[2 * PAGE_SIZE];
} __packed;

struct pcpu_area {
#ifdef SVS
	uint8_t utls[PAGE_SIZE];
#endif
	uint8_t idt[PAGE_SIZE];
	uint8_t ldt[PAGE_SIZE];
	struct pcpu_entry ent[MAXCPUS];
} __packed;

extern struct pcpu_area *pcpuarea;

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;	/* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define pm_lock pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					   ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
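
/*
 * Illustrative example, assuming PAE's four L2 pages of 512 8-byte
 * entries each: PD index 1536 lands in the fourth page, so
 * pmap_pdirpa(pmap, 1536) resolves to pm_pdirpa[3] with a zero byte
 * offset.  Without PAE every index is a plain offset from pm_pdirpa[0].
 */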

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 *   value associated with the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
 *   the L3 PD, it cannot be considered as the equivalent of a %cr3 any more.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;

extern pd_entry_t pmap_pg_g;		/* do we support PG_G? */
extern pd_entry_t pmap_pg_nx;		/* do we support PG_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)		__USE(L)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1
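
/*
 * Illustrative sketch, not taken from this file: a driver's d_mmap
 * routine can encode one of the flags above into the page number it
 * returns, e.g.
 *
 *	return x86_btop(pa) | (X86_MMAP_FLAG_PREFETCH << X86_MMAP_FLAG_SHIFT);
 *
 * (pa being a hypothetical device physical address); pmap_mmap_flags()
 * later recovers the flags and pmap_phys_address() the physical address.
 */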

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
bool		pmap_pv_clear_attrs(paddr_t, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_pv_remove(paddr_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_cleanup(struct lwp *);
void		pmap_ldt_sync(struct pmap *);
void		pmap_kremove_local(vaddr_t, vsize_t);

#define __HAVE_PMAP_PV_TRACK	1
void		pmap_pv_init(void);
void		pmap_pv_track(paddr_t, psize_t);
void		pmap_pv_untrack(paddr_t, psize_t);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

#ifndef __HAVE_DIRECT_MAP
void		pmap_vpage_cpu_init(struct cpu_info *);
#endif

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */
#define PMAP_FORK	/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool		pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 * of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}
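
/*
 * Usage note (follows directly from the code above):
 * pmap_page_protect(pg, VM_PROT_READ) write-protects every recorded
 * mapping of pg, pmap_page_protect(pg, VM_PROT_NONE) removes them all,
 * and any request that keeps VM_PROT_WRITE is a no-op.
 */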

/*
 * pmap_pv_protect: change the protection of all recorded mappings
 * of an unmanaged page
 */

__inline static void __unused
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_pv_clear_attrs(pa, PG_RW);
		} else {
			pmap_pv_remove(pa);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
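
/*
 * Illustrative sketch (assumes va is a valid, mapped kernel VA):
 *
 *	pt_entry_t pte = *kvtopte(va);
 *	if (pte & PG_PS) {
 *		... va is covered by a large (L2) page, and kvtopte()
 *		    returned a pointer to the PDE itself ...
 *	}
 *
 * vtopte() is the same lookup for user VAs, without the large-page case.
 */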

paddr_t vtophys(vaddr_t);
vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))
/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to.
 * We use the hardware MMU to do the lookup, so this works only for
 * levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif	/* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
void	pmap_free_ptps(struct vm_page *);

/*
 * Hooks for the pool allocator.
 */
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_PCPU_AREA
extern struct pcpu_area *pcpuarea;
#define PDIR_SLOT_PCPU		384
#define PMAP_PCPU_BASE	(VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
#endif

#ifdef __HAVE_DIRECT_MAP

extern vaddr_t pmap_direct_base;
extern vaddr_t pmap_direct_end;

#define L4_SLOT_DIRECT		456
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define NL4_SLOT_DIRECT		32

#define PMAP_DIRECT_DEFAULT_BASE (VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))

#define PMAP_DIRECT_BASE	pmap_direct_base
#define PMAP_DIRECT_END		pmap_direct_end

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
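
/*
 * Illustrative example: once the direct map is set up, physical
 * address 0x1000 is accessible at PMAP_DIRECT_MAP(0x1000), i.e.
 * pmap_direct_base + 0x1000, and PMAP_DIRECT_UNMAP() inverts the
 * translation.  The pool hooks below rely on this to map pool pages
 * with no PTE manipulation at all.
 */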

/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */