/*	$NetBSD: pmap.h,v 1.11 2007/08/29 23:38:03 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_AMD64_PMAP_H_
#define	_AMD64_PMAP_H_

#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_largepages.h"
#endif

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <machine/atomic.h>

#include <uvm/uvm_object.h>
#endif

/*
 * The x86_64 pmap module closely resembles the i386 one. It uses
 * the same recursive entry scheme, and the same alternate area
 * trick for accessing non-current pmaps. See the i386 pmap.h
 * for a description. The obvious difference is that 3 extra
 * levels of page table need to be dealt with. The level 1 page
 * table pages are at:
 *
 * l1: 0x00007f8000000000 - 0x00007fffffffffff     (39 bits, needs PML4 entry)
 *
 * The alternate space (PML4 slot 510) is at:
 *
 * l1: 0xffffff0000000000 - 0xffffff7fffffffff     (39 bits, needs PML4 entry)
 *
 * The rest is kept as physical pages in 3 UVM objects, and is
 * temporarily mapped for virtual access when needed.
 *
 * Note that address space is signed, so the layout for 48 bits is:
 *
 *  +---------------------------------+ 0xffffffffffffffff
 *  |                                 |
 *  | kernel image (KERNBASE, slot 511)|
 *  |                                 |
 *  +---------------------------------+ 0xffffff8000000000
 *  |                                 |
 *  |    alt.L1 table (PTE pages)     |
 *  |                                 |
 *  +---------------------------------+ 0xffffff0000000000
 *  ~                                 ~
 *  |                                 |
 *  |         Kernel Space            |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0xffff800000000000 = 0x0000800000000000
 *  |                                 |
 *  |      L1 table (PTE pages)       |
 *  |                                 |
 *  +---------------------------------+ 0x00007f8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         User Space              |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0x0000000000000000
 *
 * In other words, there is a 'VA hole' at 0x0000800000000000 -
 * 0xffff800000000000 which will trap, just as on, for example,
 * sparcv9.
 *
 * The unused space can be used if needed, but it adds a little more
 * complexity to the calculations.
 */
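
/*
 * For reference, a sketch of the arithmetic behind the two windows
 * (assuming NBPD_L4 == 1UL << L4_SHIFT, i.e. 512GB per PML4 slot):
 *
 *	PTE_BASE  = L4_SLOT_PTE * NBPD_L4
 *	          = 255 * 2^39              = 0x00007f8000000000
 *	APTE_BASE = VA_SIGN_NEG(L4_SLOT_APTE * NBPD_L4)
 *	          = VA_SIGN_NEG(510 * 2^39) = 0xffffff0000000000
 *
 * i.e. each window is one PML4 slot (512GB) wide; the alternate window
 * simply uses a second slot in the sign-extended upper half.
 */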

/*
 * The first generation of Hammer processors can use 48 bits of
 * virtual memory, and 40 bits of physical memory; later generations
 * support more. These defines can be changed to variable names
 * containing the # of bits, extracted from an extended cpuid
 * instruction (variables are harder to use during bootstrap,
 * though).
 */
#define VIRT_BITS	48
#define PHYS_BITS	40
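
/*
 * Illustrative only: the limits could be read at boot from extended
 * cpuid leaf 0x80000008, which returns the physical address bits in
 * EAX[7:0] and the virtual (linear) address bits in EAX[15:8].  A
 * minimal sketch (hypothetical helper, not part of this interface):
 *
 *	static __inline void
 *	cpu_get_addr_bits(int *vbits, int *pbits)
 *	{
 *		uint32_t eax, ebx, ecx, edx;
 *
 *		__asm volatile("cpuid"
 *		    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *		    : "0" (0x80000008));
 *		*pbits = eax & 0xff;
 *		*vbits = (eax >> 8) & 0xff;
 *	}
 */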

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
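
/*
 * Worked example (48-bit VA): the kernel half has bit 47 set and is
 * sign-extended through bits 63-48, so
 *
 *	VA_SIGN_NEG(0x0000ff0000000000) == 0xffffff0000000000
 *	VA_SIGN_POS(0xffffff0000000000) == 0x0000ff0000000000
 *
 * VA_SIGN_POS unconditionally clears bits 63-48; a fully general
 * version would have to sign-extend based on bit (VIRT_BITS - 1),
 * which is presumably what the XXX above refers to.
 */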

#define L4_SLOT_PTE		255
#define L4_SLOT_KERN		256
#define L4_SLOT_KERNBASE	511
#define L4_SLOT_APTE		510

#define PDIR_SLOT_KERN	L4_SLOT_KERN
#define PDIR_SLOT_PTE	L4_SLOT_PTE
#define PDIR_SLOT_APTE	L4_SLOT_APTE

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 *
 */

#define PTE_BASE  ((pt_entry_t *) (L4_SLOT_PTE * NBPD_L4))
#define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4))))

#define L1_BASE		PTE_BASE
#define AL1_BASE	APTE_BASE

#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L4_SLOT_PTE * NBPD_L3))
#define AL3_BASE ((pd_entry_t *)((char *)AL2_BASE + L4_SLOT_PTE * NBPD_L2))
#define AL4_BASE ((pd_entry_t *)((char *)AL3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L4_BASE + PDIR_SLOT_PTE)
#define APDP_PDE	(L4_BASE + PDIR_SLOT_APTE)

#define PDP_BASE	L4_BASE
#define APDP_BASE	AL4_BASE
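
/*
 * Worked example of the recursion (assuming the usual NBPD_* values
 * from pte.h): every extra pass through slot 255 narrows the window by
 * a factor of 512, so
 *
 *	L1_BASE = 0x00007f8000000000
 *	L2_BASE = L1_BASE + 255 * NBPD_L3 = 0x00007fbfc0000000
 *	L3_BASE = L2_BASE + 255 * NBPD_L2 = 0x00007fbfdfe00000
 *	L4_BASE = L3_BASE + 255 * NBPD_L1 = 0x00007fbfdfeff000
 *
 * L4_BASE is thus the VA at which the PML4 page itself is visible, and
 * PDP_PDE (== &L4_BASE[PDIR_SLOT_PTE]) is the recursive entry in it.
 */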

#define NKL4_MAX_ENTRIES	(unsigned long)1
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#define NKL2_KIMG_ENTRIES	8
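
/*
 * Rough sizing note (assuming NBPD_L2 == 2MB): the 8 L2 entries give
 * the kernel image a 16MB window, covered by a single L3 and a single
 * L4 entry.
 */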

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0	/* XXX */

#define NTOPLEVEL_PDES		(PAGE_SIZE / (sizeof (pd_entry_t)))

#define KERNSPACE		(NKL4_ENTRIES * NBPD_L4)

#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
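
/*
 * Example: for va == 0xffff800000000000 (the first kernel VA),
 * VA_SIGN_POS(va) == 0x0000800000000000, so
 *
 *	pl4_pi(va) == pl4_i(va) == 256		(L4_SLOT_KERN)
 *	pl1_pi(va) == 0				(first PTE within its PTP)
 *	pl1_i(va)  == 0x800000000		(index into the whole linear
 *						 PTE array; kvtopte() below
 *						 returns PTE_BASE + pl1_i(va))
 */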

#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }
#define APDES_INITIALIZER	{ AL2_BASE, AL3_BASE, AL4_BASE }

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that PAGE_SIZE == number of bytes in a PTP (4096 bytes == 512 entries)
 *           NBPD_L1 == number of bytes a PTP can map (2MB)
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
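
/*
 * e.g. ptp_va2o(va, 1) == pl_i(va, 2) * PAGE_SIZE: the byte offset of
 * the L1 PTP mapping va within the pmap's level 1 UVM object
 * (pm_obj_l1 below).
 */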

#define PTP_LEVELS	4

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTE's per cache line.  8 byte pte, 64-byte cache line
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8


#if defined(_KERNEL) && !defined(_LOCORE)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0. Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
#define pm_obj_l1 pm_obj[0]
#define pm_obj_l2 pm_obj[1]
#define pm_obj_l3 pm_obj[2]
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	u_int32_t pm_cpus;		/* mask of CPUs using pmap */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {                       /* locked by its list's pvh_lock */
        SPLAY_ENTRY(pv_entry) pv_node;  /* splay-tree node */
        struct pmap *pv_pmap;           /* the pmap */
        vaddr_t pv_va;                  /* the virtual address */
        struct vm_page *pv_ptp;         /* the vm_page of the PTP */
	struct pmap_cpu *pv_alloc_cpu;	/* CPU allocated from */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where PAGE_SIZE isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
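
/*
 * Back of the envelope (LP64, assuming no surprising padding):
 * sizeof(struct pv_page_info) is about 32 bytes and
 * sizeof(struct pv_entry) about 48 bytes, so PVE_PER_PVPAGE comes to
 * roughly (4096 - 32) / 48 == 84 entries per pv_page.
 */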

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */

extern paddr_t ptp_masks[];
extern int ptp_shifts[];
extern long nkptp[], nbpd[], nkptpmax[];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		ptob(ppn)
#define pmap_valid_entry(E) 		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate __P((struct lwp *));
void		pmap_bootstrap __P((vaddr_t));
bool		pmap_clear_attrs __P((struct vm_page *, unsigned));
void		pmap_deactivate __P((struct lwp *));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove  __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
bool		pmap_test_attrs __P((struct vm_page *, unsigned));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t,vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_changeprot_local(vaddr_t, vm_prot_t);

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown __P((pmap_t, vaddr_t, vaddr_t, pt_entry_t));
void	pmap_tlb_shootwait __P((void));
void	pmap_prealloc_lowmem_ptps __P((void));

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
bool		pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t *
vtopte(vaddr_t va)
{

	KASSERT(va < (L4_SLOT_KERN * NBPD_L4));

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t *
kvtopte(vaddr_t va)
{

	KASSERT(va >= (L4_SLOT_KERN * NBPD_L4));

#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		pde = L2_BASE + pl2_i(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + pl1_i(va));
}
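
/*
 * Typical use (sketch): look up the mapping of a kernel VA, e.g.
 *
 *	pt_entry_t pte = *kvtopte(va);
 *	paddr_t pa;
 *
 *	if (pmap_valid_entry(pte))
 *		pa = (pte & PG_FRAME) | (va & (PAGE_SIZE - 1));
 *
 * (with the caveat that a large-page PDE returned by kvtopte() keeps
 * its frame in 2MB units, so PG_FRAME is not the right mask there).
 */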

#define pmap_pte_set(p, n)		x86_atomic_testset_u64(p, n)
#define pmap_pte_setbits(p, b)		x86_atomic_setbits_u64(p, b)
#define pmap_pte_clearbits(p, b)	x86_atomic_clearbits_u64(p, b)
#define pmap_cpu_has_pg_n()		(1)
#define pmap_cpu_has_invlpg		(1)
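
/*
 * e.g. write-protecting a kernel mapping in place (sketch):
 *
 *	pmap_pte_clearbits(kvtopte(va), PG_RW);
 *	pmap_update_pg(va);
 */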

paddr_t vtophys __P((vaddr_t));
vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);

#if 0   /* XXXfvdl was USER_LDT, need to check if that can be supported */
void	pmap_ldt_cleanup __P((struct lwp *));
#define	PMAP_FORK
#endif /* USER_LDT */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL && !_LOCORE */
#endif	/* _AMD64_PMAP_H_ */