/*	$NetBSD: pmap.h,v 1.12 2007/09/27 01:10:11 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_AMD64_PMAP_H_
#define	_AMD64_PMAP_H_

#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_largepages.h"
#endif

#include <machine/pte.h>
#include <machine/segments.h>
#include <machine/atomic.h>
#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif

#include <uvm/uvm_object.h>
#endif

/*
 * The x86_64 pmap module closely resembles the i386 one. It uses
 * the same recursive entry scheme, and the same alternate area
 * trick for accessing non-current pmaps. See the i386 pmap.h
 * for a description. The obvious difference is that 3 extra
 * levels of page table need to be dealt with. The level 1 page
 * table pages are at:
 *
 * l1: 0x00007f8000000000 - 0x00007fffffffffff     (39 bits, needs PML4 entry)
 *
 * The alternate space is at:
 *
 * l1: 0xffffff8000000000 - 0xffffffffffffffff     (39 bits, needs PML4 entry)
 *
 * The rest is kept as physical pages in 3 UVM objects, and is
 * temporarily mapped for virtual access when needed.
 *
 * Note that address space is signed, so the layout for 48 bits is:
 *
 *  +---------------------------------+ 0xffffffffffffffff
 *  |                                 |
 *  |    alt.L1 table (PTE pages)     |
 *  |                                 |
 *  +---------------------------------+ 0xffffff8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         Kernel Space            |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0xffff800000000000 = 0x0000800000000000
 *  |                                 |
 *  |    L1 table (PTE pages)         |
 *  |                                 |
 *  +---------------------------------+ 0x00007f8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         User Space              |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0x0000000000000000
 *
 * In other words, there is a 'VA hole' at 0x0000800000000000 -
 * 0xffff800000000000 which will trap, just as on, for example,
 * sparcv9.
 *
 * The unused space can be used if needed, but it adds a little more
 * complexity to the calculations.
 */

/*
 * The first generation of Hammer processors can use 48 bits of
 * virtual memory, and 40 bits of physical memory. This will be
 * more for later generations. These defines can be changed to
 * variable names containing the # of bits, extracted from an
 * extended cpuid instruction (variables are harder to use during
 * bootstrap, though)
 */
#define VIRT_BITS	48
#define PHYS_BITS	40
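
/*
 * Illustrative sketch (a hypothetical helper, not part of the pmap
 * interface): the widths above could instead be read at run time from
 * extended CPUID leaf 0x80000008, where EAX[7:0] is the physical
 * address width and EAX[15:8] the linear (virtual) address width.
 * Kept under "#if 0" since, as noted above, variables are awkward to
 * use during bootstrap.
 */
#if 0	/* example only */
static __inline void
pmap_get_addr_widths(int *virt_bits, int *phys_bits)
{
	uint32_t eax, ebx, ecx, edx;

	/* CPUID leaf 0x80000008: address width report */
	__asm volatile("cpuid"
	    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
	    : "0" (0x80000008));
	*phys_bits = eax & 0xff;		/* EAX[7:0] */
	*virt_bits = (eax >> 8) & 0xff;		/* EAX[15:8] */
}
#endif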

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
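
/*
 * Worked example: the kernel half of the address space starts at the
 * canonical address 0xffff800000000000.  VA_SIGN_POS(0xffff800000000000)
 * == 0x0000800000000000, which is the form the pl*_i macros below
 * index with, and VA_SIGN_NEG(0x0000800000000000) == 0xffff800000000000
 * restores the sign-extended form.  A value with bit 47 set but bits
 * 63-48 clear (0x0000800000000000 used literally as a VA) is
 * non-canonical and faults: that is the 'VA hole' described above.
 */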

#define L4_SLOT_PTE		255
#define L4_SLOT_KERN		256
#define L4_SLOT_KERNBASE	511
#define L4_SLOT_APTE		510

#define PDIR_SLOT_KERN	L4_SLOT_KERN
#define PDIR_SLOT_PTE	L4_SLOT_PTE
#define PDIR_SLOT_APTE	L4_SLOT_APTE

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 *
 */

#define PTE_BASE  ((pt_entry_t *) (L4_SLOT_PTE * NBPD_L4))
#define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4))))

#define L1_BASE		PTE_BASE
#define AL1_BASE	APTE_BASE

#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L4_SLOT_PTE * NBPD_L3))
#define AL3_BASE ((pd_entry_t *)((char *)AL2_BASE + L4_SLOT_PTE * NBPD_L2))
#define AL4_BASE ((pd_entry_t *)((char *)AL3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L4_BASE + PDIR_SLOT_PTE)
#define APDP_PDE	(L4_BASE + PDIR_SLOT_APTE)

#define PDP_BASE	L4_BASE
#define APDP_BASE	AL4_BASE
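
/*
 * Worked derivation of the recursive bases above, using L4_SLOT_PTE ==
 * 255 and NBPD_L4/L3/L2/L1 == 512GB/1GB/2MB/4KB:
 *
 *	L1_BASE = 255 * NBPD_L4			= 0x00007f8000000000
 *	L2_BASE = L1_BASE + 255 * NBPD_L3	= 0x00007fbfc0000000
 *	L3_BASE = L2_BASE + 255 * NBPD_L2	= 0x00007fbfdfe00000
 *	L4_BASE = L3_BASE + 255 * NBPD_L1	= 0x00007fbfdfeff000
 *
 * Each level re-applies the recursive PML4 slot once more, so L4_BASE
 * is the VA at which the PML4 page maps itself and PDP_PDE
 * (== &L4_BASE[PDIR_SLOT_PTE]) is the recursive entry itself.
 */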

#define NKL4_MAX_ENTRIES	(unsigned long)1
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#define NKL2_KIMG_ENTRIES	8

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0	/* XXX */

#define NTOPLEVEL_PDES		(PAGE_SIZE / (sizeof (pd_entry_t)))

#define KERNSPACE		(NKL4_ENTRIES * NBPD_L4)

#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
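
/*
 * Worked example: for the kernel image base va == 0xffffffff80000000
 * (VA_SIGN_POS(va) == 0x0000ffff80000000):
 *
 *	pl4_pi(va) == 511	(L4_SLOT_KERNBASE)
 *	pl3_pi(va) == 510
 *	pl2_pi(va) == 0
 *	pl1_pi(va) == 0
 *
 * The flat-array forms accumulate the higher-level indexes, e.g.
 * pl3_i(va) == 511 * 512 + 510 == 262142, which is the position of
 * that L3 entry in the virtually mapped array of all L3 entries at
 * L3_BASE.
 */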

#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }
#define APDES_INITIALIZER	{ AL2_BASE, AL3_BASE, AL4_BASE }

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that PAGE_SIZE == number of bytes in a PTP (4096 bytes == 512 entries)
 *           NBPD == number of bytes a PTP can map (2MB for an L1 PTP)
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
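
/*
 * For example, ptp_va2o(va, 1) == pl2_i(va) * PAGE_SIZE: the byte
 * offset of the level 1 PTP that maps va, keyed by the L2 entry
 * pointing at it; pmap.c uses this offset to look the PTP up in the
 * matching pm_obj (see below).
 */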

#define PTP_LEVELS	4

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTE's per cache line.  8 byte pte, 64-byte cache line
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8


#if defined(_KERNEL) && !defined(_LOCORE)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0. Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
#define pm_obj_l1 pm_obj[0]
#define pm_obj_l2 pm_obj[1]
#define pm_obj_l3 pm_obj[2]
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	u_int32_t pm_cpus;		/* mask of CPUs using pmap */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {                       /* locked by its list's pvh_lock */
        SPLAY_ENTRY(pv_entry) pv_node;  /* splay-tree node */
        struct pmap *pv_pmap;           /* the pmap */
        vaddr_t pv_va;                  /* the virtual address */
        struct vm_page *pv_ptp;         /* the vm_page of the PTP */
	struct pmap_cpu *pv_alloc_cpu;	/* CPU allocated from */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where PAGE_SIZE isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
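
/*
 * Rough arithmetic, assuming 4KB pages and LP64 layouts of 32 bytes
 * for pv_page_info and 48 bytes for pv_entry:
 * PVE_PER_PVPAGE == (4096 - 32) / 48 == 84.
 */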

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */

extern paddr_t ptp_masks[];
extern int ptp_shifts[];
extern long nkptp[], nbpd[], nkptpmax[];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		ptob(ppn)
#define pmap_valid_entry(E) 		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate __P((struct lwp *));
void		pmap_bootstrap __P((vaddr_t));
bool		pmap_clear_attrs __P((struct vm_page *, unsigned));
void		pmap_deactivate __P((struct lwp *));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove  __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
bool		pmap_test_attrs __P((struct vm_page *, unsigned));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t,vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_changeprot_local(vaddr_t, vm_prot_t);

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown __P((pmap_t, vaddr_t, vaddr_t, pt_entry_t));
void	pmap_tlb_shootwait __P((void));
void	pmap_prealloc_lowmem_ptps __P((void));

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool		pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void
pmap_update_pg(va)
	vaddr_t va;
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t *
vtopte(vaddr_t va)
{

	KASSERT(va < (L4_SLOT_KERN * NBPD_L4));

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t *
kvtopte(vaddr_t va)
{

	KASSERT(va >= (L4_SLOT_KERN * NBPD_L4));

#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		pde = L2_BASE + pl2_i(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + pl1_i(va));
}
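
/*
 * Illustrative usage (a sketch, not a pmap interface): the PTE behind
 * a wired kernel VA can be inspected through the recursive mapping:
 *
 *	pt_entry_t pte = *kvtopte(va);
 *	if (pmap_valid_entry(pte))
 *		pa = (pte & PG_FRAME) | (va & PAGE_MASK);
 *
 * which is roughly what vtophys() declared below provides for 4KB
 * mappings (LARGEPAGES 2MB mappings need the PG_PS handling above).
 */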

#define pmap_pte_set(p, n)		x86_atomic_testset_u64(p, n)
#define pmap_pte_setbits(p, b)		x86_atomic_setbits_u64(p, b)
#define pmap_pte_clearbits(p, b)	x86_atomic_clearbits_u64(p, b)
#define pmap_cpu_has_pg_n()		(1)
#define pmap_cpu_has_invlpg		(1)

paddr_t vtophys __P((vaddr_t));
vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);

#if 0   /* XXXfvdl was USER_LDT, need to check if that can be supported */
void	pmap_ldt_cleanup __P((struct lwp *));
#define	PMAP_FORK
#endif /* USER_LDT */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL && !_LOCORE */
#endif	/* _AMD64_PMAP_H_ */