/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.4 (Berkeley) 5/12/91
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from hp300:	@(#)pmap.h	7.2 (Berkeley) 12/16/90
 */
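
/*
 * A rough sketch of the recursive-map arithmetic, using the PTDPTDI and
 * PD_SHIFT values defined below (illustrative only):
 *
 *	PTD[PTDPTDI] points back at the page directory itself, so every
 *	page table appears as part of one contiguous array of ptes at
 *
 *		PTmap == PTDPTDI << PD_SHIFT	(0x3f7 << 22 == 0xfdc00000)
 *
 *	and the pte mapping a given va is simply PTmap[i386_btop(va)].
 *	Applying the same indexing one level further lands in the page
 *	directory itself:
 *
 *		PTD == PTmap + (PTDPTDI << PG_SHIFT)	(0xfdff7000)
 *
 *	which is why vtopte() of a pte address yields the pde mapping it.
 */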

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

/*
 * 386 page table entry and page table directory
 * W.Jolitz, 8/89
 */

struct pde
{
unsigned int
		pd_v:1,			/* valid bit */
		pd_prot:2,		/* access control */
		pd_mbz1:2,		/* reserved, must be zero */
		pd_u:1,			/* hardware maintained 'used' bit */
		:1,			/* not used */
		pd_mbz2:2,		/* reserved, must be zero */
		:3,			/* reserved for software */
		pd_pfnum:20;		/* physical page frame number of pte's*/
};

#define	PD_MASK		0xffc00000	/* page directory address bits */
#define	PT_MASK		0x003ff000	/* page table address bits */
#define	PD_SHIFT	22		/* page directory address shift */
#define	PG_SHIFT	12		/* page table address shift */

struct pte
{
unsigned int
		pg_v:1,			/* valid bit */
		pg_prot:2,		/* access control */
		pg_mbz1:2,		/* reserved, must be zero */
		pg_u:1,			/* hardware maintained 'used' bit */
		pg_m:1,			/* hardware maintained modified bit */
		pg_mbz2:2,		/* reserved, must be zero */
		pg_w:1,			/* software, wired down page */
		:1,			/* software (unused) */
		pg_nc:1,		/* 'uncacheable page' bit */
		pg_pfnum:20;		/* physical page frame number */
};

#define	PG_V		0x00000001
#define	PG_RO		0x00000000
#define	PG_RW		0x00000002
#define	PG_u		0x00000004
#define	PG_PROT		0x00000006	/* all protection bits */
#define	PG_W		0x00000200
#define	PG_N		0x00000800	/* Non-cacheable */
#define	PG_M		0x00000040
#define	PG_U		0x00000020
#define	PG_FRAME	0xfffff000

#define	PG_NOACC	0
#define	PG_KR		0x00000000
#define	PG_KW		0x00000002
#define	PG_URKR		0x00000004
#define	PG_URKW		0x00000004
#define	PG_UW		0x00000006
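
/*
 * For example (illustrative only, not used below): a wired, writable
 * kernel mapping of the physical page containing pa could be composed as
 *
 *	(pa & PG_FRAME) | PG_V | PG_KW | PG_W
 *
 * while a user read-only mapping would use PG_URKR in place of PG_KW.
 */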

/* Garbage for current bastardized pager that assumes a hp300 */
#define	PG_NV		0
#define	PG_CI		0

/*
 * Page Protection Exception bits
 */

#define	PGEX_P		0x01	/* Protection violation vs. not present */
#define	PGEX_W		0x02	/* during a Write cycle */
#define	PGEX_U		0x04	/* access from User mode (UPL) */

typedef struct pde	pd_entry_t;	/* page directory entry */
typedef struct pte	pt_entry_t;	/* Mach page table entry */

/*
 * One page directory, shared between
 * kernel and user modes.
 */
#define	I386_PAGE_SIZE	NBPG
#define	I386_PDR_SIZE	NBPDR

#define	I386_KPDES	8			/* KPT page directory size */
#define	I386_UPDES	(NBPDR/sizeof(struct pde)-8)	/* UPT page directory size */

#define	UPTDI		0x3f6		/* ptd entry for u./kernel&user stack */
#define	PTDPTDI		0x3f7		/* ptd entry that points to ptd! */
#define	KPTDI_FIRST	0x3f8		/* start of kernel virtual pde's */
#define	KPTDI_LAST	0x3fA		/* last of kernel virtual pde's */

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef KERNEL
extern struct pte	PTmap[], APTmap[], Upte;
extern struct pde	PTD[], APTD[], PTDpde, APTDpde, Upde;
extern pt_entry_t	*Sysmap;

extern int	IdlePTD;	/* physical address of "Idle" state directory */
#endif

/*
 * virtual address to page table entry and
 * to physical address.  Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
#define	kvtopte(va)	vtopte(va)
#define	ptetov(pt)	(i386_ptob(pt - PTmap))
#define	vtophys(va)	(i386_ptob(vtopte(va)->pg_pfnum) | ((int)(va) & PGOFSET))
#define	ispt(va)	((va) >= UPT_MIN_ADDRESS && (va) <= KPT_MAX_ADDRESS)

#define	avtopte(va)	(APTmap + i386_btop(va))
#define	ptetoav(pt)	(i386_ptob(pt - APTmap))
#define	avtophys(va)	(i386_ptob(avtopte(va)->pg_pfnum) | ((int)(va) & PGOFSET))
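
/*
 * Usage sketch (illustrative only): for a mapped kernel va,
 *
 *	struct pte *pte = vtopte(va);			-- pte that maps va
 *	vm_offset_t pa  = vtophys(va);			-- its physical address
 *	struct pde *pde = (struct pde *)vtopte(pte);	-- pde mapping that pte
 *
 * The last line is the recursion noted above: a pte's own address falls
 * inside PTmap, so vtopte() of it lands on the pde that maps it.
 */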

/*
 * macros to generate page directory/table indices
 */

#define	pdei(va)	(((va)&PD_MASK)>>PD_SHIFT)
#define	ptei(va)	(((va)&PT_MASK)>>PG_SHIFT)
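
/*
 * For example (illustrative): with va == 0xfe001234,
 *	pdei(va) == 0x3f8	(directory slot, here KPTDI_FIRST)
 *	ptei(va) == 0x001	(pte slot within that page table)
 * and the low PGOFSET bits (0x234) are the offset within the page.
 */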

/*
 * Pmap stuff
 */

struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	boolean_t		pm_pdchanged;	/* pdir changed */
	short			pm_dref;	/* page directory ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

#ifdef KERNEL
extern pmap_t		kernel_pmap;
#endif

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmapp, pcbp) \
	if ((pmapp) != NULL /*&& (pmapp)->pm_pdchanged */) {	\
		(pcbp)->pcb_cr3 = \
		    pmap_extract(kernel_pmap, (pmapp)->pm_pdir); \
		if ((pmapp) == &curproc->p_vmspace->vm_pmap) \
			load_cr3((pcbp)->pcb_cr3); \
		(pmapp)->pm_pdchanged = FALSE; \
	}

#define	PMAP_DEACTIVATE(pmapp, pcbp)
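
/*
 * PMAP_ACTIVATE is presumably invoked from context-switch and pcb setup
 * paths: it records the physical address of the pmap's page directory in
 * pcb_cr3 and, when the pmap belongs to curproc, reloads %cr3 so the new
 * address space takes effect immediately.  PMAP_DEACTIVATE is a no-op on
 * this port.
 */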

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define	PV_PTPAGE	0x02	/* entry maps a page table page */

#ifdef	KERNEL

pv_entry_t	pv_table;		/* array of entries, one per page */

#define	pa_index(pa)		atop((pa) - vm_first_phys)
#define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
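
/*
 * Typical traversal (illustrative only): pmap code that must visit every
 * mapping of a physical page walks the chain rooted at pa_to_pvh(pa),
 * e.g.
 *
 *	pv_entry_t pv;
 *	for (pv = pa_to_pvh(pa); pv != PV_ENTRY_NULL; pv = pv->pv_next)
 *		... operate on pv->pv_pmap and pv->pv_va ...
 *
 * (the real code also skips an empty head entry, whose pv_pmap is NULL).
 */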

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

#endif /* KERNEL */

#endif /* _PMAP_MACHINE_ */