/*	$NetBSD: pmap.h,v 1.78 2010/11/12 13:35:51 uebayasi Exp $	*/

/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */


#ifndef PMAP_H
#define PMAP_H

#include <sys/simplelock.h>

#include <uvm/uvm_page.h>

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Constants for converting between machine-independent (MI) pages and the
 * smaller VAX hardware pages.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define LTOHPN		(1 << LTOHPS)
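/*
 * Illustrative note (added, not in the original header): with the 4096-byte
 * MI page (PGSHIFT == 12) and the 512-byte VAX hardware page
 * (VAX_PGSHIFT == 9), LTOHPS is 3 and LTOHPN is 8, i.e. every MI page is
 * backed by LTOHPN consecutive hardware pages:
 *
 *	size_t mi_page = 1 << PGSHIFT;		// 4096 bytes
 *	size_t hw_page = 1 << VAX_PGSHIFT;	// 512 bytes
 *	KASSERT(mi_page == hw_page * LTOHPN);	// 8 hardware pages per MI page
 */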

/*
 * Pmap structure
 *  Describes one process address space: the user P0/P1 page table base and
 *  length registers, the PCBs currently sharing the pmap, a reference
 *  count, a lock for MP operation and the usual pmap statistics.
 */

struct pmap {
	struct pte	*pm_p1ap;	/* Base of alloced p1 pte space */
	int		 pm_count;	/* reference count */
	struct pcb	*pm_pcbs;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics pm_stats; /* Some statistics */
};
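/*
 * Illustrative sketch (added, not in the original header): the pm_p0br/p0lr
 * and pm_p1br/p1lr fields mirror the VAX P0/P1 memory-management registers.
 * A P0-space address indexes pm_p0br with its virtual page number and is
 * valid while that index stays below the length register; P1 space grows
 * downward, so an index is valid only at or above pm_p1lr.  See
 * pmap_extract() below for the real lookup; this is only the core of it:
 *
 *	struct pte *pte;
 *	int vpn = PG_PFNUM(va);			// virtual page number
 *	if (va < 0x40000000)			// P0 (program) region
 *		pte = &pmap->pm_p0br[vpn];
 *	else					// P1 (stack) region
 *		pte = &pmap->pm_p1br[vpn];
 */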

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a struct pv_entry; the list heads
 * live in the pv_table array.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* virtual address of this mapping */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* write/modified bits */
};

extern	struct pv_entry *pv_table;
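/*
 * Illustrative sketch (added, not in the original header): pv_table is
 * indexed by MI page frame number, exactly as the inline routines further
 * down do:
 *
 *	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
 *	for (; pv != NULL; pv = pv->pv_next)
 *		// each entry with a non-NULL pv_pmap is one (pmap, va)
 *		// mapping of pg
 *		;
 */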

/* Mapping macros used when allocating the system page table (SPT) */
#define MAPVIRT(ptr, count)					\
	ptr = virtual_avail;					\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)				\
	ptr = avail_start + KERNBASE;				\
	avail_start += (count) * VAX_NBPG;
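/*
 * Illustrative usage sketch (added, not in the original header): these
 * macros are intended for bootstrap-time allocation, e.g. from
 * pmap_bootstrap().  The destination names below are hypothetical; each
 * invocation simply advances avail_start/virtual_avail by count VAX pages.
 *
 *	vaddr_t scratch, window;
 *	MAPPHYS(scratch, LTOHPN, VM_PROT_READ|VM_PROT_WRITE);
 *						// one MI page of physical memory
 *	MAPVIRT(window, LTOHPN);		// virtual space only, no backing yet
 */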


/*
 * Real nice (fast) routines to get the virtual address of a physical page
 * (and vice versa).
 */
#define PMAP_VTOPHYS(va)	((va) & ~KERNBASE)
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)
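/*
 * Illustrative note (added, not in the original header): because the kernel
 * maps physical memory directly at KERNBASE (0x80000000), these conversions
 * are single bit operations:
 *
 *	paddr_t pa = 0x2000;
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);	// 0x80002000
 *	pa = PMAP_UNMAP_POOLPAGE(va);		// 0x2000 again
 */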

#define PMAP_STEAL_MEMORY

/*
 * This is by far the most used pmap routine, so make it inline.
 */
__inline static bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	int	*pte, sva;

	if (va & KERNBASE) {
		paddr_t pa;

		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (true);
		return (false);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {
		if (sva >= (pmap->pm_p0lr & ~AST_MASK))
			goto fail;
		pte = (int *)pmap->pm_p0br;
	} else {
		if (sva < pmap->pm_p1lr)
			goto fail;
		pte = (int *)pmap->pm_p1br;
	}
	/*
	 * Since the PTE tables are sparsely allocated, make sure the page
	 * table page actually exists before dereferencing the pte itself.
	 */
	if (kvtopte(&pte[sva])->pg_v && (pte[sva] & PG_FRAME)) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (true);
	}
 fail:
	if (pap)
		*pap = 0;
	return (false);
}
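/*
 * Illustrative usage sketch (added, not in the original header):
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa))
 *		printf("va %#lx maps to pa %#lx\n", va, pa);
 *	else
 *		printf("va %#lx is not mapped\n", va);
 */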

bool	pmap_clear_modify_long(struct pv_entry *);
bool	pmap_clear_reference_long(struct pv_entry *);
bool	pmap_is_modified_long(struct pv_entry *);
void	pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void	pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
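/*
 * Added commentary: the inline routines below keep the common case cheap by
 * first consulting the cached attribute bits in pv_attr; the out-of-line
 * *_long() variants declared above are only called when the cached bits are
 * not conclusive or the page still has mappings to walk (pv_pmap/pv_next
 * set).
 */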

__inline static bool
pmap_is_referenced(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	return rv;
}

__inline static bool
pmap_clear_reference(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	pv->pv_attr &= ~PG_V;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_reference_long(pv);
	return rv;
}

__inline static bool
pmap_clear_modify(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_M) != 0;

	pv->pv_attr &= ~PG_M;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_modify_long(pv);
	return rv;
}

__inline static bool
pmap_is_modified(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_attr & PG_M)
		return true;
	else
		return pmap_is_modified_long(pv);
}

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		pmap_page_protect_long(pv, prot);
}

__inline static void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* Routines that are best to define as macros */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define pmap_update(pmap)		/* nothing (yet) */
#define pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_reference(pmap)		(pmap)->pm_count++

/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst)				\
	__asm("addl3 $0x80000000,%0,%%r0;"			\
	    "addl3 $0x80000000,%1,%%r1;"			\
	    "movc3 $4096,(%%r0),(%%r1)"				\
	    :: "r"(src), "r"(dst)				\
	    : "r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)					\
	__asm("addl3 $0x80000000,%0,%%r0;"			\
	    "movc5 $0,(%%r0),$0,$4096,(%%r0)"			\
	    :: "r"(phys)					\
	    : "r0","r1","r2","r3","r4","r5");
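/*
 * Illustrative C equivalent (added, not in the original header) of what the
 * inline assembly above does: the physical page is accessed through its
 * KERNBASE alias and a whole 4096-byte MI page is copied or cleared
 * (movc3/movc5 clobber r0-r5, hence the clobber lists):
 *
 *	memcpy((void *)(dst | KERNBASE), (void *)(src | KERNBASE), 4096);
 *	memset((void *)(phys | KERNBASE), 0, 4096);
 */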

/* Prototypes */
void	pmap_bootstrap(void);
vaddr_t pmap_map(vaddr_t, vaddr_t, vaddr_t, int);

#endif /* PMAP_H */