/*	$NetBSD: pmap.h,v 1.57 2003/02/26 21:54:38 ragge Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */


#ifndef PMAP_H
#define PMAP_H

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Some constants to make life easier.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define LTOHPN		(1 << LTOHPS)
#if USE_TOPDOWN_VM==0
#define PROCPTSIZE	((MAXTSIZ + MAXDSIZ + MAXSSIZ) / VAX_NBPG)
#else
#define PROCPTSIZE	((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
#endif
#define NPTEPGS		(PROCPTSIZE / (NBPG / (sizeof(struct pte) * LTOHPN)))
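
/*
 * Illustrative sketch (not part of the original header): LTOHPS/LTOHPN
 * convert between logical VM pages (NBPG bytes) and the smaller hardware
 * VAX pages (VAX_NBPG bytes).  With the usual PGSHIFT of 12 and
 * VAX_PGSHIFT of 9, LTOHPS is 3 and LTOHPN is 8, so one logical page is
 * backed by eight hardware PTEs:
 *
 *	int hwpages = npages << LTOHPS;	 (logical -> hardware pages)
 *	int vmpages = hwpages >> LTOHPS;	 (hardware -> logical pages)
 */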

/*
 * Link struct used when more than one process shares a pmap (as with vfork).
 * This is rarely used.
 */
struct pm_share {
	struct pm_share	*ps_next;
	struct pcb	*ps_pcb;
};

/*
 * Pmap structure
 */

typedef struct pmap {
	struct pte	*pm_p1ap;	/* Base of alloced p1 pte space */
	int		 pm_count;	/* reference count */
	struct pm_share	*pm_share;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	u_char		*pm_pref;	/* pte reference count array */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics pm_stats; /* Some statistics */
} *pmap_t;

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */

struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* virtual address of this mapping */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* referenced/modified bits */
};

extern struct pv_entry *pv_table;
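
/*
 * Illustrative sketch (not part of the original header): pv_table is
 * indexed by physical page number, and the chain for a managed page can be
 * walked like this (do_something is a hypothetical placeholder):
 *
 *	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
 *
 *	for (; pv != NULL; pv = pv->pv_next)
 *		if (pv->pv_pmap != NULL)
 *			do_something(pv->pv_pmap, pv->pv_vaddr);
 */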

/* Mapping macros used when allocating SPT */
#define MAPVIRT(ptr, count)					\
	(vaddr_t)ptr = virtual_avail;				\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)				\
	(vaddr_t)ptr = avail_start + KERNBASE;			\
	avail_start += (count) * VAX_NBPG;
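
/*
 * Illustrative sketch (not part of the original header): these macros are
 * meant for early allocations in pmap_bootstrap(), before the VM system is
 * up.  MAPPHYS hands out wired physical memory (note that the perm argument
 * is unused as the macro is written); MAPVIRT only reserves kernel virtual
 * space.  Counts are in hardware (VAX_NBPG) pages.  Hypothetical use:
 *
 *	MAPPHYS(istack, 4, VM_PROT_READ|VM_PROT_WRITE);
 *	MAPVIRT(scratch_va, 4);
 */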

#ifdef _KERNEL

extern struct pmap kernel_pmap_store;

#define pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */


/*
 * Real nice (fast) routines to get the virtual address of a physical page
 * (and vice versa).
 */
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va)	((va) & ~KERNBASE)
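
/*
 * Illustrative sketch (not part of the original header): on the VAX all of
 * physical memory is mapped 1:1 starting at KERNBASE (0x80000000), so the
 * pool-page hooks are a plain OR/AND with KERNBASE:
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);	 (pa | 0x80000000)
 *	paddr_t pa2 = PMAP_UNMAP_POOLPAGE(va);	 (back to the physical address)
 */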

#define PMAP_STEAL_MEMORY

/*
 * This is by far the most used pmap routine.  Make it inline.
 */
__inline static boolean_t
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa = 0;
	int	*pte, sva;

	if (va & KERNBASE) {
		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (TRUE);
		return (FALSE);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {		/* P0 (program) region */
		if (sva > (pmap->pm_p0lr & ~AST_MASK))
			return FALSE;
		pte = (int *)pmap->pm_p0br;
	} else {			/* P1 (stack) region */
		if (sva < pmap->pm_p1lr)
			return FALSE;
		pte = (int *)pmap->pm_p1br;
	}
	if (kvtopte(&pte[sva])->pg_pfn) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (TRUE);
	}
	return (FALSE);
}
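
/*
 * Illustrative sketch (not part of the original header): typical use of
 * pmap_extract() is to look up the physical address behind a virtual one
 * and check whether the mapping exists at all:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("unmapped kernel address %lx", va);
 *
 * Passing a NULL pap pointer just asks "is this address mapped?".
 */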

boolean_t pmap_clear_modify_long(struct pv_entry *);
boolean_t pmap_clear_reference_long(struct pv_entry *);
boolean_t pmap_is_modified_long(struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);

__inline static boolean_t
pmap_clear_reference(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	boolean_t rv = (pv->pv_attr & PG_V) != 0;

	pv->pv_attr &= ~PG_V;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_reference_long(pv);
	return rv;
}

__inline static boolean_t
pmap_clear_modify(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	boolean_t rv = (pv->pv_attr & PG_M) != 0;

	pv->pv_attr &= ~PG_M;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_modify_long(pv);
	return rv;
}

__inline static boolean_t
pmap_is_modified(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_attr & PG_M)
		return 1;
	else
		return pmap_is_modified_long(pv);
}

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		pmap_page_protect_long(pv, prot);
}

__inline static void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* Routines that are best to define as macros */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define pmap_update(pmap)		/* nothing (yet) */
#define pmap_collect(pmap)		/* No need so far */
#define pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_reference(pmap)		(pmap)->pm_count++

/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst)				\
	__asm__("addl3 $0x80000000,%0,%%r0;"			\
		"addl3 $0x80000000,%1,%%r1;"			\
		"movc3 $4096,(%%r0),(%%r1)"			\
		:: "r"(src), "r"(dst)				\
		: "r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)					\
	__asm__("addl3 $0x80000000,%0,%%r0;"			\
		"movc5 $0,(%%r0),$0,$4096,(%%r0)"		\
		:: "r"(phys)					\
		: "r0","r1","r2","r3","r4","r5");
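
/*
 * Illustrative note (not part of the original header): both macros take a
 * page-aligned physical address, turn it into a virtual address by adding
 * KERNBASE (0x80000000), and use the VAX block-move instructions to copy
 * (movc3) or clear (movc5) one full 4096-byte logical page, e.g.:
 *
 *	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
 *	pmap_copy_page(VM_PAGE_TO_PHYS(srcpg), VM_PAGE_TO_PHYS(dstpg));
 */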

/* Prototypes */
void	pmap_bootstrap __P((void));
vaddr_t	pmap_map __P((vaddr_t, vaddr_t, vaddr_t, int));

#endif /* PMAP_H */
