/*	$NetBSD: pmap.c,v 1.30.2.5 2002/02/28 04:07:23 nathanw Exp $	*/

/*
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.30.2.5 2002/02/28 04:07:23 nathanw Exp $");

#ifdef PMAP_DEBUG
#define	PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
int pmap_debug_level = -2;

/*
 * for switching to potentially finer grained debugging
 */
#define	PDB_FOLLOW	0x0001
#define	PDB_INIT	0x0002
#define	PDB_ENTER	0x0004
#define	PDB_REMOVE	0x0008
#define	PDB_CREATE	0x0010
#define	PDB_PTPAGE	0x0020
#define	PDB_ASN		0x0040
#define	PDB_BITS	0x0080
#define	PDB_COLLECT	0x0100
#define	PDB_PROTECT	0x0200
#define	PDB_BOOTSTRAP	0x1000
#define	PDB_PARANOIA	0x2000
#define	PDB_WIRING	0x4000
#define	PDB_PVDUMP	0x8000

int debugmap = 0;
int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
#define	NPDEBUG(_lev_,_stat_) \
	if (pmapdebug & (_lev_)) \
		((_stat_))
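
/*
 * Example: with the default pmapdebug above (PDB_PARANOIA | PDB_FOLLOW),
 * NPDEBUG(PDB_FOLLOW, printf("...")) fires, while
 * NPDEBUG(PDB_ENTER, printf("...")) stays quiet until PDB_ENTER is also
 * or'ed into pmapdebug.
 */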

#else	/* PMAP_DEBUG */
#define	PDEBUG(_lev_,_stat_) /* Nothing */
#define	NPDEBUG(_lev_,_stat_) /* Nothing */
#endif	/* PMAP_DEBUG */

struct pmap kernel_pmap_store;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

pagehook_t page_hook0;
pagehook_t page_hook1;
char *memhook;
pt_entry_t msgbufpte;
extern caddr_t msgbufaddr;

boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */

/*
 * locking data structures
 */

static struct lock pmap_main_lock;
static struct simplelock pvalloc_lock;
#ifdef LOCKDEBUG
#define	PMAP_MAP_TO_HEAD_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define	PMAP_MAP_TO_HEAD_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)

#define	PMAP_HEAD_TO_MAP_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define	PMAP_HEAD_TO_MAP_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
#define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
#define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
#define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
#endif /* LOCKDEBUG */

/*
 * pv_page management structures: locked by pvalloc_lock
 */

TAILQ_HEAD(pv_pagelist, pv_page);
static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs;	/* list of unused pv_pages */
static int pv_nfpvents;			/* # of free pv entries */
static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
static vaddr_t pv_cachedva;		/* cached VA for later use */

#define	PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
#define	PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
					/* high water mark */

/*
 * local prototypes
 */

static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define	ALLOCPV_NEED	0	/* need PV now */
#define	ALLOCPV_TRY	1	/* just try to allocate, don't steal */
#define	ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
static void		 pmap_enter_pv __P((struct pv_head *,
					    struct pv_entry *, struct pmap *,
					    vaddr_t, struct vm_page *, int));
static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pv_doit __P((struct pv_entry *));
static void		 pmap_free_pvpage __P((void));
static boolean_t	 pmap_is_curpmap __P((struct pmap *));
static struct pv_entry	*pmap_remove_pv __P((struct pv_head *, struct pmap *,
					     vaddr_t));
#define	PMAP_REMOVE_ALL		0	/* remove all mappings */
#define	PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */

static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct pv_head *,
	u_int, u_int));

static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
static struct pv_head *pmap_find_pvh __P((paddr_t));
static void pmap_remove_all __P((paddr_t));


vsize_t npages;

static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t, boolean_t));
static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t, boolean_t));
__inline static void pmap_clearbit __P((paddr_t, unsigned int));
__inline static boolean_t pmap_testbit __P((paddr_t, unsigned int));

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_end;
extern paddr_t physical_freeend;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_start;
vaddr_t virtual_end;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

#define ALLOC_PAGE_HOOK(x, s) \
	x.va = virtual_start; \
	x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
	virtual_start += s;
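
/*
 * Example: ALLOC_PAGE_HOOK(page_hook0, NBPG) expands to three statements
 * that reserve the current virtual_start for the hook, record a pointer
 * to its PTE, and advance virtual_start by one page.  Because the
 * expansion is multiple statements, the macro must not be used where a
 * single statement is expected (e.g. an unbraced if).
 */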

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
struct l1pt_queue l1pt_static_queue;	/* head of our static l1 queue */
int l1pt_static_queue_count;		/* items in the static l1 queue */
int l1pt_static_create_count;		/* static l1 items created */
struct l1pt_queue l1pt_queue;		/* head of our l1 queue */
int l1pt_queue_count;			/* items in the l1 queue */
int l1pt_create_count;			/* stat - L1's create count */
int l1pt_reuse_count;			/* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
void pmap_copy_on_write __P((paddr_t pa));
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
     vaddr_t l2pa, boolean_t));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));

__inline static void pmap_vac_me_harder __P((struct pmap *, struct pv_head *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_kpmap __P((struct pmap *, struct pv_head *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_user __P((struct pmap *, struct pv_head *,
    pt_entry_t *, boolean_t));

/*
 * Cache enable bits in PTE to use on pages that are cacheable.
 * On most machines this is cacheable/bufferable, but on some, e.g. arm10,
 * we can choose between write-through and write-back caching.
 */
pt_entry_t pte_cache_mode = (PT_C | PT_B);

/*
 * real definition of pv_entry.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define	PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
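
/*
 * Worked example (assuming NBPG is 4096 and an ILP32 ABI with no struct
 * padding, so sizeof(struct pv_page_info) == 16 and
 * sizeof(struct pv_entry) == 20): PVE_PER_PVPAGE == (4096 - 16) / 20
 * == 204, giving PVE_LOWAT == 102 and PVE_HIWAT == 102 + 408 == 510.
 */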

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(level)
	int level;
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

__inline static boolean_t
pmap_is_curpmap(struct pmap *pmap)
{
	if ((curproc && curproc->l_proc->p_vmspace->vm_map.pmap == pmap)
	    || (pmap == pmap_kernel()))
		return (TRUE);
	return (FALSE);
}

#include "isadma.h"

#if NISADMA > 0
/*
 * Used to protect memory for ISA DMA bounce buffers.  If, when loading
 * pages into the system, memory intersects with any of these ranges,
 * the intersecting memory will be loaded into a lower-priority free list.
 */
bus_dma_segment_t *pmap_isa_dma_ranges;
int pmap_isa_dma_nranges;

boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
	    paddr_t *, psize_t *));

/*
 * Check if a memory range intersects with an ISA DMA range, and
 * return the page-rounded intersection if it does.  The intersection
 * will be placed on a lower-priority free list.
 */
boolean_t
pmap_isa_dma_range_intersect(pa, size, pap, sizep)
	paddr_t pa;
	psize_t size;
	paddr_t *pap;
	psize_t *sizep;
{
	bus_dma_segment_t *ds;
	int i;

	if (pmap_isa_dma_ranges == NULL)
		return (FALSE);

	for (i = 0, ds = pmap_isa_dma_ranges;
	     i < pmap_isa_dma_nranges; i++, ds++) {
		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    ds->ds_addr + ds->ds_len) - pa);
			return (TRUE);
		}
		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(ds->ds_addr);
			*sizep = round_page(min((pa + size) - ds->ds_addr,
			    ds->ds_len));
			return (TRUE);
		}
	}

	/*
	 * No intersection found.
	 */
	return (FALSE);
}
#endif /* NISADMA > 0 */
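
/*
 * Worked example (illustrative numbers only, assuming 4KB pages): with a
 * single DMA segment at ds_addr 0x100000 of ds_len 0x100000, a call with
 * pa 0xfe800 and size 0x3000 hits the "end of region" case above and
 * returns *pap = trunc_page(0x100000) = 0x100000 and
 * *sizep = round_page(min(0x101800 - 0x100000, 0x100000)) = 0x2000.
 */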

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *			one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(pmap, mode)
	struct pmap *pmap;
	int mode;
{
	struct pv_page *pvpage;
	struct pv_entry *pv;

	simple_lock(&pvalloc_lock);

	if (pv_freepages.tqh_first != NULL) {
		pvpage = pv_freepages.tqh_first;
		pvpage->pvinfo.pvpi_nfree--;
		if (pvpage->pvinfo.pvpi_nfree == 0) {
			/* nothing left in this one? */
			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
		}
		pv = pvpage->pvinfo.pvpi_pvfree;
#ifdef DIAGNOSTIC
		if (pv == NULL)
			panic("pmap_alloc_pv: pvpi_nfree off");
#endif
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;	/* took one from pool */
	} else {
		pv = NULL;	/* need more of them */
	}

	/*
	 * if below low water mark or we didn't get a pv_entry we try and
	 * create more pv_entrys ...
	 */

	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
		if (pv == NULL)
			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
					       mode : ALLOCPV_NEED);
		else
			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
	}

	simple_unlock(&pvalloc_lock);
	return(pv);
}

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 * new pv_entry from it.   if we are unable to allocate a pv_page
 * we make a last ditch effort to steal a pv_page from some other
 * mapping.    if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(pmap, mode)
	struct pmap *pmap;
	int mode;
{
	struct vm_page *pg;
	struct pv_page *pvpage;
	struct pv_entry *pv;
	int s;

	/*
	 * if we need_entry and we've got unused pv_pages, allocate from there
	 */

	if (mode != ALLOCPV_NONEED && pv_unusedpgs.tqh_first != NULL) {

		/* move it to pv_freepages list */
		pvpage = pv_unusedpgs.tqh_first;
		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

		/* allocate a pv_entry */
		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
		pv = pvpage->pvinfo.pvpi_pvfree;
#ifdef DIAGNOSTIC
		if (pv == NULL)
			panic("pmap_alloc_pvpage: pvpi_nfree off");
#endif
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;

		pv_nfpvents--;	/* took one from pool */
		return(pv);
	}

	/*
	 * see if we've got a cached unmapped VA that we can map a page in.
	 * if not, try to allocate one.
	 */

	if (pv_cachedva == 0) {
		s = splvm();
		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
		splx(s);
		if (pv_cachedva == 0) {
			return (NULL);
		}
	}

	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
	    UVM_PGA_USERESERVE);
	if (pg)
		pg->flags &= ~PG_BUSY;	/* never busy */

	if (pg == NULL)
		return (NULL);

	/*
	 * add a mapping for our new pv_page and free its entrys (save one!)
	 *
	 * NOTE: If we are allocating a PV page for the kernel pmap, the
	 * pmap is already locked!  (...but entering the mapping is safe...)
	 */

	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
	pmap_update(pmap_kernel());
	pvpage = (struct pv_page *) pv_cachedva;
	pv_cachedva = 0;
	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(pvp, need_entry)
	struct pv_page *pvp;
	boolean_t need_entry;
{
	int tofree, lcv;

	/* do we need to return one? */
	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;

	pvp->pvinfo.pvpi_pvfree = NULL;
	pvp->pvinfo.pvpi_nfree = tofree;
	for (lcv = 0 ; lcv < tofree ; lcv++) {
		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
	}
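
	/*
	 * Note: at this point lcv == tofree, so when need_entry is true
	 * the entry returned below is &pvp->pvents[PVE_PER_PVPAGE - 1],
	 * the one entry that was deliberately kept off the free list.
	 */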
	if (need_entry)
		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
	else
		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	pv_nfpvents += tofree;
	return((need_entry) ? &pvp->pvents[lcv] : NULL);
}

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(pv)
	struct pv_entry *pv;
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
	pv_nfpvents++;
	pvp->pvinfo.pvpi_nfree++;

	/* nfree == 1 => fully allocated page just became partly allocated */
	if (pvp->pvinfo.pvpi_nfree == 1) {
		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
	}

	/* free it */
	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
	pvp->pvinfo.pvpi_pvfree = pv;

	/*
	 * are all pv_page's pv_entry's free?  move it to unused queue.
	 */

	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	}
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(pmap, pv)
	struct pmap *pmap;
	struct pv_entry *pv;
{
	simple_lock(&pvalloc_lock);
	pmap_free_pv_doit(pv);

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(pmap, pvs)
	struct pmap *pmap;
	struct pv_entry *pvs;
{
	struct pv_entry *nextpv;

	simple_lock(&pvalloc_lock);

	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
		nextpv = pvs->pv_next;
		pmap_free_pv_doit(pvs);
	}

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *	there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 * => note: analysis of MI kmem_map usage [i.e. malloc/free] shows
 *	that if we can lock the kmem_map then we are not already
 *	holding kmem_object's lock.
 */

static void
pmap_free_pvpage()
{
	int s;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	struct pv_page *pvp;

	s = splvm(); /* protect kmem_map */

	pvp = pv_unusedpgs.tqh_first;

	/*
	 * note: watch out for pv_initpage which is allocated out of
	 * kernel_map rather than kmem_map.
	 */
	if (pvp == pv_initpage)
		map = kernel_map;
	else
		map = kmem_map;

	if (vm_map_lock_try(map)) {

		/* remove pvp from pv_unusedpgs */
		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

		/* unmap the page */
		dead_entries = NULL;
		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
		    &dead_entries);
		vm_map_unlock(map);

		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, 0);

		pv_nfpvents -= PVE_PER_PVPAGE;	/* update free count */
	}

	if (pvp == pv_initpage)
		/* no more initpage, we've freed it */
		pv_initpage = NULL;

	splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a pv_head list
 *   pmap_remove_pv: remove a mapping from a pv_head list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a pv_head list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the pv_head and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */

__inline static void
pmap_enter_pv(pvh, pve, pmap, va, ptp, flags)
	struct pv_head *pvh;
	struct pv_entry *pve;	/* preallocated pve for us to use */
	struct pmap *pmap;
	vaddr_t va;
	struct vm_page *ptp;	/* PTP in pmap that maps this VA */
	int flags;
{
	pve->pv_pmap = pmap;
	pve->pv_va = va;
	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
	pve->pv_flags = flags;
	simple_lock(&pvh->pvh_lock);		/* lock pv_head */
	pve->pv_next = pvh->pvh_list;		/* add to ... */
	pvh->pvh_list = pve;			/* ... locked list */
	simple_unlock(&pvh->pvh_lock);		/* unlock, done! */
	if (pve->pv_flags & PT_W)
		++pmap->pm_stats.wired_count;
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on pv_head [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(pvh, pmap, va)
	struct pv_head *pvh;
	struct pmap *pmap;
	vaddr_t va;
{
	struct pv_entry *pve, **prevptr;

	prevptr = &pvh->pvh_list;		/* previous pv_entry pointer */
	pve = *prevptr;
	while (pve) {
		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
			*prevptr = pve->pv_next;		/* remove it! */
			if (pve->pv_flags & PT_W)
				--pmap->pm_stats.wired_count;
			break;
		}
		prevptr = &pve->pv_next;		/* previous pointer */
		pve = pve->pv_next;			/* advance */
	}
	return(pve);				/* return removed pve */
}

/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on pv_head [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */

/*__inline */
static u_int
pmap_modify_pv(pmap, va, pvh, bic_mask, eor_mask)
	struct pmap *pmap;
	vaddr_t va;
	struct pv_head *pvh;
	u_int bic_mask;
	u_int eor_mask;
{
	struct pv_entry *npv;
	u_int flags, oflags;

	/*
	 * There is at least one VA mapping this page.
	 */

	for (npv = pvh->pvh_list; npv; npv = npv->pv_next) {
		if (pmap == npv->pv_pmap && va == npv->pv_va) {
			oflags = npv->pv_flags;
			npv->pv_flags = flags =
			    ((oflags & ~bic_mask) ^ eor_mask);
			if ((flags ^ oflags) & PT_W) {
				if (flags & PT_W)
					++pmap->pm_stats.wired_count;
				else
					--pmap->pm_stats.wired_count;
			}
			return (oflags);
		}
	}
	return (0);
}
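
/*
 * Example of the mask semantics above (new = (old & ~bic_mask) ^ eor_mask):
 * pmap_modify_pv(pmap, va, pvh, PT_W, 0) clears the wired flag,
 * pmap_modify_pv(pmap, va, pvh, PT_W, PT_W) sets it, and
 * pmap_modify_pv(pmap, va, pvh, 0, PT_W) toggles it; the wired_count
 * bookkeeping above tracks whichever transition results.
 */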

/*
 * Map the specified level 2 pagetable into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
static /*__inline*/ void
pmap_map_in_l1(pmap, va, l2pa, selfref)
	struct pmap *pmap;
	vaddr_t va, l2pa;
	boolean_t selfref;
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> PDSHIFT) & ~3;
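
	/*
	 * Each ARM L1 entry covers 1MB and points at a 1KB L2 table, so a
	 * 4KB page of L2 tables fills four consecutive L1 slots (4MB of
	 * address space); "& ~3" rounds the index down to such a group.
	 * For example, with PDSHIFT == 20, va 0x12345678 gives ptva 0x120.
	 */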

	PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
	    pmap->pm_pdir, L1_PTE(l2pa), ptva));

	/* Map page table into the L1. */
	pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
	pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
	pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);

	PDEBUG(0, printf("pt self reference %lx in %lx\n",
	    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));

	/* Map the page table into the page table area. */
	if (selfref) {
		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
		    L2_PTE_NC_NB(l2pa, AP_KRW);
	}
	/* XXX should be a purge */
/*	cpu_tlb_flushD();*/
}

#if 0
static /*__inline*/ void
pmap_unmap_in_l1(pmap, va)
	struct pmap *pmap;
	vaddr_t va;
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> PDSHIFT) & ~3;

	/* Unmap page table from the L1. */
	pmap->pm_pdir[ptva + 0] = 0;
	pmap->pm_pdir[ptva + 1] = 0;
	pmap->pm_pdir[ptva + 2] = 0;
	pmap->pm_pdir[ptva + 3] = 0;

	/* Unmap the page table from the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;

	/* XXX should be a purge */
/*	cpu_tlb_flushD();*/
}
#endif

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vaddr_t
pmap_map(va, spa, epa, prot)
	vaddr_t va, spa, epa;
	int prot;
{
	while (spa < epa) {
		pmap_kenter_pa(va, spa, prot);
		va += NBPG;
		spa += NBPG;
	}
	pmap_update(pmap_kernel());
	return(va);
}
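
/*
 * Example (illustrative addresses only, assuming NBPG is 4096):
 * pmap_map(va, 0x18000000, 0x18004000, VM_PROT_READ|VM_PROT_WRITE)
 * enters four kernel mappings via pmap_kenter_pa() and returns
 * va + 0x4000, the first VA past the new mappings.
 */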

/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * bootstrap the pmap system.  This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated
 * and also allocates virtual addresses for certain page hooks.
 * Currently only one page hook is allocated, which is used
 * to zero physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;

char *boot_head;

void
pmap_bootstrap(kernel_l1pt, kernel_ptpt)
	pd_entry_t *kernel_l1pt;
	pv_addr_t kernel_ptpt;
{
	int loop;
	paddr_t start, end;
#if NISADMA > 0
	paddr_t istart;
	psize_t isize;
#endif

	pmap_kernel()->pm_pdir = kernel_l1pt;
	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_obj.pgops = NULL;
	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
	pmap_kernel()->pm_obj.uo_npages = 0;
	pmap_kernel()->pm_obj.uo_refs = 1;

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

	npages = 0;
	loop = 0;
	while (loop < bootconfig.dramblocks) {
		start = (paddr_t)bootconfig.dram[loop].address;
		end = start + (bootconfig.dram[loop].pages * NBPG);
		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;
#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif
#if NISADMA > 0
		if (pmap_isa_dma_range_intersect(start, end - start,
		    &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);
			npages += atop(istart + isize) - atop(istart);

			/*
			 * Load the pieces that come before
			 * the intersection into the default
			 * free list.
			 */
			if (start < istart) {
#if 0
				printf("    BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
				npages += atop(istart) - atop(start);
			}

			/*
			 * Load the pieces that come after
			 * the intersection into the default
			 * free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf("    AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
				npages += atop(end) - atop(istart + isize);
			}
		} else {
			uvm_page_physload(atop(start), atop(end),
			    atop(start), atop(end), VM_FREELIST_DEFAULT);
			npages += atop(end) - atop(start);
		}
#else	/* NISADMA > 0 */
		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
		npages += atop(end) - atop(start);
#endif /* NISADMA > 0 */
		++loop;
	}

#ifdef MYCROFT_HACK
	printf("npages = %ld\n", npages);
#endif

	virtual_start = KERNEL_VM_BASE;
	virtual_end = virtual_start + KERNEL_VM_SIZE - 1;

	ALLOC_PAGE_HOOK(page_hook0, NBPG);
	ALLOC_PAGE_HOOK(page_hook1, NBPG);

	/*
	 * The mem special device needs a virtual hook but we don't
	 * need a pte
	 */
	memhook = (char *)virtual_start;
	virtual_start += NBPG;

	msgbufaddr = (caddr_t)virtual_start;
	msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
	virtual_start += round_page(MSGBUFSIZE);

	/*
	 * init the static-global locks and global lists.
	 */
	spinlockinit(&pmap_main_lock, "pmaplk", 0);
	simple_lock_init(&pvalloc_lock);
	TAILQ_INIT(&pv_freepages);
	TAILQ_INIT(&pv_unusedpgs);

	/*
	 * compute the number of pages we have and then allocate RAM
	 * for each pages' pv_head and saved attributes.
	 */
	{
		int npages, lcv;
		vsize_t s;

		npages = 0;
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
		s = (vsize_t) (sizeof(struct pv_head) * npages +
		    sizeof(char) * npages);
		s = round_page(s); /* round up */
		boot_head = (char *)uvm_pageboot_alloc(s);
		if (boot_head == 0)
			panic("pmap_bootstrap: unable to allocate pv_heads");
		bzero((char *)boot_head, s);
	}

	/*
	 * initialize the pmap pool.
	 */

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);

	cpu_dcache_wbinv_all();
}
1177 1.30.2.2 thorpej
1178 1.30.2.2 thorpej /*
1179 1.30.2.2 thorpej * void pmap_init(void)
1180 1.30.2.2 thorpej *
1181 1.30.2.2 thorpej * Initialize the pmap module.
1182 1.30.2.2 thorpej * Called by vm_init() in vm/vm_init.c in order to initialise
1183 1.30.2.2 thorpej * any structures that the pmap system needs to map virtual memory.
1184 1.30.2.2 thorpej */
1185 1.30.2.2 thorpej
1186 1.30.2.2 thorpej extern int physmem;
1187 1.30.2.2 thorpej
1188 1.30.2.2 thorpej void
1189 1.30.2.2 thorpej pmap_init()
1190 1.30.2.2 thorpej {
1191 1.30.2.2 thorpej int lcv, i;
1192 1.30.2.2 thorpej
1193 1.30.2.2 thorpej #ifdef MYCROFT_HACK
1194 1.30.2.2 thorpej printf("physmem = %d\n", physmem);
1195 1.30.2.2 thorpej #endif
1196 1.30.2.2 thorpej
1197 1.30.2.2 thorpej /*
1198 1.30.2.2 thorpej * Set the available memory vars.  These do not map to real memory
1199 1.30.2.2 thorpej * addresses, and cannot, as the physical memory is fragmented.
1200 1.30.2.2 thorpej * They are used by ps for %mem calculations.
1201 1.30.2.2 thorpej * One could argue whether this should be the entire memory or just
1202 1.30.2.2 thorpej * the memory that is usable in a user process.
1203 1.30.2.2 thorpej */
1204 1.30.2.2 thorpej avail_start = 0;
1205 1.30.2.2 thorpej avail_end = physmem * NBPG;
1206 1.30.2.2 thorpej
1207 1.30.2.2 thorpej /* allocate pv_head stuff first */
1208 1.30.2.2 thorpej for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
1209 1.30.2.2 thorpej vm_physmem[lcv].pmseg.pvhead = (struct pv_head *)boot_head;
1210 1.30.2.2 thorpej boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.pvhead +
1211 1.30.2.2 thorpej (vm_physmem[lcv].end - vm_physmem[lcv].start));
1212 1.30.2.2 thorpej for (i = 0;
1213 1.30.2.2 thorpej i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) {
1214 1.30.2.2 thorpej simple_lock_init(
1215 1.30.2.2 thorpej &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock);
1216 1.30.2.2 thorpej }
1217 1.30.2.2 thorpej }
1218 1.30.2.2 thorpej
1219 1.30.2.2 thorpej /* now allocate attrs */
1220 1.30.2.2 thorpej for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
1221 1.30.2.2 thorpej vm_physmem[lcv].pmseg.attrs = (char *) boot_head;
1222 1.30.2.2 thorpej boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.attrs +
1223 1.30.2.2 thorpej (vm_physmem[lcv].end - vm_physmem[lcv].start));
1224 1.30.2.2 thorpej }
1225 1.30.2.2 thorpej
1226 1.30.2.2 thorpej /*
1227 1.30.2.2 thorpej * now we need to free enough pv_entry structures to allow us to get
1228 1.30.2.2 thorpej * the kmem_map/kmem_object allocated and inited (done after this
1229 1.30.2.2 thorpej * function is finished). to do this we allocate one bootstrap page out
1230 1.30.2.2 thorpej * of kernel_map and use it to provide an initial pool of pv_entry
1231 1.30.2.2 thorpej * structures. we never free this page.
1232 1.30.2.2 thorpej */
1233 1.30.2.2 thorpej
1234 1.30.2.2 thorpej pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
1235 1.30.2.2 thorpej if (pv_initpage == NULL)
1236 1.30.2.2 thorpej panic("pmap_init: pv_initpage");
1237 1.30.2.2 thorpej pv_cachedva = 0; /* a VA we have allocated but not used yet */
1238 1.30.2.2 thorpej pv_nfpvents = 0;
1239 1.30.2.2 thorpej (void) pmap_add_pvpage(pv_initpage, FALSE);
1240 1.30.2.2 thorpej
1241 1.30.2.2 thorpej #ifdef MYCROFT_HACK
1242 1.30.2.2 thorpej for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
1243 1.30.2.2 thorpej printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
1244 1.30.2.2 thorpej lcv,
1245 1.30.2.2 thorpej vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
1246 1.30.2.2 thorpej vm_physmem[lcv].start, vm_physmem[lcv].end);
1247 1.30.2.2 thorpej }
1248 1.30.2.2 thorpej #endif
1249 1.30.2.2 thorpej pmap_initialized = TRUE;
1250 1.30.2.2 thorpej
1251 1.30.2.2 thorpej /* Initialise our L1 page table queues and counters */
1252 1.30.2.2 thorpej SIMPLEQ_INIT(&l1pt_static_queue);
1253 1.30.2.2 thorpej l1pt_static_queue_count = 0;
1254 1.30.2.2 thorpej l1pt_static_create_count = 0;
1255 1.30.2.2 thorpej SIMPLEQ_INIT(&l1pt_queue);
1256 1.30.2.2 thorpej l1pt_queue_count = 0;
1257 1.30.2.2 thorpej l1pt_create_count = 0;
1258 1.30.2.2 thorpej l1pt_reuse_count = 0;
1259 1.30.2.2 thorpej }
1260 1.30.2.2 thorpej
1261 1.30.2.2 thorpej /*
1262 1.30.2.2 thorpej * pmap_postinit()
1263 1.30.2.2 thorpej *
1264 1.30.2.2 thorpej * This routine is called after the vm and kmem subsystems have been
1265 1.30.2.2 thorpej * initialised. This allows the pmap code to perform any initialisation
1266 1.30.2.2 thorpej * that can only be done once the memory allocation is in place.
1267 1.30.2.2 thorpej */
1268 1.30.2.2 thorpej
1269 1.30.2.2 thorpej void
1270 1.30.2.2 thorpej pmap_postinit()
1271 1.30.2.2 thorpej {
1272 1.30.2.2 thorpej int loop;
1273 1.30.2.2 thorpej struct l1pt *pt;
1274 1.30.2.2 thorpej
1275 1.30.2.2 thorpej #ifdef PMAP_STATIC_L1S
1276 1.30.2.2 thorpej for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
1277 1.30.2.2 thorpej #else /* PMAP_STATIC_L1S */
1278 1.30.2.2 thorpej for (loop = 0; loop < max_processes; ++loop) {
1279 1.30.2.2 thorpej #endif /* PMAP_STATIC_L1S */
1280 1.30.2.2 thorpej /* Allocate a L1 page table */
1281 1.30.2.2 thorpej pt = pmap_alloc_l1pt();
1282 1.30.2.2 thorpej if (!pt)
1283 1.30.2.2 thorpej panic("Cannot allocate static L1 page tables");
1284 1.30.2.2 thorpej
1285 1.30.2.2 thorpej /* Clean it */
1286 1.30.2.2 thorpej bzero((void *)pt->pt_va, PD_SIZE);
1287 1.30.2.2 thorpej pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
1288 1.30.2.2 thorpej /* Add the page table to the queue */
1289 1.30.2.2 thorpej SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
1290 1.30.2.2 thorpej ++l1pt_static_queue_count;
1291 1.30.2.2 thorpej ++l1pt_static_create_count;
1292 1.30.2.2 thorpej }
1293 1.30.2.2 thorpej }
1294 1.30.2.2 thorpej
1295 1.30.2.2 thorpej
1296 1.30.2.2 thorpej /*
1297 1.30.2.2 thorpej * Create and return a physical map.
1298 1.30.2.2 thorpej *
1299 1.30.2.2 thorpej * The map returned is an actual physical map and may be referenced by
1300 1.30.2.2 thorpej * the hardware.  This interface takes no size argument, so the
1301 1.30.2.2 thorpej * software-only, size-bounded map that older pmap interfaces described
1302 1.30.2.2 thorpej * is never created here.
1303 1.30.2.2 thorpej */
1305 1.30.2.2 thorpej
1306 1.30.2.2 thorpej pmap_t
1307 1.30.2.2 thorpej pmap_create()
1308 1.30.2.2 thorpej {
1309 1.30.2.2 thorpej struct pmap *pmap;
1310 1.30.2.2 thorpej
1311 1.30.2.2 thorpej /*
1312 1.30.2.2 thorpej * Fetch pmap entry from the pool
1313 1.30.2.2 thorpej */
1314 1.30.2.2 thorpej
1315 1.30.2.2 thorpej pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1316 1.30.2.2 thorpej /* XXX is this really needed? */
1317 1.30.2.2 thorpej memset(pmap, 0, sizeof(*pmap));
1318 1.30.2.2 thorpej
1319 1.30.2.2 thorpej simple_lock_init(&pmap->pm_obj.vmobjlock);
1320 1.30.2.2 thorpej pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
1321 1.30.2.2 thorpej TAILQ_INIT(&pmap->pm_obj.memq);
1322 1.30.2.2 thorpej pmap->pm_obj.uo_npages = 0;
1323 1.30.2.2 thorpej pmap->pm_obj.uo_refs = 1;
1324 1.30.2.2 thorpej pmap->pm_stats.wired_count = 0;
1325 1.30.2.2 thorpej pmap->pm_stats.resident_count = 1;
1326 1.30.2.2 thorpej
1327 1.30.2.2 thorpej /* Now init the machine part of the pmap */
1328 1.30.2.2 thorpej pmap_pinit(pmap);
1329 1.30.2.2 thorpej return(pmap);
1330 1.30.2.2 thorpej }
1331 1.30.2.2 thorpej
1332 1.30.2.2 thorpej /*
1333 1.30.2.2 thorpej * pmap_alloc_l1pt()
1334 1.30.2.2 thorpej *
1335 1.30.2.2 thorpej * This routine allocates physical and virtual memory for an L1 page table
1336 1.30.2.2 thorpej * and wires it.
1337 1.30.2.2 thorpej * An l1pt structure is returned to describe the allocated page table.
1338 1.30.2.2 thorpej *
1339 1.30.2.2 thorpej * This routine is allowed to fail if the required memory cannot be allocated.
1340 1.30.2.2 thorpej * In this case NULL is returned.
1341 1.30.2.2 thorpej */
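/*
 * Note: on the ARM MMU the L1 translation table is 16KB long and must be
 * 16KB aligned, as the translation table base register holds only the
 * upper bits of the table's physical address.  Hence the PD_SIZE length
 * and PD_SIZE alignment arguments passed to uvm_pglistalloc() below.
 */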
1342 1.30.2.2 thorpej
1343 1.30.2.2 thorpej struct l1pt *
1344 1.30.2.2 thorpej pmap_alloc_l1pt(void)
1345 1.30.2.2 thorpej {
1346 1.30.2.2 thorpej paddr_t pa;
1347 1.30.2.2 thorpej vaddr_t va;
1348 1.30.2.2 thorpej struct l1pt *pt;
1349 1.30.2.2 thorpej int error;
1350 1.30.2.2 thorpej struct vm_page *m;
1351 1.30.2.2 thorpej pt_entry_t *ptes;
1352 1.30.2.2 thorpej
1353 1.30.2.2 thorpej /* Allocate virtual address space for the L1 page table */
1354 1.30.2.2 thorpej va = uvm_km_valloc(kernel_map, PD_SIZE);
1355 1.30.2.2 thorpej if (va == 0) {
1356 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1357 1.30.2.2 thorpej PDEBUG(0,
1358 1.30.2.2 thorpej printf("pmap: Cannot allocate pageable memory for L1\n"));
1359 1.30.2.2 thorpej #endif /* DIAGNOSTIC */
1360 1.30.2.2 thorpej return(NULL);
1361 1.30.2.2 thorpej }
1362 1.30.2.2 thorpej
1363 1.30.2.2 thorpej /* Allocate memory for the l1pt structure */
1364 1.30.2.2 thorpej pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1365 1.30.2.2 thorpej
1366 1.30.2.2 thorpej /*
1367 1.30.2.2 thorpej * Allocate pages from the VM system.
1368 1.30.2.2 thorpej */
1369 1.30.2.2 thorpej TAILQ_INIT(&pt->pt_plist);
1370 1.30.2.2 thorpej error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
1371 1.30.2.2 thorpej PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1372 1.30.2.2 thorpej if (error) {
1373 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1374 1.30.2.2 thorpej PDEBUG(0,
1375 1.30.2.2 thorpej printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
1376 1.30.2.2 thorpej error));
1377 1.30.2.2 thorpej #endif /* DIAGNOSTIC */
1378 1.30.2.2 thorpej /* Release the resources we already have claimed */
1379 1.30.2.2 thorpej free(pt, M_VMPMAP);
1380 1.30.2.2 thorpej uvm_km_free(kernel_map, va, PD_SIZE);
1381 1.30.2.2 thorpej return(NULL);
1382 1.30.2.2 thorpej }
1383 1.30.2.2 thorpej
1384 1.30.2.2 thorpej /* Map our physical pages into our virtual space */
1385 1.30.2.2 thorpej pt->pt_va = va;
1386 1.30.2.2 thorpej m = pt->pt_plist.tqh_first;
1387 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap_kernel());
1388 1.30.2.2 thorpej while (m && va < (pt->pt_va + PD_SIZE)) {
1389 1.30.2.2 thorpej pa = VM_PAGE_TO_PHYS(m);
1390 1.30.2.2 thorpej
1391 1.30.2.2 thorpej pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
1392 1.30.2.2 thorpej
1393 1.30.2.2 thorpej /* Revoke cacheability and bufferability */
1394 1.30.2.2 thorpej /* XXX should be done better than this */
1395 1.30.2.2 thorpej ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
1396 1.30.2.2 thorpej
1397 1.30.2.2 thorpej va += NBPG;
1398 1.30.2.2 thorpej m = m->pageq.tqe_next;
1399 1.30.2.2 thorpej }
1400 1.30.2.2 thorpej pmap_unmap_ptes(pmap_kernel());
1401 1.30.2.2 thorpej pmap_update(pmap_kernel());
1402 1.30.2.2 thorpej
1403 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1404 1.30.2.2 thorpej if (m)
1405 1.30.2.2 thorpej panic("pmap_alloc_l1pt: pglist not empty");
1406 1.30.2.2 thorpej #endif /* DIAGNOSTIC */
1407 1.30.2.2 thorpej
1408 1.30.2.2 thorpej pt->pt_flags = 0;
1409 1.30.2.2 thorpej return(pt);
1410 1.30.2.2 thorpej }
1411 1.30.2.2 thorpej
1412 1.30.2.2 thorpej /*
1413 1.30.2.2 thorpej * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1414 1.30.2.2 thorpej */
1415 1.30.2.4 nathanw static void
1416 1.30.2.2 thorpej pmap_free_l1pt(pt)
1417 1.30.2.2 thorpej struct l1pt *pt;
1418 1.30.2.2 thorpej {
1419 1.30.2.2 thorpej /* Separate the physical memory from the virtual space */
1420 1.30.2.2 thorpej pmap_kremove(pt->pt_va, PD_SIZE);
1421 1.30.2.2 thorpej pmap_update(pmap_kernel());
1422 1.30.2.2 thorpej
1423 1.30.2.2 thorpej /* Return the physical memory */
1424 1.30.2.2 thorpej uvm_pglistfree(&pt->pt_plist);
1425 1.30.2.2 thorpej
1426 1.30.2.2 thorpej /* Free the virtual space */
1427 1.30.2.2 thorpej uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1428 1.30.2.2 thorpej
1429 1.30.2.2 thorpej /* Free the l1pt structure */
1430 1.30.2.2 thorpej free(pt, M_VMPMAP);
1431 1.30.2.2 thorpej }
1432 1.30.2.2 thorpej
1433 1.30.2.2 thorpej /*
1434 1.30.2.2 thorpej * Allocate a page directory.
1435 1.30.2.2 thorpej * This routine will either allocate a new page directory from the pool
1436 1.30.2.2 thorpej * of L1 page tables currently held by the kernel or it will allocate
1437 1.30.2.2 thorpej * a new one via pmap_alloc_l1pt().
1438 1.30.2.2 thorpej * It will then initialise the l1 page table for use.
1439 1.30.2.2 thorpej */
1440 1.30.2.4 nathanw static int
1441 1.30.2.2 thorpej pmap_allocpagedir(pmap)
1442 1.30.2.2 thorpej struct pmap *pmap;
1443 1.30.2.2 thorpej {
1444 1.30.2.2 thorpej paddr_t pa;
1445 1.30.2.2 thorpej struct l1pt *pt;
1446 1.30.2.2 thorpej pt_entry_t *pte;
1447 1.30.2.2 thorpej
1448 1.30.2.2 thorpej PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1449 1.30.2.2 thorpej
1450 1.30.2.2 thorpej /* Do we have any spare L1's lying around ? */
1451 1.30.2.2 thorpej if (l1pt_static_queue_count) {
1452 1.30.2.2 thorpej --l1pt_static_queue_count;
1453 1.30.2.2 thorpej pt = l1pt_static_queue.sqh_first;
1454 1.30.2.2 thorpej SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1455 1.30.2.2 thorpej } else if (l1pt_queue_count) {
1456 1.30.2.2 thorpej --l1pt_queue_count;
1457 1.30.2.2 thorpej pt = l1pt_queue.sqh_first;
1458 1.30.2.2 thorpej SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1459 1.30.2.2 thorpej ++l1pt_reuse_count;
1460 1.30.2.2 thorpej } else {
1461 1.30.2.2 thorpej pt = pmap_alloc_l1pt();
1462 1.30.2.2 thorpej if (!pt)
1463 1.30.2.2 thorpej return(ENOMEM);
1464 1.30.2.2 thorpej ++l1pt_create_count;
1465 1.30.2.2 thorpej }
1466 1.30.2.2 thorpej
1467 1.30.2.2 thorpej /* Store the pointer to the l1 descriptor in the pmap. */
1468 1.30.2.2 thorpej pmap->pm_l1pt = pt;
1469 1.30.2.2 thorpej
1470 1.30.2.2 thorpej /* Get the physical address of the start of the l1 */
1471 1.30.2.2 thorpej pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1472 1.30.2.2 thorpej
1473 1.30.2.2 thorpej /* Store the virtual address of the l1 in the pmap. */
1474 1.30.2.2 thorpej pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1475 1.30.2.2 thorpej
1476 1.30.2.2 thorpej /* Clean the L1 if it is dirty */
1477 1.30.2.2 thorpej if (!(pt->pt_flags & PTFLAG_CLEAN))
1478 1.30.2.2 thorpej bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1479 1.30.2.2 thorpej
1480 1.30.2.2 thorpej /* Do we already have the kernel mappings ? */
1481 1.30.2.2 thorpej if (!(pt->pt_flags & PTFLAG_KPT)) {
1482 1.30.2.2 thorpej /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1483 1.30.2.2 thorpej
1484 1.30.2.2 thorpej bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1485 1.30.2.2 thorpej (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1486 1.30.2.2 thorpej KERNEL_PD_SIZE);
1487 1.30.2.2 thorpej pt->pt_flags |= PTFLAG_KPT;
1488 1.30.2.2 thorpej }
1489 1.30.2.2 thorpej
1490 1.30.2.2 thorpej /* Allocate a page table to map all the page tables for this pmap */
1491 1.30.2.2 thorpej
1492 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1493 1.30.2.2 thorpej if (pmap->pm_vptpt) {
1494 1.30.2.2 thorpej /* XXX What if we have one already? */
1495 1.30.2.2 thorpej panic("pmap_allocpagedir: have pt already");
1496 1.30.2.2 thorpej }
1497 1.30.2.2 thorpej #endif /* DIAGNOSTIC */
1498 1.30.2.2 thorpej pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1499 1.30.2.2 thorpej if (pmap->pm_vptpt == 0) {
1500 1.30.2.2 thorpej pmap_freepagedir(pmap);
1501 1.30.2.2 thorpej return(ENOMEM);
1502 1.30.2.2 thorpej }
1503 1.30.2.2 thorpej
1504 1.30.2.2 thorpej (void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
1505 1.30.2.2 thorpej pmap->pm_pptpt &= PG_FRAME;
1506 1.30.2.2 thorpej /* Revoke cacheability and bufferability */
1507 1.30.2.2 thorpej /* XXX should be done better than this */
1508 1.30.2.2 thorpej pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
1509 1.30.2.2 thorpej *pte = *pte & ~(PT_C | PT_B);
1510 1.30.2.2 thorpej
1511 1.30.2.2 thorpej /* Wire in this page table */
1512 1.30.2.2 thorpej pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
1513 1.30.2.2 thorpej
1514 1.30.2.2 thorpej pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1515 1.30.2.2 thorpej
1516 1.30.2.2 thorpej /*
1517 1.30.2.2 thorpej * Map the kernel page tables for 0xf0000000 +
1518 1.30.2.2 thorpej * into the page table used to map the
1519 1.30.2.2 thorpej * pmap's page tables
1520 1.30.2.2 thorpej */
1521 1.30.2.2 thorpej bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1522 1.30.2.2 thorpej + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1523 1.30.2.2 thorpej + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1524 1.30.2.2 thorpej (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1525 1.30.2.2 thorpej (KERNEL_PD_SIZE >> 2));
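/*
 * A sketch of the source address arithmetic above, assuming 4KB pages
 * (PGSHIFT == 12) and 4-byte PTEs: within the recursive mapping, the
 * PTEs covering a virtual address va live at
 *
 *	PROCESS_PAGE_TBLS_BASE + (va >> (PGSHIFT - 2))
 *
 * so the first two terms locate the page of PTEs that maps the page
 * tables themselves, and ((PD_SIZE - KERNEL_PD_SIZE) >> 2) skips to the
 * kernel's slice of it: each 4-byte L1 entry corresponds to one byte of
 * the page-table page table, which is also why KERNEL_PD_SIZE >> 2
 * bytes are copied.
 */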
1526 1.30.2.2 thorpej
1527 1.30.2.2 thorpej return(0);
1528 1.30.2.2 thorpej }
1529 1.30.2.2 thorpej
1530 1.30.2.2 thorpej
1531 1.30.2.2 thorpej /*
1532 1.30.2.2 thorpej * Initialize a preallocated and zeroed pmap structure,
1533 1.30.2.2 thorpej * such as one in a vmspace structure.
1534 1.30.2.2 thorpej */
1535 1.30.2.2 thorpej
1536 1.30.2.2 thorpej void
1537 1.30.2.2 thorpej pmap_pinit(pmap)
1538 1.30.2.2 thorpej struct pmap *pmap;
1539 1.30.2.2 thorpej {
1540 1.30.2.2 thorpej int backoff = 6;
1541 1.30.2.2 thorpej int retry = 10;
1542 1.30.2.2 thorpej
1543 1.30.2.2 thorpej PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1544 1.30.2.2 thorpej
1545 1.30.2.2 thorpej /* Keep looping until we succeed in allocating a page directory */
1546 1.30.2.2 thorpej while (pmap_allocpagedir(pmap) != 0) {
1547 1.30.2.2 thorpej /*
1548 1.30.2.2 thorpej * OK, we failed to allocate a suitable block of memory for an
1549 1.30.2.2 thorpej * L1 page table. This means that either:
1550 1.30.2.2 thorpej * 1. 16KB of virtual address space could not be allocated
1551 1.30.2.2 thorpej * 2. 16KB of physically contiguous memory on a 16KB boundary
1552 1.30.2.2 thorpej * could not be allocated.
1553 1.30.2.2 thorpej *
1554 1.30.2.2 thorpej * Since we cannot fail, we will sleep for a while and try
1555 1.30.2.2 thorpej * again.
1556 1.30.2.2 thorpej *
1557 1.30.2.2 thorpej * Searching for a suitable L1 PT is expensive:
1558 1.30.2.2 thorpej * to avoid hogging the system when memory is really
1559 1.30.2.2 thorpej * scarce, use an exponential back-off so that
1560 1.30.2.2 thorpej * eventually we won't retry more than once every 8
1561 1.30.2.2 thorpej * seconds. This should allow other processes to run
1562 1.30.2.2 thorpej * to completion and free up resources.
1563 1.30.2.2 thorpej */
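/*
 * A rough sketch of the schedule, assuming hz == 100: with backoff == 6
 * the first timeouts are (hz << 3) >> 6 == hz / 8, i.e. about an eighth
 * of a second.  Every 10 failed attempts the --backoff below doubles the
 * timeout, until backoff == 0 yields hz << 3, i.e. roughly 8 seconds
 * between retries.
 */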
1564 1.30.2.2 thorpej (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
1565 1.30.2.2 thorpej NULL);
1566 1.30.2.2 thorpej if (--retry == 0) {
1567 1.30.2.2 thorpej retry = 10;
1568 1.30.2.2 thorpej if (backoff)
1569 1.30.2.2 thorpej --backoff;
1570 1.30.2.2 thorpej }
1571 1.30.2.2 thorpej }
1572 1.30.2.2 thorpej
1573 1.30.2.2 thorpej /* Map zero page for the pmap. This will also map the L2 for it */
1574 1.30.2.2 thorpej pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1575 1.30.2.2 thorpej VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1576 1.30.2.2 thorpej pmap_update(pmap);
1577 1.30.2.2 thorpej }
1578 1.30.2.2 thorpej
1579 1.30.2.2 thorpej
1580 1.30.2.2 thorpej void
1581 1.30.2.2 thorpej pmap_freepagedir(pmap)
1582 1.30.2.2 thorpej struct pmap *pmap;
1583 1.30.2.2 thorpej {
1584 1.30.2.2 thorpej /* Free the memory used for the page table mapping */
1585 1.30.2.2 thorpej if (pmap->pm_vptpt != 0)
1586 1.30.2.2 thorpej uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1587 1.30.2.2 thorpej
1588 1.30.2.2 thorpej /* junk the L1 page table */
1589 1.30.2.2 thorpej if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1590 1.30.2.2 thorpej /* Add the page table to the queue */
1591 1.30.2.2 thorpej SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1592 1.30.2.2 thorpej ++l1pt_static_queue_count;
1593 1.30.2.2 thorpej } else if (l1pt_queue_count < 8) {
1594 1.30.2.2 thorpej /* Add the page table to the queue */
1595 1.30.2.2 thorpej SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1596 1.30.2.2 thorpej ++l1pt_queue_count;
1597 1.30.2.2 thorpej } else
1598 1.30.2.2 thorpej pmap_free_l1pt(pmap->pm_l1pt);
1599 1.30.2.2 thorpej }
1600 1.30.2.2 thorpej
1601 1.30.2.2 thorpej
1602 1.30.2.2 thorpej /*
1603 1.30.2.2 thorpej * Retire the given physical map from service.
1604 1.30.2.2 thorpej * Should only be called if the map contains no valid mappings.
1605 1.30.2.2 thorpej */
1606 1.30.2.2 thorpej
1607 1.30.2.2 thorpej void
1608 1.30.2.2 thorpej pmap_destroy(pmap)
1609 1.30.2.2 thorpej struct pmap *pmap;
1610 1.30.2.2 thorpej {
1611 1.30.2.2 thorpej struct vm_page *page;
1612 1.30.2.2 thorpej int count;
1613 1.30.2.2 thorpej
1614 1.30.2.2 thorpej if (pmap == NULL)
1615 1.30.2.2 thorpej return;
1616 1.30.2.2 thorpej
1617 1.30.2.2 thorpej PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1618 1.30.2.2 thorpej
1619 1.30.2.2 thorpej /*
1620 1.30.2.2 thorpej * Drop reference count
1621 1.30.2.2 thorpej */
1622 1.30.2.2 thorpej simple_lock(&pmap->pm_obj.vmobjlock);
1623 1.30.2.2 thorpej count = --pmap->pm_obj.uo_refs;
1624 1.30.2.2 thorpej simple_unlock(&pmap->pm_obj.vmobjlock);
1625 1.30.2.2 thorpej if (count > 0) {
1626 1.30.2.2 thorpej return;
1627 1.30.2.2 thorpej }
1628 1.30.2.2 thorpej
1629 1.30.2.2 thorpej /*
1630 1.30.2.2 thorpej * reference count is zero, free pmap resources and then free pmap.
1631 1.30.2.2 thorpej */
1632 1.30.2.2 thorpej
1633 1.30.2.2 thorpej /* Remove the zero page mapping */
1634 1.30.2.2 thorpej pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1635 1.30.2.2 thorpej pmap_update(pmap);
1636 1.30.2.2 thorpej
1637 1.30.2.2 thorpej /*
1638 1.30.2.2 thorpej * Free any page tables still mapped.
1639 1.30.2.2 thorpej * This is only temporary until pmap_enter can count the number
1640 1.30.2.2 thorpej * of mappings made in a page table.  Then pmap_remove() can
1641 1.30.2.2 thorpej * reduce the count and free the page table when the count
1642 1.30.2.2 thorpej * reaches zero.  Note that entries in this list should match the
1643 1.30.2.2 thorpej * contents of the ptpt; however, this is faster than walking all
1644 1.30.2.2 thorpej * 1024 entries looking for PTs.
1645 1.30.2.2 thorpej * (Taken from the i386 pmap.c.)
1646 1.30.2.2 thorpej */
1647 1.30.2.2 thorpej while (pmap->pm_obj.memq.tqh_first != NULL) {
1648 1.30.2.2 thorpej page = pmap->pm_obj.memq.tqh_first;
1649 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1650 1.30.2.2 thorpej if (page->flags & PG_BUSY)
1651 1.30.2.2 thorpej panic("pmap_destroy: busy page table page");
1652 1.30.2.2 thorpej #endif
1653 1.30.2.2 thorpej /* pmap_page_protect? currently no need for it. */
1654 1.30.2.2 thorpej
1655 1.30.2.2 thorpej page->wire_count = 0;
1656 1.30.2.2 thorpej uvm_pagefree(page);
1657 1.30.2.2 thorpej }
1658 1.30.2.2 thorpej
1659 1.30.2.2 thorpej /* Free the page dir */
1660 1.30.2.2 thorpej pmap_freepagedir(pmap);
1661 1.30.2.2 thorpej
1662 1.30.2.2 thorpej /* return the pmap to the pool */
1663 1.30.2.2 thorpej pool_put(&pmap_pmap_pool, pmap);
1664 1.30.2.2 thorpej }
1665 1.30.2.2 thorpej
1666 1.30.2.2 thorpej
1667 1.30.2.2 thorpej /*
1668 1.30.2.2 thorpej * void pmap_reference(struct pmap *pmap)
1669 1.30.2.2 thorpej *
1670 1.30.2.2 thorpej * Add a reference to the specified pmap.
1671 1.30.2.2 thorpej */
1672 1.30.2.2 thorpej
1673 1.30.2.2 thorpej void
1674 1.30.2.2 thorpej pmap_reference(pmap)
1675 1.30.2.2 thorpej struct pmap *pmap;
1676 1.30.2.2 thorpej {
1677 1.30.2.2 thorpej if (pmap == NULL)
1678 1.30.2.2 thorpej return;
1679 1.30.2.2 thorpej
1680 1.30.2.2 thorpej simple_lock(&pmap->pm_lock);
1681 1.30.2.2 thorpej pmap->pm_obj.uo_refs++;
1682 1.30.2.2 thorpej simple_unlock(&pmap->pm_lock);
1683 1.30.2.2 thorpej }
1684 1.30.2.2 thorpej
1685 1.30.2.2 thorpej /*
1686 1.30.2.2 thorpej * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1687 1.30.2.2 thorpej *
1688 1.30.2.2 thorpej * Return the start and end addresses of the kernel's virtual space.
1689 1.30.2.2 thorpej * These values are setup in pmap_bootstrap and are updated as pages
1690 1.30.2.2 thorpej * are allocated.
1691 1.30.2.2 thorpej */
1692 1.30.2.2 thorpej
1693 1.30.2.2 thorpej void
1694 1.30.2.2 thorpej pmap_virtual_space(start, end)
1695 1.30.2.2 thorpej vaddr_t *start;
1696 1.30.2.2 thorpej vaddr_t *end;
1697 1.30.2.2 thorpej {
1698 1.30.2.2 thorpej *start = virtual_start;
1699 1.30.2.2 thorpej *end = virtual_end;
1700 1.30.2.2 thorpej }
1701 1.30.2.2 thorpej
1702 1.30.2.2 thorpej
1703 1.30.2.2 thorpej /*
1704 1.30.2.2 thorpej * Activate the address space for the specified process. If the process
1705 1.30.2.2 thorpej * is the current process, load the new MMU context.
1706 1.30.2.2 thorpej */
1707 1.30.2.2 thorpej void
1708 1.30.2.2 thorpej pmap_activate(l)
1709 1.30.2.2 thorpej struct lwp *l;
1710 1.30.2.2 thorpej {
1711 1.30.2.2 thorpej struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
1712 1.30.2.2 thorpej struct pcb *pcb = &l->l_addr->u_pcb;
1713 1.30.2.2 thorpej
1714 1.30.2.2 thorpej (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1715 1.30.2.2 thorpej (paddr_t *)&pcb->pcb_pagedir);
1716 1.30.2.2 thorpej
1717 1.30.2.2 thorpej PDEBUG(0, printf("pmap_activate: l=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1718 1.30.2.2 thorpej l, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1719 1.30.2.2 thorpej
1720 1.30.2.2 thorpej if (l == curproc) {
1721 1.30.2.2 thorpej PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1722 1.30.2.2 thorpej setttb((u_int)pcb->pcb_pagedir);
1723 1.30.2.2 thorpej }
1724 1.30.2.2 thorpej #if 0
1725 1.30.2.2 thorpej pmap->pm_pdchanged = FALSE;
1726 1.30.2.2 thorpej #endif
1727 1.30.2.2 thorpej }
1728 1.30.2.2 thorpej
1729 1.30.2.2 thorpej
1730 1.30.2.2 thorpej /*
1731 1.30.2.2 thorpej * Deactivate the address space of the specified process.
1732 1.30.2.2 thorpej */
1733 1.30.2.2 thorpej void
1734 1.30.2.2 thorpej pmap_deactivate(l)
1735 1.30.2.2 thorpej struct lwp *l;
1736 1.30.2.2 thorpej {
1737 1.30.2.2 thorpej }
1738 1.30.2.2 thorpej
1739 1.30.2.3 nathanw /*
1740 1.30.2.3 nathanw * Perform any deferred pmap operations.
1741 1.30.2.3 nathanw */
1742 1.30.2.3 nathanw void
1743 1.30.2.3 nathanw pmap_update(struct pmap *pmap)
1744 1.30.2.3 nathanw {
1745 1.30.2.3 nathanw
1746 1.30.2.3 nathanw /*
1747 1.30.2.3 nathanw * We haven't deferred any pmap operations, but we do need to
1748 1.30.2.3 nathanw * make sure TLB/cache operations have completed.
1749 1.30.2.3 nathanw */
1750 1.30.2.3 nathanw cpu_cpwait();
1751 1.30.2.3 nathanw }
1752 1.30.2.2 thorpej
1753 1.30.2.2 thorpej /*
1754 1.30.2.2 thorpej * pmap_clean_page()
1755 1.30.2.2 thorpej *
1756 1.30.2.2 thorpej * This is a local function used to work out the best strategy to clean
1757 1.30.2.2 thorpej * a single page referenced by its entry in the PV table. It's used by
1758 1.30.2.2 thorpej * pmap_copy_page, pmap_zero page and maybe some others later on.
1759 1.30.2.2 thorpej *
1760 1.30.2.2 thorpej * Its policy is effectively:
1761 1.30.2.2 thorpej * o If there are no mappings, we don't bother doing anything with the cache.
1762 1.30.2.2 thorpej * o If there is one mapping, we clean just that page.
1763 1.30.2.2 thorpej * o If there are multiple mappings, we clean the entire cache.
1764 1.30.2.2 thorpej *
1765 1.30.2.2 thorpej * So that some functions can be further optimised, it returns 0 if it didn't
1766 1.30.2.2 thorpej * clean the entire cache, or 1 if it did.
1767 1.30.2.2 thorpej *
1768 1.30.2.2 thorpej * XXX One bug in this routine is that if the pv_entry has a single page
1769 1.30.2.2 thorpej * mapped at 0x00000000, a whole cache clean will be performed rather than
1770 1.30.2.2 thorpej * just that one page.  This should not occur in everyday use, and even if
1771 1.30.2.2 thorpej * it does, the result is merely a less efficient clean for that page.
1772 1.30.2.2 thorpej */
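/*
 * For example, pmap_copy_page() below makes use of the return value: if
 * cleaning the source page's mappings already wiped the entire cache,
 * the destination page's mappings need no separate cleaning pass.
 */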
1773 1.30.2.2 thorpej static int
1774 1.30.2.2 thorpej pmap_clean_page(pv, is_src)
1775 1.30.2.2 thorpej struct pv_entry *pv;
1776 1.30.2.2 thorpej boolean_t is_src;
1777 1.30.2.2 thorpej {
1778 1.30.2.2 thorpej struct pmap *pmap;
1779 1.30.2.2 thorpej struct pv_entry *npv;
1780 1.30.2.2 thorpej int cache_needs_cleaning = 0;
1781 1.30.2.2 thorpej vaddr_t page_to_clean = 0;
1782 1.30.2.2 thorpej
1783 1.30.2.2 thorpej if (pv == NULL)
1784 1.30.2.2 thorpej /* nothing mapped in so nothing to flush */
1785 1.30.2.2 thorpej return (0);
1786 1.30.2.2 thorpej
1787 1.30.2.2 thorpej /* Since we flush the cache each time we change curproc, we
1788 1.30.2.2 thorpej * only need to flush the page if it is in the current pmap.
1789 1.30.2.2 thorpej */
1790 1.30.2.2 thorpej if (curproc)
1791 1.30.2.2 thorpej pmap = curproc->l_proc->p_vmspace->vm_map.pmap;
1792 1.30.2.2 thorpej else
1793 1.30.2.2 thorpej pmap = pmap_kernel();
1794 1.30.2.2 thorpej
1795 1.30.2.2 thorpej for (npv = pv; npv; npv = npv->pv_next) {
1796 1.30.2.2 thorpej if (npv->pv_pmap == pmap) {
1797 1.30.2.2 thorpej /* The page is mapped non-cacheable in
1798 1.30.2.2 thorpej * this map. No need to flush the cache.
1799 1.30.2.2 thorpej */
1800 1.30.2.2 thorpej if (npv->pv_flags & PT_NC) {
1801 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1802 1.30.2.2 thorpej if (cache_needs_cleaning)
1803 1.30.2.2 thorpej panic("pmap_clean_page: "
1804 1.30.2.2 thorpej "cache inconsistency");
1805 1.30.2.2 thorpej #endif
1806 1.30.2.2 thorpej break;
1807 1.30.2.2 thorpej }
1808 1.30.2.2 thorpej #if 0
1809 1.30.2.2 thorpej /* This doesn't work, because pmap_protect
1810 1.30.2.2 thorpej doesn't flush changes on pages that it
1811 1.30.2.2 thorpej has write-protected. */
1812 1.30.2.2 thorpej
1813 1.30.2.2 thorpej /* If the page is not writable and this
1814 1.30.2.2 thorpej is the source, then there is no need
1815 1.30.2.2 thorpej to flush it from the cache. */
1816 1.30.2.2 thorpej else if (is_src && ! (npv->pv_flags & PT_Wr))
1817 1.30.2.2 thorpej continue;
1818 1.30.2.2 thorpej #endif
1819 1.30.2.2 thorpej if (cache_needs_cleaning) {
1820 1.30.2.2 thorpej page_to_clean = 0;
1821 1.30.2.2 thorpej break;
1822 1.30.2.2 thorpej } else
1823 1.30.2.2 thorpej page_to_clean = npv->pv_va;
1824 1.30.2.2 thorpej cache_needs_cleaning = 1;
1826 1.30.2.2 thorpej }
1827 1.30.2.2 thorpej }
1828 1.30.2.2 thorpej
1829 1.30.2.2 thorpej if (page_to_clean)
1830 1.30.2.5 nathanw cpu_idcache_wbinv_range(page_to_clean, NBPG);
1831 1.30.2.2 thorpej else if (cache_needs_cleaning) {
1832 1.30.2.5 nathanw cpu_idcache_wbinv_all();
1833 1.30.2.2 thorpej return (1);
1834 1.30.2.2 thorpej }
1835 1.30.2.2 thorpej return (0);
1836 1.30.2.2 thorpej }
1837 1.30.2.2 thorpej
1838 1.30.2.2 thorpej /*
1839 1.30.2.2 thorpej * pmap_find_pvh()
1840 1.30.2.2 thorpej *
1841 1.30.2.2 thorpej * This is a local function that finds a PV head for a given physical page.
1842 1.30.2.2 thorpej * This is a common op, and this function removes loads of ifdefs in the code.
1843 1.30.2.2 thorpej */
1844 1.30.2.2 thorpej static __inline struct pv_head *
1845 1.30.2.2 thorpej pmap_find_pvh(phys)
1846 1.30.2.2 thorpej paddr_t phys;
1847 1.30.2.2 thorpej {
1848 1.30.2.2 thorpej int bank, off;
1849 1.30.2.2 thorpej struct pv_head *pvh;
1850 1.30.2.2 thorpej
1851 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1852 1.30.2.2 thorpej panic("pmap_find_pvh: not a real page, phys=%lx", phys);
1853 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
1854 1.30.2.2 thorpej return (pvh);
1855 1.30.2.2 thorpej }
1856 1.30.2.2 thorpej
1857 1.30.2.2 thorpej /*
1858 1.30.2.2 thorpej * pmap_zero_page()
1859 1.30.2.2 thorpej *
1860 1.30.2.2 thorpej * Zero a given physical page by mapping it at a page hook point.
1861 1.30.2.2 thorpej * In doing the zero page op, the page we zero is mapped cacheable, since
1862 1.30.2.2 thorpej * on StrongARM accesses to non-cached pages do not burst, making the
1863 1.30.2.2 thorpej * writing of _any_ bulk data very slow.
1864 1.30.2.2 thorpej */
1865 1.30.2.2 thorpej void
1866 1.30.2.2 thorpej pmap_zero_page(phys)
1867 1.30.2.2 thorpej paddr_t phys;
1868 1.30.2.2 thorpej {
1869 1.30.2.2 thorpej struct pv_head *pvh;
1870 1.30.2.2 thorpej
1871 1.30.2.2 thorpej /* Get an entry for this page, and clean it. */
1872 1.30.2.2 thorpej pvh = pmap_find_pvh(phys);
1873 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
1874 1.30.2.2 thorpej pmap_clean_page(pvh->pvh_list, FALSE);
1875 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
1876 1.30.2.2 thorpej
1877 1.30.2.2 thorpej /*
1878 1.30.2.2 thorpej * Hook in the page, zero it, and purge the cache for that
1879 1.30.2.2 thorpej * zeroed page. Invalidate the TLB as needed.
1880 1.30.2.2 thorpej */
1881 1.30.2.2 thorpej *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1882 1.30.2.2 thorpej cpu_tlb_flushD_SE(page_hook0.va);
1883 1.30.2.3 nathanw cpu_cpwait();
1884 1.30.2.2 thorpej bzero_page(page_hook0.va);
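/*
 * The write-back below matters because the cache is virtually indexed:
 * the zeroes written above may still sit in dirty cache lines tagged
 * with page_hook0.va, and must reach the physical page before it is
 * accessed through some other virtual alias.
 */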
1885 1.30.2.5 nathanw cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1886 1.30.2.2 thorpej }
1887 1.30.2.2 thorpej
1888 1.30.2.2 thorpej /* pmap_pageidlezero()
1889 1.30.2.2 thorpej *
1890 1.30.2.2 thorpej * The same as above, except that we assume that the page is not
1891 1.30.2.2 thorpej * mapped. This means we never have to flush the cache first. Called
1892 1.30.2.2 thorpej * from the idle loop.
1893 1.30.2.2 thorpej */
1894 1.30.2.2 thorpej boolean_t
1895 1.30.2.2 thorpej pmap_pageidlezero(phys)
1896 1.30.2.2 thorpej paddr_t phys;
1897 1.30.2.2 thorpej {
1898 1.30.2.2 thorpej int i, *ptr;
1899 1.30.2.2 thorpej boolean_t rv = TRUE;
1900 1.30.2.2 thorpej
1901 1.30.2.2 thorpej #ifdef DIAGNOSTIC
1902 1.30.2.2 thorpej struct pv_head *pvh;
1903 1.30.2.2 thorpej
1904 1.30.2.2 thorpej pvh = pmap_find_pvh(phys);
1905 1.30.2.2 thorpej if (pvh->pvh_list != NULL)
1906 1.30.2.2 thorpej panic("pmap_pageidlezero: zeroing mapped page");
1907 1.30.2.2 thorpej #endif
1908 1.30.2.2 thorpej
1909 1.30.2.2 thorpej /*
1910 1.30.2.2 thorpej * Hook in the page, zero it, and purge the cache for that
1911 1.30.2.2 thorpej * zeroed page. Invalidate the TLB as needed.
1912 1.30.2.2 thorpej */
1913 1.30.2.2 thorpej *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1914 1.30.2.2 thorpej cpu_tlb_flushD_SE(page_hook0.va);
1915 1.30.2.3 nathanw cpu_cpwait();
1916 1.30.2.3 nathanw
1917 1.30.2.2 thorpej for (i = 0, ptr = (int *)page_hook0.va;
1918 1.30.2.2 thorpej i < (NBPG / sizeof(int)); i++) {
1919 1.30.2.2 thorpej if (sched_whichqs != 0) {
1920 1.30.2.2 thorpej /*
1921 1.30.2.2 thorpej * A process has become ready. Abort now,
1922 1.30.2.2 thorpej * so we don't keep it waiting while we
1923 1.30.2.2 thorpej * do slow memory access to finish this
1924 1.30.2.2 thorpej * page.
1925 1.30.2.2 thorpej */
1926 1.30.2.2 thorpej rv = FALSE;
1927 1.30.2.2 thorpej break;
1928 1.30.2.2 thorpej }
1929 1.30.2.2 thorpej *ptr++ = 0;
1930 1.30.2.2 thorpej }
1931 1.30.2.2 thorpej
1932 1.30.2.2 thorpej if (rv)
1933 1.30.2.2 thorpej /*
1934 1.30.2.2 thorpej * If we aborted, we'll rezero this page again later, so don't
1935 1.30.2.2 thorpej * purge it from the cache unless we finished it.
1936 1.30.2.2 thorpej */
1937 1.30.2.5 nathanw cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1938 1.30.2.2 thorpej return (rv);
1939 1.30.2.2 thorpej }
1940 1.30.2.2 thorpej
1941 1.30.2.2 thorpej /*
1942 1.30.2.2 thorpej * pmap_copy_page()
1943 1.30.2.2 thorpej *
1944 1.30.2.2 thorpej * Copy one physical page into another, by mapping the pages into
1945 1.30.2.2 thorpej * hook points. The same comment regarding cachability as in
1946 1.30.2.2 thorpej * pmap_zero_page also applies here.
1947 1.30.2.2 thorpej */
1948 1.30.2.2 thorpej void
1949 1.30.2.2 thorpej pmap_copy_page(src, dest)
1950 1.30.2.2 thorpej paddr_t src;
1951 1.30.2.2 thorpej paddr_t dest;
1952 1.30.2.2 thorpej {
1953 1.30.2.2 thorpej struct pv_head *src_pvh, *dest_pvh;
1954 1.30.2.2 thorpej boolean_t cleanedcache;
1955 1.30.2.2 thorpej
1956 1.30.2.2 thorpej /* Get PV entries for the pages, and clean them if needed. */
1957 1.30.2.2 thorpej src_pvh = pmap_find_pvh(src);
1958 1.30.2.2 thorpej
1959 1.30.2.2 thorpej simple_lock(&src_pvh->pvh_lock);
1960 1.30.2.2 thorpej cleanedcache = pmap_clean_page(src_pvh->pvh_list, TRUE);
1961 1.30.2.2 thorpej simple_unlock(&src_pvh->pvh_lock);
1962 1.30.2.2 thorpej
1963 1.30.2.2 thorpej if (cleanedcache == 0) {
1964 1.30.2.2 thorpej dest_pvh = pmap_find_pvh(dest);
1965 1.30.2.2 thorpej simple_lock(&dest_pvh->pvh_lock);
1966 1.30.2.2 thorpej pmap_clean_page(dest_pvh->pvh_list, FALSE);
1967 1.30.2.2 thorpej simple_unlock(&dest_pvh->pvh_lock);
1968 1.30.2.2 thorpej }
1969 1.30.2.2 thorpej /*
1970 1.30.2.2 thorpej * Map the pages into the page hook points, copy them, and purge
1971 1.30.2.2 thorpej * the cache for the appropriate page. Invalidate the TLB
1972 1.30.2.2 thorpej * as required.
1973 1.30.2.2 thorpej */
1974 1.30.2.2 thorpej *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1975 1.30.2.2 thorpej *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1976 1.30.2.2 thorpej cpu_tlb_flushD_SE(page_hook0.va);
1977 1.30.2.2 thorpej cpu_tlb_flushD_SE(page_hook1.va);
1978 1.30.2.3 nathanw cpu_cpwait();
1979 1.30.2.2 thorpej bcopy_page(page_hook0.va, page_hook1.va);
1980 1.30.2.5 nathanw cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1981 1.30.2.5 nathanw cpu_dcache_wbinv_range(page_hook1.va, NBPG);
1982 1.30.2.2 thorpej }
1983 1.30.2.2 thorpej
1984 1.30.2.2 thorpej #if 0
1985 1.30.2.2 thorpej void
1986 1.30.2.2 thorpej pmap_pte_addref(pmap, va)
1987 1.30.2.2 thorpej struct pmap *pmap;
1988 1.30.2.2 thorpej vaddr_t va;
1989 1.30.2.2 thorpej {
1990 1.30.2.2 thorpej pd_entry_t *pde;
1991 1.30.2.2 thorpej paddr_t pa;
1992 1.30.2.2 thorpej struct vm_page *m;
1993 1.30.2.2 thorpej
1994 1.30.2.2 thorpej if (pmap == pmap_kernel())
1995 1.30.2.2 thorpej return;
1996 1.30.2.2 thorpej
1997 1.30.2.2 thorpej pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1998 1.30.2.2 thorpej pa = pmap_pte_pa(pde);
1999 1.30.2.2 thorpej m = PHYS_TO_VM_PAGE(pa);
2000 1.30.2.2 thorpej ++m->wire_count;
2001 1.30.2.2 thorpej #ifdef MYCROFT_HACK
2002 1.30.2.2 thorpej printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2003 1.30.2.2 thorpej pmap, va, pde, pa, m, m->wire_count);
2004 1.30.2.2 thorpej #endif
2005 1.30.2.2 thorpej }
2006 1.30.2.2 thorpej
2007 1.30.2.2 thorpej void
2008 1.30.2.2 thorpej pmap_pte_delref(pmap, va)
2009 1.30.2.2 thorpej struct pmap *pmap;
2010 1.30.2.2 thorpej vaddr_t va;
2011 1.30.2.2 thorpej {
2012 1.30.2.2 thorpej pd_entry_t *pde;
2013 1.30.2.2 thorpej paddr_t pa;
2014 1.30.2.2 thorpej struct vm_page *m;
2015 1.30.2.2 thorpej
2016 1.30.2.2 thorpej if (pmap == pmap_kernel())
2017 1.30.2.2 thorpej return;
2018 1.30.2.2 thorpej
2019 1.30.2.2 thorpej pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
2020 1.30.2.2 thorpej pa = pmap_pte_pa(pde);
2021 1.30.2.2 thorpej m = PHYS_TO_VM_PAGE(pa);
2022 1.30.2.2 thorpej --m->wire_count;
2023 1.30.2.2 thorpej #ifdef MYCROFT_HACK
2024 1.30.2.2 thorpej printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2025 1.30.2.2 thorpej pmap, va, pde, pa, m, m->wire_count);
2026 1.30.2.2 thorpej #endif
2027 1.30.2.2 thorpej if (m->wire_count == 0) {
2028 1.30.2.2 thorpej #ifdef MYCROFT_HACK
2029 1.30.2.2 thorpej printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
2030 1.30.2.2 thorpej pmap, va, pde, pa, m);
2031 1.30.2.2 thorpej #endif
2032 1.30.2.2 thorpej pmap_unmap_in_l1(pmap, va);
2033 1.30.2.2 thorpej uvm_pagefree(m);
2034 1.30.2.2 thorpej --pmap->pm_stats.resident_count;
2035 1.30.2.2 thorpej }
2036 1.30.2.2 thorpej }
2037 1.30.2.2 thorpej #else
2038 1.30.2.2 thorpej #define pmap_pte_addref(pmap, va)
2039 1.30.2.2 thorpej #define pmap_pte_delref(pmap, va)
2040 1.30.2.2 thorpej #endif
2041 1.30.2.2 thorpej
2042 1.30.2.2 thorpej /*
2043 1.30.2.2 thorpej * Since we have a virtually indexed cache, we may need to inhibit caching if
2044 1.30.2.2 thorpej * there is more than one mapping and at least one of them is writable.
2045 1.30.2.2 thorpej * Since we purge the cache on every context switch, we only need to check for
2046 1.30.2.2 thorpej * other mappings within the same pmap, or kernel_pmap.
2047 1.30.2.2 thorpej * This function is also called when a page is unmapped, to possibly reenable
2048 1.30.2.2 thorpej * caching on any remaining mappings.
2049 1.30.2.2 thorpej *
2050 1.30.2.2 thorpej * The code implements the following logic, where:
2051 1.30.2.2 thorpej *
2052 1.30.2.2 thorpej * KW = # of kernel read/write pages
2053 1.30.2.2 thorpej * KR = # of kernel read only pages
2054 1.30.2.2 thorpej * UW = # of user read/write pages
2055 1.30.2.2 thorpej * UR = # of user read only pages
2056 1.30.2.2 thorpej * OW = # of user read/write pages in another pmap, then
2057 1.30.2.2 thorpej *
2058 1.30.2.2 thorpej * KC = kernel mapping is cacheable
2059 1.30.2.2 thorpej * UC = user mapping is cacheable
2060 1.30.2.2 thorpej *
2061 1.30.2.2 thorpej * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
2062 1.30.2.2 thorpej * +---------------------------------------------
2063 1.30.2.2 thorpej * UW=0,UR=0,OW=0 | --- KC=1 KC=1 KC=0
2064 1.30.2.2 thorpej * UW=0,UR>0,OW=0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
2065 1.30.2.2 thorpej * UW=0,UR>0,OW>0 | UC=1 KC=0,UC=1 KC=0,UC=0 KC=0,UC=0
2066 1.30.2.2 thorpej * UW=1,UR=0,OW=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2067 1.30.2.2 thorpej * UW>1,UR>=0,OW>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2068 1.30.2.2 thorpej *
2069 1.30.2.2 thorpej * Note that the pmap must have its PTEs mapped in, and passed in via ptes.
2070 1.30.2.2 thorpej */
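/*
 * For example, reading the table above: a page with one kernel
 * read/write mapping and one user read-only mapping (KW=1, UR=1) lands
 * in the KC=0,UC=0 cell, so every mapping of the page is made
 * non-cacheable.
 */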
2071 1.30.2.2 thorpej __inline static void
2072 1.30.2.2 thorpej pmap_vac_me_harder(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
2073 1.30.2.2 thorpej boolean_t clear_cache)
2074 1.30.2.2 thorpej {
2075 1.30.2.2 thorpej if (pmap == pmap_kernel())
2076 1.30.2.2 thorpej pmap_vac_me_kpmap(pmap, pvh, ptes, clear_cache);
2077 1.30.2.2 thorpej else
2078 1.30.2.2 thorpej pmap_vac_me_user(pmap, pvh, ptes, clear_cache);
2079 1.30.2.2 thorpej }
2080 1.30.2.2 thorpej
2081 1.30.2.2 thorpej static void
2082 1.30.2.2 thorpej pmap_vac_me_kpmap(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
2083 1.30.2.2 thorpej boolean_t clear_cache)
2084 1.30.2.2 thorpej {
2085 1.30.2.2 thorpej int user_entries = 0;
2086 1.30.2.2 thorpej int user_writable = 0;
2087 1.30.2.2 thorpej int user_cacheable = 0;
2088 1.30.2.2 thorpej int kernel_entries = 0;
2089 1.30.2.2 thorpej int kernel_writable = 0;
2090 1.30.2.2 thorpej int kernel_cacheable = 0;
2091 1.30.2.2 thorpej struct pv_entry *pv;
2092 1.30.2.2 thorpej struct pmap *last_pmap = pmap;
2093 1.30.2.2 thorpej
2094 1.30.2.2 thorpej #ifdef DIAGNOSTIC
2095 1.30.2.2 thorpej if (pmap != pmap_kernel())
2096 1.30.2.2 thorpej panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2097 1.30.2.2 thorpej #endif
2098 1.30.2.2 thorpej
2099 1.30.2.2 thorpej /*
2100 1.30.2.2 thorpej * Pass one, see if there are both kernel and user pmaps for
2101 1.30.2.2 thorpej * this page. Calculate whether there are user-writable or
2102 1.30.2.2 thorpej * kernel-writable pages.
2103 1.30.2.2 thorpej */
2104 1.30.2.2 thorpej for (pv = pvh->pvh_list; pv != NULL; pv = pv->pv_next) {
2105 1.30.2.2 thorpej if (pv->pv_pmap != pmap) {
2106 1.30.2.2 thorpej user_entries++;
2107 1.30.2.2 thorpej if (pv->pv_flags & PT_Wr)
2108 1.30.2.2 thorpej user_writable++;
2109 1.30.2.2 thorpej if ((pv->pv_flags & PT_NC) == 0)
2110 1.30.2.2 thorpej user_cacheable++;
2111 1.30.2.2 thorpej } else {
2112 1.30.2.2 thorpej kernel_entries++;
2113 1.30.2.2 thorpej if (pv->pv_flags & PT_Wr)
2114 1.30.2.2 thorpej kernel_writable++;
2115 1.30.2.2 thorpej if ((pv->pv_flags & PT_NC) == 0)
2116 1.30.2.2 thorpej kernel_cacheable++;
2117 1.30.2.2 thorpej }
2118 1.30.2.2 thorpej }
2119 1.30.2.2 thorpej
2120 1.30.2.2 thorpej /*
2121 1.30.2.2 thorpej * We know we have just been updating a kernel entry, so if
2122 1.30.2.2 thorpej * all user pages are already cacheable, then there is nothing
2123 1.30.2.2 thorpej * further to do.
2124 1.30.2.2 thorpej */
2125 1.30.2.2 thorpej if (kernel_entries == 0 &&
2126 1.30.2.2 thorpej user_cacheable == user_entries)
2127 1.30.2.2 thorpej return;
2128 1.30.2.2 thorpej
2129 1.30.2.2 thorpej if (user_entries) {
2130 1.30.2.2 thorpej /*
2131 1.30.2.2 thorpej * Scan over the list again, for each entry, if it
2132 1.30.2.2 thorpej * might not be set correctly, call pmap_vac_me_user
2133 1.30.2.2 thorpej * to recalculate the settings.
2134 1.30.2.2 thorpej */
2135 1.30.2.2 thorpej for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
2136 1.30.2.2 thorpej /*
2137 1.30.2.2 thorpej * We know kernel mappings will get set
2138 1.30.2.2 thorpej * correctly in other calls. We also know
2139 1.30.2.2 thorpej * that if the pmap is the same as last_pmap
2140 1.30.2.2 thorpej * then we've just handled this entry.
2141 1.30.2.2 thorpej */
2142 1.30.2.2 thorpej if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2143 1.30.2.2 thorpej continue;
2144 1.30.2.2 thorpej /*
2145 1.30.2.2 thorpej * If there are kernel entries and this page
2146 1.30.2.2 thorpej * is writable but non-cacheable, then we can
2147 1.30.2.2 thorpej * skip this entry also.
2148 1.30.2.2 thorpej */
2149 1.30.2.2 thorpej if (kernel_entries > 0 &&
2150 1.30.2.2 thorpej (pv->pv_flags & (PT_NC | PT_Wr)) ==
2151 1.30.2.2 thorpej (PT_NC | PT_Wr))
2152 1.30.2.2 thorpej continue;
2153 1.30.2.2 thorpej /*
2154 1.30.2.2 thorpej * Similarly if there are no kernel-writable
2155 1.30.2.2 thorpej * entries and the page is already
2156 1.30.2.2 thorpej * read-only/cacheable.
2157 1.30.2.2 thorpej */
2158 1.30.2.2 thorpej if (kernel_writable == 0 &&
2159 1.30.2.2 thorpej (pv->pv_flags & (PT_NC | PT_Wr)) == 0)
2160 1.30.2.2 thorpej continue;
2161 1.30.2.2 thorpej /*
2162 1.30.2.2 thorpej * For some of the remaining cases, we know
2163 1.30.2.2 thorpej * that we must recalculate, but for others we
2164 1.30.2.2 thorpej * can't tell if they are correct or not, so
2165 1.30.2.2 thorpej * we recalculate anyway.
2166 1.30.2.2 thorpej */
2167 1.30.2.2 thorpej pmap_unmap_ptes(last_pmap);
2168 1.30.2.2 thorpej last_pmap = pv->pv_pmap;
2169 1.30.2.2 thorpej ptes = pmap_map_ptes(last_pmap);
2170 1.30.2.2 thorpej pmap_vac_me_user(last_pmap, pvh, ptes,
2171 1.30.2.2 thorpej pmap_is_curpmap(last_pmap));
2172 1.30.2.2 thorpej }
2173 1.30.2.2 thorpej /* Restore the pte mapping that was passed to us. */
2174 1.30.2.2 thorpej if (last_pmap != pmap) {
2175 1.30.2.2 thorpej pmap_unmap_ptes(last_pmap);
2176 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap);
2177 1.30.2.2 thorpej }
2178 1.30.2.2 thorpej if (kernel_entries == 0)
2179 1.30.2.2 thorpej return;
2180 1.30.2.2 thorpej }
2181 1.30.2.2 thorpej
2182 1.30.2.2 thorpej pmap_vac_me_user(pmap, pvh, ptes, clear_cache);
2183 1.30.2.2 thorpej return;
2184 1.30.2.2 thorpej }
2185 1.30.2.2 thorpej
2186 1.30.2.2 thorpej static void
2187 1.30.2.2 thorpej pmap_vac_me_user(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
2188 1.30.2.2 thorpej boolean_t clear_cache)
2189 1.30.2.2 thorpej {
2190 1.30.2.2 thorpej struct pmap *kpmap = pmap_kernel();
2191 1.30.2.2 thorpej struct pv_entry *pv, *npv;
2192 1.30.2.2 thorpej int entries = 0;
2193 1.30.2.2 thorpej int writable = 0;
2194 1.30.2.2 thorpej int cacheable_entries = 0;
2195 1.30.2.2 thorpej int kern_cacheable = 0;
2196 1.30.2.2 thorpej int other_writable = 0;
2197 1.30.2.2 thorpej
2198 1.30.2.2 thorpej pv = pvh->pvh_list;
2199 1.30.2.2 thorpej KASSERT(ptes != NULL);
2200 1.30.2.2 thorpej
2201 1.30.2.2 thorpej /*
2202 1.30.2.2 thorpej * Count mappings and writable mappings in this pmap.
2203 1.30.2.2 thorpej * Include kernel mappings as part of our own.
2204 1.30.2.2 thorpej * Keep a pointer to the first one.
2205 1.30.2.2 thorpej */
2206 1.30.2.2 thorpej for (npv = pv; npv; npv = npv->pv_next) {
2207 1.30.2.2 thorpej /* Count mappings in the same pmap */
2208 1.30.2.2 thorpej if (pmap == npv->pv_pmap ||
2209 1.30.2.2 thorpej kpmap == npv->pv_pmap) {
2210 1.30.2.2 thorpej if (entries++ == 0)
2211 1.30.2.2 thorpej pv = npv;
2212 1.30.2.2 thorpej /* Cacheable mappings */
2213 1.30.2.2 thorpej if ((npv->pv_flags & PT_NC) == 0) {
2214 1.30.2.2 thorpej cacheable_entries++;
2215 1.30.2.2 thorpej if (kpmap == npv->pv_pmap)
2216 1.30.2.2 thorpej kern_cacheable++;
2217 1.30.2.2 thorpej }
2218 1.30.2.2 thorpej /* Writable mappings */
2219 1.30.2.2 thorpej if (npv->pv_flags & PT_Wr)
2220 1.30.2.2 thorpej ++writable;
2221 1.30.2.2 thorpej } else if (npv->pv_flags & PT_Wr)
2222 1.30.2.2 thorpej other_writable = 1;
2223 1.30.2.2 thorpej }
2224 1.30.2.2 thorpej
2225 1.30.2.2 thorpej PDEBUG(3, printf("pmap_vac_me_user: pmap %p Entries %d, "
2226 1.30.2.2 thorpej "writable %d cacheable %d %s\n", pmap, entries, writable,
2227 1.30.2.2 thorpej cacheable_entries, clear_cache ? "clean" : "no clean"));
2228 1.30.2.2 thorpej
2229 1.30.2.2 thorpej /*
2230 1.30.2.2 thorpej * Enable or disable caching as necessary.
2231 1.30.2.2 thorpej * Note: the first entry might be part of the kernel pmap,
2232 1.30.2.2 thorpej * so we can't assume this is indicative of the state of the
2233 1.30.2.2 thorpej * other (maybe non-kpmap) entries.
2234 1.30.2.2 thorpej */
2235 1.30.2.2 thorpej if ((entries > 1 && writable) ||
2236 1.30.2.2 thorpej (entries > 0 && pmap == kpmap && other_writable)) {
2237 1.30.2.2 thorpej if (cacheable_entries == 0)
2238 1.30.2.2 thorpej return;
2239 1.30.2.2 thorpej for (npv = pv; npv; npv = npv->pv_next) {
2240 1.30.2.2 thorpej if ((pmap == npv->pv_pmap
2241 1.30.2.2 thorpej || kpmap == npv->pv_pmap) &&
2242 1.30.2.2 thorpej (npv->pv_flags & PT_NC) == 0) {
2243 1.30.2.2 thorpej ptes[arm_byte_to_page(npv->pv_va)] &=
2244 1.30.2.2 thorpej ~(PT_C | PT_B);
2245 1.30.2.2 thorpej npv->pv_flags |= PT_NC;
2246 1.30.2.2 thorpej /*
2247 1.30.2.2 thorpej * If this page needs flushing from the
2248 1.30.2.2 thorpej * cache, and we aren't going to do it
2249 1.30.2.2 thorpej * below, do it now.
2250 1.30.2.2 thorpej */
2251 1.30.2.2 thorpej if ((cacheable_entries < 4 &&
2252 1.30.2.2 thorpej (clear_cache || npv->pv_pmap == kpmap)) ||
2253 1.30.2.2 thorpej (npv->pv_pmap == kpmap &&
2254 1.30.2.2 thorpej !clear_cache && kern_cacheable < 4)) {
2255 1.30.2.5 nathanw cpu_idcache_wbinv_range(npv->pv_va,
2256 1.30.2.2 thorpej NBPG);
2257 1.30.2.2 thorpej cpu_tlb_flushID_SE(npv->pv_va);
2258 1.30.2.2 thorpej }
2259 1.30.2.2 thorpej }
2260 1.30.2.2 thorpej }
2261 1.30.2.2 thorpej if ((clear_cache && cacheable_entries >= 4) ||
2262 1.30.2.2 thorpej kern_cacheable >= 4) {
2263 1.30.2.5 nathanw cpu_idcache_wbinv_all();
2264 1.30.2.2 thorpej cpu_tlb_flushID();
2265 1.30.2.2 thorpej }
2266 1.30.2.3 nathanw cpu_cpwait();
2267 1.30.2.2 thorpej } else if (entries > 0) {
2268 1.30.2.2 thorpej /*
2269 1.30.2.2 thorpej * Turn caching back on for some pages.  If it is a kernel
2270 1.30.2.2 thorpej * page, only do so if there are no other writable pages.
2271 1.30.2.2 thorpej */
2272 1.30.2.2 thorpej for (npv = pv; npv; npv = npv->pv_next) {
2273 1.30.2.2 thorpej if ((pmap == npv->pv_pmap ||
2274 1.30.2.2 thorpej (kpmap == npv->pv_pmap && other_writable == 0)) &&
2275 1.30.2.2 thorpej (npv->pv_flags & PT_NC)) {
2276 1.30.2.2 thorpej ptes[arm_byte_to_page(npv->pv_va)] |=
2277 1.30.2.2 thorpej pte_cache_mode;
2278 1.30.2.2 thorpej npv->pv_flags &= ~PT_NC;
2279 1.30.2.2 thorpej }
2280 1.30.2.2 thorpej }
2281 1.30.2.2 thorpej }
2282 1.30.2.2 thorpej }
2283 1.30.2.2 thorpej
2284 1.30.2.2 thorpej /*
2285 1.30.2.2 thorpej * pmap_remove()
2286 1.30.2.2 thorpej *
2287 1.30.2.2 thorpej * pmap_remove is responsible for nuking a number of mappings for a range
2288 1.30.2.2 thorpej * of virtual address space in the current pmap. To do this efficiently
2289 1.30.2.2 thorpej * is interesting, because in a number of cases a wide virtual address
2290 1.30.2.2 thorpej * range may be supplied that contains few actual mappings. So, the
2291 1.30.2.2 thorpej * optimisations are:
2292 1.30.2.2 thorpej * 1. Try to skip over hunks of address space for which an L1 entry
2293 1.30.2.2 thorpej * does not exist.
2294 1.30.2.2 thorpej * 2. Build up a list of pages we've hit, up to a maximum, so we can
2295 1.30.2.2 thorpej * maybe do just a partial cache clean. This path of execution is
2296 1.30.2.2 thorpej * complicated by the fact that the cache must be flushed _before_
2297 1.30.2.2 thorpej * the PTE is nuked, being a VAC :-)
2298 1.30.2.2 thorpej * 3. Maybe later fast-case a single page, but I don't think this is
2299 1.30.2.2 thorpej * going to make _that_ much difference overall.
2300 1.30.2.2 thorpej */
2301 1.30.2.2 thorpej
2302 1.30.2.2 thorpej #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
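/*
 * A sketch of how cleanlist_idx drives the loop below: while it is less
 * than PMAP_REMOVE_CLEAN_LIST_SIZE, PTEs are batched on the clean list
 * and flushed at the end; the moment it reaches the limit we pay for
 * one full cache/TLB invalidation, zero the batched PTEs, and from then
 * on (idx > limit) PTEs are zapped immediately with no further cache
 * work.
 */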
2303 1.30.2.2 thorpej
2304 1.30.2.2 thorpej void
2305 1.30.2.2 thorpej pmap_remove(pmap, sva, eva)
2306 1.30.2.2 thorpej struct pmap *pmap;
2307 1.30.2.2 thorpej vaddr_t sva;
2308 1.30.2.2 thorpej vaddr_t eva;
2309 1.30.2.2 thorpej {
2310 1.30.2.2 thorpej int cleanlist_idx = 0;
2311 1.30.2.2 thorpej struct pagelist {
2312 1.30.2.2 thorpej vaddr_t va;
2313 1.30.2.2 thorpej pt_entry_t *pte;
2314 1.30.2.2 thorpej } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
2315 1.30.2.2 thorpej pt_entry_t *pte = 0, *ptes;
2316 1.30.2.2 thorpej paddr_t pa;
2317 1.30.2.2 thorpej int pmap_active;
2318 1.30.2.2 thorpej struct pv_head *pvh;
2319 1.30.2.2 thorpej
2320 1.30.2.2 thorpej /* Exit quick if there is no pmap */
2321 1.30.2.2 thorpej if (!pmap)
2322 1.30.2.2 thorpej return;
2323 1.30.2.2 thorpej
2324 1.30.2.2 thorpej PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
2325 1.30.2.2 thorpej
2326 1.30.2.2 thorpej sva &= PG_FRAME;
2327 1.30.2.2 thorpej eva &= PG_FRAME;
2328 1.30.2.2 thorpej
2329 1.30.2.2 thorpej /*
2330 1.30.2.2 thorpej * we lock in the pmap => pv_head direction
2331 1.30.2.2 thorpej */
2332 1.30.2.2 thorpej PMAP_MAP_TO_HEAD_LOCK();
2333 1.30.2.2 thorpej
2334 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap);
2335 1.30.2.2 thorpej /* Get a page table pointer */
2336 1.30.2.2 thorpej while (sva < eva) {
2337 1.30.2.2 thorpej if (pmap_pde_page(pmap_pde(pmap, sva)))
2338 1.30.2.2 thorpej break;
2339 1.30.2.2 thorpej sva = (sva & PD_MASK) + NBPD;
2340 1.30.2.2 thorpej }
2341 1.30.2.2 thorpej
2342 1.30.2.2 thorpej pte = &ptes[arm_byte_to_page(sva)];
2343 1.30.2.2 thorpej /* Note whether the pmap is active, and thus requires cache and TLB cleans */
2344 1.30.2.2 thorpej if ((curproc && curproc->l_proc->p_vmspace->vm_map.pmap == pmap)
2345 1.30.2.2 thorpej || (pmap == pmap_kernel()))
2346 1.30.2.2 thorpej pmap_active = 1;
2347 1.30.2.2 thorpej else
2348 1.30.2.2 thorpej pmap_active = 0;
2349 1.30.2.2 thorpej
2350 1.30.2.2 thorpej /* Now loop along */
2351 1.30.2.2 thorpej while (sva < eva) {
2352 1.30.2.2 thorpej /* Check if we can move to the next PDE (l1 chunk) */
2353 1.30.2.2 thorpej if (!(sva & PT_MASK))
2354 1.30.2.2 thorpej if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2355 1.30.2.2 thorpej sva += NBPD;
2356 1.30.2.2 thorpej pte += arm_byte_to_page(NBPD);
2357 1.30.2.2 thorpej continue;
2358 1.30.2.2 thorpej }
2359 1.30.2.2 thorpej
2360 1.30.2.2 thorpej /* We've found a valid PTE, so this page of PTEs has to go. */
2361 1.30.2.2 thorpej if (pmap_pte_v(pte)) {
2362 1.30.2.2 thorpej int bank, off;
2363 1.30.2.2 thorpej
2364 1.30.2.2 thorpej /* Update statistics */
2365 1.30.2.2 thorpej --pmap->pm_stats.resident_count;
2366 1.30.2.2 thorpej
2367 1.30.2.2 thorpej /*
2368 1.30.2.2 thorpej * Add this page to our cache remove list, if we can.
2369 1.30.2.2 thorpej * If, however, the cache remove list is totally full,
2370 1.30.2.2 thorpej * then do a complete cache invalidation, taking care
2371 1.30.2.2 thorpej * to backtrack the PTE table beforehand, and ignore
2372 1.30.2.2 thorpej * the lists thereafter, because there's no longer any
2373 1.30.2.2 thorpej * point in bothering with them (we've paid the
2374 1.30.2.2 thorpej * penalty, so we will carry on unhindered).  Otherwise,
2375 1.30.2.2 thorpej * when we fall out, we just clean the list.
2376 1.30.2.2 thorpej */
2377 1.30.2.2 thorpej PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2378 1.30.2.2 thorpej pa = pmap_pte_pa(pte);
2379 1.30.2.2 thorpej
2380 1.30.2.2 thorpej if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2381 1.30.2.2 thorpej /* Add to the clean list. */
2382 1.30.2.2 thorpej cleanlist[cleanlist_idx].pte = pte;
2383 1.30.2.2 thorpej cleanlist[cleanlist_idx].va = sva;
2384 1.30.2.2 thorpej cleanlist_idx++;
2385 1.30.2.2 thorpej } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2386 1.30.2.2 thorpej int cnt;
2387 1.30.2.2 thorpej
2388 1.30.2.2 thorpej /* Nuke everything if needed. */
2389 1.30.2.2 thorpej if (pmap_active) {
2390 1.30.2.5 nathanw cpu_idcache_wbinv_all();
2391 1.30.2.2 thorpej cpu_tlb_flushID();
2392 1.30.2.2 thorpej }
2393 1.30.2.2 thorpej
2394 1.30.2.2 thorpej /*
2395 1.30.2.2 thorpej * Roll back the previous PTE list,
2396 1.30.2.2 thorpej * and zero out the current PTE.
2397 1.30.2.2 thorpej */
2398 1.30.2.2 thorpej for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
2399 1.30.2.2 thorpej *cleanlist[cnt].pte = 0;
2400 1.30.2.2 thorpej pmap_pte_delref(pmap, cleanlist[cnt].va);
2401 1.30.2.2 thorpej }
2402 1.30.2.2 thorpej *pte = 0;
2403 1.30.2.2 thorpej pmap_pte_delref(pmap, sva);
2404 1.30.2.2 thorpej cleanlist_idx++;
2405 1.30.2.2 thorpej } else {
2406 1.30.2.2 thorpej /*
2407 1.30.2.2 thorpej * We've already nuked the cache and
2408 1.30.2.2 thorpej * TLB, so just carry on regardless,
2409 1.30.2.2 thorpej * and we won't need to do it again
2410 1.30.2.2 thorpej */
2411 1.30.2.2 thorpej *pte = 0;
2412 1.30.2.2 thorpej pmap_pte_delref(pmap, sva);
2413 1.30.2.2 thorpej }
2414 1.30.2.2 thorpej
2415 1.30.2.2 thorpej /*
2416 1.30.2.2 thorpej * Update flags. In a number of circumstances,
2417 1.30.2.2 thorpej * we could cluster a lot of these and do a
2418 1.30.2.2 thorpej * number of sequential pages in one go.
2419 1.30.2.2 thorpej */
2420 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2421 1.30.2.2 thorpej struct pv_entry *pve;
2422 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
2423 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
2424 1.30.2.2 thorpej pve = pmap_remove_pv(pvh, pmap, sva);
2425 1.30.2.2 thorpej pmap_free_pv(pmap, pve);
2426 1.30.2.2 thorpej pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
2427 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2428 1.30.2.2 thorpej }
2429 1.30.2.2 thorpej }
2430 1.30.2.2 thorpej sva += NBPG;
2431 1.30.2.2 thorpej pte++;
2432 1.30.2.2 thorpej }
2433 1.30.2.2 thorpej
2434 1.30.2.2 thorpej pmap_unmap_ptes(pmap);
2435 1.30.2.2 thorpej /*
2436 1.30.2.2 thorpej 	 * Now, if we've fallen through to here, chances are that there
2437 1.30.2.2 thorpej 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2438 1.30.2.2 thorpej */
2439 1.30.2.2 thorpej if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2440 1.30.2.2 thorpej u_int cnt;
2441 1.30.2.2 thorpej
2442 1.30.2.2 thorpej for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2443 1.30.2.2 thorpej if (pmap_active) {
2444 1.30.2.5 nathanw cpu_idcache_wbinv_range(cleanlist[cnt].va,
2445 1.30.2.5 nathanw NBPG);
2446 1.30.2.2 thorpej *cleanlist[cnt].pte = 0;
2447 1.30.2.2 thorpej cpu_tlb_flushID_SE(cleanlist[cnt].va);
2448 1.30.2.2 thorpej } else
2449 1.30.2.2 thorpej *cleanlist[cnt].pte = 0;
2450 1.30.2.2 thorpej pmap_pte_delref(pmap, cleanlist[cnt].va);
2451 1.30.2.2 thorpej }
2452 1.30.2.2 thorpej }
2453 1.30.2.2 thorpej PMAP_MAP_TO_HEAD_UNLOCK();
2454 1.30.2.2 thorpej }
2455 1.30.2.2 thorpej
2456 1.30.2.2 thorpej /*
2457 1.30.2.2 thorpej * Routine: pmap_remove_all
2458 1.30.2.2 thorpej * Function:
2459 1.30.2.2 thorpej * Removes this physical page from
2460 1.30.2.2 thorpej * all physical maps in which it resides.
2461 1.30.2.2 thorpej * Reflects back modify bits to the pager.
2462 1.30.2.2 thorpej */
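/*
 * Within this file it is reached via pmap_page_protect(pg, VM_PROT_NONE)
 * below, which reduces to (a sketch):
 *
 *	pmap_remove_all(VM_PAGE_TO_PHYS(pg));
 */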
2463 1.30.2.2 thorpej
2464 1.30.2.4 nathanw static void
2465 1.30.2.2 thorpej pmap_remove_all(pa)
2466 1.30.2.2 thorpej paddr_t pa;
2467 1.30.2.2 thorpej {
2468 1.30.2.2 thorpej struct pv_entry *pv, *npv;
2469 1.30.2.2 thorpej struct pv_head *pvh;
2470 1.30.2.2 thorpej struct pmap *pmap;
2471 1.30.2.2 thorpej pt_entry_t *pte, *ptes;
2472 1.30.2.2 thorpej
2473 1.30.2.2 thorpej PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
2474 1.30.2.2 thorpej
2475 1.30.2.2 thorpej /* set pv_head => pmap locking */
2476 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_LOCK();
2477 1.30.2.2 thorpej
2478 1.30.2.2 thorpej pvh = pmap_find_pvh(pa);
2479 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
2480 1.30.2.2 thorpej
2481 1.30.2.2 thorpej pv = pvh->pvh_list;
2482 1.30.2.2 thorpej 	if (pv == NULL) {
2484 1.30.2.2 thorpej PDEBUG(0, printf("free page\n"));
2485 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2486 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
2487 1.30.2.2 thorpej return;
2488 1.30.2.2 thorpej }
2489 1.30.2.2 thorpej pmap_clean_page(pv, FALSE);
2490 1.30.2.2 thorpej
2491 1.30.2.2 thorpej while (pv) {
2492 1.30.2.2 thorpej pmap = pv->pv_pmap;
2493 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap);
2494 1.30.2.2 thorpej pte = &ptes[arm_byte_to_page(pv->pv_va)];
2495 1.30.2.2 thorpej
2496 1.30.2.2 thorpej PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2497 1.30.2.2 thorpej pv->pv_va, pv->pv_flags));
2498 1.30.2.2 thorpej #ifdef DEBUG
2499 1.30.2.3 nathanw if (!pmap_pde_page(pmap_pde(pmap, pv->pv_va)) ||
2500 1.30.2.2 thorpej !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
2501 1.30.2.2 thorpej panic("pmap_remove_all: bad mapping");
2502 1.30.2.2 thorpej #endif /* DEBUG */
2503 1.30.2.2 thorpej
2504 1.30.2.2 thorpej /*
2505 1.30.2.2 thorpej * Update statistics
2506 1.30.2.2 thorpej */
2507 1.30.2.2 thorpej --pmap->pm_stats.resident_count;
2508 1.30.2.2 thorpej
2509 1.30.2.2 thorpej /* Wired bit */
2510 1.30.2.2 thorpej if (pv->pv_flags & PT_W)
2511 1.30.2.2 thorpej --pmap->pm_stats.wired_count;
2512 1.30.2.2 thorpej
2513 1.30.2.2 thorpej /*
2514 1.30.2.2 thorpej * Invalidate the PTEs.
2515 1.30.2.2 thorpej * XXX: should cluster them up and invalidate as many
2516 1.30.2.2 thorpej * as possible at once.
2517 1.30.2.2 thorpej */
2518 1.30.2.2 thorpej
2519 1.30.2.2 thorpej #ifdef needednotdone
2520 1.30.2.2 thorpej reduce wiring count on page table pages as references drop
2521 1.30.2.2 thorpej #endif
2522 1.30.2.2 thorpej
2523 1.30.2.2 thorpej *pte = 0;
2524 1.30.2.2 thorpej pmap_pte_delref(pmap, pv->pv_va);
2525 1.30.2.2 thorpej
2526 1.30.2.2 thorpej npv = pv->pv_next;
2527 1.30.2.2 thorpej pmap_free_pv(pmap, pv);
2528 1.30.2.2 thorpej pv = npv;
2529 1.30.2.2 thorpej pmap_unmap_ptes(pmap);
2530 1.30.2.2 thorpej }
2531 1.30.2.2 thorpej pvh->pvh_list = NULL;
2532 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2533 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
2534 1.30.2.2 thorpej
2535 1.30.2.2 thorpej PDEBUG(0, printf("done\n"));
2536 1.30.2.2 thorpej cpu_tlb_flushID();
2537 1.30.2.3 nathanw cpu_cpwait();
2538 1.30.2.2 thorpej }
2539 1.30.2.2 thorpej
2540 1.30.2.2 thorpej
2541 1.30.2.2 thorpej /*
2542 1.30.2.2 thorpej * Set the physical protection on the specified range of this map as requested.
2543 1.30.2.2 thorpej */
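/*
 * E.g. write-protecting a range (a sketch; per the note below, the
 * caller is expected to follow up with pmap_update()):
 *
 *	pmap_protect(pmap, sva, eva, VM_PROT_READ);
 */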
2544 1.30.2.2 thorpej
2545 1.30.2.2 thorpej void
2546 1.30.2.2 thorpej pmap_protect(pmap, sva, eva, prot)
2547 1.30.2.2 thorpej struct pmap *pmap;
2548 1.30.2.2 thorpej vaddr_t sva;
2549 1.30.2.2 thorpej vaddr_t eva;
2550 1.30.2.2 thorpej vm_prot_t prot;
2551 1.30.2.2 thorpej {
2552 1.30.2.2 thorpej pt_entry_t *pte = NULL, *ptes;
2553 1.30.2.2 thorpej int armprot;
2554 1.30.2.2 thorpej int flush = 0;
2555 1.30.2.2 thorpej paddr_t pa;
2556 1.30.2.2 thorpej int bank, off;
2557 1.30.2.2 thorpej struct pv_head *pvh;
2558 1.30.2.2 thorpej
2559 1.30.2.2 thorpej PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2560 1.30.2.2 thorpej pmap, sva, eva, prot));
2561 1.30.2.2 thorpej
2562 1.30.2.2 thorpej if (~prot & VM_PROT_READ) {
2563 1.30.2.2 thorpej /* Just remove the mappings. */
2564 1.30.2.2 thorpej pmap_remove(pmap, sva, eva);
2565 1.30.2.4 nathanw 		/* pmap_update() is not needed here; it is expected to be
2566 1.30.2.4 nathanw 		 * called by the caller of pmap_protect(). */
2567 1.30.2.2 thorpej return;
2568 1.30.2.2 thorpej }
2569 1.30.2.2 thorpej if (prot & VM_PROT_WRITE) {
2570 1.30.2.2 thorpej /*
2571 1.30.2.2 thorpej * If this is a read->write transition, just ignore it and let
2572 1.30.2.2 thorpej * uvm_fault() take care of it later.
2573 1.30.2.2 thorpej */
2574 1.30.2.2 thorpej return;
2575 1.30.2.2 thorpej }
2576 1.30.2.2 thorpej
2577 1.30.2.2 thorpej sva &= PG_FRAME;
2578 1.30.2.2 thorpej eva &= PG_FRAME;
2579 1.30.2.2 thorpej
2580 1.30.2.2 thorpej /* Need to lock map->head */
2581 1.30.2.2 thorpej PMAP_MAP_TO_HEAD_LOCK();
2582 1.30.2.2 thorpej
2583 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap);
2584 1.30.2.2 thorpej /*
2585 1.30.2.2 thorpej * We need to acquire a pointer to a page table page before entering
2586 1.30.2.2 thorpej * the following loop.
2587 1.30.2.2 thorpej */
2588 1.30.2.2 thorpej while (sva < eva) {
2589 1.30.2.2 thorpej if (pmap_pde_page(pmap_pde(pmap, sva)))
2590 1.30.2.2 thorpej break;
2591 1.30.2.2 thorpej sva = (sva & PD_MASK) + NBPD;
2592 1.30.2.2 thorpej }
2593 1.30.2.2 thorpej
2594 1.30.2.2 thorpej pte = &ptes[arm_byte_to_page(sva)];
2595 1.30.2.2 thorpej
2596 1.30.2.2 thorpej while (sva < eva) {
2597 1.30.2.2 thorpej 		/* Only check the PDE when crossing into a new L1 slot. */
2598 1.30.2.2 thorpej if ((sva & PT_MASK) == 0) {
2599 1.30.2.2 thorpej if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2600 1.30.2.2 thorpej /* We can race ahead here, to the next pde. */
2601 1.30.2.2 thorpej sva += NBPD;
2602 1.30.2.2 thorpej pte += arm_byte_to_page(NBPD);
2603 1.30.2.2 thorpej continue;
2604 1.30.2.2 thorpej }
2605 1.30.2.2 thorpej }
2606 1.30.2.2 thorpej
2607 1.30.2.2 thorpej if (!pmap_pte_v(pte))
2608 1.30.2.2 thorpej goto next;
2609 1.30.2.2 thorpej
2610 1.30.2.2 thorpej flush = 1;
2611 1.30.2.2 thorpej
2612 1.30.2.2 thorpej armprot = 0;
2613 1.30.2.2 thorpej if (sva < VM_MAXUSER_ADDRESS)
2614 1.30.2.2 thorpej armprot |= PT_AP(AP_U);
2615 1.30.2.2 thorpej else if (sva < VM_MAX_ADDRESS)
2616 1.30.2.2 thorpej armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
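		/*
		 * The mask 0xfffff00f below preserves everything except
		 * bits 4-11, i.e. it clears all four 2-bit AP (access
		 * permission) subpage fields before the new protection
		 * bits are or'ed back in.
		 */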
2617 1.30.2.2 thorpej *pte = (*pte & 0xfffff00f) | armprot;
2618 1.30.2.2 thorpej
2619 1.30.2.2 thorpej pa = pmap_pte_pa(pte);
2620 1.30.2.2 thorpej
2621 1.30.2.2 thorpej /* Get the physical page index */
2622 1.30.2.2 thorpej
2623 1.30.2.2 thorpej /* Clear write flag */
2624 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2625 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
2626 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
2627 1.30.2.2 thorpej (void) pmap_modify_pv(pmap, sva, pvh, PT_Wr, 0);
2628 1.30.2.2 thorpej pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
2629 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2630 1.30.2.2 thorpej }
2631 1.30.2.2 thorpej
2632 1.30.2.2 thorpej next:
2633 1.30.2.2 thorpej sva += NBPG;
2634 1.30.2.2 thorpej pte++;
2635 1.30.2.2 thorpej }
2636 1.30.2.2 thorpej pmap_unmap_ptes(pmap);
2637 1.30.2.2 thorpej PMAP_MAP_TO_HEAD_UNLOCK();
2638 1.30.2.2 thorpej if (flush)
2639 1.30.2.2 thorpej cpu_tlb_flushID();
2640 1.30.2.2 thorpej }
2641 1.30.2.2 thorpej
2642 1.30.2.2 thorpej /*
2643 1.30.2.2 thorpej * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2644 1.30.2.2 thorpej * int flags)
2645 1.30.2.2 thorpej *
2646 1.30.2.2 thorpej * Insert the given physical page (p) at
2647 1.30.2.2 thorpej * the specified virtual address (v) in the
2648 1.30.2.2 thorpej * target physical map with the protection requested.
2649 1.30.2.2 thorpej *
2650 1.30.2.2 thorpej * If specified, the page will be wired down, meaning
2651 1.30.2.2 thorpej * that the related pte can not be reclaimed.
2652 1.30.2.2 thorpej *
2653 1.30.2.2 thorpej * NB: This is the only routine which MAY NOT lazy-evaluate
2654 1.30.2.2 thorpej * or lose information. That is, this routine must actually
2655 1.30.2.2 thorpej * insert this page into the given map NOW.
2656 1.30.2.2 thorpej */
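/*
 * For illustration, a typical call (a sketch).  The low bits of "flags"
 * carry the access type (consumed below as flags & VM_PROT_ALL) and may
 * be augmented with PMAP_WIRED and/or PMAP_CANFAIL:
 *
 *	error = pmap_enter(pmap, va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
 *	if (error != 0)
 *		... back off, or fail the operation ...
 */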
2657 1.30.2.2 thorpej
2658 1.30.2.2 thorpej int
2659 1.30.2.2 thorpej pmap_enter(pmap, va, pa, prot, flags)
2660 1.30.2.2 thorpej struct pmap *pmap;
2661 1.30.2.2 thorpej vaddr_t va;
2662 1.30.2.2 thorpej paddr_t pa;
2663 1.30.2.2 thorpej vm_prot_t prot;
2664 1.30.2.2 thorpej int flags;
2665 1.30.2.2 thorpej {
2666 1.30.2.2 thorpej pt_entry_t *pte, *ptes;
2667 1.30.2.2 thorpej u_int npte;
2668 1.30.2.2 thorpej int bank, off;
2669 1.30.2.2 thorpej paddr_t opa;
2670 1.30.2.2 thorpej int nflags;
2671 1.30.2.2 thorpej boolean_t wired = (flags & PMAP_WIRED) != 0;
2672 1.30.2.2 thorpej struct pv_entry *pve;
2673 1.30.2.2 thorpej struct pv_head *pvh;
2674 1.30.2.2 thorpej int error;
2675 1.30.2.2 thorpej
2676 1.30.2.2 thorpej PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2677 1.30.2.2 thorpej va, pa, pmap, prot, wired));
2678 1.30.2.2 thorpej
2679 1.30.2.2 thorpej #ifdef DIAGNOSTIC
2680 1.30.2.2 thorpej /* Valid address ? */
2681 1.30.2.2 thorpej if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
2682 1.30.2.2 thorpej panic("pmap_enter: too big");
2683 1.30.2.2 thorpej if (pmap != pmap_kernel() && va != 0) {
2684 1.30.2.2 thorpej if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2685 1.30.2.2 thorpej panic("pmap_enter: kernel page in user map");
2686 1.30.2.2 thorpej } else {
2687 1.30.2.2 thorpej if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2688 1.30.2.2 thorpej panic("pmap_enter: user page in kernel map");
2689 1.30.2.2 thorpej if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2690 1.30.2.2 thorpej panic("pmap_enter: entering PT page");
2691 1.30.2.2 thorpej }
2692 1.30.2.2 thorpej #endif
2693 1.30.2.2 thorpej /* get lock */
2694 1.30.2.2 thorpej PMAP_MAP_TO_HEAD_LOCK();
2695 1.30.2.2 thorpej /*
2696 1.30.2.2 thorpej * Get a pointer to the pte for this virtual address. If the
2697 1.30.2.2 thorpej * pte pointer is NULL then we are missing the L2 page table
2698 1.30.2.2 thorpej * so we need to create one.
2699 1.30.2.2 thorpej */
2700 1.30.2.2 thorpej /* XXX horrible hack to get us working with lockdebug */
2701 1.30.2.2 thorpej simple_lock(&pmap->pm_obj.vmobjlock);
2702 1.30.2.2 thorpej pte = pmap_pte(pmap, va);
2703 1.30.2.2 thorpej if (!pte) {
2704 1.30.2.2 thorpej struct vm_page *ptp;
2705 1.30.2.2 thorpej
2706 1.30.2.2 thorpej /* if failure is allowed then don't try too hard */
2707 1.30.2.2 thorpej ptp = pmap_get_ptp(pmap, va, flags & PMAP_CANFAIL);
2708 1.30.2.2 thorpej if (ptp == NULL) {
2709 1.30.2.2 thorpej if (flags & PMAP_CANFAIL) {
2710 1.30.2.2 thorpej error = ENOMEM;
2711 1.30.2.2 thorpej goto out;
2712 1.30.2.2 thorpej }
2713 1.30.2.2 thorpej panic("pmap_enter: get ptp failed");
2714 1.30.2.2 thorpej }
2715 1.30.2.2 thorpej
2716 1.30.2.2 thorpej pte = pmap_pte(pmap, va);
2717 1.30.2.2 thorpej #ifdef DIAGNOSTIC
2718 1.30.2.2 thorpej if (!pte)
2719 1.30.2.2 thorpej panic("pmap_enter: no pte");
2720 1.30.2.2 thorpej #endif
2721 1.30.2.2 thorpej }
2722 1.30.2.2 thorpej
2723 1.30.2.2 thorpej nflags = 0;
2724 1.30.2.2 thorpej if (prot & VM_PROT_WRITE)
2725 1.30.2.2 thorpej nflags |= PT_Wr;
2726 1.30.2.2 thorpej if (wired)
2727 1.30.2.2 thorpej nflags |= PT_W;
2728 1.30.2.2 thorpej
2729 1.30.2.2 thorpej /* More debugging info */
2730 1.30.2.2 thorpej PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2731 1.30.2.2 thorpej *pte));
2732 1.30.2.2 thorpej
2733 1.30.2.2 thorpej /* Is the pte valid ? If so then this page is already mapped */
2734 1.30.2.2 thorpej if (pmap_pte_v(pte)) {
2735 1.30.2.2 thorpej /* Get the physical address of the current page mapped */
2736 1.30.2.2 thorpej opa = pmap_pte_pa(pte);
2737 1.30.2.2 thorpej
2738 1.30.2.2 thorpej #ifdef MYCROFT_HACK
2739 1.30.2.2 thorpej printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2740 1.30.2.2 thorpej #endif
2741 1.30.2.2 thorpej
2742 1.30.2.2 thorpej /* Are we mapping the same page ? */
2743 1.30.2.2 thorpej if (opa == pa) {
2744 1.30.2.2 thorpej /* All we must be doing is changing the protection */
2745 1.30.2.2 thorpej PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2746 1.30.2.2 thorpej va, pa));
2747 1.30.2.2 thorpej
2748 1.30.2.2 thorpej /* Has the wiring changed ? */
2749 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2750 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
2751 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
2752 1.30.2.2 thorpej (void) pmap_modify_pv(pmap, va, pvh,
2753 1.30.2.2 thorpej PT_Wr | PT_W, nflags);
2754 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2755 1.30.2.2 thorpej } else {
2756 1.30.2.2 thorpej pvh = NULL;
2757 1.30.2.2 thorpej }
2758 1.30.2.2 thorpej } else {
2759 1.30.2.2 thorpej /* We are replacing the page with a new one. */
2760 1.30.2.5 nathanw cpu_idcache_wbinv_range(va, NBPG);
2761 1.30.2.2 thorpej
2762 1.30.2.2 thorpej PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2763 1.30.2.2 thorpej va, pa, opa));
2764 1.30.2.2 thorpej
2765 1.30.2.2 thorpej /*
2766 1.30.2.2 thorpej * If it is part of our managed memory then we
2767 1.30.2.2 thorpej * must remove it from the PV list
2768 1.30.2.2 thorpej */
2769 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2770 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
2771 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
2772 1.30.2.2 thorpej pve = pmap_remove_pv(pvh, pmap, va);
2773 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2774 1.30.2.2 thorpej } else {
2775 1.30.2.2 thorpej pve = NULL;
2776 1.30.2.2 thorpej }
2777 1.30.2.2 thorpej
2778 1.30.2.2 thorpej goto enter;
2779 1.30.2.2 thorpej }
2780 1.30.2.2 thorpej } else {
2781 1.30.2.2 thorpej opa = 0;
2782 1.30.2.2 thorpej pve = NULL;
2783 1.30.2.2 thorpej pmap_pte_addref(pmap, va);
2784 1.30.2.2 thorpej
2785 1.30.2.2 thorpej /* pte is not valid so we must be hooking in a new page */
2786 1.30.2.2 thorpej ++pmap->pm_stats.resident_count;
2787 1.30.2.2 thorpej
2788 1.30.2.2 thorpej enter:
2789 1.30.2.2 thorpej /*
2790 1.30.2.2 thorpej * Enter on the PV list if part of our managed memory
2791 1.30.2.2 thorpej */
2792 1.30.2.2 thorpej bank = vm_physseg_find(atop(pa), &off);
2793 1.30.2.2 thorpej
2794 1.30.2.2 thorpej if (pmap_initialized && (bank != -1)) {
2795 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
2796 1.30.2.2 thorpej if (pve == NULL) {
2797 1.30.2.2 thorpej pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2798 1.30.2.2 thorpej if (pve == NULL) {
2799 1.30.2.2 thorpej if (flags & PMAP_CANFAIL) {
2800 1.30.2.2 thorpej error = ENOMEM;
2801 1.30.2.2 thorpej goto out;
2802 1.30.2.2 thorpej }
2803 1.30.2.2 thorpej panic("pmap_enter: no pv entries available");
2804 1.30.2.2 thorpej }
2805 1.30.2.2 thorpej }
2806 1.30.2.2 thorpej /* enter_pv locks pvh when adding */
2807 1.30.2.2 thorpej pmap_enter_pv(pvh, pve, pmap, va, NULL, nflags);
2808 1.30.2.2 thorpej } else {
2809 1.30.2.2 thorpej pvh = NULL;
2810 1.30.2.2 thorpej if (pve != NULL)
2811 1.30.2.2 thorpej pmap_free_pv(pmap, pve);
2812 1.30.2.2 thorpej }
2813 1.30.2.2 thorpej }
2814 1.30.2.2 thorpej
2815 1.30.2.2 thorpej #ifdef MYCROFT_HACK
2816 1.30.2.2 thorpej if (mycroft_hack)
2817 1.30.2.2 thorpej 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pve=%p\n", pmap, va, pa, opa, bank, off, pve);
2818 1.30.2.2 thorpej #endif
2819 1.30.2.2 thorpej
2820 1.30.2.2 thorpej /* Construct the pte, giving the correct access. */
2821 1.30.2.2 thorpej npte = (pa & PG_FRAME);
2822 1.30.2.2 thorpej
2823 1.30.2.2 thorpej /* VA 0 is magic. */
2824 1.30.2.2 thorpej if (pmap != pmap_kernel() && va != 0)
2825 1.30.2.2 thorpej npte |= PT_AP(AP_U);
2826 1.30.2.2 thorpej
2827 1.30.2.2 thorpej if (pmap_initialized && bank != -1) {
2828 1.30.2.2 thorpej #ifdef DIAGNOSTIC
2829 1.30.2.2 thorpej if ((flags & VM_PROT_ALL) & ~prot)
2830 1.30.2.2 thorpej panic("pmap_enter: access_type exceeds prot");
2831 1.30.2.2 thorpej #endif
2832 1.30.2.2 thorpej npte |= pte_cache_mode;
2833 1.30.2.2 thorpej if (flags & VM_PROT_WRITE) {
2834 1.30.2.2 thorpej npte |= L2_SPAGE | PT_AP(AP_W);
2835 1.30.2.2 thorpej vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2836 1.30.2.2 thorpej } else if (flags & VM_PROT_ALL) {
2837 1.30.2.2 thorpej npte |= L2_SPAGE;
2838 1.30.2.2 thorpej vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2839 1.30.2.2 thorpej } else
2840 1.30.2.2 thorpej npte |= L2_INVAL;
2841 1.30.2.2 thorpej } else {
2842 1.30.2.2 thorpej if (prot & VM_PROT_WRITE)
2843 1.30.2.2 thorpej npte |= L2_SPAGE | PT_AP(AP_W);
2844 1.30.2.2 thorpej else if (prot & VM_PROT_ALL)
2845 1.30.2.2 thorpej npte |= L2_SPAGE;
2846 1.30.2.2 thorpej else
2847 1.30.2.2 thorpej npte |= L2_INVAL;
2848 1.30.2.2 thorpej }
2849 1.30.2.2 thorpej
2850 1.30.2.2 thorpej #ifdef MYCROFT_HACK
2851 1.30.2.2 thorpej if (mycroft_hack)
2852 1.30.2.2 thorpej printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2853 1.30.2.2 thorpej #endif
2854 1.30.2.2 thorpej
2855 1.30.2.2 thorpej *pte = npte;
2856 1.30.2.2 thorpej
2857 1.30.2.2 thorpej 	if (pmap_initialized && bank != -1) {
2859 1.30.2.2 thorpej boolean_t pmap_active = FALSE;
2860 1.30.2.2 thorpej /* XXX this will change once the whole of pmap_enter uses
2861 1.30.2.2 thorpej * map_ptes
2862 1.30.2.2 thorpej */
2863 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap);
2864 1.30.2.2 thorpej if ((curproc && curproc->l_proc->p_vmspace->vm_map.pmap == pmap)
2865 1.30.2.2 thorpej || (pmap == pmap_kernel()))
2866 1.30.2.2 thorpej pmap_active = TRUE;
2867 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
2868 1.30.2.2 thorpej pmap_vac_me_harder(pmap, pvh, ptes, pmap_active);
2869 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
2870 1.30.2.2 thorpej pmap_unmap_ptes(pmap);
2871 1.30.2.2 thorpej }
2872 1.30.2.2 thorpej
2873 1.30.2.2 thorpej /* Better flush the TLB ... */
2874 1.30.2.2 thorpej cpu_tlb_flushID_SE(va);
2875 1.30.2.2 thorpej error = 0;
2876 1.30.2.2 thorpej out:
2877 1.30.2.2 thorpej simple_unlock(&pmap->pm_obj.vmobjlock);
2878 1.30.2.2 thorpej PMAP_MAP_TO_HEAD_UNLOCK();
2879 1.30.2.2 thorpej PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2880 1.30.2.2 thorpej
2881 1.30.2.2 thorpej return error;
2882 1.30.2.2 thorpej }
2883 1.30.2.2 thorpej
2884 1.30.2.2 thorpej void
2885 1.30.2.2 thorpej pmap_kenter_pa(va, pa, prot)
2886 1.30.2.2 thorpej vaddr_t va;
2887 1.30.2.2 thorpej paddr_t pa;
2888 1.30.2.2 thorpej vm_prot_t prot;
2889 1.30.2.2 thorpej {
2890 1.30.2.2 thorpej struct pmap *pmap = pmap_kernel();
2891 1.30.2.2 thorpej pt_entry_t *pte;
2892 1.30.2.2 thorpej struct vm_page *pg;
2893 1.30.2.2 thorpej
2894 1.30.2.2 thorpej if (!pmap_pde_page(pmap_pde(pmap, va))) {
2895 1.30.2.2 thorpej
2896 1.30.2.2 thorpej #ifdef DIAGNOSTIC
2897 1.30.2.2 thorpej if (pmap_pde_v(pmap_pde(pmap, va)))
2898 1.30.2.2 thorpej panic("Trying to map kernel page into section mapping"
2899 1.30.2.2 thorpej " VA=%lx PA=%lx", va, pa);
2900 1.30.2.2 thorpej #endif
2901 1.30.2.2 thorpej /*
2902 1.30.2.2 thorpej * For the kernel pmaps it would be better to ensure
2903 1.30.2.2 thorpej * that they are always present, and to grow the
2904 1.30.2.2 thorpej * kernel as required.
2905 1.30.2.2 thorpej */
2906 1.30.2.2 thorpej
2907 1.30.2.2 thorpej /* must lock the pmap */
2908 1.30.2.2 thorpej simple_lock(&(pmap_kernel()->pm_obj.vmobjlock));
2909 1.30.2.2 thorpej /* Allocate a page table */
2910 1.30.2.2 thorpej pg = uvm_pagealloc(&(pmap_kernel()->pm_obj), 0, NULL,
2911 1.30.2.2 thorpej UVM_PGA_USERESERVE | UVM_PGA_ZERO);
2912 1.30.2.2 thorpej if (pg == NULL) {
2913 1.30.2.2 thorpej panic("pmap_kenter_pa: no free pages");
2914 1.30.2.2 thorpej }
2915 1.30.2.2 thorpej pg->flags &= ~PG_BUSY; /* never busy */
2916 1.30.2.2 thorpej
2917 1.30.2.2 thorpej /* Wire this page table into the L1. */
2918 1.30.2.2 thorpej pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(pg), TRUE);
2919 1.30.2.2 thorpej simple_unlock(&(pmap_kernel()->pm_obj.vmobjlock));
2920 1.30.2.2 thorpej }
2921 1.30.2.2 thorpej pte = vtopte(va);
2922 1.30.2.2 thorpej KASSERT(!pmap_pte_v(pte));
2923 1.30.2.2 thorpej *pte = L2_PTE(pa, AP_KRW);
2924 1.30.2.2 thorpej }
2925 1.30.2.2 thorpej
2926 1.30.2.2 thorpej void
2927 1.30.2.2 thorpej pmap_kremove(va, len)
2928 1.30.2.2 thorpej vaddr_t va;
2929 1.30.2.2 thorpej vsize_t len;
2930 1.30.2.2 thorpej {
2931 1.30.2.2 thorpej pt_entry_t *pte;
2932 1.30.2.2 thorpej
2933 1.30.2.2 thorpej for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2934 1.30.2.2 thorpej
2935 1.30.2.2 thorpej /*
2936 1.30.2.2 thorpej * We assume that we will only be called with small
2937 1.30.2.2 thorpej * regions of memory.
2938 1.30.2.2 thorpej */
2939 1.30.2.2 thorpej
2940 1.30.2.2 thorpej KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
2941 1.30.2.2 thorpej pte = vtopte(va);
2942 1.30.2.5 nathanw cpu_idcache_wbinv_range(va, PAGE_SIZE);
2943 1.30.2.2 thorpej *pte = 0;
2944 1.30.2.2 thorpej cpu_tlb_flushID_SE(va);
2945 1.30.2.2 thorpej }
2946 1.30.2.2 thorpej }
2947 1.30.2.2 thorpej
2948 1.30.2.2 thorpej /*
2949 1.30.2.2 thorpej * pmap_page_protect:
2950 1.30.2.2 thorpej *
2951 1.30.2.2 thorpej * Lower the permission for all mappings to a given page.
2952 1.30.2.2 thorpej */
2953 1.30.2.2 thorpej
2954 1.30.2.2 thorpej void
2955 1.30.2.2 thorpej pmap_page_protect(pg, prot)
2956 1.30.2.2 thorpej struct vm_page *pg;
2957 1.30.2.2 thorpej vm_prot_t prot;
2958 1.30.2.2 thorpej {
2959 1.30.2.2 thorpej paddr_t pa = VM_PAGE_TO_PHYS(pg);
2960 1.30.2.2 thorpej
2961 1.30.2.2 thorpej PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2962 1.30.2.2 thorpej
2963 1.30.2.2 thorpej 	switch (prot) {
2964 1.30.2.2 thorpej case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
2965 1.30.2.2 thorpej case VM_PROT_READ|VM_PROT_WRITE:
2966 1.30.2.2 thorpej return;
2967 1.30.2.2 thorpej
2968 1.30.2.2 thorpej case VM_PROT_READ:
2969 1.30.2.2 thorpej case VM_PROT_READ|VM_PROT_EXECUTE:
2970 1.30.2.2 thorpej pmap_copy_on_write(pa);
2971 1.30.2.2 thorpej break;
2972 1.30.2.2 thorpej
2973 1.30.2.2 thorpej default:
2974 1.30.2.2 thorpej pmap_remove_all(pa);
2975 1.30.2.2 thorpej break;
2976 1.30.2.2 thorpej }
2977 1.30.2.2 thorpej }
2978 1.30.2.2 thorpej
2979 1.30.2.2 thorpej
2980 1.30.2.2 thorpej /*
2981 1.30.2.2 thorpej * Routine: pmap_unwire
2982 1.30.2.2 thorpej * Function: Clear the wired attribute for a map/virtual-address
2983 1.30.2.2 thorpej * pair.
2984 1.30.2.2 thorpej * In/out conditions:
2985 1.30.2.2 thorpej * The mapping must already exist in the pmap.
2986 1.30.2.2 thorpej */
2987 1.30.2.2 thorpej
2988 1.30.2.2 thorpej void
2989 1.30.2.2 thorpej pmap_unwire(pmap, va)
2990 1.30.2.2 thorpej struct pmap *pmap;
2991 1.30.2.2 thorpej vaddr_t va;
2992 1.30.2.2 thorpej {
2993 1.30.2.2 thorpej pt_entry_t *pte;
2994 1.30.2.2 thorpej paddr_t pa;
2995 1.30.2.2 thorpej int bank, off;
2996 1.30.2.2 thorpej struct pv_head *pvh;
2997 1.30.2.2 thorpej
2998 1.30.2.2 thorpej /*
2999 1.30.2.2 thorpej * Make sure pmap is valid. -dct
3000 1.30.2.2 thorpej */
3001 1.30.2.2 thorpej if (pmap == NULL)
3002 1.30.2.2 thorpej return;
3003 1.30.2.2 thorpej
3004 1.30.2.2 thorpej /* Get the pte */
3005 1.30.2.2 thorpej pte = pmap_pte(pmap, va);
3006 1.30.2.2 thorpej if (!pte)
3007 1.30.2.2 thorpej return;
3008 1.30.2.2 thorpej
3009 1.30.2.2 thorpej /* Extract the physical address of the page */
3010 1.30.2.2 thorpej pa = pmap_pte_pa(pte);
3011 1.30.2.2 thorpej
3012 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
3013 1.30.2.2 thorpej return;
3014 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
3015 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
3016 1.30.2.2 thorpej /* Update the wired bit in the pv entry for this page. */
3017 1.30.2.2 thorpej (void) pmap_modify_pv(pmap, va, pvh, PT_W, 0);
3018 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
3019 1.30.2.2 thorpej }
3020 1.30.2.2 thorpej
3021 1.30.2.2 thorpej /*
3022 1.30.2.2 thorpej * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
3023 1.30.2.2 thorpej *
3024 1.30.2.2 thorpej * Return the pointer to a page table entry corresponding to the supplied
3025 1.30.2.2 thorpej * virtual address.
3026 1.30.2.2 thorpej *
3027 1.30.2.2 thorpej * The page directory is first checked to make sure that a page table
3028 1.30.2.2 thorpej * for the address in question exists and if it does a pointer to the
3029 1.30.2.2 thorpej * entry is returned.
3030 1.30.2.2 thorpej *
3031 1.30.2.2 thorpej  * The way this works is that the page tables of the current pmap are
3032 1.30.2.2 thorpej  * mapped at PROCESS_PAGE_TBLS_BASE, and other pmaps' tables at
3033 1.30.2.2 thorpej  * ALT_PAGE_TBLS_BASE..+4MB, allowing page tables to be located quickly.
3034 1.30.2.2 thorpej */
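/*
 * With 4KB pages (PGSHIFT == 12) each page of VA is described by a
 * 4-byte PTE, so within a linearly mapped table the entry for "va"
 * lives at byte offset (va >> 12) * 4 == va >> 10.  The expression
 * (va >> (PGSHIFT - 2)) & ~3 at the end of this function computes
 * exactly that, forced to word alignment.
 */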
3035 1.30.2.2 thorpej pt_entry_t *
3036 1.30.2.2 thorpej pmap_pte(pmap, va)
3037 1.30.2.2 thorpej struct pmap *pmap;
3038 1.30.2.2 thorpej vaddr_t va;
3039 1.30.2.2 thorpej {
3040 1.30.2.2 thorpej pt_entry_t *ptp;
3041 1.30.2.2 thorpej pt_entry_t *result;
3042 1.30.2.2 thorpej
3043 1.30.2.2 thorpej /* The pmap must be valid */
3044 1.30.2.2 thorpej if (!pmap)
3045 1.30.2.2 thorpej return(NULL);
3046 1.30.2.2 thorpej
3047 1.30.2.2 thorpej /* Return the address of the pte */
3048 1.30.2.2 thorpej PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
3049 1.30.2.2 thorpej pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
3050 1.30.2.2 thorpej
3051 1.30.2.2 thorpej /* Do we have a valid pde ? If not we don't have a page table */
3052 1.30.2.2 thorpej if (!pmap_pde_page(pmap_pde(pmap, va))) {
3053 1.30.2.2 thorpej PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
3054 1.30.2.2 thorpej pmap_pde(pmap, va)));
3055 1.30.2.2 thorpej return(NULL);
3056 1.30.2.2 thorpej }
3057 1.30.2.2 thorpej
3058 1.30.2.2 thorpej PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
3059 1.30.2.2 thorpej pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
3060 1.30.2.2 thorpej + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
3061 1.30.2.2 thorpej (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
3062 1.30.2.2 thorpej
3063 1.30.2.2 thorpej /*
3064 1.30.2.2 thorpej * If the pmap is the kernel pmap or the pmap is the active one
3065 1.30.2.2 thorpej * then we can just return a pointer to entry relative to
3066 1.30.2.2 thorpej * PROCESS_PAGE_TBLS_BASE.
3067 1.30.2.2 thorpej * Otherwise we need to map the page tables to an alternative
3068 1.30.2.2 thorpej * address and reference them there.
3069 1.30.2.2 thorpej */
3070 1.30.2.2 thorpej if (pmap == pmap_kernel() || pmap->pm_pptpt
3071 1.30.2.2 thorpej == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
3072 1.30.2.2 thorpej + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
3073 1.30.2.2 thorpej ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
3074 1.30.2.2 thorpej ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
3075 1.30.2.2 thorpej } else {
3076 1.30.2.2 thorpej struct lwp *l = curproc;
3077 1.30.2.2 thorpej struct proc *p;
3078 1.30.2.2 thorpej
3079 1.30.2.2 thorpej 		/* If we don't have a valid curproc use lwp0 */
3080 1.30.2.2 thorpej /* Perhaps we should just use kernel_pmap instead */
3081 1.30.2.2 thorpej if (l == NULL)
3082 1.30.2.2 thorpej l = &lwp0;
3083 1.30.2.2 thorpej p = l->l_proc;
3084 1.30.2.2 thorpej
3085 1.30.2.2 thorpej #ifdef DIAGNOSTIC
3086 1.30.2.2 thorpej /*
3087 1.30.2.2 thorpej * The pmap should always be valid for the process so
3088 1.30.2.2 thorpej * panic if it is not.
3089 1.30.2.2 thorpej */
3090 1.30.2.2 thorpej if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
3091 1.30.2.2 thorpej printf("pmap_pte: va=%08lx p=%p vm=%p\n",
3092 1.30.2.2 thorpej va, p, p->p_vmspace);
3093 1.30.2.2 thorpej console_debugger();
3094 1.30.2.2 thorpej }
3095 1.30.2.2 thorpej /*
3096 1.30.2.2 thorpej * The pmap for the current process should be mapped. If it
3097 1.30.2.2 thorpej * is not then we have a problem.
3098 1.30.2.2 thorpej */
3099 1.30.2.2 thorpej if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
3100 1.30.2.2 thorpej (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
3101 1.30.2.2 thorpej + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
3102 1.30.2.2 thorpej (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
3103 1.30.2.2 thorpej printf("pmap pagetable = P%08lx current = P%08x ",
3104 1.30.2.2 thorpej pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
3105 1.30.2.2 thorpej + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
3106 1.30.2.2 thorpej (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
3107 1.30.2.2 thorpej PG_FRAME));
3108 1.30.2.2 thorpej printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
3109 1.30.2.2 thorpej panic("pmap_pte: current and pmap mismatch\n");
3110 1.30.2.2 thorpej }
3111 1.30.2.2 thorpej #endif
3112 1.30.2.2 thorpej
3113 1.30.2.2 thorpej ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
3114 1.30.2.2 thorpej pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
3115 1.30.2.2 thorpej pmap->pm_pptpt, FALSE);
3116 1.30.2.2 thorpej cpu_tlb_flushD();
3117 1.30.2.3 nathanw cpu_cpwait();
3118 1.30.2.2 thorpej }
3119 1.30.2.2 thorpej PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
3120 1.30.2.2 thorpej ((va >> (PGSHIFT-2)) & ~3)));
3121 1.30.2.2 thorpej result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
3122 1.30.2.2 thorpej return(result);
3123 1.30.2.2 thorpej }
3124 1.30.2.2 thorpej
3125 1.30.2.2 thorpej /*
3126 1.30.2.2 thorpej * Routine: pmap_extract
3127 1.30.2.2 thorpej * Function:
3128 1.30.2.2 thorpej * Extract the physical page address associated
3129 1.30.2.2 thorpej * with the given map/virtual_address pair.
3130 1.30.2.2 thorpej */
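/*
 * Typical use (a sketch):
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap, va, &pa) == FALSE)
 *		... va is not mapped in pmap ...
 */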
3131 1.30.2.2 thorpej boolean_t
3132 1.30.2.2 thorpej pmap_extract(pmap, va, pap)
3133 1.30.2.2 thorpej struct pmap *pmap;
3134 1.30.2.2 thorpej vaddr_t va;
3135 1.30.2.2 thorpej paddr_t *pap;
3136 1.30.2.2 thorpej {
3137 1.30.2.5 nathanw pd_entry_t *pde;
3138 1.30.2.2 thorpej pt_entry_t *pte, *ptes;
3139 1.30.2.2 thorpej paddr_t pa;
3140 1.30.2.5 nathanw boolean_t rv = TRUE;
3141 1.30.2.2 thorpej
3142 1.30.2.2 thorpej PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
3143 1.30.2.2 thorpej
3144 1.30.2.2 thorpej /*
3145 1.30.2.2 thorpej * Get the pte for this virtual address.
3146 1.30.2.2 thorpej */
3147 1.30.2.5 nathanw pde = pmap_pde(pmap, va);
3148 1.30.2.2 thorpej ptes = pmap_map_ptes(pmap);
3149 1.30.2.2 thorpej pte = &ptes[arm_byte_to_page(va)];
3150 1.30.2.2 thorpej
3151 1.30.2.5 nathanw 	if (pmap_pde_section(pde)) {
3152 1.30.2.5 nathanw 		pa = (*pde & PD_MASK) | (va & (L1_SEC_SIZE - 1));
		if (pap != NULL)
			*pap = pa;
3153 1.30.2.5 nathanw 		goto out;
3154 1.30.2.5 nathanw } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
3155 1.30.2.5 nathanw rv = FALSE;
3156 1.30.2.5 nathanw goto out;
3157 1.30.2.2 thorpej }
3158 1.30.2.2 thorpej
3159 1.30.2.5 nathanw if ((*pte & L2_MASK) == L2_LPAGE) {
3160 1.30.2.2 thorpej /* Extract the physical address from the pte */
3161 1.30.2.5 nathanw pa = *pte & ~(L2_LPAGE_SIZE - 1);
3162 1.30.2.2 thorpej
3163 1.30.2.2 thorpej PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
3164 1.30.2.2 thorpej (pa | (va & (L2_LPAGE_SIZE - 1)))));
3165 1.30.2.2 thorpej
3166 1.30.2.2 thorpej if (pap != NULL)
3167 1.30.2.2 thorpej *pap = pa | (va & (L2_LPAGE_SIZE - 1));
3168 1.30.2.5 nathanw goto out;
3169 1.30.2.5 nathanw }
3170 1.30.2.2 thorpej
3171 1.30.2.5 nathanw /* Extract the physical address from the pte */
3172 1.30.2.5 nathanw pa = pmap_pte_pa(pte);
3173 1.30.2.2 thorpej
3174 1.30.2.5 nathanw PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
3175 1.30.2.5 nathanw (pa | (va & ~PG_FRAME))));
3176 1.30.2.5 nathanw
3177 1.30.2.5 nathanw if (pap != NULL)
3178 1.30.2.5 nathanw *pap = pa | (va & ~PG_FRAME);
3179 1.30.2.5 nathanw out:
3180 1.30.2.2 thorpej pmap_unmap_ptes(pmap);
3181 1.30.2.5 nathanw return (rv);
3182 1.30.2.2 thorpej }
3183 1.30.2.2 thorpej
3184 1.30.2.2 thorpej
3185 1.30.2.2 thorpej /*
3186 1.30.2.2 thorpej * Copy the range specified by src_addr/len from the source map to the
3187 1.30.2.2 thorpej * range dst_addr/len in the destination map.
3188 1.30.2.2 thorpej *
3189 1.30.2.2 thorpej * This routine is only advisory and need not do anything.
3190 1.30.2.2 thorpej */
3191 1.30.2.2 thorpej
3192 1.30.2.2 thorpej void
3193 1.30.2.2 thorpej pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
3194 1.30.2.2 thorpej struct pmap *dst_pmap;
3195 1.30.2.2 thorpej struct pmap *src_pmap;
3196 1.30.2.2 thorpej vaddr_t dst_addr;
3197 1.30.2.2 thorpej vsize_t len;
3198 1.30.2.2 thorpej vaddr_t src_addr;
3199 1.30.2.2 thorpej {
3200 1.30.2.2 thorpej PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
3201 1.30.2.2 thorpej dst_pmap, src_pmap, dst_addr, len, src_addr));
3202 1.30.2.2 thorpej }
3203 1.30.2.2 thorpej
3204 1.30.2.2 thorpej #if defined(PMAP_DEBUG)
3205 1.30.2.2 thorpej void
3206 1.30.2.2 thorpej pmap_dump_pvlist(phys, m)
3207 1.30.2.2 thorpej vaddr_t phys;
3208 1.30.2.2 thorpej char *m;
3209 1.30.2.2 thorpej {
3210 1.30.2.2 thorpej struct pv_head *pvh;
3211 1.30.2.2 thorpej struct pv_entry *pv;
3212 1.30.2.2 thorpej int bank, off;
3213 1.30.2.2 thorpej
3214 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
3215 1.30.2.2 thorpej printf("INVALID PA\n");
3216 1.30.2.2 thorpej return;
3217 1.30.2.2 thorpej }
3218 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
3219 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
3220 1.30.2.2 thorpej printf("%s %08lx:", m, phys);
3221 1.30.2.2 thorpej 	if (pvh->pvh_list == NULL) {
3222 1.30.2.2 thorpej 		printf(" no mappings\n");
		simple_unlock(&pvh->pvh_lock);
3223 1.30.2.2 thorpej 		return;
3224 1.30.2.2 thorpej 	}
3225 1.30.2.2 thorpej
3226 1.30.2.2 thorpej for (pv = pvh->pvh_list; pv; pv = pv->pv_next)
3227 1.30.2.2 thorpej printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3228 1.30.2.2 thorpej pv->pv_va, pv->pv_flags);
3229 1.30.2.2 thorpej
3230 1.30.2.2 thorpej printf("\n");
3231 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
3232 1.30.2.2 thorpej }
3233 1.30.2.2 thorpej
3234 1.30.2.2 thorpej #endif /* PMAP_DEBUG */
3235 1.30.2.2 thorpej
3236 1.30.2.2 thorpej __inline static boolean_t
3237 1.30.2.2 thorpej pmap_testbit(pa, setbits)
3238 1.30.2.2 thorpej paddr_t pa;
3239 1.30.2.2 thorpej unsigned int setbits;
3240 1.30.2.2 thorpej {
3241 1.30.2.2 thorpej int bank, off;
3242 1.30.2.2 thorpej
3243 1.30.2.2 thorpej PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
3244 1.30.2.2 thorpej
3245 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
3246 1.30.2.2 thorpej return(FALSE);
3247 1.30.2.2 thorpej
3248 1.30.2.2 thorpej /*
3249 1.30.2.2 thorpej * Check saved info only
3250 1.30.2.2 thorpej */
3251 1.30.2.2 thorpej if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
3252 1.30.2.2 thorpej PDEBUG(0, printf("pmap_attributes = %02x\n",
3253 1.30.2.2 thorpej vm_physmem[bank].pmseg.attrs[off]));
3254 1.30.2.2 thorpej return(TRUE);
3255 1.30.2.2 thorpej }
3256 1.30.2.2 thorpej
3257 1.30.2.2 thorpej return(FALSE);
3258 1.30.2.2 thorpej }
3259 1.30.2.2 thorpej
3260 1.30.2.2 thorpej static pt_entry_t *
3261 1.30.2.2 thorpej pmap_map_ptes(struct pmap *pmap)
3262 1.30.2.2 thorpej {
3263 1.30.2.2 thorpej struct lwp *l;
3264 1.30.2.2 thorpej struct proc *p;
3265 1.30.2.2 thorpej
3266 1.30.2.2 thorpej /* the kernel's pmap is always accessible */
3267 1.30.2.2 thorpej if (pmap == pmap_kernel()) {
3268 1.30.2.2 thorpej 		return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
3269 1.30.2.2 thorpej }
3270 1.30.2.2 thorpej
3271 1.30.2.2 thorpej if (pmap_is_curpmap(pmap)) {
3272 1.30.2.2 thorpej simple_lock(&pmap->pm_obj.vmobjlock);
3273 1.30.2.2 thorpej return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
3274 1.30.2.2 thorpej }
3275 1.30.2.2 thorpej
3276 1.30.2.2 thorpej l = curproc;
3277 1.30.2.2 thorpej if (l == NULL)
3278 1.30.2.2 thorpej l = &lwp0;
3279 1.30.2.2 thorpej p = l->l_proc;
3280 1.30.2.2 thorpej
3281 1.30.2.2 thorpej /* need to lock both curpmap and pmap: use ordered locking */
3282 1.30.2.2 thorpej if ((unsigned) pmap < (unsigned) p->p_vmspace->vm_map.pmap) {
3283 1.30.2.2 thorpej simple_lock(&pmap->pm_obj.vmobjlock);
3284 1.30.2.2 thorpej simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3285 1.30.2.2 thorpej } else {
3286 1.30.2.2 thorpej simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3287 1.30.2.2 thorpej simple_lock(&pmap->pm_obj.vmobjlock);
3288 1.30.2.2 thorpej }
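	/*
	 * Taking the two locks in a fixed (address) order means two
	 * threads mapping each other's pmaps cannot deadlock: both
	 * will attempt the lower-addressed lock first.
	 */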
3289 1.30.2.2 thorpej
3290 1.30.2.2 thorpej pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
3291 1.30.2.2 thorpej pmap->pm_pptpt, FALSE);
3292 1.30.2.2 thorpej cpu_tlb_flushD();
3293 1.30.2.3 nathanw cpu_cpwait();
3294 1.30.2.2 thorpej return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
3295 1.30.2.2 thorpej }
3296 1.30.2.2 thorpej
3297 1.30.2.2 thorpej /*
3298 1.30.2.2 thorpej * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3299 1.30.2.2 thorpej */
3300 1.30.2.2 thorpej
3301 1.30.2.2 thorpej static void
3302 1.30.2.2 thorpej pmap_unmap_ptes(pmap)
3303 1.30.2.2 thorpej struct pmap *pmap;
3304 1.30.2.2 thorpej {
3305 1.30.2.2 thorpej struct lwp *l;
3306 1.30.2.2 thorpej struct proc *p;
3307 1.30.2.2 thorpej
3308 1.30.2.2 thorpej if (pmap == pmap_kernel()) {
3309 1.30.2.2 thorpej return;
3310 1.30.2.2 thorpej }
3311 1.30.2.2 thorpej if (pmap_is_curpmap(pmap)) {
3312 1.30.2.2 thorpej simple_unlock(&pmap->pm_obj.vmobjlock);
3313 1.30.2.2 thorpej } else {
3314 1.30.2.2 thorpej l = curproc;
3315 1.30.2.2 thorpej if (l == NULL)
3316 1.30.2.2 thorpej l = &lwp0;
3317 1.30.2.2 thorpej p = l->l_proc;
3318 1.30.2.2 thorpej
3319 1.30.2.2 thorpej simple_unlock(&pmap->pm_obj.vmobjlock);
3320 1.30.2.2 thorpej simple_unlock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3321 1.30.2.2 thorpej }
3322 1.30.2.2 thorpej }
3323 1.30.2.2 thorpej
3324 1.30.2.2 thorpej /*
3325 1.30.2.2 thorpej * Modify pte bits for all ptes corresponding to the given physical address.
3326 1.30.2.2 thorpej * We use `maskbits' rather than `clearbits' because we're always passing
3327 1.30.2.2 thorpej * constants and the latter would require an extra inversion at run-time.
3328 1.30.2.2 thorpej */
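/*
 * E.g. pmap_clear_modify() below passes PT_M, which this routine can
 * apply directly:
 *
 *	vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
 *
 * whereas a `clearbits' interface would have to invert the argument
 * first.
 */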
3329 1.30.2.2 thorpej
3330 1.30.2.2 thorpej static void
3331 1.30.2.2 thorpej pmap_clearbit(pa, maskbits)
3332 1.30.2.2 thorpej paddr_t pa;
3333 1.30.2.2 thorpej unsigned int maskbits;
3334 1.30.2.2 thorpej {
3335 1.30.2.2 thorpej struct pv_entry *pv;
3336 1.30.2.2 thorpej struct pv_head *pvh;
3337 1.30.2.2 thorpej pt_entry_t *pte;
3338 1.30.2.2 thorpej vaddr_t va;
3339 1.30.2.2 thorpej int bank, off, tlbentry;
3340 1.30.2.2 thorpej
3341 1.30.2.2 thorpej PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
3342 1.30.2.2 thorpej pa, maskbits));
3343 1.30.2.2 thorpej
3344 1.30.2.2 thorpej tlbentry = 0;
3345 1.30.2.2 thorpej
3346 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
3347 1.30.2.2 thorpej return;
3348 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_LOCK();
3349 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
3350 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
3351 1.30.2.2 thorpej
3352 1.30.2.2 thorpej /*
3353 1.30.2.2 thorpej * Clear saved attributes (modify, reference)
3354 1.30.2.2 thorpej */
3355 1.30.2.2 thorpej vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
3356 1.30.2.2 thorpej
3357 1.30.2.2 thorpej if (pvh->pvh_list == NULL) {
3358 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
3359 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
3360 1.30.2.2 thorpej return;
3361 1.30.2.2 thorpej }
3362 1.30.2.2 thorpej
3363 1.30.2.2 thorpej /*
3364 1.30.2.2 thorpej 	 * Loop over all current mappings setting/clearing as appropriate
3365 1.30.2.2 thorpej */
3366 1.30.2.2 thorpej for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
3367 1.30.2.2 thorpej va = pv->pv_va;
3368 1.30.2.2 thorpej pv->pv_flags &= ~maskbits;
3369 1.30.2.2 thorpej pte = pmap_pte(pv->pv_pmap, va);
3370 1.30.2.2 thorpej KASSERT(pte != NULL);
3371 1.30.2.2 thorpej if (maskbits & (PT_Wr|PT_M)) {
3372 1.30.2.2 thorpej if ((pv->pv_flags & PT_NC)) {
3373 1.30.2.2 thorpej /*
3374 1.30.2.2 thorpej * Entry is not cacheable: reenable
3375 1.30.2.2 thorpej * the cache, nothing to flush
3376 1.30.2.2 thorpej *
3377 1.30.2.2 thorpej * Don't turn caching on again if this
3378 1.30.2.2 thorpej * is a modified emulation. This
3379 1.30.2.2 thorpej 				 * would be inconsistent with the
3380 1.30.2.2 thorpej * settings created by
3381 1.30.2.2 thorpej * pmap_vac_me_harder().
3382 1.30.2.2 thorpej *
3383 1.30.2.2 thorpej * There's no need to call
3384 1.30.2.2 thorpej * pmap_vac_me_harder() here: all
3385 1.30.2.2 thorpej 				 * pages are losing their write
3386 1.30.2.2 thorpej * permission.
3387 1.30.2.2 thorpej *
3388 1.30.2.2 thorpej */
3389 1.30.2.2 thorpej if (maskbits & PT_Wr) {
3390 1.30.2.2 thorpej *pte |= pte_cache_mode;
3391 1.30.2.2 thorpej pv->pv_flags &= ~PT_NC;
3392 1.30.2.2 thorpej }
3393 1.30.2.2 thorpej } else if (pmap_is_curpmap(pv->pv_pmap))
3394 1.30.2.2 thorpej /*
3395 1.30.2.2 thorpej 				 * Entry is cacheable: if the pmap is
3396 1.30.2.2 thorpej 				 * current, flush the cache line;
3397 1.30.2.2 thorpej 				 * otherwise the page won't be in the cache.
3398 1.30.2.2 thorpej */
3399 1.30.2.5 nathanw cpu_idcache_wbinv_range(pv->pv_va, NBPG);
3400 1.30.2.2 thorpej
3401 1.30.2.2 thorpej /* make the pte read only */
3402 1.30.2.2 thorpej *pte &= ~PT_AP(AP_W);
3403 1.30.2.2 thorpej }
3404 1.30.2.2 thorpej
3405 1.30.2.2 thorpej if (maskbits & PT_H)
3406 1.30.2.2 thorpej *pte = (*pte & ~L2_MASK) | L2_INVAL;
3407 1.30.2.2 thorpej
3408 1.30.2.2 thorpej if (pmap_is_curpmap(pv->pv_pmap))
3409 1.30.2.2 thorpej /*
3410 1.30.2.2 thorpej 			 * If we had cacheable PTEs we would clean
3411 1.30.2.2 thorpej 			 * the PTE out to memory here.
3412 1.30.2.2 thorpej 			 *
3413 1.30.2.2 thorpej 			 * Flush the TLB entry as it's in the current pmap.
3414 1.30.2.2 thorpej */
3415 1.30.2.2 thorpej cpu_tlb_flushID_SE(pv->pv_va);
3416 1.30.2.2 thorpej }
3417 1.30.2.3 nathanw cpu_cpwait();
3418 1.30.2.2 thorpej
3419 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
3420 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
3421 1.30.2.2 thorpej }
3422 1.30.2.2 thorpej
3423 1.30.2.2 thorpej
3424 1.30.2.2 thorpej boolean_t
3425 1.30.2.2 thorpej pmap_clear_modify(pg)
3426 1.30.2.2 thorpej struct vm_page *pg;
3427 1.30.2.2 thorpej {
3428 1.30.2.2 thorpej paddr_t pa = VM_PAGE_TO_PHYS(pg);
3429 1.30.2.2 thorpej boolean_t rv;
3430 1.30.2.2 thorpej
3431 1.30.2.2 thorpej PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
3432 1.30.2.2 thorpej rv = pmap_testbit(pa, PT_M);
3433 1.30.2.2 thorpej pmap_clearbit(pa, PT_M);
3434 1.30.2.2 thorpej return rv;
3435 1.30.2.2 thorpej }
3436 1.30.2.2 thorpej
3437 1.30.2.2 thorpej
3438 1.30.2.2 thorpej boolean_t
3439 1.30.2.2 thorpej pmap_clear_reference(pg)
3440 1.30.2.2 thorpej struct vm_page *pg;
3441 1.30.2.2 thorpej {
3442 1.30.2.2 thorpej paddr_t pa = VM_PAGE_TO_PHYS(pg);
3443 1.30.2.2 thorpej boolean_t rv;
3444 1.30.2.2 thorpej
3445 1.30.2.2 thorpej PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
3446 1.30.2.2 thorpej rv = pmap_testbit(pa, PT_H);
3447 1.30.2.2 thorpej pmap_clearbit(pa, PT_H);
3448 1.30.2.2 thorpej return rv;
3449 1.30.2.2 thorpej }
3450 1.30.2.2 thorpej
3451 1.30.2.2 thorpej
3452 1.30.2.2 thorpej void
3453 1.30.2.2 thorpej pmap_copy_on_write(pa)
3454 1.30.2.2 thorpej paddr_t pa;
3455 1.30.2.2 thorpej {
3456 1.30.2.2 thorpej PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
3457 1.30.2.2 thorpej pmap_clearbit(pa, PT_Wr);
3458 1.30.2.2 thorpej }
3459 1.30.2.2 thorpej
3460 1.30.2.2 thorpej
3461 1.30.2.2 thorpej boolean_t
3462 1.30.2.2 thorpej pmap_is_modified(pg)
3463 1.30.2.2 thorpej struct vm_page *pg;
3464 1.30.2.2 thorpej {
3465 1.30.2.2 thorpej paddr_t pa = VM_PAGE_TO_PHYS(pg);
3466 1.30.2.2 thorpej boolean_t result;
3467 1.30.2.2 thorpej
3468 1.30.2.2 thorpej result = pmap_testbit(pa, PT_M);
3469 1.30.2.2 thorpej PDEBUG(1, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
3470 1.30.2.2 thorpej return (result);
3471 1.30.2.2 thorpej }
3472 1.30.2.2 thorpej
3473 1.30.2.2 thorpej
3474 1.30.2.2 thorpej boolean_t
3475 1.30.2.2 thorpej pmap_is_referenced(pg)
3476 1.30.2.2 thorpej struct vm_page *pg;
3477 1.30.2.2 thorpej {
3478 1.30.2.2 thorpej paddr_t pa = VM_PAGE_TO_PHYS(pg);
3479 1.30.2.2 thorpej boolean_t result;
3480 1.30.2.2 thorpej
3481 1.30.2.2 thorpej result = pmap_testbit(pa, PT_H);
3482 1.30.2.2 thorpej PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
3483 1.30.2.2 thorpej return (result);
3484 1.30.2.2 thorpej }
3485 1.30.2.2 thorpej
3486 1.30.2.2 thorpej
3487 1.30.2.2 thorpej int
3488 1.30.2.2 thorpej pmap_modified_emulation(pmap, va)
3489 1.30.2.2 thorpej struct pmap *pmap;
3490 1.30.2.2 thorpej vaddr_t va;
3491 1.30.2.2 thorpej {
3492 1.30.2.2 thorpej pt_entry_t *pte;
3493 1.30.2.2 thorpej paddr_t pa;
3494 1.30.2.2 thorpej int bank, off;
3495 1.30.2.2 thorpej struct pv_head *pvh;
3496 1.30.2.2 thorpej u_int flags;
3497 1.30.2.2 thorpej
3498 1.30.2.2 thorpej PDEBUG(2, printf("pmap_modified_emulation\n"));
3499 1.30.2.2 thorpej
3500 1.30.2.2 thorpej /* Get the pte */
3501 1.30.2.2 thorpej pte = pmap_pte(pmap, va);
3502 1.30.2.2 thorpej if (!pte) {
3503 1.30.2.2 thorpej PDEBUG(2, printf("no pte\n"));
3504 1.30.2.2 thorpej return(0);
3505 1.30.2.2 thorpej }
3506 1.30.2.2 thorpej
3507 1.30.2.2 thorpej PDEBUG(1, printf("*pte=%08x\n", *pte));
3508 1.30.2.2 thorpej
3509 1.30.2.2 thorpej /* Check for a zero pte */
3510 1.30.2.2 thorpej if (*pte == 0)
3511 1.30.2.2 thorpej return(0);
3512 1.30.2.2 thorpej
3513 1.30.2.2 thorpej 	/* PTE already writable: a genuine fault (e.g. user access to kernel memory). */
3514 1.30.2.2 thorpej if ((*pte & PT_AP(AP_W)) != 0)
3515 1.30.2.2 thorpej return (0);
3516 1.30.2.2 thorpej
3517 1.30.2.2 thorpej /* Extract the physical address of the page */
3518 1.30.2.2 thorpej pa = pmap_pte_pa(pte);
3519 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
3520 1.30.2.2 thorpej return(0);
3521 1.30.2.2 thorpej
3522 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_LOCK();
3523 1.30.2.2 thorpej /* Get the current flags for this page. */
3524 1.30.2.2 thorpej pvh = &vm_physmem[bank].pmseg.pvhead[off];
3525 1.30.2.2 thorpej /* XXX: needed if we hold head->map lock? */
3526 1.30.2.2 thorpej simple_lock(&pvh->pvh_lock);
3527 1.30.2.2 thorpej
3528 1.30.2.2 thorpej flags = pmap_modify_pv(pmap, va, pvh, 0, 0);
3529 1.30.2.2 thorpej PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3530 1.30.2.2 thorpej
3531 1.30.2.2 thorpej /*
3532 1.30.2.2 thorpej * Do the flags say this page is writable ? If not then it is a
3533 1.30.2.2 thorpej * genuine write fault. If yes then the write fault is our fault
3534 1.30.2.2 thorpej 	 * as we did not reflect the write access in the PTE.  Now that we
3535 1.30.2.2 thorpej 	 * know a write has occurred we can correct this and also set the
3536 1.30.2.2 thorpej 	 * modified bit.
3537 1.30.2.2 thorpej */
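	/*
	 * In PTE terms the fixup performed below is (a sketch):
	 *
	 *	before:	L2_SPAGE, PT_AP(AP_W) clear	-> writes fault
	 *	after:	L2_SPAGE | PT_AP(AP_W)		-> writes allowed
	 */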
3538 1.30.2.2 thorpej if (~flags & PT_Wr) {
3539 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
3540 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
3541 1.30.2.2 thorpej return(0);
3542 1.30.2.2 thorpej }
3543 1.30.2.2 thorpej
3544 1.30.2.2 thorpej PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
3545 1.30.2.2 thorpej va, pte, *pte));
3546 1.30.2.2 thorpej vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
3547 1.30.2.2 thorpej
3548 1.30.2.2 thorpej /*
3549 1.30.2.2 thorpej * Re-enable write permissions for the page. No need to call
3550 1.30.2.2 thorpej * pmap_vac_me_harder(), since this is just a
3551 1.30.2.2 thorpej * modified-emulation fault, and the PT_Wr bit isn't changing. We've
3552 1.30.2.2 thorpej * already set the cacheable bits based on the assumption that we
3553 1.30.2.2 thorpej * can write to this page.
3554 1.30.2.2 thorpej */
3555 1.30.2.2 thorpej *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
3556 1.30.2.2 thorpej PDEBUG(0, printf("->(%08x)\n", *pte));
3557 1.30.2.2 thorpej
3558 1.30.2.2 thorpej simple_unlock(&pvh->pvh_lock);
3559 1.30.2.2 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
3560 1.30.2.2 thorpej /* Return, indicating the problem has been dealt with */
3561 1.30.2.2 thorpej cpu_tlb_flushID_SE(va);
3562 1.30.2.3 nathanw cpu_cpwait();
3563 1.30.2.2 thorpej return(1);
3564 1.30.2.2 thorpej }
3565 1.30.2.2 thorpej
3566 1.30.2.2 thorpej
3567 1.30.2.2 thorpej int
3568 1.30.2.2 thorpej pmap_handled_emulation(pmap, va)
3569 1.30.2.2 thorpej struct pmap *pmap;
3570 1.30.2.2 thorpej vaddr_t va;
3571 1.30.2.2 thorpej {
3572 1.30.2.2 thorpej pt_entry_t *pte;
3573 1.30.2.2 thorpej paddr_t pa;
3574 1.30.2.2 thorpej int bank, off;
3575 1.30.2.2 thorpej
3576 1.30.2.2 thorpej PDEBUG(2, printf("pmap_handled_emulation\n"));
3577 1.30.2.2 thorpej
3578 1.30.2.2 thorpej /* Get the pte */
3579 1.30.2.2 thorpej pte = pmap_pte(pmap, va);
3580 1.30.2.2 thorpej if (!pte) {
3581 1.30.2.2 thorpej PDEBUG(2, printf("no pte\n"));
3582 1.30.2.2 thorpej return(0);
3583 1.30.2.2 thorpej }
3584 1.30.2.2 thorpej
3585 1.30.2.2 thorpej PDEBUG(1, printf("*pte=%08x\n", *pte));
3586 1.30.2.2 thorpej
3587 1.30.2.2 thorpej /* Check for a zero pte */
3588 1.30.2.2 thorpej if (*pte == 0)
3589 1.30.2.2 thorpej return(0);
3590 1.30.2.2 thorpej
3591 1.30.2.2 thorpej 	/* PTE already valid: a genuine fault (e.g. user access to kernel memory). */
3592 1.30.2.2 thorpej if ((*pte & L2_MASK) != L2_INVAL)
3593 1.30.2.2 thorpej return (0);
3594 1.30.2.2 thorpej
3595 1.30.2.2 thorpej /* Extract the physical address of the page */
3596 1.30.2.2 thorpej pa = pmap_pte_pa(pte);
3597 1.30.2.2 thorpej if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
3598 1.30.2.2 thorpej return(0);
3599 1.30.2.2 thorpej
3600 1.30.2.2 thorpej /*
3601 1.30.2.2 thorpej 	 * OK, we just enable the pte and mark the attributes as handled
3602 1.30.2.2 thorpej */
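	/*
	 * In PTE terms (a sketch):
	 *
	 *	before:	L2_INVAL	-> every access faults
	 *	after:	L2_SPAGE	-> access allowed, PT_H recorded
	 */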
3603 1.30.2.2 thorpej PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
3604 1.30.2.2 thorpej va, pte, *pte));
3605 1.30.2.2 thorpej vm_physmem[bank].pmseg.attrs[off] |= PT_H;
3606 1.30.2.2 thorpej *pte = (*pte & ~L2_MASK) | L2_SPAGE;
3607 1.30.2.2 thorpej PDEBUG(0, printf("->(%08x)\n", *pte));
3608 1.30.2.2 thorpej
3609 1.30.2.2 thorpej /* Return, indicating the problem has been dealt with */
3610 1.30.2.2 thorpej cpu_tlb_flushID_SE(va);
3611 1.30.2.3 nathanw cpu_cpwait();
3612 1.30.2.2 thorpej return(1);
3613 1.30.2.2 thorpej }
3614 1.30.2.2 thorpej
3615 1.30.2.2 thorpej
3616 1.30.2.2 thorpej
3617 1.30.2.2 thorpej
3618 1.30.2.2 thorpej /*
3619 1.30.2.2 thorpej * pmap_collect: free resources held by a pmap
3620 1.30.2.2 thorpej *
3621 1.30.2.2 thorpej * => optional function.
3622 1.30.2.2 thorpej * => called when a process is swapped out to free memory.
3623 1.30.2.2 thorpej */
3624 1.30.2.2 thorpej
3625 1.30.2.2 thorpej void
3626 1.30.2.2 thorpej pmap_collect(pmap)
3627 1.30.2.2 thorpej struct pmap *pmap;
3628 1.30.2.2 thorpej {
3629 1.30.2.2 thorpej }
3630 1.30.2.2 thorpej
3631 1.30.2.2 thorpej /*
3632 1.30.2.2 thorpej * Routine: pmap_procwr
3633 1.30.2.2 thorpej *
3634 1.30.2.2 thorpej * Function:
3635 1.30.2.2 thorpej * Synchronize caches corresponding to [addr, addr+len) in p.
3636 1.30.2.2 thorpej *
3637 1.30.2.2 thorpej */
3638 1.30.2.2 thorpej void
3639 1.30.2.2 thorpej pmap_procwr(p, va, len)
3640 1.30.2.2 thorpej struct proc *p;
3641 1.30.2.2 thorpej vaddr_t va;
3642 1.30.2.2 thorpej int len;
3643 1.30.2.2 thorpej {
3644 1.30.2.2 thorpej /* We only need to do anything if it is the current process. */
3645 1.30.2.2 thorpej if (curproc != NULL && p == curproc->l_proc)
3646 1.30.2.5 nathanw cpu_icache_sync_range(va, len);
3647 1.30.2.2 thorpej }
3648 1.30.2.2 thorpej /*
3649 1.30.2.2 thorpej * PTP functions
3650 1.30.2.2 thorpej */
3651 1.30.2.2 thorpej
3652 1.30.2.2 thorpej /*
3653 1.30.2.2 thorpej * pmap_steal_ptp: Steal a PTP from somewhere else.
3654 1.30.2.2 thorpej *
3655 1.30.2.2 thorpej * This is just a placeholder, for now we never steal.
3656 1.30.2.2 thorpej */
3657 1.30.2.2 thorpej
3658 1.30.2.2 thorpej static struct vm_page *
3659 1.30.2.2 thorpej pmap_steal_ptp(struct pmap *pmap, vaddr_t va)
3660 1.30.2.2 thorpej {
3661 1.30.2.2 thorpej return (NULL);
3662 1.30.2.2 thorpej }
3663 1.30.2.2 thorpej
3664 1.30.2.2 thorpej /*
3665 1.30.2.2 thorpej * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3666 1.30.2.2 thorpej *
3667 1.30.2.2 thorpej * => pmap should NOT be pmap_kernel()
3668 1.30.2.2 thorpej * => pmap should be locked
3669 1.30.2.2 thorpej */
3670 1.30.2.2 thorpej
3671 1.30.2.2 thorpej static struct vm_page *
3672 1.30.2.2 thorpej pmap_get_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
3673 1.30.2.2 thorpej {
3674 1.30.2.2 thorpej struct vm_page *ptp;
3675 1.30.2.2 thorpej
3676 1.30.2.2 thorpej if (pmap_pde_page(pmap_pde(pmap, va))) {
3677 1.30.2.2 thorpej
3678 1.30.2.2 thorpej /* valid... check hint (saves us a PA->PG lookup) */
3679 1.30.2.2 thorpej #if 0
3680 1.30.2.2 thorpej if (pmap->pm_ptphint &&
3681 1.30.2.2 thorpej ((unsigned)pmap_pde(pmap, va) & PG_FRAME) ==
3682 1.30.2.2 thorpej VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3683 1.30.2.2 thorpej return (pmap->pm_ptphint);
3684 1.30.2.2 thorpej #endif
3685 1.30.2.2 thorpej ptp = uvm_pagelookup(&pmap->pm_obj, va);
3686 1.30.2.2 thorpej #ifdef DIAGNOSTIC
3687 1.30.2.2 thorpej if (ptp == NULL)
3688 1.30.2.2 thorpej panic("pmap_get_ptp: unmanaged user PTP");
3689 1.30.2.2 thorpej #endif
3690 1.30.2.2 thorpej 		/* pmap->pm_ptphint = ptp; */
3691 1.30.2.2 thorpej return(ptp);
3692 1.30.2.2 thorpej }
3693 1.30.2.2 thorpej
3694 1.30.2.2 thorpej /* allocate a new PTP (updates ptphint) */
3695 1.30.2.2 thorpej return(pmap_alloc_ptp(pmap, va, just_try));
3696 1.30.2.2 thorpej }
3697 1.30.2.2 thorpej
3698 1.30.2.2 thorpej /*
3699 1.30.2.2 thorpej * pmap_alloc_ptp: allocate a PTP for a PMAP
3700 1.30.2.2 thorpej *
3701 1.30.2.2 thorpej * => pmap should already be locked by caller
3702 1.30.2.2 thorpej * => we use the ptp's wire_count to count the number of active mappings
3703 1.30.2.2 thorpej * in the PTP (we start it at one to prevent any chance this PTP
3704 1.30.2.2 thorpej * will ever leak onto the active/inactive queues)
3705 1.30.2.2 thorpej */
3706 1.30.2.2 thorpej
3707 1.30.2.2 thorpej /*__inline */ static struct vm_page *
3708 1.30.2.2 thorpej pmap_alloc_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
3709 1.30.2.2 thorpej {
3710 1.30.2.2 thorpej struct vm_page *ptp;
3711 1.30.2.2 thorpej
3712 1.30.2.2 thorpej ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3713 1.30.2.2 thorpej UVM_PGA_USERESERVE|UVM_PGA_ZERO);
3714 1.30.2.2 thorpej if (ptp == NULL) {
3715 1.30.2.2 thorpej if (just_try)
3716 1.30.2.2 thorpej return (NULL);
3717 1.30.2.2 thorpej
3718 1.30.2.2 thorpej ptp = pmap_steal_ptp(pmap, va);
3719 1.30.2.2 thorpej
3720 1.30.2.2 thorpej if (ptp == NULL)
3721 1.30.2.2 thorpej return (NULL);
3722 1.30.2.2 thorpej /* Stole a page, zero it. */
3723 1.30.2.2 thorpej pmap_zero_page(VM_PAGE_TO_PHYS(ptp));
3724 1.30.2.2 thorpej }
3725 1.30.2.2 thorpej
3726 1.30.2.2 thorpej /* got one! */
3727 1.30.2.2 thorpej ptp->flags &= ~PG_BUSY; /* never busy */
3728 1.30.2.2 thorpej ptp->wire_count = 1; /* no mappings yet */
3729 1.30.2.2 thorpej pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
3730 1.30.2.2 thorpej pmap->pm_stats.resident_count++; /* count PTP as resident */
3731 1.30.2.2 thorpej 	/* pmap->pm_ptphint = ptp; */
3732 1.30.2.2 thorpej return (ptp);
3733 1.30.2.2 thorpej }
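
/*
 * Illustrative sketch, not compiled: how a caller might use
 * pmap_get_ptp() together with the wire_count convention above.
 * The error policy shown here is an assumption for the example,
 * not a description of the real callers in this file.
 */
#if 0
	struct vm_page *ptp;

	ptp = pmap_get_ptp(pmap, va, FALSE);	/* find or allocate a PTP */
	if (ptp == NULL)
		return (ENOMEM);		/* hypothetical failure path */
	ptp->wire_count++;	/* account for one more active mapping */
#endif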
3734 1.30.2.2 thorpej
3735 1.30.2.5 nathanw /************************ Bootstrapping routines ****************************/
3736 1.30.2.5 nathanw
3737 1.30.2.5 nathanw /*
3738 1.30.2.5 nathanw * This list exists for the benefit of pmap_map_chunk(). It keeps track
3739 1.30.2.5 nathanw * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3740 1.30.2.5 nathanw * find them as necessary.
3741 1.30.2.5 nathanw *
3742 1.30.2.5 nathanw * Note that the data on this list is not valid after initarm() returns.
3743 1.30.2.5 nathanw */
3744 1.30.2.5 nathanw SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3745 1.30.2.5 nathanw
3746 1.30.2.5 nathanw static vaddr_t
3747 1.30.2.5 nathanw kernel_pt_lookup(paddr_t pa)
3748 1.30.2.5 nathanw {
3749 1.30.2.5 nathanw pv_addr_t *pv;
3750 1.30.2.5 nathanw
3751 1.30.2.5 nathanw SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3752 1.30.2.5 nathanw if (pv->pv_pa == pa)
3753 1.30.2.5 nathanw return (pv->pv_va);
3754 1.30.2.5 nathanw }
3755 1.30.2.5 nathanw return (0);
3756 1.30.2.5 nathanw }
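
/*
 * Illustrative bootstrap sequence (the names "l1pagetable" and
 * "kernel_pt_table" are placeholders of the kind a port's initarm()
 * would define, not symbols from this file).  pmap_link_l2pt() puts
 * each L2 table on kernel_pt_list, after which kernel_pt_lookup()
 * can translate its physical address back to the virtual address
 * initarm() uses to fill it in:
 */
#if 0
	pmap_link_l2pt(l1pagetable, KERNEL_BASE, &kernel_pt_table[0]);
	/* ... later, e.g. from pmap_map_chunk() ... */
	pt_va = kernel_pt_lookup(kernel_pt_table[0].pv_pa);
#endif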
3757 1.30.2.5 nathanw
3758 1.30.2.5 nathanw /*
3759 1.30.2.5 nathanw * pmap_map_section:
3760 1.30.2.5 nathanw *
3761 1.30.2.5 nathanw * Create a single section mapping.
3762 1.30.2.5 nathanw */
3763 1.30.2.5 nathanw void
3764 1.30.2.5 nathanw pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3765 1.30.2.5 nathanw {
3766 1.30.2.5 nathanw pd_entry_t *pde = (pd_entry_t *) l1pt;
3767 1.30.2.5 nathanw pd_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3768 1.30.2.5 nathanw pd_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
3769 1.30.2.5 nathanw
3770 1.30.2.5 nathanw KASSERT(((va | pa) & (L1_SEC_SIZE - 1)) == 0);
3771 1.30.2.5 nathanw
3772 1.30.2.5 nathanw pde[va >> PDSHIFT] = L1_SECPTE(pa & PD_MASK, ap, fl);
3773 1.30.2.5 nathanw }
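
/*
 * Worked example (assuming PDSHIFT == 20, i.e. 1MB sections): the call
 * below writes L1 slot 0xc01 (0xc0100000 >> 20) with a section
 * descriptor for the 1MB frame at physical 0x00100000.  Both addresses
 * must be section-aligned, as the KASSERT above enforces.
 */
#if 0
	pmap_map_section(l1pt, 0xc0100000, 0x00100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif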
3774 1.30.2.5 nathanw
3775 1.30.2.5 nathanw /*
3776 1.30.2.5 nathanw * pmap_map_entry:
3777 1.30.2.5 nathanw *
3778 1.30.2.5 nathanw * Create a single page mapping.
3779 1.30.2.5 nathanw */
3780 1.30.2.5 nathanw void
3781 1.30.2.5 nathanw pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3782 1.30.2.5 nathanw {
3783 1.30.2.5 nathanw pd_entry_t *pde = (pd_entry_t *) l1pt;
3784 1.30.2.5 nathanw pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3785 1.30.2.5 nathanw pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
3786 1.30.2.5 nathanw pt_entry_t *pte;
3787 1.30.2.5 nathanw
3788 1.30.2.5 nathanw KASSERT(((va | pa) & PGOFSET) == 0);
3789 1.30.2.5 nathanw
3790 1.30.2.5 nathanw if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
3791 1.30.2.5 nathanw panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3792 1.30.2.5 nathanw
3793 1.30.2.5 nathanw pte = (pt_entry_t *)
3794 1.30.2.5 nathanw kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
3795 1.30.2.5 nathanw if (pte == NULL)
3796 1.30.2.5 nathanw panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3797 1.30.2.5 nathanw
3798 1.30.2.5 nathanw pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
3799 1.30.2.5 nathanw }
3800 1.30.2.5 nathanw
3801 1.30.2.5 nathanw /*
3802 1.30.2.5 nathanw * pmap_link_l2pt:
3803 1.30.2.5 nathanw *
3804 1.30.2.5 nathanw * Link the L2 page table specified by "pa" into the L1
3805 1.30.2.5 nathanw * page table at the slot for "va".
3806 1.30.2.5 nathanw */
3807 1.30.2.5 nathanw void
3808 1.30.2.5 nathanw pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
3809 1.30.2.5 nathanw {
3810 1.30.2.5 nathanw pd_entry_t *pde = (pd_entry_t *) l1pt;
3811 1.30.2.5 nathanw u_int slot = va >> PDSHIFT;
3812 1.30.2.5 nathanw
3813 1.30.2.5 nathanw KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3814 1.30.2.5 nathanw
3815 1.30.2.5 nathanw pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
3816 1.30.2.5 nathanw pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
3817 1.30.2.5 nathanw pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
3818 1.30.2.5 nathanw pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
3819 1.30.2.5 nathanw
3820 1.30.2.5 nathanw SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
3821 1.30.2.5 nathanw }
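
/*
 * Why four consecutive L1 slots: each ARM L1 entry covers 1MB of VA and
 * points at a 1KB coarse L2 table (256 entries), but this pmap manages
 * L2 tables as whole 4KB pages covering 4MB.  The four descriptors
 * written above point at the four 1KB quarters of that page, which is
 * why the lookup code in this file can index the page as one
 * 1024-entry table with "(va >> PGSHIFT) & 0x3ff".
 */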
3822 1.30.2.5 nathanw
3823 1.30.2.5 nathanw /*
3824 1.30.2.5 nathanw * pmap_map_chunk:
3825 1.30.2.5 nathanw *
3826 1.30.2.5 nathanw * Map a chunk of memory using the most efficient mappings
3827 1.30.2.5 nathanw * possible (section, large page, small page) into the
3828 1.30.2.5 nathanw * provided L1 and L2 tables at the specified virtual address.
3829 1.30.2.5 nathanw */
3830 1.30.2.5 nathanw vsize_t
3831 1.30.2.5 nathanw pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3832 1.30.2.5 nathanw int prot, int cache)
3833 1.30.2.5 nathanw {
3834 1.30.2.5 nathanw pd_entry_t *pde = (pd_entry_t *) l1pt;
3835 1.30.2.5 nathanw pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3836 1.30.2.5 nathanw pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
3837 1.30.2.5 nathanw pt_entry_t *pte;
3838 1.30.2.5 nathanw vsize_t resid;
3839 1.30.2.5 nathanw int i;
3840 1.30.2.5 nathanw
3841 1.30.2.5 nathanw resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3842 1.30.2.5 nathanw
3843 1.30.2.5 nathanw if (l1pt == 0)
3844 1.30.2.5 nathanw panic("pmap_map_chunk: no L1 table provided");
3845 1.30.2.5 nathanw
3846 1.30.2.5 nathanw #ifdef VERBOSE_INIT_ARM
3847 1.30.2.5 nathanw printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3848 1.30.2.5 nathanw "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3849 1.30.2.5 nathanw #endif
3850 1.30.2.5 nathanw
3851 1.30.2.5 nathanw size = resid;
3852 1.30.2.5 nathanw
3853 1.30.2.5 nathanw while (resid > 0) {
3854 1.30.2.5 nathanw /* See if we can use a section mapping. */
3855 1.30.2.5 nathanw if (((pa | va) & (L1_SEC_SIZE - 1)) == 0 &&
3856 1.30.2.5 nathanw resid >= L1_SEC_SIZE) {
3857 1.30.2.5 nathanw #ifdef VERBOSE_INIT_ARM
3858 1.30.2.5 nathanw printf("S");
3859 1.30.2.5 nathanw #endif
3860 1.30.2.5 nathanw pde[va >> PDSHIFT] = L1_SECPTE(pa, ap, fl);
3861 1.30.2.5 nathanw va += L1_SEC_SIZE;
3862 1.30.2.5 nathanw pa += L1_SEC_SIZE;
3863 1.30.2.5 nathanw resid -= L1_SEC_SIZE;
3864 1.30.2.5 nathanw continue;
3865 1.30.2.5 nathanw }
3866 1.30.2.5 nathanw
3867 1.30.2.5 nathanw /*
3868 1.30.2.5 nathanw * Ok, we're going to use an L2 table. Make sure
3869 1.30.2.5 nathanw * one is actually in the corresponding L1 slot
3870 1.30.2.5 nathanw * for the current VA.
3871 1.30.2.5 nathanw */
3872 1.30.2.5 nathanw if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
3873 1.30.2.5 nathanw panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3874 1.30.2.5 nathanw
3875 1.30.2.5 nathanw pte = (pt_entry_t *)
3876 1.30.2.5 nathanw kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
3877 1.30.2.5 nathanw if (pte == NULL)
3878 1.30.2.5 nathanw 			panic("pmap_map_chunk: can't find L2 table for VA "
3879 1.30.2.5 nathanw 			    "0x%08lx", va);
3880 1.30.2.5 nathanw
3881 1.30.2.5 nathanw 		/* See if we can use an L2 large page mapping. */
3882 1.30.2.5 nathanw if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
3883 1.30.2.5 nathanw resid >= L2_LPAGE_SIZE) {
3884 1.30.2.5 nathanw #ifdef VERBOSE_INIT_ARM
3885 1.30.2.5 nathanw printf("L");
3886 1.30.2.5 nathanw #endif
3887 1.30.2.5 nathanw for (i = 0; i < 16; i++) {
3888 1.30.2.5 nathanw pte[((va >> PGSHIFT) & 0x3f0) + i] =
3889 1.30.2.5 nathanw L2_LPTE(pa, ap, fl);
3890 1.30.2.5 nathanw }
3891 1.30.2.5 nathanw va += L2_LPAGE_SIZE;
3892 1.30.2.5 nathanw pa += L2_LPAGE_SIZE;
3893 1.30.2.5 nathanw resid -= L2_LPAGE_SIZE;
3894 1.30.2.5 nathanw continue;
3895 1.30.2.5 nathanw }
3896 1.30.2.5 nathanw
3897 1.30.2.5 nathanw /* Use a small page mapping. */
3898 1.30.2.5 nathanw #ifdef VERBOSE_INIT_ARM
3899 1.30.2.5 nathanw printf("P");
3900 1.30.2.5 nathanw #endif
3901 1.30.2.5 nathanw pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa, ap, fl);
3902 1.30.2.5 nathanw va += NBPG;
3903 1.30.2.5 nathanw pa += NBPG;
3904 1.30.2.5 nathanw resid -= NBPG;
3905 1.30.2.5 nathanw }
3906 1.30.2.5 nathanw #ifdef VERBOSE_INIT_ARM
3907 1.30.2.5 nathanw printf("\n");
3908 1.30.2.5 nathanw #endif
3909 1.30.2.5 nathanw return (size);
3910 1.30.2.5 nathanw }
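
/*
 * Illustrative use from a port's initarm() (hedged: "l1pagetable",
 * "physical_start" and "kerneltext_size" are placeholder names, not
 * symbols from this file).  Whatever cannot be covered by 1MB sections
 * or 64KB large pages falls back to 4KB small pages:
 */
#if 0
	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start,
	    kerneltext_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif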