1 1.116 jdolecek /* $NetBSD: pmap.c,v 1.116 2002/09/05 18:34:00 jdolecek Exp $ */
2 1.12 chris
3 1.12 chris /*
4 1.49 thorpej * Copyright (c) 2002 Wasabi Systems, Inc.
5 1.12 chris * Copyright (c) 2001 Richard Earnshaw
6 1.12 chris * Copyright (c) 2001 Christopher Gilbert
7 1.12 chris * All rights reserved.
8 1.12 chris *
9 1.12 chris * 1. Redistributions of source code must retain the above copyright
10 1.12 chris * notice, this list of conditions and the following disclaimer.
11 1.12 chris * 2. Redistributions in binary form must reproduce the above copyright
12 1.12 chris * notice, this list of conditions and the following disclaimer in the
13 1.12 chris * documentation and/or other materials provided with the distribution.
14 1.12 chris * 3. The name of the company nor the name of the author may be used to
15 1.12 chris * endorse or promote products derived from this software without specific
16 1.12 chris * prior written permission.
17 1.12 chris *
18 1.12 chris * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 1.12 chris * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 1.12 chris * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 1.12 chris * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
22 1.12 chris * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 1.12 chris * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 1.12 chris * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 1.12 chris * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 1.12 chris * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 1.12 chris * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 1.12 chris * SUCH DAMAGE.
29 1.12 chris */
30 1.1 matt
31 1.1 matt /*-
32 1.1 matt * Copyright (c) 1999 The NetBSD Foundation, Inc.
33 1.1 matt * All rights reserved.
34 1.1 matt *
35 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
36 1.1 matt * by Charles M. Hannum.
37 1.1 matt *
38 1.1 matt * Redistribution and use in source and binary forms, with or without
39 1.1 matt * modification, are permitted provided that the following conditions
40 1.1 matt * are met:
41 1.1 matt * 1. Redistributions of source code must retain the above copyright
42 1.1 matt * notice, this list of conditions and the following disclaimer.
43 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
44 1.1 matt * notice, this list of conditions and the following disclaimer in the
45 1.1 matt * documentation and/or other materials provided with the distribution.
46 1.1 matt * 3. All advertising materials mentioning features or use of this software
47 1.1 matt * must display the following acknowledgement:
48 1.1 matt * This product includes software developed by the NetBSD
49 1.1 matt * Foundation, Inc. and its contributors.
50 1.1 matt * 4. Neither the name of The NetBSD Foundation nor the names of its
51 1.1 matt * contributors may be used to endorse or promote products derived
52 1.1 matt * from this software without specific prior written permission.
53 1.1 matt *
54 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
65 1.1 matt */
66 1.1 matt
67 1.1 matt /*
68 1.1 matt * Copyright (c) 1994-1998 Mark Brinicombe.
69 1.1 matt * Copyright (c) 1994 Brini.
70 1.1 matt * All rights reserved.
71 1.1 matt *
72 1.1 matt * This code is derived from software written for Brini by Mark Brinicombe
73 1.1 matt *
74 1.1 matt * Redistribution and use in source and binary forms, with or without
75 1.1 matt * modification, are permitted provided that the following conditions
76 1.1 matt * are met:
77 1.1 matt * 1. Redistributions of source code must retain the above copyright
78 1.1 matt * notice, this list of conditions and the following disclaimer.
79 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
80 1.1 matt * notice, this list of conditions and the following disclaimer in the
81 1.1 matt * documentation and/or other materials provided with the distribution.
82 1.1 matt * 3. All advertising materials mentioning features or use of this software
83 1.1 matt * must display the following acknowledgement:
84 1.1 matt * This product includes software developed by Mark Brinicombe.
85 1.1 matt * 4. The name of the author may not be used to endorse or promote products
86 1.1 matt * derived from this software without specific prior written permission.
87 1.1 matt *
88 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
89 1.1 matt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
90 1.1 matt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
91 1.1 matt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
92 1.1 matt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
93 1.1 matt * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
94 1.1 matt * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95 1.1 matt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96 1.1 matt * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
97 1.1 matt * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 1.1 matt *
98 1.1 matt * RiscBSD kernel project
99 1.1 matt *
100 1.1 matt * pmap.c
101 1.1 matt *
102 1.1 matt * Machine dependent vm stuff
103 1.1 matt *
104 1.1 matt * Created : 20/09/94
105 1.1 matt */
106 1.1 matt
107 1.1 matt /*
108 1.1 matt * Performance improvements, UVM changes, overhauls and part-rewrites
109 1.1 matt * were contributed by Neil A. Carson <neil (at) causality.com>.
110 1.1 matt */
111 1.1 matt
112 1.1 matt /*
113 1.1 matt * The dram block info is currently referenced from the bootconfig.
114 1.1 matt * This should be placed in a separate structure.
115 1.1 matt */
116 1.1 matt
117 1.1 matt /*
118 1.1 matt * Special compilation symbols
119 1.1 matt * PMAP_DEBUG - Build in pmap_debug_level code
120 1.1 matt */
121 1.1 matt
122 1.1 matt /* Include header files */
123 1.1 matt
124 1.1 matt #include "opt_pmap_debug.h"
125 1.1 matt #include "opt_ddb.h"
126 1.1 matt
127 1.1 matt #include <sys/types.h>
128 1.1 matt #include <sys/param.h>
129 1.1 matt #include <sys/kernel.h>
130 1.1 matt #include <sys/systm.h>
131 1.1 matt #include <sys/proc.h>
132 1.1 matt #include <sys/malloc.h>
133 1.1 matt #include <sys/user.h>
134 1.10 chris #include <sys/pool.h>
135 1.16 chris #include <sys/cdefs.h>
136 1.16 chris
137 1.1 matt #include <uvm/uvm.h>
138 1.1 matt
139 1.1 matt #include <machine/bootconfig.h>
140 1.1 matt #include <machine/bus.h>
141 1.1 matt #include <machine/pmap.h>
142 1.1 matt #include <machine/pcb.h>
143 1.1 matt #include <machine/param.h>
144 1.32 thorpej #include <arm/arm32/katelib.h>
145 1.16 chris
146 1.116 jdolecek __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.116 2002/09/05 18:34:00 jdolecek Exp $");
147 1.116 jdolecek
148 1.1 matt #ifdef PMAP_DEBUG
149 1.1 matt #define PDEBUG(_lev_,_stat_) \
150 1.1 matt if (pmap_debug_level >= (_lev_)) \
151 1.1 matt ((_stat_))
152 1.1 matt int pmap_debug_level = -2;
153 1.48 chris void pmap_dump_pvlist(vaddr_t phys, char *m);
154 1.17 chris
155 1.17 chris /*
156 1.17 chris * for switching to potentially finer grained debugging
157 1.17 chris */
158 1.17 chris #define PDB_FOLLOW 0x0001
159 1.17 chris #define PDB_INIT 0x0002
160 1.17 chris #define PDB_ENTER 0x0004
161 1.17 chris #define PDB_REMOVE 0x0008
162 1.17 chris #define PDB_CREATE 0x0010
163 1.17 chris #define PDB_PTPAGE 0x0020
164 1.48 chris #define PDB_GROWKERN 0x0040
165 1.17 chris #define PDB_BITS 0x0080
166 1.17 chris #define PDB_COLLECT 0x0100
167 1.17 chris #define PDB_PROTECT 0x0200
168 1.48 chris #define PDB_MAP_L1 0x0400
169 1.17 chris #define PDB_BOOTSTRAP 0x1000
170 1.17 chris #define PDB_PARANOIA 0x2000
171 1.17 chris #define PDB_WIRING 0x4000
172 1.17 chris #define PDB_PVDUMP 0x8000
173 1.17 chris
174 1.17 chris int debugmap = 0;
175 1.17 chris int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
176 1.17 chris #define NPDEBUG(_lev_,_stat_) \
177 1.17 chris if (pmapdebug & (_lev_)) \
178 1.17 chris ((_stat_))
179 1.17 chris
180 1.1 matt #else /* PMAP_DEBUG */
181 1.1 matt #define PDEBUG(_lev_,_stat_) /* Nothing */
182 1.48 chris #define NPDEBUG(_lev_,_stat_) /* Nothing */
183 1.1 matt #endif /* PMAP_DEBUG */
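/*
 * Example (illustrative only, not part of the original file): with a
 * PMAP_DEBUG kernel, a routine such as pmap_enter() could trace itself
 * at the PDB_ENTER level with
 *
 *	NPDEBUG(PDB_ENTER,
 *	    printf("pmap_enter: pmap=%p va=%08lx\n", pmap, va));
 *
 * The statement compiles away to nothing when PMAP_DEBUG is not defined.
 */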
184 1.1 matt
185 1.1 matt struct pmap kernel_pmap_store;
186 1.1 matt
187 1.10 chris /*
188 1.48 chris * linked list of all non-kernel pmaps
189 1.48 chris */
190 1.48 chris
191 1.69 thorpej static LIST_HEAD(, pmap) pmaps;
192 1.48 chris
193 1.48 chris /*
194 1.10 chris * pool that pmap structures are allocated from
195 1.10 chris */
196 1.10 chris
197 1.10 chris struct pool pmap_pmap_pool;
198 1.10 chris
199 1.111 thorpej /*
200 1.111 thorpej * pool/cache that PT-PT's are allocated from
201 1.111 thorpej */
202 1.111 thorpej
203 1.111 thorpej struct pool pmap_ptpt_pool;
204 1.111 thorpej struct pool_cache pmap_ptpt_cache;
205 1.111 thorpej u_int pmap_ptpt_cache_generation;
206 1.111 thorpej
207 1.111 thorpej static void *pmap_ptpt_page_alloc(struct pool *, int);
208 1.111 thorpej static void pmap_ptpt_page_free(struct pool *, void *);
209 1.111 thorpej
210 1.111 thorpej struct pool_allocator pmap_ptpt_allocator = {
211 1.111 thorpej pmap_ptpt_page_alloc, pmap_ptpt_page_free,
212 1.111 thorpej };
213 1.111 thorpej
214 1.111 thorpej static int pmap_ptpt_ctor(void *, void *, int);
215 1.111 thorpej
216 1.54 thorpej static pt_entry_t *csrc_pte, *cdst_pte;
217 1.54 thorpej static vaddr_t csrcp, cdstp;
218 1.54 thorpej
219 1.1 matt char *memhook;
220 1.1 matt extern caddr_t msgbufaddr;
221 1.1 matt
222 1.1 matt boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
223 1.17 chris /*
224 1.17 chris * locking data structures
225 1.17 chris */
226 1.1 matt
227 1.17 chris static struct lock pmap_main_lock;
228 1.17 chris static struct simplelock pvalloc_lock;
229 1.48 chris static struct simplelock pmaps_lock;
230 1.17 chris #ifdef LOCKDEBUG
231 1.17 chris #define PMAP_MAP_TO_HEAD_LOCK() \
232 1.17 chris (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
233 1.17 chris #define PMAP_MAP_TO_HEAD_UNLOCK() \
234 1.17 chris (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
235 1.17 chris
236 1.17 chris #define PMAP_HEAD_TO_MAP_LOCK() \
237 1.17 chris (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
238 1.17 chris #define PMAP_HEAD_TO_MAP_UNLOCK() \
239 1.17 chris (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
240 1.17 chris #else
241 1.17 chris #define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
242 1.17 chris #define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
243 1.17 chris #define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
244 1.17 chris #define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
245 1.17 chris #endif /* LOCKDEBUG */
246 1.17 chris
247 1.17 chris /*
248 1.17 chris * pv_page management structures: locked by pvalloc_lock
249 1.17 chris */
250 1.1 matt
251 1.17 chris TAILQ_HEAD(pv_pagelist, pv_page);
252 1.17 chris static struct pv_pagelist pv_freepages; /* list of pv_pages with free entrys */
253 1.17 chris static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
254 1.17 chris static int pv_nfpvents; /* # of free pv entries */
255 1.17 chris static struct pv_page *pv_initpage; /* bootstrap page from kernel_map */
256 1.17 chris static vaddr_t pv_cachedva; /* cached VA for later use */
257 1.17 chris
258 1.17 chris #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
259 1.17 chris #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
260 1.17 chris /* high water mark */
261 1.17 chris
262 1.17 chris /*
263 1.17 chris * local prototypes
264 1.17 chris */
265 1.17 chris
266 1.17 chris static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
267 1.17 chris static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
268 1.17 chris #define ALLOCPV_NEED 0 /* need PV now */
269 1.17 chris #define ALLOCPV_TRY 1 /* just try to allocate, don't steal */
270 1.17 chris #define ALLOCPV_NONEED 2 /* don't need PV, just growing cache */
271 1.17 chris static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
272 1.49 thorpej static void pmap_enter_pv __P((struct vm_page *,
273 1.17 chris struct pv_entry *, struct pmap *,
274 1.17 chris vaddr_t, struct vm_page *, int));
275 1.17 chris static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
276 1.17 chris static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
277 1.17 chris static void pmap_free_pv_doit __P((struct pv_entry *));
278 1.17 chris static void pmap_free_pvpage __P((void));
279 1.17 chris static boolean_t pmap_is_curpmap __P((struct pmap *));
280 1.49 thorpej static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
281 1.17 chris vaddr_t));
282 1.17 chris #define PMAP_REMOVE_ALL 0 /* remove all mappings */
283 1.17 chris #define PMAP_REMOVE_SKIPWIRED 1 /* skip wired mappings */
284 1.1 matt
285 1.49 thorpej static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
286 1.33 chris u_int, u_int));
287 1.33 chris
288 1.69 thorpej /*
289 1.69 thorpej * Structure that describes an L1 table.
290 1.69 thorpej */
291 1.69 thorpej struct l1pt {
292 1.69 thorpej SIMPLEQ_ENTRY(l1pt) pt_queue; /* Queue pointers */
293 1.69 thorpej struct pglist pt_plist; /* Allocated page list */
294 1.69 thorpej vaddr_t pt_va; /* Allocated virtual address */
295 1.69 thorpej int pt_flags; /* Flags */
296 1.69 thorpej };
297 1.69 thorpej #define PTFLAG_STATIC 0x01 /* Statically allocated */
298 1.69 thorpej #define PTFLAG_KPT 0x02 /* Kernel pt's are mapped */
299 1.69 thorpej #define PTFLAG_CLEAN 0x04 /* L1 is clean */
300 1.69 thorpej
301 1.33 chris static void pmap_free_l1pt __P((struct l1pt *));
302 1.33 chris static int pmap_allocpagedir __P((struct pmap *));
303 1.33 chris static int pmap_clean_page __P((struct pv_entry *, boolean_t));
304 1.49 thorpej static void pmap_remove_all __P((struct vm_page *));
305 1.33 chris
306 1.57 thorpej static struct vm_page *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
307 1.57 thorpej static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t));
308 1.49 thorpej __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
309 1.17 chris
310 1.2 matt extern paddr_t physical_start;
311 1.2 matt extern paddr_t physical_end;
312 1.1 matt extern unsigned int free_pages;
313 1.1 matt extern int max_processes;
314 1.1 matt
315 1.54 thorpej vaddr_t virtual_avail;
316 1.1 matt vaddr_t virtual_end;
317 1.48 chris vaddr_t pmap_curmaxkvaddr;
318 1.1 matt
319 1.1 matt vaddr_t avail_start;
320 1.1 matt vaddr_t avail_end;
321 1.1 matt
322 1.1 matt extern pv_addr_t systempage;
323 1.1 matt
324 1.1 matt /* Variables used by the L1 page table queue code */
325 1.1 matt SIMPLEQ_HEAD(l1pt_queue, l1pt);
326 1.73 thorpej static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
327 1.73 thorpej static int l1pt_static_queue_count; /* items in the static l1 queue */
328 1.73 thorpej static int l1pt_static_create_count; /* static l1 items created */
329 1.73 thorpej static struct l1pt_queue l1pt_queue; /* head of our l1 queue */
330 1.73 thorpej static int l1pt_queue_count; /* items in the l1 queue */
331 1.73 thorpej static int l1pt_create_count; /* stat - L1's create count */
332 1.73 thorpej static int l1pt_reuse_count; /* stat - L1's reused count */
333 1.1 matt
334 1.1 matt /* Local function prototypes (not used outside this file) */
335 1.15 chris void pmap_pinit __P((struct pmap *));
336 1.15 chris void pmap_freepagedir __P((struct pmap *));
337 1.1 matt
338 1.1 matt /* Other function prototypes */
339 1.1 matt extern void bzero_page __P((vaddr_t));
340 1.1 matt extern void bcopy_page __P((vaddr_t, vaddr_t));
341 1.1 matt
342 1.1 matt struct l1pt *pmap_alloc_l1pt __P((void));
343 1.15 chris static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
344 1.113 thorpej vaddr_t l2pa, int));
345 1.1 matt
346 1.11 chris static pt_entry_t *pmap_map_ptes __P((struct pmap *));
347 1.17 chris static void pmap_unmap_ptes __P((struct pmap *));
348 1.11 chris
349 1.49 thorpej __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
350 1.25 rearnsha pt_entry_t *, boolean_t));
351 1.49 thorpej static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
352 1.25 rearnsha pt_entry_t *, boolean_t));
353 1.49 thorpej static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
354 1.25 rearnsha pt_entry_t *, boolean_t));
355 1.11 chris
356 1.17 chris /*
357 1.17 chris * real definition of pv_entry.
358 1.17 chris */
359 1.17 chris
360 1.17 chris struct pv_entry {
361 1.17 chris struct pv_entry *pv_next; /* next pv_entry */
362 1.17 chris struct pmap *pv_pmap; /* pmap where mapping lies */
363 1.17 chris vaddr_t pv_va; /* virtual address for mapping */
364 1.17 chris int pv_flags; /* flags */
365 1.17 chris struct vm_page *pv_ptp; /* vm_page for the ptp */
366 1.17 chris };
367 1.17 chris
368 1.17 chris /*
369 1.17 chris * pv_entrys are dynamically allocated in chunks from a single page.
370 1.17 chris * we keep track of how many pv_entrys are in use for each page and
371 1.17 chris * we can free pv_entry pages if needed. there is one lock for the
372 1.17 chris * entire allocation system.
373 1.17 chris */
374 1.17 chris
375 1.17 chris struct pv_page_info {
376 1.17 chris TAILQ_ENTRY(pv_page) pvpi_list;
377 1.17 chris struct pv_entry *pvpi_pvfree;
378 1.17 chris int pvpi_nfree;
379 1.17 chris };
380 1.17 chris
381 1.17 chris /*
382 1.17 chris * number of pv_entry's in a pv_page
383 1.17 chris * (note: won't work on systems where NBPG isn't a constant)
384 1.17 chris */
385 1.17 chris
386 1.17 chris #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
387 1.17 chris sizeof(struct pv_entry))
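/*
 * For example (sizes are illustrative and depend on the compiler and
 * the 32-bit ARM ABI): with NBPG = 4096, a 16-byte pv_page_info and a
 * 20-byte pv_entry, PVE_PER_PVPAGE = (4096 - 16) / 20 = 204 entries
 * per pv_page.
 */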
388 1.17 chris
389 1.17 chris /*
390 1.17 chris * a pv_page: where pv_entrys are allocated from
391 1.17 chris */
392 1.17 chris
393 1.17 chris struct pv_page {
394 1.17 chris struct pv_page_info pvinfo;
395 1.17 chris struct pv_entry pvents[PVE_PER_PVPAGE];
396 1.17 chris };
397 1.17 chris
398 1.1 matt #ifdef MYCROFT_HACK
399 1.1 matt int mycroft_hack = 0;
400 1.1 matt #endif
401 1.1 matt
402 1.1 matt /* Function to set the debug level of the pmap code */
403 1.1 matt
404 1.1 matt #ifdef PMAP_DEBUG
405 1.1 matt void
406 1.73 thorpej pmap_debug(int level)
407 1.1 matt {
408 1.1 matt pmap_debug_level = level;
409 1.1 matt printf("pmap_debug: level=%d\n", pmap_debug_level);
410 1.1 matt }
411 1.1 matt #endif /* PMAP_DEBUG */
412 1.1 matt
413 1.22 chris __inline static boolean_t
414 1.17 chris pmap_is_curpmap(struct pmap *pmap)
415 1.17 chris {
416 1.58 thorpej
417 1.58 thorpej if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
418 1.58 thorpej pmap == pmap_kernel())
419 1.58 thorpej return (TRUE);
420 1.58 thorpej
421 1.58 thorpej return (FALSE);
422 1.17 chris }
423 1.1 matt
424 1.1 matt /*
425 1.113 thorpej * PTE_SYNC_CURRENT:
426 1.113 thorpej *
427 1.113 thorpej * Make sure the pte is flushed to RAM. If the pmap is
428 1.113 thorpej * not the current pmap, then also evict the pte from
429 1.113 thorpej * any cache lines.
430 1.113 thorpej */
431 1.113 thorpej #define PTE_SYNC_CURRENT(pmap, pte) \
432 1.113 thorpej do { \
433 1.113 thorpej if (pmap_is_curpmap(pmap)) \
434 1.113 thorpej PTE_SYNC(pte); \
435 1.113 thorpej else \
436 1.113 thorpej PTE_FLUSH(pte); \
437 1.113 thorpej } while (/*CONSTCOND*/0)
438 1.113 thorpej
439 1.113 thorpej /*
440 1.113 thorpej * PTE_FLUSH_ALT:
441 1.113 thorpej *
442 1.113 thorpej * Make sure the pte is not in any cache lines. We expect
443 1.113 thorpej * this to be used only when a pte has not been modified.
444 1.113 thorpej */
445 1.113 thorpej #define PTE_FLUSH_ALT(pmap, pte) \
446 1.113 thorpej do { \
447 1.113 thorpej if (pmap_is_curpmap(pmap) == 0) \
448 1.113 thorpej PTE_FLUSH(pte); \
449 1.113 thorpej } while (/*CONSTCOND*/0)
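/*
 * Illustrative use of the two macros (a sketch, not lifted from the
 * code below): after rewriting a PTE belonging to some pmap "pm"
 *
 *	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
 *	PTE_SYNC_CURRENT(pm, pte);
 *
 * and after merely reading a PTE of a pmap that may not be the
 * current one
 *
 *	opte = *pte;
 *	PTE_FLUSH_ALT(pm, pte);
 */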
450 1.113 thorpej
451 1.113 thorpej /*
452 1.17 chris * p v _ e n t r y f u n c t i o n s
453 1.17 chris */
454 1.17 chris
455 1.17 chris /*
456 1.17 chris * pv_entry allocation functions:
457 1.17 chris * the main pv_entry allocation functions are:
458 1.17 chris * pmap_alloc_pv: allocate a pv_entry structure
459 1.17 chris * pmap_free_pv: free one pv_entry
460 1.17 chris * pmap_free_pvs: free a list of pv_entrys
461 1.17 chris *
462 1.17 chris * the rest are helper functions
463 1.1 matt */
464 1.1 matt
465 1.1 matt /*
466 1.17 chris * pmap_alloc_pv: inline function to allocate a pv_entry structure
467 1.17 chris * => we lock pvalloc_lock
468 1.17 chris * => if we fail, we call out to pmap_alloc_pvpage
469 1.17 chris * => 3 modes:
470 1.17 chris * ALLOCPV_NEED = we really need a pv_entry, even if we have to steal it
471 1.17 chris * ALLOCPV_TRY = we want a pv_entry, but not enough to steal
472 1.17 chris * ALLOCPV_NONEED = we are trying to grow our free list, don't really need
473 1.17 chris * one now
474 1.17 chris *
475 1.17 chris * "try" is for optional functions like pmap_copy().
476 1.1 matt */
477 1.17 chris
478 1.17 chris __inline static struct pv_entry *
479 1.73 thorpej pmap_alloc_pv(struct pmap *pmap, int mode)
480 1.1 matt {
481 1.17 chris struct pv_page *pvpage;
482 1.17 chris struct pv_entry *pv;
483 1.17 chris
484 1.17 chris simple_lock(&pvalloc_lock);
485 1.17 chris
486 1.51 chris pvpage = TAILQ_FIRST(&pv_freepages);
487 1.51 chris
488 1.51 chris if (pvpage != NULL) {
489 1.17 chris pvpage->pvinfo.pvpi_nfree--;
490 1.17 chris if (pvpage->pvinfo.pvpi_nfree == 0) {
491 1.17 chris /* nothing left in this one? */
492 1.17 chris TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
493 1.17 chris }
494 1.17 chris pv = pvpage->pvinfo.pvpi_pvfree;
495 1.51 chris KASSERT(pv);
496 1.17 chris pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
497 1.17 chris pv_nfpvents--; /* took one from pool */
498 1.17 chris } else {
499 1.17 chris pv = NULL; /* need more of them */
500 1.17 chris }
501 1.17 chris
502 1.17 chris /*
503 1.17 chris * if below low water mark or we didn't get a pv_entry we try and
504 1.17 chris * create more pv_entrys ...
505 1.17 chris */
506 1.17 chris
507 1.17 chris if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
508 1.17 chris if (pv == NULL)
509 1.17 chris pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
510 1.17 chris mode : ALLOCPV_NEED);
511 1.17 chris else
512 1.17 chris (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
513 1.17 chris }
514 1.17 chris
515 1.17 chris simple_unlock(&pvalloc_lock);
516 1.17 chris return(pv);
517 1.17 chris }
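/*
 * Example (illustrative, not from the original): a caller that cannot
 * tolerate failure, such as one entering a managed mapping, would use
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
 *
 * whereas an optional caller would pass ALLOCPV_TRY and simply skip
 * its work if NULL comes back.
 */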
518 1.17 chris
519 1.17 chris /*
520 1.17 chris * pmap_alloc_pvpage: maybe allocate a new pvpage
521 1.17 chris *
522 1.17 chris * if need_entry is false: try and allocate a new pv_page
523 1.17 chris * if need_entry is true: try and allocate a new pv_page and return a
524 1.17 chris * new pv_entry from it. if we are unable to allocate a pv_page
525 1.17 chris * we make a last ditch effort to steal a pv_page from some other
526 1.17 chris * mapping. if that fails, we panic...
527 1.17 chris *
528 1.17 chris * => we assume that the caller holds pvalloc_lock
529 1.17 chris */
530 1.17 chris
531 1.17 chris static struct pv_entry *
532 1.73 thorpej pmap_alloc_pvpage(struct pmap *pmap, int mode)
533 1.17 chris {
534 1.17 chris struct vm_page *pg;
535 1.17 chris struct pv_page *pvpage;
536 1.1 matt struct pv_entry *pv;
537 1.17 chris int s;
538 1.17 chris
539 1.17 chris /*
540 1.17 chris * if we need_entry and we've got unused pv_pages, allocate from there
541 1.17 chris */
542 1.17 chris
543 1.51 chris pvpage = TAILQ_FIRST(&pv_unusedpgs);
544 1.51 chris if (mode != ALLOCPV_NONEED && pvpage != NULL) {
545 1.17 chris
546 1.17 chris /* move it to pv_freepages list */
547 1.17 chris TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
548 1.17 chris TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
549 1.17 chris
550 1.17 chris /* allocate a pv_entry */
551 1.17 chris pvpage->pvinfo.pvpi_nfree--; /* can't go to zero */
552 1.17 chris pv = pvpage->pvinfo.pvpi_pvfree;
553 1.51 chris KASSERT(pv);
554 1.17 chris pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
555 1.17 chris
556 1.17 chris pv_nfpvents--; /* took one from pool */
557 1.17 chris return(pv);
558 1.17 chris }
559 1.1 matt
560 1.1 matt /*
561 1.17 chris * see if we've got a cached unmapped VA that we can map a page in.
562 1.17 chris * if not, try to allocate one.
563 1.1 matt */
564 1.1 matt
565 1.23 chs
566 1.17 chris if (pv_cachedva == 0) {
567 1.23 chs s = splvm();
568 1.23 chs pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
569 1.17 chris PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
570 1.23 chs splx(s);
571 1.17 chris if (pv_cachedva == 0) {
572 1.17 chris return (NULL);
573 1.1 matt }
574 1.1 matt }
575 1.17 chris
576 1.23 chs pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
577 1.23 chs UVM_PGA_USERESERVE);
578 1.17 chris
579 1.17 chris if (pg == NULL)
580 1.17 chris return (NULL);
581 1.51 chris pg->flags &= ~PG_BUSY; /* never busy */
582 1.17 chris
583 1.17 chris /*
584 1.17 chris * add a mapping for our new pv_page and free its entrys (save one!)
585 1.17 chris *
586 1.17 chris * NOTE: If we are allocating a PV page for the kernel pmap, the
587 1.17 chris * pmap is already locked! (...but entering the mapping is safe...)
588 1.17 chris */
589 1.17 chris
590 1.51 chris pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
591 1.51 chris VM_PROT_READ|VM_PROT_WRITE);
592 1.19 chris pmap_update(pmap_kernel());
593 1.17 chris pvpage = (struct pv_page *) pv_cachedva;
594 1.17 chris pv_cachedva = 0;
595 1.17 chris return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
596 1.1 matt }
597 1.1 matt
598 1.1 matt /*
599 1.17 chris * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
600 1.17 chris *
601 1.17 chris * => caller must hold pvalloc_lock
602 1.17 chris * => if need_entry is true, we allocate and return one pv_entry
603 1.1 matt */
604 1.1 matt
605 1.17 chris static struct pv_entry *
606 1.73 thorpej pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
607 1.1 matt {
608 1.17 chris int tofree, lcv;
609 1.17 chris
610 1.17 chris /* do we need to return one? */
611 1.17 chris tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
612 1.1 matt
613 1.17 chris pvp->pvinfo.pvpi_pvfree = NULL;
614 1.17 chris pvp->pvinfo.pvpi_nfree = tofree;
615 1.17 chris for (lcv = 0 ; lcv < tofree ; lcv++) {
616 1.17 chris pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
617 1.17 chris pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
618 1.1 matt }
619 1.17 chris if (need_entry)
620 1.17 chris TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
621 1.17 chris else
622 1.17 chris TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
623 1.17 chris pv_nfpvents += tofree;
624 1.17 chris return((need_entry) ? &pvp->pvents[lcv] : NULL);
625 1.1 matt }
626 1.1 matt
627 1.17 chris /*
628 1.17 chris * pmap_free_pv_doit: actually free a pv_entry
629 1.17 chris *
630 1.17 chris * => do not call this directly! instead use either
631 1.17 chris * 1. pmap_free_pv ==> free a single pv_entry
632 1.17 chris * 2. pmap_free_pvs => free a list of pv_entrys
633 1.17 chris * => we must be holding pvalloc_lock
634 1.17 chris */
635 1.17 chris
636 1.17 chris __inline static void
637 1.73 thorpej pmap_free_pv_doit(struct pv_entry *pv)
638 1.1 matt {
639 1.17 chris struct pv_page *pvp;
640 1.1 matt
641 1.17 chris pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
642 1.17 chris pv_nfpvents++;
643 1.17 chris pvp->pvinfo.pvpi_nfree++;
644 1.1 matt
645 1.17 chris /* nfree == 1 => fully allocated page just became partly allocated */
646 1.17 chris if (pvp->pvinfo.pvpi_nfree == 1) {
647 1.17 chris TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
648 1.1 matt }
649 1.1 matt
650 1.17 chris /* free it */
651 1.17 chris pv->pv_next = pvp->pvinfo.pvpi_pvfree;
652 1.17 chris pvp->pvinfo.pvpi_pvfree = pv;
653 1.1 matt
654 1.17 chris /*
655 1.17 chris * are all pv_page's pv_entry's free? move it to unused queue.
656 1.17 chris */
657 1.1 matt
658 1.17 chris if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
659 1.17 chris TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
660 1.17 chris TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
661 1.1 matt }
662 1.1 matt }
663 1.1 matt
664 1.1 matt /*
665 1.17 chris * pmap_free_pv: free a single pv_entry
666 1.17 chris *
667 1.17 chris * => we gain the pvalloc_lock
668 1.1 matt */
669 1.1 matt
670 1.17 chris __inline static void
671 1.73 thorpej pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
672 1.1 matt {
673 1.17 chris simple_lock(&pvalloc_lock);
674 1.17 chris pmap_free_pv_doit(pv);
675 1.17 chris
676 1.17 chris /*
677 1.17 chris * Can't free the PV page if the PV entries were associated with
678 1.17 chris * the kernel pmap; the pmap is already locked.
679 1.17 chris */
680 1.51 chris if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
681 1.17 chris pmap != pmap_kernel())
682 1.17 chris pmap_free_pvpage();
683 1.17 chris
684 1.17 chris simple_unlock(&pvalloc_lock);
685 1.17 chris }
686 1.1 matt
687 1.17 chris /*
688 1.17 chris * pmap_free_pvs: free a list of pv_entrys
689 1.17 chris *
690 1.17 chris * => we gain the pvalloc_lock
691 1.17 chris */
692 1.1 matt
693 1.17 chris __inline static void
694 1.73 thorpej pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
695 1.17 chris {
696 1.17 chris struct pv_entry *nextpv;
697 1.1 matt
698 1.17 chris simple_lock(&pvalloc_lock);
699 1.1 matt
700 1.17 chris for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
701 1.17 chris nextpv = pvs->pv_next;
702 1.17 chris pmap_free_pv_doit(pvs);
703 1.1 matt }
704 1.1 matt
705 1.17 chris /*
706 1.17 chris * Can't free the PV page if the PV entries were associated with
707 1.17 chris * the kernel pmap; the pmap is already locked.
708 1.17 chris */
709 1.51 chris if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
710 1.17 chris pmap != pmap_kernel())
711 1.17 chris pmap_free_pvpage();
712 1.1 matt
713 1.17 chris simple_unlock(&pvalloc_lock);
714 1.1 matt }
715 1.1 matt
716 1.1 matt
717 1.1 matt /*
718 1.17 chris * pmap_free_pvpage: try and free an unused pv_page structure
719 1.17 chris *
720 1.17 chris * => assume caller is holding the pvalloc_lock and that
721 1.17 chris * there is a page on the pv_unusedpgs list
722 1.17 chris * => if we can't get a lock on the kmem_map we try again later
723 1.1 matt */
724 1.1 matt
725 1.17 chris static void
726 1.73 thorpej pmap_free_pvpage(void)
727 1.1 matt {
728 1.17 chris int s;
729 1.17 chris struct vm_map *map;
730 1.17 chris struct vm_map_entry *dead_entries;
731 1.17 chris struct pv_page *pvp;
732 1.17 chris
733 1.17 chris s = splvm(); /* protect kmem_map */
734 1.1 matt
735 1.51 chris pvp = TAILQ_FIRST(&pv_unusedpgs);
736 1.1 matt
737 1.1 matt /*
738 1.17 chris * note: watch out for pv_initpage which is allocated out of
739 1.17 chris * kernel_map rather than kmem_map.
740 1.1 matt */
741 1.17 chris if (pvp == pv_initpage)
742 1.17 chris map = kernel_map;
743 1.17 chris else
744 1.17 chris map = kmem_map;
745 1.17 chris if (vm_map_lock_try(map)) {
746 1.17 chris
747 1.17 chris /* remove pvp from pv_unusedpgs */
748 1.17 chris TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
749 1.17 chris
750 1.17 chris /* unmap the page */
751 1.17 chris dead_entries = NULL;
752 1.17 chris uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
753 1.17 chris &dead_entries);
754 1.17 chris vm_map_unlock(map);
755 1.17 chris
756 1.17 chris if (dead_entries != NULL)
757 1.17 chris uvm_unmap_detach(dead_entries, 0);
758 1.1 matt
759 1.17 chris pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
760 1.1 matt }
761 1.17 chris if (pvp == pv_initpage)
762 1.17 chris /* no more initpage, we've freed it */
763 1.17 chris pv_initpage = NULL;
764 1.1 matt
765 1.1 matt splx(s);
766 1.1 matt }
767 1.1 matt
768 1.1 matt /*
769 1.17 chris * main pv_entry manipulation functions:
770 1.49 thorpej * pmap_enter_pv: enter a mapping onto a vm_page list
771 1.49 thorpej * pmap_remove_pv: remove a mapping from a vm_page list
772 1.17 chris *
773 1.17 chris * NOTE: pmap_enter_pv expects to lock the pvh itself
774 1.17 chris * pmap_remove_pv expects the caller to lock the pvh before calling
775 1.17 chris */
776 1.17 chris
777 1.17 chris /*
778 1.49 thorpej * pmap_enter_pv: enter a mapping onto a vm_page list
779 1.17 chris *
780 1.17 chris * => caller should hold the proper lock on pmap_main_lock
781 1.17 chris * => caller should have pmap locked
782 1.49 thorpej * => we will gain the lock on the vm_page and allocate the new pv_entry
783 1.17 chris * => caller should adjust ptp's wire_count before calling
784 1.17 chris * => caller should not adjust pmap's wire_count
785 1.17 chris */
786 1.17 chris
787 1.17 chris __inline static void
788 1.73 thorpej pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
789 1.73 thorpej vaddr_t va, struct vm_page *ptp, int flags)
790 1.17 chris {
791 1.17 chris pve->pv_pmap = pmap;
792 1.17 chris pve->pv_va = va;
793 1.17 chris pve->pv_ptp = ptp; /* NULL for kernel pmap */
794 1.17 chris pve->pv_flags = flags;
795 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */
796 1.49 thorpej pve->pv_next = pg->mdpage.pvh_list; /* add to ... */
797 1.49 thorpej pg->mdpage.pvh_list = pve; /* ... locked list */
798 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock); /* unlock, done! */
799 1.78 thorpej if (pve->pv_flags & PVF_WIRED)
800 1.17 chris ++pmap->pm_stats.wired_count;
801 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
802 1.105 thorpej {
803 1.105 thorpej int s = splhigh();
804 1.105 thorpej if (pve->pv_flags & PVF_WRITE)
805 1.105 thorpej pg->mdpage.rw_mappings++;
806 1.105 thorpej else
807 1.105 thorpej pg->mdpage.ro_mappings++;
808 1.105 thorpej if (pg->mdpage.rw_mappings != 0 &&
809 1.105 thorpej (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
810 1.105 thorpej printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
811 1.105 thorpej pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
812 1.105 thorpej pg->mdpage.krw_mappings);
813 1.105 thorpej }
814 1.105 thorpej splx(s);
815 1.105 thorpej }
816 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
817 1.17 chris }
818 1.17 chris
819 1.17 chris /*
820 1.17 chris * pmap_remove_pv: try to remove a mapping from a pv_list
821 1.17 chris *
822 1.17 chris * => caller should hold proper lock on pmap_main_lock
823 1.17 chris * => pmap should be locked
824 1.49 thorpej * => caller should hold lock on vm_page [so that attrs can be adjusted]
825 1.17 chris * => caller should adjust ptp's wire_count and free PTP if needed
826 1.17 chris * => caller should NOT adjust pmap's wire_count
827 1.17 chris * => we return the removed pve
828 1.17 chris */
829 1.17 chris
830 1.17 chris __inline static struct pv_entry *
831 1.73 thorpej pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
832 1.17 chris {
833 1.17 chris struct pv_entry *pve, **prevptr;
834 1.17 chris
835 1.49 thorpej prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */
836 1.17 chris pve = *prevptr;
837 1.17 chris while (pve) {
838 1.17 chris if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
839 1.17 chris *prevptr = pve->pv_next; /* remove it! */
840 1.78 thorpej if (pve->pv_flags & PVF_WIRED)
841 1.17 chris --pmap->pm_stats.wired_count;
842 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
843 1.105 thorpej {
844 1.105 thorpej int s = splhigh();
845 1.105 thorpej if (pve->pv_flags & PVF_WRITE) {
846 1.105 thorpej KASSERT(pg->mdpage.rw_mappings != 0);
847 1.105 thorpej pg->mdpage.rw_mappings--;
848 1.105 thorpej } else {
849 1.105 thorpej KASSERT(pg->mdpage.ro_mappings != 0);
850 1.105 thorpej pg->mdpage.ro_mappings--;
851 1.105 thorpej }
852 1.105 thorpej splx(s);
853 1.105 thorpej }
854 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
855 1.17 chris break;
856 1.17 chris }
857 1.17 chris prevptr = &pve->pv_next; /* previous pointer */
858 1.17 chris pve = pve->pv_next; /* advance */
859 1.17 chris }
860 1.17 chris return(pve); /* return removed pve */
861 1.17 chris }
862 1.17 chris
863 1.17 chris /*
864 1.17 chris *
865 1.17 chris * pmap_modify_pv: Update pv flags
866 1.17 chris *
867 1.49 thorpej * => caller should hold lock on vm_page [so that attrs can be adjusted]
868 1.17 chris * => caller should NOT adjust pmap's wire_count
869 1.29 rearnsha * => caller must call pmap_vac_me_harder() if writable status of a page
870 1.29 rearnsha * may have changed.
871 1.17 chris * => we return the old flags
872 1.17 chris *
873 1.1 matt * Modify a physical-virtual mapping in the pv table
874 1.1 matt */
875 1.1 matt
876 1.73 thorpej static /* __inline */ u_int
877 1.73 thorpej pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
878 1.73 thorpej u_int bic_mask, u_int eor_mask)
879 1.1 matt {
880 1.1 matt struct pv_entry *npv;
881 1.1 matt u_int flags, oflags;
882 1.1 matt
883 1.1 matt /*
884 1.1 matt * There is at least one VA mapping this page.
885 1.1 matt */
886 1.1 matt
887 1.49 thorpej for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
888 1.1 matt if (pmap == npv->pv_pmap && va == npv->pv_va) {
889 1.1 matt oflags = npv->pv_flags;
890 1.1 matt npv->pv_flags = flags =
891 1.1 matt ((oflags & ~bic_mask) ^ eor_mask);
892 1.78 thorpej if ((flags ^ oflags) & PVF_WIRED) {
893 1.78 thorpej if (flags & PVF_WIRED)
894 1.1 matt ++pmap->pm_stats.wired_count;
895 1.1 matt else
896 1.1 matt --pmap->pm_stats.wired_count;
897 1.1 matt }
898 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
899 1.105 thorpej {
900 1.105 thorpej int s = splhigh();
901 1.105 thorpej if ((flags ^ oflags) & PVF_WRITE) {
902 1.105 thorpej if (flags & PVF_WRITE) {
903 1.105 thorpej pg->mdpage.rw_mappings++;
904 1.105 thorpej pg->mdpage.ro_mappings--;
905 1.105 thorpej if (pg->mdpage.rw_mappings != 0 &&
906 1.105 thorpej (pg->mdpage.kro_mappings != 0 ||
907 1.105 thorpej pg->mdpage.krw_mappings != 0)) {
908 1.105 thorpej printf("pmap_modify_pv: rw %u, "
909 1.105 thorpej "kro %u, krw %u\n",
910 1.105 thorpej pg->mdpage.rw_mappings,
911 1.105 thorpej pg->mdpage.kro_mappings,
912 1.105 thorpej pg->mdpage.krw_mappings);
913 1.105 thorpej }
914 1.105 thorpej } else {
915 1.105 thorpej KASSERT(pg->mdpage.rw_mappings != 0);
916 1.105 thorpej pg->mdpage.rw_mappings--;
917 1.105 thorpej pg->mdpage.ro_mappings++;
918 1.105 thorpej }
919 1.105 thorpej }
920 1.105 thorpej splx(s);
921 1.105 thorpej }
922 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
923 1.1 matt return (oflags);
924 1.1 matt }
925 1.1 matt }
926 1.1 matt return (0);
927 1.1 matt }
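/*
 * Example (illustrative, not from the original): clearing the wired
 * attribute of an existing mapping, as an unwiring caller might do:
 *
 *	oflags = pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
 *
 * PVF_WIRED is cleared via bic_mask, nothing is toggled via eor_mask,
 * and the previous flags are returned to the caller.
 */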
928 1.1 matt
929 1.1 matt /*
930 1.1 matt * Map the specified level 2 pagetable into the level 1 page table for
931 1.1 matt * the given pmap to cover a chunk of virtual address space starting from the
932 1.1 matt * address specified.
933 1.1 matt */
934 1.113 thorpej #define PMAP_PTP_SELFREF 0x01
935 1.113 thorpej #define PMAP_PTP_CACHEABLE 0x02
936 1.113 thorpej
937 1.73 thorpej static __inline void
938 1.113 thorpej pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, int flags)
939 1.1 matt {
940 1.1 matt vaddr_t ptva;
941 1.1 matt
942 1.115 thorpej KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
943 1.115 thorpej
944 1.1 matt /* Calculate the index into the L1 page table. */
945 1.115 thorpej ptva = va >> L1_S_SHIFT;
946 1.1 matt
947 1.1 matt /* Map page table into the L1. */
948 1.83 thorpej pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
949 1.83 thorpej pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
950 1.83 thorpej pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
951 1.83 thorpej pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
952 1.110 thorpej cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
953 1.1 matt
954 1.1 matt /* Map the page table into the page table area. */
955 1.113 thorpej if (flags & PMAP_PTP_SELFREF) {
956 1.83 thorpej *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
957 1.113 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
958 1.113 thorpej ((flags & PMAP_PTP_CACHEABLE) ? pte_l2_s_cache_mode : 0);
959 1.113 thorpej PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
960 1.113 thorpej }
961 1.1 matt }
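/*
 * Illustrative call (a sketch, not copied from this file): hooking a
 * newly allocated L2 page table "ptp" into a user pmap so that it is
 * both visible to the MMU and self-referenced through the PT-PT:
 *
 *	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp),
 *	    PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
 */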
962 1.1 matt
963 1.1 matt #if 0
964 1.73 thorpej static __inline void
965 1.73 thorpej pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
966 1.1 matt {
967 1.1 matt vaddr_t ptva;
968 1.1 matt
969 1.115 thorpej KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
970 1.115 thorpej
971 1.1 matt /* Calculate the index into the L1 page table. */
972 1.115 thorpej ptva = va >> L1_S_SHIFT;
973 1.1 matt
974 1.1 matt /* Unmap page table from the L1. */
975 1.1 matt pmap->pm_pdir[ptva + 0] = 0;
976 1.1 matt pmap->pm_pdir[ptva + 1] = 0;
977 1.1 matt pmap->pm_pdir[ptva + 2] = 0;
978 1.1 matt pmap->pm_pdir[ptva + 3] = 0;
979 1.110 thorpej cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
980 1.1 matt
981 1.1 matt /* Unmap the page table from the page table area. */
982 1.1 matt *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
983 1.113 thorpej PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
984 1.1 matt }
985 1.1 matt #endif
986 1.1 matt
987 1.1 matt /*
988 1.1 matt * Used to map a range of physical addresses into kernel
989 1.1 matt * virtual address space.
990 1.1 matt *
991 1.1 matt * For now, VM is already on, we only need to map the
992 1.1 matt * specified memory.
993 1.100 thorpej *
994 1.100 thorpej * XXX This routine should eventually go away; it's only used
995 1.100 thorpej * XXX by machine-dependent crash dump code.
996 1.1 matt */
997 1.1 matt vaddr_t
998 1.73 thorpej pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
999 1.1 matt {
1000 1.100 thorpej pt_entry_t *pte;
1001 1.100 thorpej
1002 1.1 matt while (spa < epa) {
1003 1.100 thorpej pte = vtopte(va);
1004 1.100 thorpej
1005 1.100 thorpej *pte = L2_S_PROTO | spa |
1006 1.100 thorpej L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
1007 1.112 thorpej PTE_SYNC(pte);
1008 1.100 thorpej cpu_tlb_flushID_SE(va);
1009 1.1 matt va += NBPG;
1010 1.1 matt spa += NBPG;
1011 1.1 matt }
1012 1.19 chris pmap_update(pmap_kernel());
1013 1.1 matt return(va);
1014 1.1 matt }
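/*
 * Illustrative use (hypothetical caller): mapping one page of physical
 * memory read/write at the next free kernel VA, e.g. from crash dump
 * code:
 *
 *	va = pmap_map(va, pa, pa + NBPG, VM_PROT_READ | VM_PROT_WRITE);
 */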
1015 1.1 matt
1016 1.1 matt
1017 1.1 matt /*
1018 1.3 matt * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1019 1.1 matt *
1020 1.1 matt * bootstrap the pmap system. This is called from initarm and allows
1021 1.1 matt * the pmap system to initialise any structures it requires.
1022 1.1 matt *
1023 1.1 matt * Currently this sets up the kernel_pmap that is statically allocated
1024 1.1 matt * and also allocated virtual addresses for certain page hooks.
1025 1.1 matt * Currently the only one page hook is allocated that is used
1026 1.1 matt * to zero physical pages of memory.
1027 1.1 matt * It also initialises the start and end address of the kernel data space.
1028 1.1 matt */
1029 1.1 matt
1030 1.17 chris char *boot_head;
1031 1.1 matt
1032 1.1 matt void
1033 1.73 thorpej pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1034 1.1 matt {
1035 1.54 thorpej pt_entry_t *pte;
1036 1.1 matt
1037 1.15 chris pmap_kernel()->pm_pdir = kernel_l1pt;
1038 1.15 chris pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
1039 1.15 chris pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
1040 1.15 chris simple_lock_init(&pmap_kernel()->pm_lock);
1041 1.16 chris pmap_kernel()->pm_obj.pgops = NULL;
1042 1.16 chris TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
1043 1.16 chris pmap_kernel()->pm_obj.uo_npages = 0;
1044 1.16 chris pmap_kernel()->pm_obj.uo_refs = 1;
1045 1.1 matt
1046 1.54 thorpej virtual_avail = KERNEL_VM_BASE;
1047 1.74 thorpej virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
1048 1.1 matt
1049 1.1 matt /*
1050 1.54 thorpej * now we allocate the "special" VAs which are used for tmp mappings
1051 1.54 thorpej * by the pmap (and other modules). we allocate the VAs by advancing
1052 1.54 thorpej * virtual_avail (note that there are no pages mapped at these VAs).
1053 1.54 thorpej * we find the PTE that maps the allocated VA via the linear PTE
1054 1.54 thorpej * mapping.
1055 1.1 matt */
1056 1.1 matt
1057 1.54 thorpej pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
1058 1.54 thorpej
1059 1.54 thorpej csrcp = virtual_avail; csrc_pte = pte;
1060 1.54 thorpej virtual_avail += PAGE_SIZE; pte++;
1061 1.54 thorpej
1062 1.54 thorpej cdstp = virtual_avail; cdst_pte = pte;
1063 1.54 thorpej virtual_avail += PAGE_SIZE; pte++;
1064 1.54 thorpej
1065 1.54 thorpej memhook = (char *) virtual_avail; /* don't need pte */
1066 1.54 thorpej virtual_avail += PAGE_SIZE; pte++;
1067 1.54 thorpej
1068 1.54 thorpej msgbufaddr = (caddr_t) virtual_avail; /* don't need pte */
1069 1.54 thorpej virtual_avail += round_page(MSGBUFSIZE);
1070 1.54 thorpej pte += atop(round_page(MSGBUFSIZE));
1071 1.1 matt
1072 1.17 chris /*
1073 1.17 chris * init the static-global locks and global lists.
1074 1.17 chris */
1075 1.17 chris spinlockinit(&pmap_main_lock, "pmaplk", 0);
1076 1.17 chris simple_lock_init(&pvalloc_lock);
1077 1.48 chris simple_lock_init(&pmaps_lock);
1078 1.48 chris LIST_INIT(&pmaps);
1079 1.17 chris TAILQ_INIT(&pv_freepages);
1080 1.17 chris TAILQ_INIT(&pv_unusedpgs);
1081 1.1 matt
1082 1.10 chris /*
1083 1.10 chris * initialize the pmap pool.
1084 1.10 chris */
1085 1.10 chris
1086 1.10 chris pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1087 1.52 thorpej &pool_allocator_nointr);
1088 1.111 thorpej
1089 1.111 thorpej /*
1090 1.111 thorpej * initialize the PT-PT pool and cache.
1091 1.111 thorpej */
1092 1.111 thorpej
1093 1.111 thorpej pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
1094 1.111 thorpej &pmap_ptpt_allocator);
1095 1.111 thorpej pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
1096 1.111 thorpej pmap_ptpt_ctor, NULL, NULL);
1097 1.111 thorpej
1098 1.36 thorpej cpu_dcache_wbinv_all();
1099 1.1 matt }
1100 1.1 matt
1101 1.1 matt /*
1102 1.1 matt * void pmap_init(void)
1103 1.1 matt *
1104 1.1 matt * Initialize the pmap module.
1105 1.1 matt * Called by vm_init() in vm/vm_init.c in order to initialise
1106 1.1 matt * any structures that the pmap system needs to map virtual memory.
1107 1.1 matt */
1108 1.1 matt
1109 1.1 matt extern int physmem;
1110 1.1 matt
1111 1.1 matt void
1112 1.73 thorpej pmap_init(void)
1113 1.1 matt {
1114 1.1 matt
1115 1.1 matt /*
1116 1.1 matt * Set the available memory vars - These do not map to real memory
1117 1.1 matt * addresses and cannot as the physical memory is fragmented.
1118 1.1 matt * They are used by ps for %mem calculations.
1119 1.1 matt * One could argue whether this should be the entire memory or just
1120 1.1 matt * the memory that is useable in a user process.
1121 1.1 matt */
1122 1.1 matt avail_start = 0;
1123 1.1 matt avail_end = physmem * NBPG;
1124 1.1 matt
1125 1.17 chris /*
1126 1.17 chris * now we need to free enough pv_entry structures to allow us to get
1127 1.17 chris * the kmem_map/kmem_object allocated and inited (done after this
1128 1.17 chris * function is finished). to do this we allocate one bootstrap page out
1129 1.17 chris * of kernel_map and use it to provide an initial pool of pv_entry
1130 1.17 chris * structures. we never free this page.
1131 1.17 chris */
1132 1.17 chris
1133 1.17 chris pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
1134 1.17 chris if (pv_initpage == NULL)
1135 1.17 chris panic("pmap_init: pv_initpage");
1136 1.17 chris pv_cachedva = 0; /* a VA we have allocated but not used yet */
1137 1.17 chris pv_nfpvents = 0;
1138 1.17 chris (void) pmap_add_pvpage(pv_initpage, FALSE);
1139 1.17 chris
1140 1.1 matt pmap_initialized = TRUE;
1141 1.1 matt
1142 1.1 matt /* Initialise our L1 page table queues and counters */
1143 1.1 matt SIMPLEQ_INIT(&l1pt_static_queue);
1144 1.1 matt l1pt_static_queue_count = 0;
1145 1.1 matt l1pt_static_create_count = 0;
1146 1.1 matt SIMPLEQ_INIT(&l1pt_queue);
1147 1.1 matt l1pt_queue_count = 0;
1148 1.1 matt l1pt_create_count = 0;
1149 1.1 matt l1pt_reuse_count = 0;
1150 1.1 matt }
1151 1.1 matt
1152 1.1 matt /*
1153 1.1 matt * pmap_postinit()
1154 1.1 matt *
1155 1.1 matt * This routine is called after the vm and kmem subsystems have been
1156 1.1 matt * initialised. This allows the pmap code to perform any initialisation
1157 1.1 matt * that can only be done once the memory allocation is in place.
1158 1.1 matt */
1159 1.1 matt
1160 1.1 matt void
1161 1.73 thorpej pmap_postinit(void)
1162 1.1 matt {
1163 1.1 matt int loop;
1164 1.1 matt struct l1pt *pt;
1165 1.1 matt
1166 1.1 matt #ifdef PMAP_STATIC_L1S
1167 1.1 matt for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
1168 1.1 matt #else /* PMAP_STATIC_L1S */
1169 1.1 matt for (loop = 0; loop < max_processes; ++loop) {
1170 1.1 matt #endif /* PMAP_STATIC_L1S */
1171 1.1 matt /* Allocate a L1 page table */
1172 1.1 matt pt = pmap_alloc_l1pt();
1173 1.1 matt if (!pt)
1174 1.1 matt panic("Cannot allocate static L1 page tables\n");
1175 1.1 matt
1176 1.1 matt /* Clean it */
1177 1.81 thorpej bzero((void *)pt->pt_va, L1_TABLE_SIZE);
1178 1.1 matt pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
1179 1.1 matt /* Add the page table to the queue */
1180 1.1 matt SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
1181 1.1 matt ++l1pt_static_queue_count;
1182 1.1 matt ++l1pt_static_create_count;
1183 1.1 matt }
1184 1.1 matt }
1185 1.1 matt
1186 1.1 matt
1187 1.1 matt /*
1188 1.1 matt * Create and return a physical map.
1189 1.1 matt *
1190 1.1 matt * If the size specified for the map is zero, the map is an actual physical
1191 1.1 matt * map, and may be referenced by the hardware.
1192 1.1 matt *
1193 1.1 matt * If the size specified is non-zero, the map will be used in software only,
1194 1.1 matt * and is bounded by that size.
1195 1.1 matt */
1196 1.1 matt
1197 1.1 matt pmap_t
1198 1.73 thorpej pmap_create(void)
1199 1.1 matt {
1200 1.15 chris struct pmap *pmap;
1201 1.1 matt
1202 1.10 chris /*
1203 1.10 chris * Fetch pmap entry from the pool
1204 1.10 chris */
1205 1.10 chris
1206 1.10 chris pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1207 1.17 chris /* XXX is this really needed! */
1208 1.17 chris memset(pmap, 0, sizeof(*pmap));
1209 1.1 matt
1210 1.16 chris simple_lock_init(&pmap->pm_obj.vmobjlock);
1211 1.16 chris pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
1212 1.16 chris TAILQ_INIT(&pmap->pm_obj.memq);
1213 1.16 chris pmap->pm_obj.uo_npages = 0;
1214 1.16 chris pmap->pm_obj.uo_refs = 1;
1215 1.16 chris pmap->pm_stats.wired_count = 0;
1216 1.16 chris pmap->pm_stats.resident_count = 1;
1217 1.70 thorpej pmap->pm_ptphint = NULL;
1218 1.16 chris
1219 1.1 matt /* Now init the machine part of the pmap */
1220 1.1 matt pmap_pinit(pmap);
1221 1.1 matt return(pmap);
1222 1.1 matt }
1223 1.1 matt
1224 1.1 matt /*
1225 1.1 matt * pmap_alloc_l1pt()
1226 1.1 matt *
1227 1.1 matt * This routine allocates physical and virtual memory for a L1 page table
1228 1.1 matt * and wires it.
1229 1.1 matt * A l1pt structure is returned to describe the allocated page table.
1230 1.1 matt *
1231 1.1 matt * This routine is allowed to fail if the required memory cannot be allocated.
1232 1.1 matt * In this case NULL is returned.
1233 1.1 matt */
1234 1.1 matt
1235 1.1 matt struct l1pt *
1236 1.1 matt pmap_alloc_l1pt(void)
1237 1.1 matt {
1238 1.2 matt paddr_t pa;
1239 1.2 matt vaddr_t va;
1240 1.1 matt struct l1pt *pt;
1241 1.1 matt int error;
1242 1.9 chs struct vm_page *m;
1243 1.1 matt
1244 1.1 matt /* Allocate virtual address space for the L1 page table */
1245 1.81 thorpej va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
1246 1.1 matt if (va == 0) {
1247 1.1 matt #ifdef DIAGNOSTIC
1248 1.26 rearnsha PDEBUG(0,
1249 1.26 rearnsha printf("pmap: Cannot allocate pageable memory for L1\n"));
1250 1.1 matt #endif /* DIAGNOSTIC */
1251 1.1 matt return(NULL);
1252 1.1 matt }
1253 1.1 matt
1254 1.1 matt /* Allocate memory for the l1pt structure */
1255 1.1 matt pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1256 1.1 matt
1257 1.1 matt /*
1258 1.1 matt * Allocate pages from the VM system.
1259 1.1 matt */
1260 1.81 thorpej error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
1261 1.81 thorpej L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1262 1.1 matt if (error) {
1263 1.1 matt #ifdef DIAGNOSTIC
1264 1.26 rearnsha PDEBUG(0,
1265 1.26 rearnsha printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
1266 1.26 rearnsha error));
1267 1.1 matt #endif /* DIAGNOSTIC */
1268 1.1 matt /* Release the resources we already have claimed */
1269 1.1 matt free(pt, M_VMPMAP);
1270 1.81 thorpej uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
1271 1.1 matt return(NULL);
1272 1.1 matt }
1273 1.1 matt
1274 1.1 matt /* Map our physical pages into our virtual space */
1275 1.1 matt pt->pt_va = va;
1276 1.51 chris m = TAILQ_FIRST(&pt->pt_plist);
1277 1.81 thorpej while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
1278 1.1 matt pa = VM_PAGE_TO_PHYS(m);
1279 1.1 matt
1280 1.110 thorpej pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
1281 1.1 matt
1282 1.1 matt va += NBPG;
1283 1.1 matt m = m->pageq.tqe_next;
1284 1.1 matt }
1285 1.1 matt
1286 1.1 matt #ifdef DIAGNOSTIC
1287 1.1 matt if (m)
1288 1.1 matt panic("pmap_alloc_l1pt: pglist not empty\n");
1289 1.1 matt #endif /* DIAGNOSTIC */
1290 1.1 matt
1291 1.1 matt pt->pt_flags = 0;
1292 1.1 matt return(pt);
1293 1.1 matt }
1294 1.1 matt
1295 1.1 matt /*
1296 1.1 matt * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1297 1.1 matt */
1298 1.33 chris static void
1299 1.73 thorpej pmap_free_l1pt(struct l1pt *pt)
1300 1.1 matt {
1301 1.1 matt /* Separate the physical memory for the virtual space */
1302 1.81 thorpej pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
1303 1.19 chris pmap_update(pmap_kernel());
1304 1.1 matt
1305 1.1 matt /* Return the physical memory */
1306 1.1 matt uvm_pglistfree(&pt->pt_plist);
1307 1.1 matt
1308 1.1 matt /* Free the virtual space */
1309 1.81 thorpej uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
1310 1.1 matt
1311 1.1 matt /* Free the l1pt structure */
1312 1.1 matt free(pt, M_VMPMAP);
1313 1.1 matt }
1314 1.1 matt
1315 1.1 matt /*
1316 1.111 thorpej * pmap_ptpt_page_alloc:
1317 1.93 thorpej *
1318 1.111 thorpej * Back-end page allocator for the PT-PT pool.
1319 1.93 thorpej */
1320 1.111 thorpej static void *
1321 1.111 thorpej pmap_ptpt_page_alloc(struct pool *pp, int flags)
1322 1.93 thorpej {
1323 1.93 thorpej struct vm_page *pg;
1324 1.93 thorpej pt_entry_t *pte;
1325 1.111 thorpej vaddr_t va;
1326 1.93 thorpej
1327 1.111 thorpej /* XXX PR_WAITOK? */
1328 1.111 thorpej va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
1329 1.111 thorpej if (va == 0)
1330 1.111 thorpej return (NULL);
1331 1.93 thorpej
1332 1.93 thorpej for (;;) {
1333 1.93 thorpej pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
1334 1.93 thorpej if (pg != NULL)
1335 1.93 thorpej break;
1336 1.111 thorpej if ((flags & PR_WAITOK) == 0) {
1337 1.111 thorpej uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
1338 1.111 thorpej return (NULL);
1339 1.111 thorpej }
1340 1.93 thorpej uvm_wait("pmap_ptpt");
1341 1.93 thorpej }
1342 1.93 thorpej
1343 1.111 thorpej pte = vtopte(va);
1344 1.93 thorpej KDASSERT(pmap_pte_v(pte) == 0);
1345 1.93 thorpej
1346 1.111 thorpej *pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
1347 1.111 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1348 1.112 thorpej PTE_SYNC(pte);
1349 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
1350 1.105 thorpej {
1351 1.105 thorpej int s = splhigh();
1352 1.105 thorpej pg->mdpage.krw_mappings++;
1353 1.105 thorpej splx(s);
1354 1.105 thorpej }
1355 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
1356 1.93 thorpej
1357 1.111 thorpej return ((void *) va);
1358 1.93 thorpej }
1359 1.93 thorpej
1360 1.93 thorpej /*
1361 1.111 thorpej * pmap_ptpt_page_free:
1362 1.93 thorpej *
1363 1.111 thorpej * Back-end page free'er for the PT-PT pool.
1364 1.93 thorpej */
1365 1.93 thorpej static void
1366 1.111 thorpej pmap_ptpt_page_free(struct pool *pp, void *v)
1367 1.93 thorpej {
1368 1.111 thorpej vaddr_t va = (vaddr_t) v;
1369 1.111 thorpej paddr_t pa;
1370 1.111 thorpej
1371 1.111 thorpej pa = vtophys(va);
1372 1.93 thorpej
1373 1.111 thorpej pmap_kremove(va, L2_TABLE_SIZE);
1374 1.93 thorpej pmap_update(pmap_kernel());
1375 1.93 thorpej
1376 1.111 thorpej uvm_pagefree(PHYS_TO_VM_PAGE(pa));
1377 1.111 thorpej
1378 1.111 thorpej uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
1379 1.111 thorpej }
1380 1.111 thorpej
1381 1.111 thorpej /*
1382 1.111 thorpej * pmap_ptpt_ctor:
1383 1.111 thorpej *
1384 1.111 thorpej * Constructor for the PT-PT cache.
1385 1.111 thorpej */
1386 1.111 thorpej static int
1387 1.111 thorpej pmap_ptpt_ctor(void *arg, void *object, int flags)
1388 1.111 thorpej {
1389 1.111 thorpej caddr_t vptpt = object;
1390 1.111 thorpej
1391 1.111 thorpej /* Page is already zero'd. */
1392 1.93 thorpej
1393 1.111 thorpej /*
1394 1.111 thorpej * Map in kernel PTs.
1395 1.111 thorpej *
1396 1.111 thorpej * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
1397 1.111 thorpej */
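	/*
	 * The source address below is the kernel's own PT-PT as seen
	 * through the recursive mapping: for any VA v, its PTE lives at
	 * PTE_BASE + (v >> (PGSHIFT - 2)), so the PTEs that describe the
	 * PTE area itself (i.e. the PT-PT) start at
	 * PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)).  The offsets and the
	 * length are the corresponding L1 offsets scaled down by four,
	 * since each 4-byte PT-PT entry covers 16 bytes (4MB worth) of
	 * L1 entries.
	 */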
1398 1.111 thorpej memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
1399 1.111 thorpej (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
1400 1.111 thorpej ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
1401 1.111 thorpej (KERNEL_PD_SIZE >> 2));
1402 1.111 thorpej
1403 1.111 thorpej return (0);
1404 1.93 thorpej }
1405 1.93 thorpej
1406 1.93 thorpej /*
1407 1.1 matt * Allocate a page directory.
1408 1.1 matt * This routine will either allocate a new page directory from the pool
1409 1.1 matt * of L1 page tables currently held by the kernel or it will allocate
1410 1.1 matt * a new one via pmap_alloc_l1pt().
1411 1.1 matt * It will then initialise the l1 page table for use.
1412 1.1 matt */
1413 1.33 chris static int
1414 1.73 thorpej pmap_allocpagedir(struct pmap *pmap)
1415 1.1 matt {
1416 1.111 thorpej vaddr_t vptpt;
1417 1.2 matt paddr_t pa;
1418 1.1 matt struct l1pt *pt;
1419 1.111 thorpej u_int gen;
1420 1.1 matt
1421 1.1 matt PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1422 1.1 matt
1423 1.1 matt /* Do we have any spare L1's lying around ? */
1424 1.1 matt if (l1pt_static_queue_count) {
1425 1.1 matt --l1pt_static_queue_count;
1426 1.98 lukem pt = SIMPLEQ_FIRST(&l1pt_static_queue);
1427 1.98 lukem SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
1428 1.1 matt } else if (l1pt_queue_count) {
1429 1.1 matt --l1pt_queue_count;
1430 1.98 lukem pt = SIMPLEQ_FIRST(&l1pt_queue);
1431 1.98 lukem SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
1432 1.1 matt ++l1pt_reuse_count;
1433 1.1 matt } else {
1434 1.1 matt pt = pmap_alloc_l1pt();
1435 1.1 matt if (!pt)
1436 1.1 matt return(ENOMEM);
1437 1.1 matt ++l1pt_create_count;
1438 1.1 matt }
1439 1.1 matt
1440 1.1 matt /* Store the pointer to the l1 descriptor in the pmap. */
1441 1.1 matt pmap->pm_l1pt = pt;
1442 1.1 matt
1443 1.1 matt /* Get the physical address of the start of the l1 */
1444 1.51 chris pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
1445 1.1 matt
1446 1.1 matt /* Store the virtual address of the l1 in the pmap. */
1447 1.1 matt pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1448 1.1 matt
1449 1.1 matt /* Clean the L1 if it is dirty */
1450 1.110 thorpej if (!(pt->pt_flags & PTFLAG_CLEAN)) {
1451 1.81 thorpej bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1452 1.110 thorpej cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
1453 1.110 thorpej (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1454 1.110 thorpej }
1455 1.1 matt
1456 1.1 matt /* Allocate a page table to map all the page tables for this pmap */
1457 1.111 thorpej KASSERT(pmap->pm_vptpt == 0);
1458 1.111 thorpej
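	/*
	 * Objects from the PT-PT cache already have the kernel portion
	 * copied in by pmap_ptpt_ctor().  If the cache generation number
	 * changes while we are waiting for one (presumably because the
	 * kernel page tables have grown in the meantime), the copy in the
	 * object we got back may be stale, so throw it away and start over.
	 */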
1459 1.111 thorpej try_again:
1460 1.111 thorpej gen = pmap_ptpt_cache_generation;
1461 1.111 thorpej vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
1462 1.111 thorpej if (vptpt == NULL) {
1463 1.111 thorpej 		PDEBUG(0, printf("pmap_allocpagedir: no KVA for PTPT\n"));
1464 1.93 thorpej pmap_freepagedir(pmap);
1465 1.111 thorpej return (ENOMEM);
1466 1.5 toshii }
1467 1.5 toshii
1468 1.93 thorpej /* need to lock this all up for growkernel */
1469 1.48 chris simple_lock(&pmaps_lock);
1470 1.48 chris
1471 1.111 thorpej if (gen != pmap_ptpt_cache_generation) {
1472 1.111 thorpej simple_unlock(&pmaps_lock);
1473 1.111 thorpej pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
1474 1.111 thorpej goto try_again;
1475 1.111 thorpej }
1476 1.111 thorpej
1477 1.111 thorpej pmap->pm_vptpt = vptpt;
1478 1.111 thorpej pmap->pm_pptpt = vtophys(vptpt);
1479 1.111 thorpej
1480 1.64 thorpej /* Duplicate the kernel mappings. */
1481 1.81 thorpej bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1482 1.81 thorpej (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1483 1.48 chris KERNEL_PD_SIZE);
1484 1.110 thorpej cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
1485 1.110 thorpej (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
1486 1.48 chris
1487 1.1 matt /* Wire in this page table */
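	/*
	 * This is the self-referencing entry: the PT-PT is entered into
	 * the new L1 at the slot covering PTE_BASE, so this pmap's own
	 * page tables appear at PTE_BASE whenever the pmap is active.
	 */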
1488 1.113 thorpej pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, PMAP_PTP_SELFREF);
1489 1.1 matt
1490 1.1 matt pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1491 1.110 thorpej
1492 1.48 chris LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
1493 1.48 chris simple_unlock(&pmaps_lock);
1494 1.48 chris
1495 1.1 matt return(0);
1496 1.1 matt }
1497 1.1 matt
1498 1.1 matt
1499 1.1 matt /*
1500 1.1 matt * Initialize a preallocated and zeroed pmap structure,
1501 1.1 matt * such as one in a vmspace structure.
1502 1.1 matt */
1503 1.1 matt
1504 1.1 matt void
1505 1.73 thorpej pmap_pinit(struct pmap *pmap)
1506 1.1 matt {
1507 1.26 rearnsha int backoff = 6;
1508 1.26 rearnsha int retry = 10;
1509 1.26 rearnsha
1510 1.1 matt PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1511 1.1 matt
1512 1.1 matt /* Keep looping until we succeed in allocating a page directory */
1513 1.1 matt while (pmap_allocpagedir(pmap) != 0) {
1514 1.1 matt /*
1515 1.1 matt * Ok we failed to allocate a suitable block of memory for an
1516 1.1 matt * L1 page table. This means that either:
1517 1.1 matt * 1. 16KB of virtual address space could not be allocated
1518 1.1 matt * 2. 16KB of physically contiguous memory on a 16KB boundary
1519 1.1 matt * could not be allocated.
1520 1.1 matt *
1521 1.1 matt * Since we cannot fail we will sleep for a while and try
1522 1.17 chris * again.
1523 1.26 rearnsha *
1524 1.26 rearnsha * Searching for a suitable L1 PT is expensive:
1525 1.26 rearnsha * to avoid hogging the system when memory is really
1526 1.26 rearnsha * scarce, use an exponential back-off so that
1527 1.26 rearnsha * eventually we won't retry more than once every 8
1528 1.26 rearnsha * seconds. This should allow other processes to run
1529 1.26 rearnsha * to completion and free up resources.
1530 1.1 matt */
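		/*
		 * With backoff starting at 6 the timeout below is
		 * (hz << 3) >> 6, i.e. hz/8; it doubles after every
		 * ten failed attempts until it hits the 8 second
		 * (hz << 3) ceiling once backoff reaches zero.
		 */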
1531 1.26 rearnsha (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
1532 1.26 rearnsha NULL);
1533 1.26 rearnsha if (--retry == 0) {
1534 1.26 rearnsha retry = 10;
1535 1.26 rearnsha if (backoff)
1536 1.26 rearnsha --backoff;
1537 1.26 rearnsha }
1538 1.1 matt }
1539 1.1 matt
1540 1.76 thorpej if (vector_page < KERNEL_BASE) {
1541 1.76 thorpej /*
1542 1.76 thorpej * Map the vector page. This will also allocate and map
1543 1.76 thorpej * an L2 table for it.
1544 1.76 thorpej */
1545 1.76 thorpej pmap_enter(pmap, vector_page, systempage.pv_pa,
1546 1.76 thorpej VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1547 1.76 thorpej pmap_update(pmap);
1548 1.76 thorpej }
1549 1.1 matt }
1550 1.1 matt
1551 1.1 matt void
1552 1.73 thorpej pmap_freepagedir(struct pmap *pmap)
1553 1.1 matt {
1554 1.1 matt /* Free the memory used for the page table mapping */
1555 1.111 thorpej if (pmap->pm_vptpt != 0) {
1556 1.111 thorpej /*
1557 1.111 thorpej * XXX Objects freed to a pool cache must be in constructed
1558 1.111 thorpej * XXX form when freed, but we don't free page tables as we
1559 1.111 thorpej * XXX go, so we need to zap the mappings here.
1560 1.111 thorpej *
1561 1.111 thorpej * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
1562 1.111 thorpej */
1563 1.111 thorpej memset((caddr_t) pmap->pm_vptpt, 0,
1564 1.111 thorpej ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
1565 1.111 thorpej pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
1566 1.111 thorpej }
1567 1.1 matt
1568 1.1 matt /* junk the L1 page table */
1569 1.1 matt if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1570 1.1 matt /* Add the page table to the queue */
1571 1.111 thorpej SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
1572 1.111 thorpej pmap->pm_l1pt, pt_queue);
1573 1.1 matt ++l1pt_static_queue_count;
1574 1.1 matt } else if (l1pt_queue_count < 8) {
1575 1.1 matt /* Add the page table to the queue */
1576 1.1 matt SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1577 1.1 matt ++l1pt_queue_count;
1578 1.1 matt } else
1579 1.1 matt pmap_free_l1pt(pmap->pm_l1pt);
1580 1.1 matt }
1581 1.1 matt
1582 1.1 matt /*
1583 1.1 matt * Retire the given physical map from service.
1584 1.1 matt * Should only be called if the map contains no valid mappings.
1585 1.1 matt */
1586 1.1 matt
1587 1.1 matt void
1588 1.73 thorpej pmap_destroy(struct pmap *pmap)
1589 1.1 matt {
1590 1.17 chris struct vm_page *page;
1591 1.1 matt int count;
1592 1.1 matt
1593 1.1 matt if (pmap == NULL)
1594 1.1 matt return;
1595 1.1 matt
1596 1.1 matt PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1597 1.17 chris
1598 1.17 chris /*
1599 1.17 chris * Drop reference count
1600 1.17 chris */
1601 1.17 chris simple_lock(&pmap->pm_obj.vmobjlock);
1602 1.16 chris count = --pmap->pm_obj.uo_refs;
1603 1.17 chris simple_unlock(&pmap->pm_obj.vmobjlock);
1604 1.17 chris if (count > 0) {
1605 1.17 chris return;
1606 1.1 matt }
1607 1.1 matt
1608 1.17 chris /*
1609 1.17 chris * reference count is zero, free pmap resources and then free pmap.
1610 1.17 chris */
1611 1.48 chris
1612 1.48 chris /*
1613 1.48 chris * remove it from global list of pmaps
1614 1.48 chris */
1615 1.48 chris
1616 1.48 chris simple_lock(&pmaps_lock);
1617 1.48 chris LIST_REMOVE(pmap, pm_list);
1618 1.48 chris simple_unlock(&pmaps_lock);
1619 1.17 chris
1620 1.77 thorpej if (vector_page < KERNEL_BASE) {
1621 1.77 thorpej /* Remove the vector page mapping */
1622 1.77 thorpej pmap_remove(pmap, vector_page, vector_page + NBPG);
1623 1.77 thorpej pmap_update(pmap);
1624 1.77 thorpej }
1625 1.1 matt
1626 1.1 matt /*
1627 1.1 matt 	 * Free any page tables still mapped.
1628 1.1 matt 	 * This is only temporary until pmap_enter can count the number
1629 1.1 matt 	 * of mappings made in a page table.  Then pmap_remove() can
1630 1.1 matt 	 * reduce the count and free the page table when the count
1631 1.16 chris 	 * reaches zero.  Note that entries in this list should match the
1632 1.16 chris 	 * contents of the PT-PT; however, walking this list is faster
1633 1.16 chris 	 * than scanning all 1024 PT-PT entries looking for page tables.
1634 1.16 chris 	 * (Taken from the i386 pmap.c.)
1635 1.1 matt */
1636 1.97 chris /*
1637 1.97 chris * vmobjlock must be held while freeing pages
1638 1.97 chris */
1639 1.97 chris simple_lock(&pmap->pm_obj.vmobjlock);
1640 1.51 chris while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
1641 1.51 chris KASSERT((page->flags & PG_BUSY) == 0);
1642 1.114 thorpej
1643 1.114 thorpej /* Freeing a PT page? The contents are a throw-away. */
1644 1.114 thorpej KASSERT((page->offset & PD_OFFSET) == 0);/* XXX KDASSERT */
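		/*
		 * Since the contents are a throw-away, simply discard
		 * any dirty cache lines for the table instead of
		 * writing them back.
		 */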
1645 1.114 thorpej cpu_dcache_inv_range((vaddr_t)vtopte(page->offset), PAGE_SIZE);
1646 1.114 thorpej
1647 1.16 chris page->wire_count = 0;
1648 1.16 chris uvm_pagefree(page);
1649 1.1 matt }
1650 1.97 chris simple_unlock(&pmap->pm_obj.vmobjlock);
1651 1.111 thorpej
1652 1.1 matt /* Free the page dir */
1653 1.1 matt pmap_freepagedir(pmap);
1654 1.111 thorpej
1655 1.17 chris /* return the pmap to the pool */
1656 1.17 chris pool_put(&pmap_pmap_pool, pmap);
1657 1.1 matt }
1658 1.1 matt
1659 1.1 matt
1660 1.1 matt /*
1661 1.15 chris * void pmap_reference(struct pmap *pmap)
1662 1.1 matt *
1663 1.1 matt * Add a reference to the specified pmap.
1664 1.1 matt */
1665 1.1 matt
1666 1.1 matt void
1667 1.73 thorpej pmap_reference(struct pmap *pmap)
1668 1.1 matt {
1669 1.1 matt if (pmap == NULL)
1670 1.1 matt return;
1671 1.1 matt
1672 1.1 matt simple_lock(&pmap->pm_lock);
1673 1.16 chris pmap->pm_obj.uo_refs++;
1674 1.1 matt simple_unlock(&pmap->pm_lock);
1675 1.1 matt }
1676 1.1 matt
1677 1.1 matt /*
1678 1.1 matt * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1679 1.1 matt *
1680 1.1 matt * Return the start and end addresses of the kernel's virtual space.
1681 1.1 matt * These values are setup in pmap_bootstrap and are updated as pages
1682 1.1 matt * are allocated.
1683 1.1 matt */
1684 1.1 matt
1685 1.1 matt void
1686 1.73 thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1687 1.1 matt {
1688 1.54 thorpej *start = virtual_avail;
1689 1.1 matt *end = virtual_end;
1690 1.1 matt }
1691 1.1 matt
1692 1.1 matt /*
1693 1.1 matt * Activate the address space for the specified process. If the process
1694 1.1 matt * is the current process, load the new MMU context.
1695 1.1 matt */
1696 1.1 matt void
1697 1.73 thorpej pmap_activate(struct proc *p)
1698 1.1 matt {
1699 1.15 chris struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1700 1.1 matt struct pcb *pcb = &p->p_addr->u_pcb;
1701 1.1 matt
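	/*
	 * Look up the physical address of this pmap's L1; this is what
	 * gets loaded into the translation table base register (via
	 * setttb() below) when the process runs.
	 */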
1702 1.15 chris (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1703 1.1 matt (paddr_t *)&pcb->pcb_pagedir);
1704 1.1 matt
1705 1.1 matt PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1706 1.1 matt p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1707 1.1 matt
1708 1.1 matt if (p == curproc) {
1709 1.1 matt PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1710 1.1 matt setttb((u_int)pcb->pcb_pagedir);
1711 1.1 matt }
1712 1.1 matt }
1713 1.1 matt
1714 1.1 matt /*
1715 1.1 matt * Deactivate the address space of the specified process.
1716 1.1 matt */
1717 1.1 matt void
1718 1.73 thorpej pmap_deactivate(struct proc *p)
1719 1.1 matt {
1720 1.1 matt }
1721 1.1 matt
1722 1.31 thorpej /*
1723 1.31 thorpej * Perform any deferred pmap operations.
1724 1.31 thorpej */
1725 1.31 thorpej void
1726 1.31 thorpej pmap_update(struct pmap *pmap)
1727 1.31 thorpej {
1728 1.31 thorpej
1729 1.31 thorpej /*
1730 1.31 thorpej * We haven't deferred any pmap operations, but we do need to
1731 1.31 thorpej * make sure TLB/cache operations have completed.
1732 1.31 thorpej */
1733 1.31 thorpej cpu_cpwait();
1734 1.31 thorpej }
1735 1.1 matt
1736 1.1 matt /*
1737 1.1 matt * pmap_clean_page()
1738 1.1 matt *
1739 1.1 matt * This is a local function used to work out the best strategy to clean
1740 1.1 matt * a single page referenced by its entry in the PV table. It's used by
1741 1.1 matt  * pmap_copy_page, pmap_zero_page and maybe some others later on.
1742 1.1 matt *
1743 1.1 matt * Its policy is effectively:
1744 1.1 matt * o If there are no mappings, we don't bother doing anything with the cache.
1745 1.1 matt * o If there is one mapping, we clean just that page.
1746 1.1 matt * o If there are multiple mappings, we clean the entire cache.
1747 1.1 matt *
1748 1.1 matt * So that some functions can be further optimised, it returns 0 if it didn't
1749 1.1 matt * clean the entire cache, or 1 if it did.
1750 1.1 matt *
1751 1.1 matt * XXX One bug in this routine is that if the pv_entry has a single page
1752 1.1 matt  * mapped at 0x00000000, a whole cache clean will be performed rather than
1753 1.1 matt  * just the one page.  This should not occur in everyday use, and if it does,
1754 1.1 matt  * the only cost is a less efficient clean for that page.
1755 1.1 matt */
1756 1.1 matt static int
1757 1.73 thorpej pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
1758 1.1 matt {
1759 1.17 chris struct pmap *pmap;
1760 1.17 chris struct pv_entry *npv;
1761 1.1 matt int cache_needs_cleaning = 0;
1762 1.1 matt vaddr_t page_to_clean = 0;
1763 1.1 matt
1764 1.108 thorpej if (pv == NULL) {
1765 1.17 chris /* nothing mapped in so nothing to flush */
1766 1.17 chris return (0);
1767 1.108 thorpej }
1768 1.17 chris
1769 1.108 thorpej /*
1770 1.108 thorpej * Since we flush the cache each time we change curproc, we
1771 1.17 chris * only need to flush the page if it is in the current pmap.
1772 1.17 chris */
1773 1.17 chris if (curproc)
1774 1.17 chris pmap = curproc->p_vmspace->vm_map.pmap;
1775 1.17 chris else
1776 1.17 chris pmap = pmap_kernel();
1777 1.17 chris
1778 1.17 chris for (npv = pv; npv; npv = npv->pv_next) {
1779 1.17 chris if (npv->pv_pmap == pmap) {
1780 1.108 thorpej /*
1781 1.108 thorpej * The page is mapped non-cacheable in
1782 1.17 chris * this map. No need to flush the cache.
1783 1.17 chris */
1784 1.78 thorpej if (npv->pv_flags & PVF_NC) {
1785 1.17 chris #ifdef DIAGNOSTIC
1786 1.17 chris if (cache_needs_cleaning)
1787 1.17 chris panic("pmap_clean_page: "
1788 1.108 thorpej "cache inconsistency");
1789 1.17 chris #endif
1790 1.17 chris break;
1791 1.108 thorpej } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
1792 1.17 chris continue;
1793 1.108 thorpej if (cache_needs_cleaning) {
1794 1.17 chris page_to_clean = 0;
1795 1.17 chris break;
1796 1.108 thorpej } else
1797 1.17 chris page_to_clean = npv->pv_va;
1798 1.17 chris cache_needs_cleaning = 1;
1799 1.17 chris }
1800 1.1 matt }
1801 1.1 matt
1802 1.108 thorpej if (page_to_clean) {
1803 1.108 thorpej /*
1804 1.108 thorpej * XXX If is_src, we really only need to write-back,
1805 1.108 thorpej * XXX not invalidate, too. Investigate further.
1806 1.108 thorpej * XXX --thorpej (at) netbsd.org
1807 1.108 thorpej */
1808 1.36 thorpej cpu_idcache_wbinv_range(page_to_clean, NBPG);
1809 1.108 thorpej } else if (cache_needs_cleaning) {
1810 1.36 thorpej cpu_idcache_wbinv_all();
1811 1.1 matt return (1);
1812 1.1 matt }
1813 1.1 matt return (0);
1814 1.1 matt }
1815 1.1 matt
1816 1.1 matt /*
1817 1.1 matt * pmap_zero_page()
1818 1.1 matt *
1819 1.1 matt * Zero a given physical page by mapping it at a page hook point.
1820 1.1 matt  * In doing the zero page op, the page we zero is mapped cacheable, since on
1821 1.1 matt  * the StrongARM accesses to non-cached pages are non-burst, making writing
1822 1.1 matt  * _any_ bulk data very slow.
1823 1.1 matt */
1824 1.88 thorpej #if ARM_MMU_GENERIC == 1
1825 1.1 matt void
1826 1.88 thorpej pmap_zero_page_generic(paddr_t phys)
1827 1.1 matt {
1828 1.71 thorpej #ifdef DEBUG
1829 1.71 thorpej struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1830 1.71 thorpej
1831 1.71 thorpej if (pg->mdpage.pvh_list != NULL)
1832 1.71 thorpej panic("pmap_zero_page: page has mappings");
1833 1.71 thorpej #endif
1834 1.1 matt
1835 1.79 thorpej KDASSERT((phys & PGOFSET) == 0);
1836 1.79 thorpej
1837 1.1 matt /*
1838 1.1 matt * Hook in the page, zero it, and purge the cache for that
1839 1.1 matt * zeroed page. Invalidate the TLB as needed.
1840 1.1 matt */
1841 1.83 thorpej *cdst_pte = L2_S_PROTO | phys |
1842 1.86 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1843 1.113 thorpej PTE_SYNC(cdst_pte);
1844 1.54 thorpej cpu_tlb_flushD_SE(cdstp);
1845 1.32 thorpej cpu_cpwait();
1846 1.54 thorpej bzero_page(cdstp);
1847 1.54 thorpej cpu_dcache_wbinv_range(cdstp, NBPG);
1848 1.1 matt }
1849 1.88 thorpej #endif /* ARM_MMU_GENERIC == 1 */
1850 1.88 thorpej
1851 1.88 thorpej #if ARM_MMU_XSCALE == 1
1852 1.88 thorpej void
1853 1.88 thorpej pmap_zero_page_xscale(paddr_t phys)
1854 1.88 thorpej {
1855 1.88 thorpej #ifdef DEBUG
1856 1.88 thorpej struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1857 1.88 thorpej
1858 1.88 thorpej if (pg->mdpage.pvh_list != NULL)
1859 1.88 thorpej panic("pmap_zero_page: page has mappings");
1860 1.88 thorpej #endif
1861 1.88 thorpej
1862 1.88 thorpej KDASSERT((phys & PGOFSET) == 0);
1863 1.88 thorpej
1864 1.88 thorpej /*
1865 1.88 thorpej * Hook in the page, zero it, and purge the cache for that
1866 1.88 thorpej * zeroed page. Invalidate the TLB as needed.
1867 1.88 thorpej */
1868 1.88 thorpej *cdst_pte = L2_S_PROTO | phys |
1869 1.88 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
1870 1.88 thorpej L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
1871 1.113 thorpej PTE_SYNC(cdst_pte);
1872 1.88 thorpej cpu_tlb_flushD_SE(cdstp);
1873 1.88 thorpej cpu_cpwait();
1874 1.88 thorpej bzero_page(cdstp);
1875 1.88 thorpej xscale_cache_clean_minidata();
1876 1.88 thorpej }
1877 1.88 thorpej #endif /* ARM_MMU_XSCALE == 1 */
1878 1.1 matt
1879 1.17 chris /* pmap_pageidlezero()
1880 1.17 chris *
1881 1.17 chris * The same as above, except that we assume that the page is not
1882 1.17 chris * mapped. This means we never have to flush the cache first. Called
1883 1.17 chris * from the idle loop.
1884 1.17 chris */
1885 1.17 chris boolean_t
1886 1.73 thorpej pmap_pageidlezero(paddr_t phys)
1887 1.17 chris {
1888 1.17 chris int i, *ptr;
1889 1.17 chris boolean_t rv = TRUE;
1890 1.71 thorpej #ifdef DEBUG
1891 1.49 thorpej struct vm_page *pg;
1892 1.17 chris
1893 1.49 thorpej pg = PHYS_TO_VM_PAGE(phys);
1894 1.49 thorpej if (pg->mdpage.pvh_list != NULL)
1895 1.71 thorpej panic("pmap_pageidlezero: page has mappings");
1896 1.17 chris #endif
1897 1.79 thorpej
1898 1.79 thorpej KDASSERT((phys & PGOFSET) == 0);
1899 1.79 thorpej
1900 1.17 chris /*
1901 1.17 chris * Hook in the page, zero it, and purge the cache for that
1902 1.17 chris * zeroed page. Invalidate the TLB as needed.
1903 1.17 chris */
1904 1.83 thorpej *cdst_pte = L2_S_PROTO | phys |
1905 1.86 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1906 1.113 thorpej PTE_SYNC(cdst_pte);
1907 1.54 thorpej cpu_tlb_flushD_SE(cdstp);
1908 1.32 thorpej cpu_cpwait();
1909 1.32 thorpej
1910 1.54 thorpej for (i = 0, ptr = (int *)cdstp;
1911 1.17 chris i < (NBPG / sizeof(int)); i++) {
1912 1.17 chris if (sched_whichqs != 0) {
1913 1.17 chris /*
1914 1.17 chris * A process has become ready. Abort now,
1915 1.17 chris * so we don't keep it waiting while we
1916 1.17 chris * do slow memory access to finish this
1917 1.17 chris * page.
1918 1.17 chris */
1919 1.17 chris rv = FALSE;
1920 1.17 chris break;
1921 1.17 chris }
1922 1.17 chris *ptr++ = 0;
1923 1.17 chris }
1924 1.17 chris
1925 1.17 chris if (rv)
1926 1.17 chris /*
1927 1.17 chris 		 * if we aborted, we'll rezero this page again later, so don't
1928 1.17 chris 		 * purge it unless we finished it.
1929 1.17 chris */
1930 1.54 thorpej cpu_dcache_wbinv_range(cdstp, NBPG);
1931 1.17 chris return (rv);
1932 1.17 chris }
1933 1.17 chris
1934 1.1 matt /*
1935 1.1 matt * pmap_copy_page()
1936 1.1 matt *
1937 1.1 matt * Copy one physical page into another, by mapping the pages into
1938 1.1 matt  * hook points. The same comment regarding cacheability as in
1939 1.1 matt * pmap_zero_page also applies here.
1940 1.1 matt */
1941 1.88 thorpej #if ARM_MMU_GENERIC == 1
1942 1.1 matt void
1943 1.88 thorpej pmap_copy_page_generic(paddr_t src, paddr_t dst)
1944 1.1 matt {
1945 1.71 thorpej struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1946 1.71 thorpej #ifdef DEBUG
1947 1.71 thorpej struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1948 1.71 thorpej
1949 1.71 thorpej if (dst_pg->mdpage.pvh_list != NULL)
1950 1.71 thorpej panic("pmap_copy_page: dst page has mappings");
1951 1.71 thorpej #endif
1952 1.71 thorpej
1953 1.79 thorpej KDASSERT((src & PGOFSET) == 0);
1954 1.79 thorpej KDASSERT((dst & PGOFSET) == 0);
1955 1.79 thorpej
1956 1.71 thorpej /*
1957 1.71 thorpej * Clean the source page. Hold the source page's lock for
1958 1.71 thorpej * the duration of the copy so that no other mappings can
1959 1.71 thorpej * be created while we have a potentially aliased mapping.
1960 1.71 thorpej */
1961 1.49 thorpej simple_lock(&src_pg->mdpage.pvh_slock);
1962 1.71 thorpej (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1963 1.1 matt
1964 1.1 matt /*
1965 1.1 matt * Map the pages into the page hook points, copy them, and purge
1966 1.1 matt * the cache for the appropriate page. Invalidate the TLB
1967 1.1 matt * as required.
1968 1.1 matt */
1969 1.83 thorpej *csrc_pte = L2_S_PROTO | src |
1970 1.86 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
1971 1.113 thorpej PTE_SYNC(csrc_pte);
1972 1.83 thorpej *cdst_pte = L2_S_PROTO | dst |
1973 1.86 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1974 1.113 thorpej PTE_SYNC(cdst_pte);
1975 1.54 thorpej cpu_tlb_flushD_SE(csrcp);
1976 1.54 thorpej cpu_tlb_flushD_SE(cdstp);
1977 1.32 thorpej cpu_cpwait();
1978 1.54 thorpej bcopy_page(csrcp, cdstp);
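	/*
	 * The source window was only read through, so its cache lines
	 * can simply be invalidated; the destination must be written
	 * back (and invalidated) so that the copied data reaches the
	 * physical page.
	 */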
1979 1.65 chris cpu_dcache_inv_range(csrcp, NBPG);
1980 1.71 thorpej simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
1981 1.54 thorpej cpu_dcache_wbinv_range(cdstp, NBPG);
1982 1.1 matt }
1983 1.88 thorpej #endif /* ARM_MMU_GENERIC == 1 */
1984 1.88 thorpej
1985 1.88 thorpej #if ARM_MMU_XSCALE == 1
1986 1.88 thorpej void
1987 1.88 thorpej pmap_copy_page_xscale(paddr_t src, paddr_t dst)
1988 1.88 thorpej {
1989 1.88 thorpej struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1990 1.88 thorpej #ifdef DEBUG
1991 1.88 thorpej struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1992 1.88 thorpej
1993 1.88 thorpej if (dst_pg->mdpage.pvh_list != NULL)
1994 1.88 thorpej panic("pmap_copy_page: dst page has mappings");
1995 1.88 thorpej #endif
1996 1.88 thorpej
1997 1.88 thorpej KDASSERT((src & PGOFSET) == 0);
1998 1.88 thorpej KDASSERT((dst & PGOFSET) == 0);
1999 1.88 thorpej
2000 1.88 thorpej /*
2001 1.88 thorpej * Clean the source page. Hold the source page's lock for
2002 1.88 thorpej * the duration of the copy so that no other mappings can
2003 1.88 thorpej * be created while we have a potentially aliased mapping.
2004 1.88 thorpej */
2005 1.88 thorpej simple_lock(&src_pg->mdpage.pvh_slock);
2006 1.88 thorpej (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
2007 1.88 thorpej
2008 1.88 thorpej /*
2009 1.88 thorpej * Map the pages into the page hook points, copy them, and purge
2010 1.88 thorpej * the cache for the appropriate page. Invalidate the TLB
2011 1.88 thorpej * as required.
2012 1.88 thorpej */
2013 1.88 thorpej *csrc_pte = L2_S_PROTO | src |
2014 1.89 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
2015 1.89 thorpej L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
2016 1.113 thorpej PTE_SYNC(csrc_pte);
2017 1.88 thorpej *cdst_pte = L2_S_PROTO | dst |
2018 1.88 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
2019 1.88 thorpej L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
2020 1.113 thorpej PTE_SYNC(cdst_pte);
2021 1.88 thorpej cpu_tlb_flushD_SE(csrcp);
2022 1.88 thorpej cpu_tlb_flushD_SE(cdstp);
2023 1.88 thorpej cpu_cpwait();
2024 1.88 thorpej bcopy_page(csrcp, cdstp);
2025 1.88 thorpej simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
2026 1.88 thorpej xscale_cache_clean_minidata();
2027 1.88 thorpej }
2028 1.88 thorpej #endif /* ARM_MMU_XSCALE == 1 */
2029 1.1 matt
2030 1.1 matt #if 0
2031 1.1 matt void
2032 1.73 thorpej pmap_pte_addref(struct pmap *pmap, vaddr_t va)
2033 1.1 matt {
2034 1.1 matt pd_entry_t *pde;
2035 1.2 matt paddr_t pa;
2036 1.1 matt struct vm_page *m;
2037 1.1 matt
2038 1.1 matt if (pmap == pmap_kernel())
2039 1.1 matt return;
2040 1.1 matt
2041 1.115 thorpej pde = pmap_pde(pmap, va & PD_FRAME);
2042 1.1 matt pa = pmap_pte_pa(pde);
2043 1.1 matt m = PHYS_TO_VM_PAGE(pa);
2044 1.115 thorpej m->wire_count++;
2045 1.1 matt #ifdef MYCROFT_HACK
2046 1.1 matt printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2047 1.1 matt pmap, va, pde, pa, m, m->wire_count);
2048 1.1 matt #endif
2049 1.1 matt }
2050 1.1 matt
2051 1.1 matt void
2052 1.73 thorpej pmap_pte_delref(struct pmap *pmap, vaddr_t va)
2053 1.1 matt {
2054 1.1 matt pd_entry_t *pde;
2055 1.2 matt paddr_t pa;
2056 1.1 matt struct vm_page *m;
2057 1.1 matt
2058 1.1 matt if (pmap == pmap_kernel())
2059 1.1 matt return;
2060 1.1 matt
2061 1.115 thorpej pde = pmap_pde(pmap, va & PD_FRAME);
2062 1.1 matt pa = pmap_pte_pa(pde);
2063 1.1 matt m = PHYS_TO_VM_PAGE(pa);
2064 1.115 thorpej m->wire_count--;
2065 1.1 matt #ifdef MYCROFT_HACK
2066 1.1 matt printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2067 1.1 matt pmap, va, pde, pa, m, m->wire_count);
2068 1.1 matt #endif
2069 1.1 matt if (m->wire_count == 0) {
2070 1.1 matt #ifdef MYCROFT_HACK
2071 1.1 matt printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
2072 1.1 matt pmap, va, pde, pa, m);
2073 1.1 matt #endif
2074 1.115 thorpej pmap_unmap_in_l1(pmap, va & PD_FRAME);
2075 1.1 matt uvm_pagefree(m);
2076 1.1 matt --pmap->pm_stats.resident_count;
2077 1.1 matt }
2078 1.1 matt }
2079 1.1 matt #else
2080 1.1 matt #define pmap_pte_addref(pmap, va)
2081 1.1 matt #define pmap_pte_delref(pmap, va)
2082 1.1 matt #endif
2083 1.1 matt
2084 1.1 matt /*
2085 1.1 matt * Since we have a virtually indexed cache, we may need to inhibit caching if
2086 1.1 matt * there is more than one mapping and at least one of them is writable.
2087 1.1 matt * Since we purge the cache on every context switch, we only need to check for
2088 1.1 matt * other mappings within the same pmap, or kernel_pmap.
2089 1.1 matt * This function is also called when a page is unmapped, to possibly reenable
2090 1.1 matt * caching on any remaining mappings.
2091 1.28 rearnsha *
2092 1.28 rearnsha * The code implements the following logic, where:
2093 1.28 rearnsha *
2094 1.28 rearnsha * KW = # of kernel read/write pages
2095 1.28 rearnsha * KR = # of kernel read only pages
2096 1.28 rearnsha * UW = # of user read/write pages
2097 1.28 rearnsha * UR = # of user read only pages
2098 1.28 rearnsha * OW = # of user read/write pages in another pmap, then
2099 1.28 rearnsha *
2100 1.28 rearnsha * KC = kernel mapping is cacheable
2101 1.28 rearnsha * UC = user mapping is cacheable
2102 1.28 rearnsha *
2103 1.28 rearnsha * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
2104 1.28 rearnsha * +---------------------------------------------
2105 1.28 rearnsha * UW=0,UR=0,OW=0 | --- KC=1 KC=1 KC=0
2106 1.28 rearnsha * UW=0,UR>0,OW=0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
2107 1.28 rearnsha * UW=0,UR>0,OW>0 | UC=1 KC=0,UC=1 KC=0,UC=0 KC=0,UC=0
2108 1.28 rearnsha * UW=1,UR=0,OW=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2109 1.28 rearnsha * UW>1,UR>=0,OW>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2110 1.11 chris *
2111 1.11 chris  * Note that the pmap must have its ptes mapped in, and passed with ptes.
2112 1.1 matt */
2113 1.25 rearnsha __inline static void
2114 1.49 thorpej pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2115 1.12 chris boolean_t clear_cache)
2116 1.1 matt {
2117 1.25 rearnsha if (pmap == pmap_kernel())
2118 1.49 thorpej pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
2119 1.25 rearnsha else
2120 1.49 thorpej pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2121 1.25 rearnsha }
2122 1.25 rearnsha
2123 1.25 rearnsha static void
2124 1.49 thorpej pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2125 1.25 rearnsha boolean_t clear_cache)
2126 1.25 rearnsha {
2127 1.25 rearnsha int user_entries = 0;
2128 1.25 rearnsha int user_writable = 0;
2129 1.25 rearnsha int user_cacheable = 0;
2130 1.25 rearnsha int kernel_entries = 0;
2131 1.25 rearnsha int kernel_writable = 0;
2132 1.25 rearnsha int kernel_cacheable = 0;
2133 1.25 rearnsha struct pv_entry *pv;
2134 1.25 rearnsha struct pmap *last_pmap = pmap;
2135 1.25 rearnsha
2136 1.25 rearnsha #ifdef DIAGNOSTIC
2137 1.25 rearnsha if (pmap != pmap_kernel())
2138 1.25 rearnsha panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2139 1.25 rearnsha #endif
2140 1.25 rearnsha
2141 1.25 rearnsha /*
2142 1.25 rearnsha * Pass one, see if there are both kernel and user pmaps for
2143 1.25 rearnsha * this page. Calculate whether there are user-writable or
2144 1.25 rearnsha * kernel-writable pages.
2145 1.25 rearnsha */
2146 1.49 thorpej for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
2147 1.25 rearnsha if (pv->pv_pmap != pmap) {
2148 1.25 rearnsha user_entries++;
2149 1.78 thorpej if (pv->pv_flags & PVF_WRITE)
2150 1.25 rearnsha user_writable++;
2151 1.78 thorpej if ((pv->pv_flags & PVF_NC) == 0)
2152 1.25 rearnsha user_cacheable++;
2153 1.25 rearnsha } else {
2154 1.25 rearnsha kernel_entries++;
2155 1.78 thorpej if (pv->pv_flags & PVF_WRITE)
2156 1.25 rearnsha kernel_writable++;
2157 1.78 thorpej if ((pv->pv_flags & PVF_NC) == 0)
2158 1.25 rearnsha kernel_cacheable++;
2159 1.25 rearnsha }
2160 1.25 rearnsha }
2161 1.25 rearnsha
2162 1.25 rearnsha /*
2163 1.25 rearnsha * We know we have just been updating a kernel entry, so if
2164 1.25 rearnsha * all user pages are already cacheable, then there is nothing
2165 1.25 rearnsha * further to do.
2166 1.25 rearnsha */
2167 1.25 rearnsha if (kernel_entries == 0 &&
2168 1.25 rearnsha user_cacheable == user_entries)
2169 1.25 rearnsha return;
2170 1.25 rearnsha
2171 1.25 rearnsha if (user_entries) {
2172 1.25 rearnsha /*
2173 1.25 rearnsha * Scan over the list again, for each entry, if it
2174 1.25 rearnsha * might not be set correctly, call pmap_vac_me_user
2175 1.25 rearnsha * to recalculate the settings.
2176 1.25 rearnsha */
2177 1.49 thorpej for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
2178 1.25 rearnsha /*
2179 1.25 rearnsha * We know kernel mappings will get set
2180 1.25 rearnsha * correctly in other calls. We also know
2181 1.25 rearnsha * that if the pmap is the same as last_pmap
2182 1.25 rearnsha * then we've just handled this entry.
2183 1.25 rearnsha */
2184 1.25 rearnsha if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2185 1.25 rearnsha continue;
2186 1.25 rearnsha /*
2187 1.25 rearnsha * If there are kernel entries and this page
2188 1.25 rearnsha * is writable but non-cacheable, then we can
2189 1.25 rearnsha * skip this entry also.
2190 1.25 rearnsha */
2191 1.25 rearnsha if (kernel_entries > 0 &&
2192 1.78 thorpej (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
2193 1.78 thorpej (PVF_NC | PVF_WRITE))
2194 1.25 rearnsha continue;
2195 1.25 rearnsha /*
2196 1.25 rearnsha * Similarly if there are no kernel-writable
2197 1.25 rearnsha * entries and the page is already
2198 1.25 rearnsha * read-only/cacheable.
2199 1.25 rearnsha */
2200 1.25 rearnsha if (kernel_writable == 0 &&
2201 1.78 thorpej (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
2202 1.25 rearnsha continue;
2203 1.25 rearnsha /*
2204 1.25 rearnsha * For some of the remaining cases, we know
2205 1.25 rearnsha * that we must recalculate, but for others we
2206 1.25 rearnsha * can't tell if they are correct or not, so
2207 1.25 rearnsha * we recalculate anyway.
2208 1.25 rearnsha */
2209 1.25 rearnsha pmap_unmap_ptes(last_pmap);
2210 1.25 rearnsha last_pmap = pv->pv_pmap;
2211 1.25 rearnsha ptes = pmap_map_ptes(last_pmap);
2212 1.49 thorpej pmap_vac_me_user(last_pmap, pg, ptes,
2213 1.25 rearnsha pmap_is_curpmap(last_pmap));
2214 1.25 rearnsha }
2215 1.25 rearnsha /* Restore the pte mapping that was passed to us. */
2216 1.25 rearnsha if (last_pmap != pmap) {
2217 1.25 rearnsha pmap_unmap_ptes(last_pmap);
2218 1.25 rearnsha ptes = pmap_map_ptes(pmap);
2219 1.25 rearnsha }
2220 1.25 rearnsha if (kernel_entries == 0)
2221 1.25 rearnsha return;
2222 1.25 rearnsha }
2223 1.25 rearnsha
2224 1.49 thorpej pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2225 1.25 rearnsha return;
2226 1.25 rearnsha }
2227 1.25 rearnsha
2228 1.25 rearnsha static void
2229 1.49 thorpej pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2230 1.25 rearnsha boolean_t clear_cache)
2231 1.25 rearnsha {
2232 1.25 rearnsha struct pmap *kpmap = pmap_kernel();
2233 1.17 chris struct pv_entry *pv, *npv;
2234 1.1 matt int entries = 0;
2235 1.25 rearnsha int writable = 0;
2236 1.12 chris int cacheable_entries = 0;
2237 1.25 rearnsha int kern_cacheable = 0;
2238 1.25 rearnsha int other_writable = 0;
2239 1.1 matt
2240 1.49 thorpej pv = pg->mdpage.pvh_list;
2241 1.11 chris KASSERT(ptes != NULL);
2242 1.1 matt
2243 1.1 matt /*
2244 1.1 matt * Count mappings and writable mappings in this pmap.
2245 1.25 rearnsha * Include kernel mappings as part of our own.
2246 1.1 matt * Keep a pointer to the first one.
2247 1.1 matt */
2248 1.1 matt for (npv = pv; npv; npv = npv->pv_next) {
2249 1.1 matt /* Count mappings in the same pmap */
2250 1.25 rearnsha if (pmap == npv->pv_pmap ||
2251 1.25 rearnsha kpmap == npv->pv_pmap) {
2252 1.1 matt if (entries++ == 0)
2253 1.1 matt pv = npv;
2254 1.12 chris /* Cacheable mappings */
2255 1.78 thorpej if ((npv->pv_flags & PVF_NC) == 0) {
2256 1.12 chris cacheable_entries++;
2257 1.25 rearnsha if (kpmap == npv->pv_pmap)
2258 1.25 rearnsha kern_cacheable++;
2259 1.25 rearnsha }
2260 1.25 rearnsha /* Writable mappings */
2261 1.78 thorpej if (npv->pv_flags & PVF_WRITE)
2262 1.25 rearnsha ++writable;
2263 1.78 thorpej } else if (npv->pv_flags & PVF_WRITE)
2264 1.25 rearnsha other_writable = 1;
2265 1.1 matt }
2266 1.1 matt
2267 1.12 chris PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
2268 1.25 rearnsha "writable %d cacheable %d %s\n", pmap, entries, writable,
2269 1.12 chris cacheable_entries, clear_cache ? "clean" : "no clean"));
2270 1.12 chris
2271 1.1 matt /*
2272 1.1 matt * Enable or disable caching as necessary.
2273 1.25 rearnsha * Note: the first entry might be part of the kernel pmap,
2274 1.25 rearnsha * so we can't assume this is indicative of the state of the
2275 1.25 rearnsha * other (maybe non-kpmap) entries.
2276 1.1 matt */
2277 1.25 rearnsha if ((entries > 1 && writable) ||
2278 1.25 rearnsha (entries > 0 && pmap == kpmap && other_writable)) {
2279 1.12 chris if (cacheable_entries == 0)
2280 1.12 chris return;
2281 1.25 rearnsha for (npv = pv; npv; npv = npv->pv_next) {
2282 1.25 rearnsha if ((pmap == npv->pv_pmap
2283 1.25 rearnsha || kpmap == npv->pv_pmap) &&
2284 1.78 thorpej (npv->pv_flags & PVF_NC) == 0) {
2285 1.91 thorpej ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
2286 1.113 thorpej PTE_SYNC_CURRENT(pmap,
2287 1.113 thorpej &ptes[arm_btop(npv->pv_va)]);
2288 1.78 thorpej npv->pv_flags |= PVF_NC;
2289 1.25 rearnsha /*
2290 1.25 rearnsha * If this page needs flushing from the
2291 1.25 rearnsha * cache, and we aren't going to do it
2292 1.25 rearnsha * below, do it now.
2293 1.25 rearnsha */
2294 1.25 rearnsha if ((cacheable_entries < 4 &&
2295 1.25 rearnsha (clear_cache || npv->pv_pmap == kpmap)) ||
2296 1.25 rearnsha (npv->pv_pmap == kpmap &&
2297 1.25 rearnsha !clear_cache && kern_cacheable < 4)) {
2298 1.36 thorpej cpu_idcache_wbinv_range(npv->pv_va,
2299 1.12 chris NBPG);
2300 1.12 chris cpu_tlb_flushID_SE(npv->pv_va);
2301 1.12 chris }
2302 1.1 matt }
2303 1.1 matt }
2304 1.25 rearnsha if ((clear_cache && cacheable_entries >= 4) ||
2305 1.25 rearnsha kern_cacheable >= 4) {
2306 1.36 thorpej cpu_idcache_wbinv_all();
2307 1.12 chris cpu_tlb_flushID();
2308 1.12 chris }
2309 1.32 thorpej cpu_cpwait();
2310 1.1 matt } else if (entries > 0) {
2311 1.25 rearnsha /*
2312 1.25 rearnsha 		 * Turn caching back on for some pages.  If it is a kernel
2313 1.25 rearnsha * page, only do so if there are no other writable pages.
2314 1.25 rearnsha */
2315 1.25 rearnsha for (npv = pv; npv; npv = npv->pv_next) {
2316 1.25 rearnsha if ((pmap == npv->pv_pmap ||
2317 1.25 rearnsha (kpmap == npv->pv_pmap && other_writable == 0)) &&
2318 1.78 thorpej (npv->pv_flags & PVF_NC)) {
2319 1.86 thorpej ptes[arm_btop(npv->pv_va)] |=
2320 1.86 thorpej pte_l2_s_cache_mode;
2321 1.113 thorpej PTE_SYNC_CURRENT(pmap,
2322 1.113 thorpej &ptes[arm_btop(npv->pv_va)]);
2323 1.78 thorpej npv->pv_flags &= ~PVF_NC;
2324 1.1 matt }
2325 1.1 matt }
2326 1.1 matt }
2327 1.1 matt }
2328 1.1 matt
2329 1.1 matt /*
2330 1.1 matt * pmap_remove()
2331 1.1 matt *
2332 1.1 matt * pmap_remove is responsible for nuking a number of mappings for a range
2333 1.1 matt * of virtual address space in the current pmap. To do this efficiently
2334 1.1 matt * is interesting, because in a number of cases a wide virtual address
2335 1.1 matt * range may be supplied that contains few actual mappings. So, the
2336 1.1 matt * optimisations are:
2337 1.1 matt * 1. Try and skip over hunks of address space for which an L1 entry
2338 1.1 matt * does not exist.
2339 1.1 matt * 2. Build up a list of pages we've hit, up to a maximum, so we can
2340 1.1 matt * maybe do just a partial cache clean. This path of execution is
2341 1.1 matt * complicated by the fact that the cache must be flushed _before_
2342 1.1 matt * the PTE is nuked, being a VAC :-)
2343 1.1 matt * 3. Maybe later fast-case a single page, but I don't think this is
2344 1.1 matt * going to make _that_ much difference overall.
2345 1.1 matt */
2346 1.1 matt
2347 1.1 matt #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
2348 1.1 matt
2349 1.1 matt void
2350 1.73 thorpej pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
2351 1.1 matt {
2352 1.1 matt int cleanlist_idx = 0;
2353 1.1 matt struct pagelist {
2354 1.1 matt vaddr_t va;
2355 1.1 matt pt_entry_t *pte;
2356 1.1 matt } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
2357 1.11 chris pt_entry_t *pte = 0, *ptes;
2358 1.2 matt paddr_t pa;
2359 1.1 matt int pmap_active;
2360 1.49 thorpej struct vm_page *pg;
2361 1.1 matt
2362 1.1 matt /* Exit quick if there is no pmap */
2363 1.1 matt if (!pmap)
2364 1.1 matt return;
2365 1.1 matt
2366 1.79 thorpej PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
2367 1.79 thorpej pmap, sva, eva));
2368 1.1 matt
2369 1.17 chris /*
2370 1.49 thorpej * we lock in the pmap => vm_page direction
2371 1.17 chris */
2372 1.17 chris PMAP_MAP_TO_HEAD_LOCK();
2373 1.17 chris
2374 1.11 chris ptes = pmap_map_ptes(pmap);
2375 1.1 matt /* Get a page table pointer */
2376 1.1 matt while (sva < eva) {
2377 1.30 rearnsha if (pmap_pde_page(pmap_pde(pmap, sva)))
2378 1.1 matt break;
2379 1.81 thorpej sva = (sva & L1_S_FRAME) + L1_S_SIZE;
2380 1.1 matt }
2381 1.11 chris
2382 1.56 thorpej pte = &ptes[arm_btop(sva)];
2383 1.1 matt /* Note if the pmap is active thus require cache and tlb cleans */
2384 1.58 thorpej pmap_active = pmap_is_curpmap(pmap);
2385 1.1 matt
2386 1.1 matt /* Now loop along */
2387 1.1 matt while (sva < eva) {
2388 1.1 matt /* Check if we can move to the next PDE (l1 chunk) */
2389 1.113 thorpej if ((sva & L2_ADDR_BITS) == 0) {
2390 1.30 rearnsha if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2391 1.81 thorpej sva += L1_S_SIZE;
2392 1.81 thorpej pte += arm_btop(L1_S_SIZE);
2393 1.1 matt continue;
2394 1.1 matt }
2395 1.113 thorpej }
2396 1.1 matt
2397 1.1 matt /* We've found a valid PTE, so this page of PTEs has to go. */
2398 1.1 matt if (pmap_pte_v(pte)) {
2399 1.1 matt /* Update statistics */
2400 1.1 matt --pmap->pm_stats.resident_count;
2401 1.1 matt
2402 1.1 matt /*
2403 1.1 matt * Add this page to our cache remove list, if we can.
2404 1.1 matt 			 * If, however, the cache remove list is totally full,
2405 1.1 matt * then do a complete cache invalidation taking note
2406 1.1 matt * to backtrack the PTE table beforehand, and ignore
2407 1.1 matt * the lists in future because there's no longer any
2408 1.1 matt * point in bothering with them (we've paid the
2409 1.1 matt * penalty, so will carry on unhindered). Otherwise,
2410 1.1 matt * when we fall out, we just clean the list.
2411 1.1 matt */
2412 1.1 matt PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2413 1.1 matt pa = pmap_pte_pa(pte);
2414 1.1 matt
2415 1.1 matt if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2416 1.1 matt /* Add to the clean list. */
2417 1.1 matt cleanlist[cleanlist_idx].pte = pte;
2418 1.1 matt cleanlist[cleanlist_idx].va = sva;
2419 1.1 matt cleanlist_idx++;
2420 1.1 matt } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2421 1.1 matt int cnt;
2422 1.1 matt
2423 1.1 matt /* Nuke everything if needed. */
2424 1.1 matt if (pmap_active) {
2425 1.36 thorpej cpu_idcache_wbinv_all();
2426 1.1 matt cpu_tlb_flushID();
2427 1.1 matt }
2428 1.1 matt
2429 1.1 matt /*
2430 1.1 matt * Roll back the previous PTE list,
2431 1.1 matt * and zero out the current PTE.
2432 1.1 matt */
2433 1.113 thorpej for (cnt = 0;
2434 1.113 thorpej cnt < PMAP_REMOVE_CLEAN_LIST_SIZE;
2435 1.113 thorpej cnt++) {
2436 1.1 matt *cleanlist[cnt].pte = 0;
2437 1.113 thorpej if (pmap_active)
2438 1.113 thorpej PTE_SYNC(cleanlist[cnt].pte);
2439 1.113 thorpej else
2440 1.113 thorpej PTE_FLUSH(cleanlist[cnt].pte);
2441 1.113 thorpej pmap_pte_delref(pmap,
2442 1.113 thorpej cleanlist[cnt].va);
2443 1.1 matt }
2444 1.1 matt *pte = 0;
2445 1.113 thorpej if (pmap_active)
2446 1.113 thorpej PTE_SYNC(pte);
2447 1.113 thorpej else
2448 1.113 thorpej PTE_FLUSH(pte);
2449 1.1 matt pmap_pte_delref(pmap, sva);
2450 1.1 matt cleanlist_idx++;
2451 1.1 matt } else {
2452 1.1 matt /*
2453 1.1 matt * We've already nuked the cache and
2454 1.1 matt * TLB, so just carry on regardless,
2455 1.1 matt * and we won't need to do it again
2456 1.1 matt */
2457 1.1 matt *pte = 0;
2458 1.113 thorpej if (pmap_active)
2459 1.113 thorpej PTE_SYNC(pte);
2460 1.113 thorpej else
2461 1.113 thorpej PTE_FLUSH(pte);
2462 1.1 matt pmap_pte_delref(pmap, sva);
2463 1.1 matt }
2464 1.1 matt
2465 1.1 matt /*
2466 1.1 matt * Update flags. In a number of circumstances,
2467 1.1 matt * we could cluster a lot of these and do a
2468 1.1 matt * number of sequential pages in one go.
2469 1.1 matt */
2470 1.49 thorpej if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2471 1.17 chris struct pv_entry *pve;
2472 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
2473 1.49 thorpej pve = pmap_remove_pv(pg, pmap, sva);
2474 1.17 chris pmap_free_pv(pmap, pve);
2475 1.49 thorpej pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2476 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
2477 1.1 matt }
2478 1.113 thorpej } else if (pmap_active == 0)
2479 1.113 thorpej PTE_FLUSH(pte);
2480 1.1 matt sva += NBPG;
2481 1.1 matt pte++;
2482 1.1 matt }
2483 1.1 matt
2484 1.1 matt /*
2485 1.1 matt 	 * Now, if we've fallen through to here, chances are that there
2486 1.1 matt 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2487 1.1 matt */
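	/*
	 * If the list overflowed above, cleanlist_idx was bumped to
	 * PMAP_REMOVE_CLEAN_LIST_SIZE + 1 and every PTE has already been
	 * zapped and flushed, so there is nothing further to do here.
	 */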
2488 1.1 matt if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2489 1.1 matt u_int cnt;
2490 1.1 matt
2491 1.1 matt for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2492 1.1 matt if (pmap_active) {
2493 1.36 thorpej cpu_idcache_wbinv_range(cleanlist[cnt].va,
2494 1.36 thorpej NBPG);
2495 1.1 matt *cleanlist[cnt].pte = 0;
2496 1.1 matt cpu_tlb_flushID_SE(cleanlist[cnt].va);
2497 1.113 thorpej PTE_SYNC(cleanlist[cnt].pte);
2498 1.113 thorpej } else {
2499 1.1 matt *cleanlist[cnt].pte = 0;
2500 1.113 thorpej PTE_FLUSH(cleanlist[cnt].pte);
2501 1.113 thorpej }
2502 1.1 matt pmap_pte_delref(pmap, cleanlist[cnt].va);
2503 1.1 matt }
2504 1.1 matt }
2505 1.104 thorpej
2506 1.104 thorpej pmap_unmap_ptes(pmap);
2507 1.104 thorpej
2508 1.17 chris PMAP_MAP_TO_HEAD_UNLOCK();
2509 1.1 matt }
2510 1.1 matt
2511 1.1 matt /*
2512 1.1 matt * Routine: pmap_remove_all
2513 1.1 matt * Function:
2514 1.1 matt * Removes this physical page from
2515 1.1 matt * all physical maps in which it resides.
2516 1.1 matt * Reflects back modify bits to the pager.
2517 1.1 matt */
2518 1.1 matt
2519 1.33 chris static void
2520 1.73 thorpej pmap_remove_all(struct vm_page *pg)
2521 1.1 matt {
2522 1.17 chris struct pv_entry *pv, *npv;
2523 1.15 chris struct pmap *pmap;
2524 1.11 chris pt_entry_t *pte, *ptes;
2525 1.1 matt
2526 1.49 thorpej PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
2527 1.1 matt
2528 1.49 thorpej /* set vm_page => pmap locking */
2529 1.17 chris PMAP_HEAD_TO_MAP_LOCK();
2530 1.1 matt
2531 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
2532 1.17 chris
2533 1.49 thorpej pv = pg->mdpage.pvh_list;
2534 1.49 thorpej if (pv == NULL) {
2535 1.49 thorpej PDEBUG(0, printf("free page\n"));
2536 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
2537 1.49 thorpej PMAP_HEAD_TO_MAP_UNLOCK();
2538 1.49 thorpej return;
2539 1.1 matt }
2540 1.17 chris pmap_clean_page(pv, FALSE);
2541 1.1 matt
2542 1.1 matt while (pv) {
2543 1.1 matt pmap = pv->pv_pmap;
2544 1.11 chris ptes = pmap_map_ptes(pmap);
2545 1.56 thorpej pte = &ptes[arm_btop(pv->pv_va)];
2546 1.1 matt
2547 1.1 matt PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2548 1.1 matt pv->pv_va, pv->pv_flags));
2549 1.1 matt #ifdef DEBUG
2550 1.79 thorpej if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
2551 1.79 thorpej pmap_pte_v(pte) == 0 ||
2552 1.79 thorpej pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
2553 1.1 matt panic("pmap_remove_all: bad mapping");
2554 1.1 matt #endif /* DEBUG */
2555 1.1 matt
2556 1.1 matt /*
2557 1.1 matt * Update statistics
2558 1.1 matt */
2559 1.1 matt --pmap->pm_stats.resident_count;
2560 1.1 matt
2561 1.1 matt /* Wired bit */
2562 1.78 thorpej if (pv->pv_flags & PVF_WIRED)
2563 1.1 matt --pmap->pm_stats.wired_count;
2564 1.1 matt
2565 1.1 matt /*
2566 1.1 matt * Invalidate the PTEs.
2567 1.1 matt * XXX: should cluster them up and invalidate as many
2568 1.1 matt * as possible at once.
2569 1.1 matt */
2570 1.1 matt
2571 1.1 matt #ifdef needednotdone
2572 1.1 matt reduce wiring count on page table pages as references drop
2573 1.1 matt #endif
2574 1.1 matt
2575 1.1 matt *pte = 0;
2576 1.113 thorpej PTE_SYNC_CURRENT(pmap, pte);
2577 1.1 matt pmap_pte_delref(pmap, pv->pv_va);
2578 1.1 matt
2579 1.1 matt npv = pv->pv_next;
2580 1.17 chris pmap_free_pv(pmap, pv);
2581 1.1 matt pv = npv;
2582 1.11 chris pmap_unmap_ptes(pmap);
2583 1.1 matt }
2584 1.49 thorpej pg->mdpage.pvh_list = NULL;
2585 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
2586 1.17 chris PMAP_HEAD_TO_MAP_UNLOCK();
2587 1.1 matt
2588 1.1 matt PDEBUG(0, printf("done\n"));
2589 1.1 matt cpu_tlb_flushID();
2590 1.32 thorpej cpu_cpwait();
2591 1.1 matt }
2592 1.1 matt
2593 1.1 matt
2594 1.1 matt /*
2595 1.1 matt * Set the physical protection on the specified range of this map as requested.
2596 1.1 matt */
2597 1.1 matt
2598 1.1 matt void
2599 1.73 thorpej pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
2600 1.1 matt {
2601 1.11 chris pt_entry_t *pte = NULL, *ptes;
2602 1.49 thorpej struct vm_page *pg;
2603 1.1 matt int flush = 0;
2604 1.1 matt
2605 1.1 matt PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2606 1.1 matt pmap, sva, eva, prot));
2607 1.1 matt
2608 1.1 matt if (~prot & VM_PROT_READ) {
2609 1.107 thorpej /*
2610 1.107 thorpej * Just remove the mappings. pmap_update() is not required
2611 1.107 thorpej * here since the caller should do it.
2612 1.107 thorpej */
2613 1.1 matt pmap_remove(pmap, sva, eva);
2614 1.1 matt return;
2615 1.1 matt }
2616 1.1 matt if (prot & VM_PROT_WRITE) {
2617 1.1 matt /*
2618 1.1 matt * If this is a read->write transition, just ignore it and let
2619 1.1 matt * uvm_fault() take care of it later.
2620 1.1 matt */
2621 1.1 matt return;
2622 1.1 matt }
2623 1.1 matt
2624 1.17 chris /* Need to lock map->head */
2625 1.17 chris PMAP_MAP_TO_HEAD_LOCK();
2626 1.17 chris
2627 1.11 chris ptes = pmap_map_ptes(pmap);
2628 1.96 thorpej
2629 1.96 thorpej /*
2630 1.96 thorpej 	 * OK, at this point, we know we're doing a write-protect operation.
2631 1.96 thorpej * If the pmap is active, write-back the range.
2632 1.96 thorpej */
2633 1.96 thorpej if (pmap_is_curpmap(pmap))
2634 1.96 thorpej cpu_dcache_wb_range(sva, eva - sva);
2635 1.96 thorpej
2636 1.1 matt /*
2637 1.1 matt * We need to acquire a pointer to a page table page before entering
2638 1.1 matt * the following loop.
2639 1.1 matt */
2640 1.1 matt while (sva < eva) {
2641 1.30 rearnsha if (pmap_pde_page(pmap_pde(pmap, sva)))
2642 1.1 matt break;
2643 1.81 thorpej sva = (sva & L1_S_FRAME) + L1_S_SIZE;
2644 1.1 matt }
2645 1.11 chris
2646 1.56 thorpej pte = &ptes[arm_btop(sva)];
2647 1.17 chris
2648 1.1 matt while (sva < eva) {
2649 1.1 matt /* only check once in a while */
2650 1.81 thorpej if ((sva & L2_ADDR_BITS) == 0) {
2651 1.30 rearnsha if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2652 1.1 matt /* We can race ahead here, to the next pde. */
2653 1.81 thorpej sva += L1_S_SIZE;
2654 1.81 thorpej pte += arm_btop(L1_S_SIZE);
2655 1.1 matt continue;
2656 1.1 matt }
2657 1.1 matt }
2658 1.1 matt
2659 1.113 thorpej if (!pmap_pte_v(pte)) {
2660 1.113 thorpej PTE_FLUSH_ALT(pmap, pte);
2661 1.1 matt goto next;
2662 1.113 thorpej }
2663 1.1 matt
2664 1.1 matt flush = 1;
2665 1.1 matt
2666 1.113 thorpej pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
2667 1.113 thorpej
2668 1.107 thorpej *pte &= ~L2_S_PROT_W; /* clear write bit */
2669 1.113 thorpej PTE_SYNC_CURRENT(pmap, pte); /* XXXJRT optimize */
2670 1.1 matt
2671 1.1 matt /* Clear write flag */
2672 1.113 thorpej if (pg != NULL) {
2673 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
2674 1.78 thorpej (void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
2675 1.49 thorpej pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2676 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
2677 1.1 matt }
2678 1.1 matt
2679 1.107 thorpej next:
2680 1.1 matt sva += NBPG;
2681 1.1 matt pte++;
2682 1.1 matt }
2683 1.11 chris pmap_unmap_ptes(pmap);
2684 1.17 chris PMAP_MAP_TO_HEAD_UNLOCK();
2685 1.1 matt if (flush)
2686 1.1 matt cpu_tlb_flushID();
2687 1.1 matt }
2688 1.1 matt
2689 1.1 matt /*
2690 1.15 chris * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2691 1.1 matt * int flags)
2692 1.1 matt *
2693 1.1 matt * Insert the given physical page (p) at
2694 1.1 matt * the specified virtual address (v) in the
2695 1.1 matt * target physical map with the protection requested.
2696 1.1 matt *
2697 1.1 matt * If specified, the page will be wired down, meaning
2698 1.1 matt * that the related pte can not be reclaimed.
2699 1.1 matt *
2700 1.1 matt * NB: This is the only routine which MAY NOT lazy-evaluate
2701 1.1 matt * or lose information. That is, this routine must actually
2702 1.1 matt * insert this page into the given map NOW.
2703 1.1 matt */
2704 1.1 matt
2705 1.1 matt int
2706 1.73 thorpej pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2707 1.73 thorpej int flags)
2708 1.1 matt {
2709 1.66 thorpej pt_entry_t *ptes, opte, npte;
2710 1.2 matt paddr_t opa;
2711 1.1 matt boolean_t wired = (flags & PMAP_WIRED) != 0;
2712 1.49 thorpej struct vm_page *pg;
2713 1.17 chris struct pv_entry *pve;
2714 1.66 thorpej int error, nflags;
2715 1.1 matt
2716 1.1 matt PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2717 1.1 matt va, pa, pmap, prot, wired));
2718 1.1 matt
2719 1.1 matt #ifdef DIAGNOSTIC
2720 1.1 matt /* Valid address ? */
2721 1.48 chris if (va >= (pmap_curmaxkvaddr))
2722 1.1 matt panic("pmap_enter: too big");
2723 1.1 matt if (pmap != pmap_kernel() && va != 0) {
2724 1.1 matt if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2725 1.1 matt panic("pmap_enter: kernel page in user map");
2726 1.1 matt } else {
2727 1.1 matt if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2728 1.1 matt panic("pmap_enter: user page in kernel map");
2729 1.1 matt if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2730 1.1 matt panic("pmap_enter: entering PT page");
2731 1.1 matt }
2732 1.1 matt #endif
2733 1.79 thorpej
2734 1.79 thorpej KDASSERT(((va | pa) & PGOFSET) == 0);
2735 1.79 thorpej
2736 1.49 thorpej /*
2737 1.49 thorpej * Get a pointer to the page. Later on in this function, we
2738 1.49 thorpej * test for a managed page by checking pg != NULL.
2739 1.49 thorpej */
2740 1.55 thorpej pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2741 1.49 thorpej
2742 1.17 chris /* get lock */
2743 1.17 chris PMAP_MAP_TO_HEAD_LOCK();
2744 1.66 thorpej
2745 1.1 matt /*
2746 1.66 thorpej * map the ptes. If there's not already an L2 table for this
2747 1.66 thorpej * address, allocate one.
2748 1.1 matt */
2749 1.66 thorpej ptes = pmap_map_ptes(pmap); /* locks pmap */
2750 1.66 thorpej if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
2751 1.17 chris struct vm_page *ptp;
2752 1.57 thorpej
2753 1.57 thorpej /* kernel should be pre-grown */
2754 1.57 thorpej KASSERT(pmap != pmap_kernel());
2755 1.17 chris
2756 1.17 chris /* if failure is allowed then don't try too hard */
2757 1.114 thorpej ptp = pmap_get_ptp(pmap, va & PD_FRAME);
2758 1.17 chris if (ptp == NULL) {
2759 1.17 chris if (flags & PMAP_CANFAIL) {
2760 1.17 chris error = ENOMEM;
2761 1.17 chris goto out;
2762 1.17 chris }
2763 1.17 chris panic("pmap_enter: get ptp failed");
2764 1.1 matt }
2765 1.1 matt }
2766 1.66 thorpej opte = ptes[arm_btop(va)];
2767 1.1 matt
2768 1.1 matt nflags = 0;
2769 1.1 matt if (prot & VM_PROT_WRITE)
2770 1.78 thorpej nflags |= PVF_WRITE;
2771 1.1 matt if (wired)
2772 1.78 thorpej nflags |= PVF_WIRED;
2773 1.1 matt
2774 1.1 matt /* Is the pte valid ? If so then this page is already mapped */
2775 1.66 thorpej if (l2pte_valid(opte)) {
2776 1.1 matt /* Get the physical address of the current page mapped */
2777 1.66 thorpej opa = l2pte_pa(opte);
2778 1.1 matt
2779 1.1 matt /* Are we mapping the same page ? */
2780 1.1 matt if (opa == pa) {
2781 1.104 thorpej /* Check to see if we're doing rw->ro. */
2782 1.104 thorpej if ((opte & L2_S_PROT_W) != 0 &&
2783 1.104 thorpej (prot & VM_PROT_WRITE) == 0) {
2784 1.104 thorpej /* Yup, flush the cache if current pmap. */
2785 1.104 thorpej if (pmap_is_curpmap(pmap))
2786 1.104 thorpej cpu_dcache_wb_range(va, NBPG);
2787 1.104 thorpej }
2788 1.104 thorpej
2789 1.1 matt /* Has the wiring changed ? */
2790 1.49 thorpej if (pg != NULL) {
2791 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
2792 1.49 thorpej (void) pmap_modify_pv(pmap, va, pg,
2793 1.78 thorpej PVF_WRITE | PVF_WIRED, nflags);
2794 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
2795 1.49 thorpej }
2796 1.1 matt } else {
2797 1.49 thorpej struct vm_page *opg;
2798 1.49 thorpej
2799 1.1 matt /* We are replacing the page with a new one. */
2800 1.36 thorpej cpu_idcache_wbinv_range(va, NBPG);
2801 1.1 matt
2802 1.1 matt /*
2803 1.1 matt * If it is part of our managed memory then we
2804 1.1 matt * must remove it from the PV list
2805 1.1 matt */
2806 1.49 thorpej if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
2807 1.49 thorpej simple_lock(&opg->mdpage.pvh_slock);
2808 1.49 thorpej pve = pmap_remove_pv(opg, pmap, va);
2809 1.49 thorpej simple_unlock(&opg->mdpage.pvh_slock);
2810 1.17 chris } else {
2811 1.17 chris pve = NULL;
2812 1.1 matt }
2813 1.1 matt
2814 1.1 matt goto enter;
2815 1.1 matt }
2816 1.1 matt } else {
2817 1.1 matt opa = 0;
2818 1.17 chris pve = NULL;
2819 1.1 matt pmap_pte_addref(pmap, va);
2820 1.1 matt
2821 1.1 matt /* pte is not valid so we must be hooking in a new page */
2822 1.1 matt ++pmap->pm_stats.resident_count;
2823 1.1 matt
2824 1.1 matt enter:
2825 1.1 matt /*
2826 1.1 matt * Enter on the PV list if part of our managed memory
2827 1.1 matt */
2828 1.55 thorpej if (pg != NULL) {
2829 1.17 chris if (pve == NULL) {
2830 1.17 chris pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2831 1.17 chris if (pve == NULL) {
2832 1.17 chris if (flags & PMAP_CANFAIL) {
2833 1.113 thorpej 					PTE_FLUSH_ALT(pmap,
2834 1.113 thorpej 					    &ptes[arm_btop(va)]);
2835 1.17 chris error = ENOMEM;
2836 1.17 chris goto out;
2837 1.17 chris }
2838 1.66 thorpej panic("pmap_enter: no pv entries "
2839 1.66 thorpej "available");
2840 1.17 chris }
2841 1.17 chris }
2842 1.17 chris /* enter_pv locks pvh when adding */
2843 1.49 thorpej pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
2844 1.17 chris } else {
2845 1.17 chris if (pve != NULL)
2846 1.17 chris pmap_free_pv(pmap, pve);
2847 1.1 matt }
2848 1.1 matt }
2849 1.1 matt
2850 1.1 matt /* Construct the pte, giving the correct access. */
2851 1.79 thorpej npte = pa;
2852 1.1 matt
2853 1.1  matt 	/* The vector page is special: never give it user permissions here. */
2854 1.77 thorpej if (pmap != pmap_kernel() && va != vector_page)
2855 1.83 thorpej npte |= L2_S_PROT_U;
2856 1.1 matt
2857 1.55 thorpej if (pg != NULL) {
2858 1.1 matt #ifdef DIAGNOSTIC
2859 1.1 matt if ((flags & VM_PROT_ALL) & ~prot)
2860 1.1 matt panic("pmap_enter: access_type exceeds prot");
2861 1.1 matt #endif
2862 1.86 thorpej npte |= pte_l2_s_cache_mode;
2863 1.1 matt if (flags & VM_PROT_WRITE) {
2864 1.84 thorpej npte |= L2_S_PROTO | L2_S_PROT_W;
2865 1.78 thorpej pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
2866 1.1 matt } else if (flags & VM_PROT_ALL) {
2867 1.84 thorpej npte |= L2_S_PROTO;
2868 1.78 thorpej pg->mdpage.pvh_attrs |= PVF_REF;
2869 1.1 matt } else
2870 1.81 thorpej npte |= L2_TYPE_INV;
2871 1.1 matt } else {
2872 1.1 matt if (prot & VM_PROT_WRITE)
2873 1.84 thorpej npte |= L2_S_PROTO | L2_S_PROT_W;
2874 1.1 matt else if (prot & VM_PROT_ALL)
2875 1.84 thorpej npte |= L2_S_PROTO;
2876 1.1 matt else
2877 1.81 thorpej npte |= L2_TYPE_INV;
2878 1.1 matt }
2879 1.1 matt
2880 1.109 thorpej #if ARM_MMU_XSCALE == 1 && defined(XSCALE_CACHE_READ_WRITE_ALLOCATE)
2881 1.109 thorpej #if ARM_NMMUS > 1
2882 1.109 thorpej # error "XXX read/write-allocate cache mode cannot be used when non-XScale CPUs are configured"
2883 1.109 thorpej #endif
2884 1.109 thorpej /*
2885 1.109 thorpej * XXX BRUTAL HACK! This allows us to limp along with
2886 1.109 thorpej * XXX the read/write-allocate cache mode.
2887 1.109 thorpej */
2888 1.109 thorpej if (pmap == pmap_kernel())
2889 1.109 thorpej npte &= ~L2_XSCALE_T_TEX(TEX_XSCALE_X);
2890 1.109 thorpej #endif
2891 1.66 thorpej ptes[arm_btop(va)] = npte;
2892 1.113 thorpej PTE_SYNC_CURRENT(pmap, &ptes[arm_btop(va)]);
2893 1.1 matt
2894 1.55 thorpej if (pg != NULL) {
2895 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
2896 1.59 thorpej pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
2897 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
2898 1.11 chris }
2899 1.1 matt
2900 1.1 matt /* Better flush the TLB ... */
2901 1.1 matt cpu_tlb_flushID_SE(va);
2902 1.17 chris error = 0;
2903 1.17 chris out:
2904 1.66 thorpej pmap_unmap_ptes(pmap); /* unlocks pmap */
2905 1.17 chris PMAP_MAP_TO_HEAD_UNLOCK();
2906 1.1 matt
2907 1.17 chris return error;
2908 1.1 matt }
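
/*
 * Illustrative sketch (not from this file): a typical caller enters a
 * managed page roughly like the fragment below.  "pm", "va" and "pg"
 * are hypothetical names; the flags shown are the ones this routine
 * actually interprets (the access-type bits, PMAP_WIRED, PMAP_CANFAIL).
 *
 *	error = pmap_enter(pm, va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_CANFAIL);
 *	if (error)
 *		...back off, wait for memory and retry...
 */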
2909 1.1 matt
2910 1.48 chris /*
2911 1.48 chris * pmap_kenter_pa: enter a kernel mapping
2912 1.48 chris *
2913 1.48  chris 	 * => no need to lock anything; assume va is already allocated
2914 1.48  chris 	 * => should be faster than the normal pmap_enter() function
2915 1.48 chris */
2916 1.1 matt void
2917 1.73 thorpej pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2918 1.1 matt {
2919 1.13 chris pt_entry_t *pte;
2920 1.105 thorpej
2921 1.13 chris pte = vtopte(va);
2922 1.14 chs KASSERT(!pmap_pte_v(pte));
2923 1.83 thorpej
2924 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
2925 1.105 thorpej {
2926 1.105 thorpej struct vm_page *pg;
2927 1.105 thorpej int s;
2928 1.105 thorpej
2929 1.105 thorpej pg = PHYS_TO_VM_PAGE(pa);
2930 1.105 thorpej if (pg != NULL) {
2931 1.105 thorpej s = splhigh();
2932 1.105 thorpej if (pg->mdpage.ro_mappings == 0 &&
2933 1.105 thorpej pg->mdpage.rw_mappings == 0 &&
2934 1.105 thorpej pg->mdpage.kro_mappings == 0 &&
2935 1.105 thorpej pg->mdpage.krw_mappings == 0) {
2936 1.105 thorpej /* This case is okay. */
2937 1.105 thorpej } else if (pg->mdpage.rw_mappings == 0 &&
2938 1.105 thorpej pg->mdpage.krw_mappings == 0 &&
2939 1.105 thorpej (prot & VM_PROT_WRITE) == 0) {
2940 1.105 thorpej /* This case is okay. */
2941 1.105 thorpej } else {
2942 1.105 thorpej /* Something is awry. */
2943 1.105 thorpej printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
2944 1.105 thorpej "prot 0x%x\n", pg->mdpage.ro_mappings,
2945 1.105 thorpej pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
2946 1.105 thorpej pg->mdpage.krw_mappings, prot);
2947 1.105 thorpej Debugger();
2948 1.105 thorpej }
2949 1.105 thorpej if (prot & VM_PROT_WRITE)
2950 1.105 thorpej pg->mdpage.krw_mappings++;
2951 1.105 thorpej else
2952 1.105 thorpej pg->mdpage.kro_mappings++;
2953 1.105 thorpej splx(s);
2954 1.105 thorpej }
2955 1.105 thorpej }
2956 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
2957 1.105 thorpej
2958 1.83 thorpej *pte = L2_S_PROTO | pa |
2959 1.90 thorpej L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
2960 1.112 thorpej PTE_SYNC(pte);
2961 1.1 matt }
2962 1.1 matt
2963 1.1 matt void
2964 1.73 thorpej pmap_kremove(vaddr_t va, vsize_t len)
2965 1.1 matt {
2966 1.14 chs pt_entry_t *pte;
2967 1.112 thorpej vaddr_t ova = va;
2968 1.112 thorpej vaddr_t olen = len;
2969 1.14 chs
2970 1.1 matt for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2971 1.13 chris
2972 1.14 chs /*
2973 1.14 chs * We assume that we will only be called with small
2974 1.14 chs * regions of memory.
2975 1.14 chs */
2976 1.14 chs
2977 1.30 rearnsha KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
2978 1.13 chris pte = vtopte(va);
2979 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
2980 1.105 thorpej {
2981 1.105 thorpej struct vm_page *pg;
2982 1.105 thorpej int s;
2983 1.105 thorpej
2984 1.105 thorpej if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
2985 1.105 thorpej (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
2986 1.105 thorpej s = splhigh();
2987 1.105 thorpej if (*pte & L2_S_PROT_W) {
2988 1.105 thorpej KASSERT(pg->mdpage.krw_mappings != 0);
2989 1.105 thorpej pg->mdpage.krw_mappings--;
2990 1.105 thorpej } else {
2991 1.105 thorpej KASSERT(pg->mdpage.kro_mappings != 0);
2992 1.105 thorpej pg->mdpage.kro_mappings--;
2993 1.105 thorpej }
2994 1.105 thorpej splx(s);
2995 1.105 thorpej }
2996 1.105 thorpej }
2997 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
2998 1.36 thorpej cpu_idcache_wbinv_range(va, PAGE_SIZE);
2999 1.13 chris *pte = 0;
3000 1.13 chris cpu_tlb_flushID_SE(va);
3001 1.1 matt }
3002 1.112 thorpej PTE_SYNC_RANGE(vtopte(ova), olen >> PAGE_SHIFT);
3003 1.1 matt }
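
/*
 * Usage sketch (assumed pattern, not taken from this file): unmanaged
 * kernel mappings made with pmap_kenter_pa() are later torn down with
 * pmap_kremove() over the same range, e.g.
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
 *	...use the mapping...
 *	pmap_kremove(va, PAGE_SIZE);
 */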
3004 1.1 matt
3005 1.1 matt /*
3006 1.1 matt * pmap_page_protect:
3007 1.1 matt *
3008 1.1 matt * Lower the permission for all mappings to a given page.
3009 1.1 matt */
3010 1.1 matt
3011 1.1 matt void
3012 1.73 thorpej pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
3013 1.1 matt {
3014 1.1 matt
3015 1.49 thorpej PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
3016 1.49 thorpej VM_PAGE_TO_PHYS(pg), prot));
3017 1.1 matt
3018 1.1 matt switch(prot) {
3019 1.17 chris case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
3020 1.17 chris case VM_PROT_READ|VM_PROT_WRITE:
3021 1.17 chris return;
3022 1.17 chris
3023 1.1 matt case VM_PROT_READ:
3024 1.1 matt case VM_PROT_READ|VM_PROT_EXECUTE:
3025 1.78 thorpej pmap_clearbit(pg, PVF_WRITE);
3026 1.1 matt break;
3027 1.1 matt
3028 1.1 matt default:
3029 1.49 thorpej pmap_remove_all(pg);
3030 1.1 matt break;
3031 1.1 matt }
3032 1.1 matt }
3033 1.1 matt
3034 1.1 matt
3035 1.1 matt /*
3036 1.1 matt * Routine: pmap_unwire
3037 1.1 matt * Function: Clear the wired attribute for a map/virtual-address
3038 1.1 matt * pair.
3039 1.1 matt * In/out conditions:
3040 1.1 matt * The mapping must already exist in the pmap.
3041 1.1 matt */
3042 1.1 matt
3043 1.1 matt void
3044 1.73 thorpej pmap_unwire(struct pmap *pmap, vaddr_t va)
3045 1.1 matt {
3046 1.60 thorpej pt_entry_t *ptes;
3047 1.60 thorpej struct vm_page *pg;
3048 1.2 matt paddr_t pa;
3049 1.1 matt
3050 1.60 thorpej PMAP_MAP_TO_HEAD_LOCK();
3051 1.60 thorpej ptes = pmap_map_ptes(pmap); /* locks pmap */
3052 1.1 matt
3053 1.60 thorpej if (pmap_pde_v(pmap_pde(pmap, va))) {
3054 1.60 thorpej #ifdef DIAGNOSTIC
3055 1.60 thorpej if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3056 1.60 thorpej panic("pmap_unwire: invalid L2 PTE");
3057 1.60 thorpej #endif
3058 1.60 thorpej /* Extract the physical address of the page */
3059 1.60 thorpej pa = l2pte_pa(ptes[arm_btop(va)]);
3060 1.113 thorpej PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
3061 1.1 matt
3062 1.60 thorpej if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3063 1.60 thorpej goto out;
3064 1.1 matt
3065 1.60 thorpej /* Update the wired bit in the pv entry for this page. */
3066 1.60 thorpej simple_lock(&pg->mdpage.pvh_slock);
3067 1.78 thorpej (void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
3068 1.60 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3069 1.60 thorpej }
3070 1.60 thorpej #ifdef DIAGNOSTIC
3071 1.60 thorpej else {
3072 1.60 thorpej panic("pmap_unwire: invalid L1 PTE");
3073 1.60 thorpej }
3074 1.60 thorpej #endif
3075 1.60 thorpej out:
3076 1.60 thorpej pmap_unmap_ptes(pmap); /* unlocks pmap */
3077 1.60 thorpej PMAP_MAP_TO_HEAD_UNLOCK();
3078 1.1 matt }
3079 1.1 matt
3080 1.1 matt /*
3081 1.1 matt * Routine: pmap_extract
3082 1.1 matt * Function:
3083 1.1 matt * Extract the physical page address associated
3084 1.1 matt * with the given map/virtual_address pair.
3085 1.1 matt */
3086 1.1 matt boolean_t
3087 1.73 thorpej pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
3088 1.1 matt {
3089 1.34 thorpej pd_entry_t *pde;
3090 1.11 chris pt_entry_t *pte, *ptes;
3091 1.1 matt paddr_t pa;
3092 1.1 matt
3093 1.82 thorpej PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
3094 1.82 thorpej
3095 1.82 thorpej ptes = pmap_map_ptes(pmap); /* locks pmap */
3096 1.1 matt
3097 1.34 thorpej pde = pmap_pde(pmap, va);
3098 1.56 thorpej pte = &ptes[arm_btop(va)];
3099 1.1 matt
3100 1.82 thorpej if (pmap_pde_section(pde)) {
3101 1.82 thorpej pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
3102 1.82 thorpej PDEBUG(5, printf("section pa=0x%08lx\n", pa));
3103 1.82 thorpej goto out;
3104 1.82 thorpej } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
3105 1.82 thorpej PDEBUG(5, printf("no mapping\n"));
3106 1.82 thorpej goto failed;
3107 1.82 thorpej }
3108 1.75 reinoud
3109 1.82 thorpej if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
3110 1.82 thorpej pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3111 1.82 thorpej PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
3112 1.82 thorpej goto out;
3113 1.82 thorpej }
3114 1.1 matt
3115 1.82 thorpej pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3116 1.82 thorpej PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
3117 1.1 matt
3118 1.82 thorpej out:
3119 1.82 thorpej if (pap != NULL)
3120 1.82 thorpej *pap = pa;
3121 1.1 matt
3122 1.113 thorpej PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
3123 1.82 thorpej pmap_unmap_ptes(pmap); /* unlocks pmap */
3124 1.82 thorpej return (TRUE);
3125 1.34 thorpej
3126 1.82 thorpej failed:
3127 1.113 thorpej PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
3128 1.82 thorpej pmap_unmap_ptes(pmap); /* unlocks pmap */
3129 1.82 thorpej return (FALSE);
3130 1.1 matt }
3131 1.1 matt
3132 1.1 matt
3133 1.1 matt /*
3134 1.73 thorpej * pmap_copy:
3135 1.1 matt *
3136 1.73 thorpej * Copy the range specified by src_addr/len from the source map to the
3137 1.73 thorpej * range dst_addr/len in the destination map.
3138 1.73 thorpej *
3139 1.73 thorpej * This routine is only advisory and need not do anything.
3140 1.1 matt */
3141 1.73 thorpej /* Call deleted in <arm/arm32/pmap.h> */
3142 1.1 matt
3143 1.1 matt #if defined(PMAP_DEBUG)
3144 1.1 matt void
3145 1.1  matt pmap_dump_pvlist(vaddr_t phys, char *m)
3148 1.1 matt {
3149 1.49 thorpej struct vm_page *pg;
3150 1.1 matt struct pv_entry *pv;
3151 1.1 matt
3152 1.49 thorpej if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
3153 1.1 matt printf("INVALID PA\n");
3154 1.1 matt return;
3155 1.1 matt }
3156 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
3157 1.1 matt printf("%s %08lx:", m, phys);
3158 1.49 thorpej if (pg->mdpage.pvh_list == NULL) {
3159 1.97 chris simple_unlock(&pg->mdpage.pvh_slock);
3160 1.1 matt printf(" no mappings\n");
3161 1.1 matt return;
3162 1.1 matt }
3163 1.1 matt
3164 1.49 thorpej for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
3165 1.1 matt printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3166 1.1 matt pv->pv_va, pv->pv_flags);
3167 1.1 matt
3168 1.1 matt printf("\n");
3169 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3170 1.1 matt }
3171 1.1 matt
3172 1.1 matt #endif /* PMAP_DEBUG */
3173 1.1 matt
3174 1.11 chris static pt_entry_t *
3175 1.11 chris pmap_map_ptes(struct pmap *pmap)
3176 1.11 chris {
3177 1.72 thorpej struct proc *p;
3178 1.17 chris
3179 1.17 chris /* the kernel's pmap is always accessible */
3180 1.17 chris if (pmap == pmap_kernel()) {
3181 1.72 thorpej return (pt_entry_t *)PTE_BASE;
3182 1.17 chris }
3183 1.17 chris
3184 1.17 chris if (pmap_is_curpmap(pmap)) {
3185 1.17 chris simple_lock(&pmap->pm_obj.vmobjlock);
3186 1.53 thorpej return (pt_entry_t *)PTE_BASE;
3187 1.17 chris }
3188 1.72 thorpej
3189 1.17 chris p = curproc;
3190 1.72 thorpej KDASSERT(p != NULL);
3191 1.17 chris
3192 1.17 chris /* need to lock both curpmap and pmap: use ordered locking */
3193 1.72 thorpej if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
3194 1.17 chris simple_lock(&pmap->pm_obj.vmobjlock);
3195 1.72 thorpej simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3196 1.17 chris } else {
3197 1.72 thorpej simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3198 1.17 chris simple_lock(&pmap->pm_obj.vmobjlock);
3199 1.17 chris }
3200 1.11 chris
3201 1.113 thorpej pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE,
3202 1.113 thorpej pmap->pm_pptpt, 0);
3203 1.17 chris cpu_tlb_flushD();
3204 1.32 thorpej cpu_cpwait();
3205 1.53 thorpej return (pt_entry_t *)APTE_BASE;
3206 1.17 chris }
3207 1.17 chris
3208 1.17 chris /*
3209 1.17 chris * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3210 1.17 chris */
3211 1.17 chris
3212 1.17 chris static void
3213 1.73 thorpej pmap_unmap_ptes(struct pmap *pmap)
3214 1.17 chris {
3215 1.72 thorpej
3216 1.17 chris if (pmap == pmap_kernel()) {
3217 1.17 chris return;
3218 1.17 chris }
3219 1.17 chris if (pmap_is_curpmap(pmap)) {
3220 1.17 chris simple_unlock(&pmap->pm_obj.vmobjlock);
3221 1.17 chris } else {
3222 1.72 thorpej KDASSERT(curproc != NULL);
3223 1.17 chris simple_unlock(&pmap->pm_obj.vmobjlock);
3224 1.72 thorpej simple_unlock(
3225 1.72 thorpej &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3226 1.17 chris }
3227 1.11 chris }
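
/*
 * Sketch of the map/unmap pairing used throughout this file: callers
 * bracket PTE access with these two routines so that the pmap stays
 * locked (and, for a non-current pmap, temporarily mapped at APTE_BASE)
 * for the duration.  "pm" and "va" are placeholder names.
 *
 *	ptes = pmap_map_ptes(pm);		(locks pm)
 *	pte = ptes[arm_btop(va)];
 *	...examine or modify the PTE...
 *	pmap_unmap_ptes(pm);			(unlocks pm)
 */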
3228 1.1 matt
3229 1.1 matt /*
3230 1.1 matt * Modify pte bits for all ptes corresponding to the given physical address.
3231 1.1 matt * We use `maskbits' rather than `clearbits' because we're always passing
3232 1.1 matt * constants and the latter would require an extra inversion at run-time.
3233 1.1 matt */
3234 1.1 matt
3235 1.22 chris static void
3236 1.73 thorpej pmap_clearbit(struct vm_page *pg, u_int maskbits)
3237 1.1 matt {
3238 1.1 matt struct pv_entry *pv;
3239 1.104 thorpej pt_entry_t *ptes, npte, opte;
3240 1.1 matt vaddr_t va;
3241 1.1 matt
3242 1.1 matt PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
3243 1.49 thorpej VM_PAGE_TO_PHYS(pg), maskbits));
3244 1.21 chris
3245 1.17 chris PMAP_HEAD_TO_MAP_LOCK();
3246 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
3247 1.17 chris
3248 1.1 matt /*
3249 1.1 matt * Clear saved attributes (modify, reference)
3250 1.1 matt */
3251 1.49 thorpej pg->mdpage.pvh_attrs &= ~maskbits;
3252 1.1 matt
3253 1.49 thorpej if (pg->mdpage.pvh_list == NULL) {
3254 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3255 1.17 chris PMAP_HEAD_TO_MAP_UNLOCK();
3256 1.1 matt return;
3257 1.1 matt }
3258 1.1 matt
3259 1.1 matt /*
3260 1.1  matt 	 * Loop over all current mappings, setting/clearing as appropriate
3261 1.1 matt */
3262 1.49 thorpej for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
3263 1.105 thorpej #ifdef PMAP_ALIAS_DEBUG
3264 1.105 thorpej {
3265 1.105 thorpej int s = splhigh();
3266 1.105 thorpej if ((maskbits & PVF_WRITE) != 0 &&
3267 1.105 thorpej (pv->pv_flags & PVF_WRITE) != 0) {
3268 1.105 thorpej KASSERT(pg->mdpage.rw_mappings != 0);
3269 1.105 thorpej pg->mdpage.rw_mappings--;
3270 1.105 thorpej pg->mdpage.ro_mappings++;
3271 1.105 thorpej }
3272 1.105 thorpej splx(s);
3273 1.105 thorpej }
3274 1.105 thorpej #endif /* PMAP_ALIAS_DEBUG */
3275 1.1 matt va = pv->pv_va;
3276 1.1 matt pv->pv_flags &= ~maskbits;
3277 1.59 thorpej ptes = pmap_map_ptes(pv->pv_pmap); /* locks pmap */
3278 1.59 thorpej KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
3279 1.104 thorpej npte = opte = ptes[arm_btop(va)];
3280 1.78 thorpej if (maskbits & (PVF_WRITE|PVF_MOD)) {
3281 1.78 thorpej if ((pv->pv_flags & PVF_NC)) {
3282 1.29 rearnsha /*
3283 1.29 rearnsha * Entry is not cacheable: reenable
3284 1.29 rearnsha * the cache, nothing to flush
3285 1.29 rearnsha *
3286 1.29 rearnsha * Don't turn caching on again if this
3287 1.29 rearnsha * is a modified emulation. This
3288 1.29  rearnsha 				 * would be inconsistent with the
3289 1.29 rearnsha * settings created by
3290 1.29 rearnsha * pmap_vac_me_harder().
3291 1.29 rearnsha *
3292 1.29 rearnsha * There's no need to call
3293 1.29 rearnsha * pmap_vac_me_harder() here: all
3294 1.29  rearnsha 				 * pages are losing their write
3295 1.29 rearnsha * permission.
3296 1.29 rearnsha *
3297 1.29 rearnsha */
3298 1.78 thorpej if (maskbits & PVF_WRITE) {
3299 1.104 thorpej npte |= pte_l2_s_cache_mode;
3300 1.78 thorpej pv->pv_flags &= ~PVF_NC;
3301 1.29 rearnsha }
3302 1.59 thorpej } else if (pmap_is_curpmap(pv->pv_pmap)) {
3303 1.29 rearnsha /*
3304 1.29  rearnsha 				 * Entry is cacheable: if this is the
3305 1.29  rearnsha 				 * current pmap, flush the range now;
3306 1.29  rearnsha 				 * otherwise it won't be in the cache.
3307 1.29 rearnsha */
3308 1.36 thorpej cpu_idcache_wbinv_range(pv->pv_va, NBPG);
3309 1.59 thorpej }
3310 1.29 rearnsha
3311 1.29 rearnsha /* make the pte read only */
3312 1.104 thorpej npte &= ~L2_S_PROT_W;
3313 1.29 rearnsha }
3314 1.29 rearnsha
3315 1.104 thorpej if (maskbits & PVF_REF) {
3316 1.104 thorpej if (pmap_is_curpmap(pv->pv_pmap) &&
3317 1.104 thorpej (pv->pv_flags & PVF_NC) == 0) {
3318 1.104 thorpej /*
3319 1.104 thorpej * Check npte here; we may have already
3320 1.104 thorpej * done the wbinv above, and the validity
3321 1.104 thorpej * of the PTE is the same for opte and
3322 1.104 thorpej * npte.
3323 1.104 thorpej */
3324 1.104 thorpej if (npte & L2_S_PROT_W) {
3325 1.104 thorpej cpu_idcache_wbinv_range(pv->pv_va,
3326 1.104 thorpej NBPG);
3327 1.104 thorpej } else if ((npte & L2_TYPE_MASK)
3328 1.104 thorpej != L2_TYPE_INV) {
3329 1.104 thorpej /* XXXJRT need idcache_inv_range */
3330 1.104 thorpej cpu_idcache_wbinv_range(pv->pv_va,
3331 1.104 thorpej NBPG);
3332 1.104 thorpej }
3333 1.104 thorpej }
3334 1.104 thorpej
3335 1.104 thorpej /* make the pte invalid */
3336 1.104 thorpej npte = (npte & ~L2_TYPE_MASK) | L2_TYPE_INV;
3337 1.104 thorpej }
3338 1.21 chris
3339 1.104 thorpej if (npte != opte) {
3340 1.104 thorpej ptes[arm_btop(va)] = npte;
3341 1.113 thorpej PTE_SYNC_CURRENT(pv->pv_pmap, &ptes[arm_btop(va)]);
3342 1.104 thorpej /* Flush the TLB entry if a current pmap. */
3343 1.104 thorpej if (pmap_is_curpmap(pv->pv_pmap))
3344 1.104 thorpej cpu_tlb_flushID_SE(pv->pv_va);
3345 1.113 thorpej } else
3346 1.113 thorpej PTE_FLUSH_ALT(pv->pv_pmap, &ptes[arm_btop(va)]);
3347 1.104 thorpej
3348 1.59 thorpej pmap_unmap_ptes(pv->pv_pmap); /* unlocks pmap */
3349 1.29 rearnsha }
3350 1.32 thorpej cpu_cpwait();
3351 1.21 chris
3352 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3353 1.17 chris PMAP_HEAD_TO_MAP_UNLOCK();
3354 1.1 matt }
3355 1.1 matt
3356 1.50 thorpej /*
3357 1.50 thorpej * pmap_clear_modify:
3358 1.50 thorpej *
3359 1.50 thorpej * Clear the "modified" attribute for a page.
3360 1.50 thorpej */
3361 1.1 matt boolean_t
3362 1.73 thorpej pmap_clear_modify(struct vm_page *pg)
3363 1.1 matt {
3364 1.1 matt boolean_t rv;
3365 1.1 matt
3366 1.78 thorpej if (pg->mdpage.pvh_attrs & PVF_MOD) {
3367 1.50 thorpej rv = TRUE;
3368 1.78 thorpej pmap_clearbit(pg, PVF_MOD);
3369 1.50 thorpej } else
3370 1.50 thorpej rv = FALSE;
3371 1.50 thorpej
3372 1.50 thorpej PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3373 1.50 thorpej VM_PAGE_TO_PHYS(pg), rv));
3374 1.50 thorpej
3375 1.50 thorpej return (rv);
3376 1.1 matt }
3377 1.1 matt
3378 1.50 thorpej /*
3379 1.50 thorpej * pmap_clear_reference:
3380 1.50 thorpej *
3381 1.50 thorpej * Clear the "referenced" attribute for a page.
3382 1.50 thorpej */
3383 1.1 matt boolean_t
3384 1.73 thorpej pmap_clear_reference(struct vm_page *pg)
3385 1.1 matt {
3386 1.1 matt boolean_t rv;
3387 1.1 matt
3388 1.78 thorpej if (pg->mdpage.pvh_attrs & PVF_REF) {
3389 1.50 thorpej rv = TRUE;
3390 1.78 thorpej pmap_clearbit(pg, PVF_REF);
3391 1.50 thorpej } else
3392 1.50 thorpej rv = FALSE;
3393 1.50 thorpej
3394 1.50 thorpej PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
3395 1.50 thorpej VM_PAGE_TO_PHYS(pg), rv));
3396 1.50 thorpej
3397 1.50 thorpej return (rv);
3398 1.1 matt }
3399 1.1 matt
3400 1.50 thorpej /*
3401 1.50 thorpej * pmap_is_modified:
3402 1.50 thorpej *
3403 1.50 thorpej * Test if a page has the "modified" attribute.
3404 1.50 thorpej */
3405 1.50 thorpej /* See <arm/arm32/pmap.h> */
3406 1.39 thorpej
3407 1.50 thorpej /*
3408 1.50 thorpej * pmap_is_referenced:
3409 1.50 thorpej *
3410 1.50 thorpej * Test if a page has the "referenced" attribute.
3411 1.50 thorpej */
3412 1.50 thorpej /* See <arm/arm32/pmap.h> */
3413 1.1 matt
3414 1.1 matt int
3415 1.73 thorpej pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
3416 1.1 matt {
3417 1.61 thorpej pt_entry_t *ptes;
3418 1.61 thorpej struct vm_page *pg;
3419 1.2 matt paddr_t pa;
3420 1.1 matt u_int flags;
3421 1.61 thorpej int rv = 0;
3422 1.1 matt
3423 1.1 matt PDEBUG(2, printf("pmap_modified_emulation\n"));
3424 1.1 matt
3425 1.61 thorpej PMAP_MAP_TO_HEAD_LOCK();
3426 1.62 thorpej ptes = pmap_map_ptes(pmap); /* locks pmap */
3427 1.61 thorpej
3428 1.61 thorpej if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3429 1.61 thorpej PDEBUG(2, printf("L1 PTE invalid\n"));
3430 1.61 thorpej goto out;
3431 1.1 matt }
3432 1.1 matt
3433 1.61 thorpej PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3434 1.1 matt
3435 1.113 thorpej /*
3436 1.113 thorpej * Don't need to PTE_FLUSH_ALT() here; this is always done
3437 1.113 thorpej * with the current pmap.
3438 1.113 thorpej */
3439 1.113 thorpej
3440 1.61  thorpej 	/* Check for an invalid pte */
3441 1.61 thorpej if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3442 1.61 thorpej goto out;
3443 1.1 matt
3444 1.1 matt /* This can happen if user code tries to access kernel memory. */
3445 1.83 thorpej if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
3446 1.61 thorpej goto out;
3447 1.1 matt
3448 1.1 matt /* Extract the physical address of the page */
3449 1.61 thorpej pa = l2pte_pa(ptes[arm_btop(va)]);
3450 1.49 thorpej if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3451 1.61 thorpej goto out;
3452 1.1 matt
3453 1.49 thorpej /* Get the current flags for this page. */
3454 1.49 thorpej simple_lock(&pg->mdpage.pvh_slock);
3455 1.17 chris
3456 1.49 thorpej flags = pmap_modify_pv(pmap, va, pg, 0, 0);
3457 1.1 matt PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3458 1.1 matt
3459 1.1 matt /*
3460 1.1  matt 	 * Do the flags say this page is writable? If not then it is a
3461 1.1  matt 	 * genuine write fault. If yes then the write fault is our doing:
3462 1.1  matt 	 * we had not yet reflected the write permission in the PTE. Now
3463 1.1  matt 	 * that we know a write has occurred we can correct this and also
3464 1.1  matt 	 * set the modified bit.
3465 1.1 matt */
3466 1.78 thorpej if (~flags & PVF_WRITE) {
3467 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3468 1.61 thorpej goto out;
3469 1.17 chris }
3470 1.1 matt
3471 1.61 thorpej PDEBUG(0,
3472 1.61 thorpej printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
3473 1.61 thorpej va, ptes[arm_btop(va)]));
3474 1.78 thorpej pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
3475 1.29 rearnsha
3476 1.29 rearnsha /*
3477 1.29 rearnsha * Re-enable write permissions for the page. No need to call
3478 1.29 rearnsha * pmap_vac_me_harder(), since this is just a
3479 1.78 thorpej * modified-emulation fault, and the PVF_WRITE bit isn't changing.
3480 1.78 thorpej * We've already set the cacheable bits based on the assumption
3481 1.78 thorpej * that we can write to this page.
3482 1.29 rearnsha */
3483 1.61 thorpej ptes[arm_btop(va)] =
3484 1.84 thorpej (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
3485 1.113 thorpej PTE_SYNC(&ptes[arm_btop(va)]);
3486 1.61 thorpej PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3487 1.1 matt
3488 1.49 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3489 1.61 thorpej
3490 1.1 matt cpu_tlb_flushID_SE(va);
3491 1.32 thorpej cpu_cpwait();
3492 1.61 thorpej rv = 1;
3493 1.61 thorpej out:
3494 1.61 thorpej pmap_unmap_ptes(pmap); /* unlocks pmap */
3495 1.61 thorpej PMAP_MAP_TO_HEAD_UNLOCK();
3496 1.61 thorpej return (rv);
3497 1.1 matt }
3498 1.1 matt
3499 1.1 matt int
3500 1.73 thorpej pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
3501 1.1 matt {
3502 1.62 thorpej pt_entry_t *ptes;
3503 1.62 thorpej struct vm_page *pg;
3504 1.2 matt paddr_t pa;
3505 1.62 thorpej int rv = 0;
3506 1.1 matt
3507 1.1 matt PDEBUG(2, printf("pmap_handled_emulation\n"));
3508 1.1 matt
3509 1.63 thorpej PMAP_MAP_TO_HEAD_LOCK();
3510 1.62 thorpej ptes = pmap_map_ptes(pmap); /* locks pmap */
3511 1.62 thorpej
3512 1.62 thorpej if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3513 1.62 thorpej PDEBUG(2, printf("L1 PTE invalid\n"));
3514 1.62 thorpej goto out;
3515 1.1 matt }
3516 1.1 matt
3517 1.62 thorpej PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3518 1.1 matt
3519 1.113 thorpej /*
3520 1.113 thorpej * Don't need to PTE_FLUSH_ALT() here; this is always done
3521 1.113 thorpej * with the current pmap.
3522 1.113 thorpej */
3523 1.113 thorpej
3524 1.62 thorpej /* Check for invalid pte */
3525 1.62 thorpej if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3526 1.62 thorpej goto out;
3527 1.1 matt
3528 1.1 matt /* This can happen if user code tries to access kernel memory. */
3529 1.81 thorpej if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
3530 1.62 thorpej goto out;
3531 1.1 matt
3532 1.1 matt /* Extract the physical address of the page */
3533 1.62 thorpej pa = l2pte_pa(ptes[arm_btop(va)]);
3534 1.49 thorpej if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3535 1.62 thorpej goto out;
3536 1.1 matt
3537 1.63 thorpej simple_lock(&pg->mdpage.pvh_slock);
3538 1.63 thorpej
3539 1.1 matt /*
3540 1.1  matt 	 * OK, we just enable the pte and mark the attributes as handled.
3541 1.63 thorpej * XXX Should we traverse the PV list and enable all PTEs?
3542 1.1 matt */
3543 1.62 thorpej PDEBUG(0,
3544 1.62 thorpej printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
3545 1.62 thorpej va, ptes[arm_btop(va)]));
3546 1.78 thorpej pg->mdpage.pvh_attrs |= PVF_REF;
3547 1.1 matt
3548 1.84 thorpej ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
3549 1.113 thorpej PTE_SYNC(&ptes[arm_btop(va)]);
3550 1.62 thorpej PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3551 1.62 thorpej
3552 1.63 thorpej simple_unlock(&pg->mdpage.pvh_slock);
3553 1.63 thorpej
3554 1.1 matt cpu_tlb_flushID_SE(va);
3555 1.32 thorpej cpu_cpwait();
3556 1.62 thorpej rv = 1;
3557 1.62 thorpej out:
3558 1.62 thorpej pmap_unmap_ptes(pmap); /* unlocks pmap */
3559 1.63 thorpej PMAP_MAP_TO_HEAD_UNLOCK();
3560 1.62 thorpej return (rv);
3561 1.1 matt }
3562 1.17 chris
3563 1.1 matt /*
3564 1.1 matt * pmap_collect: free resources held by a pmap
3565 1.1 matt *
3566 1.1 matt * => optional function.
3567 1.1 matt * => called when a process is swapped out to free memory.
3568 1.1 matt */
3569 1.1 matt
3570 1.1 matt void
3571 1.73 thorpej pmap_collect(struct pmap *pmap)
3572 1.1 matt {
3573 1.1 matt }
3574 1.1 matt
3575 1.1 matt /*
3576 1.1 matt * Routine: pmap_procwr
3577 1.1 matt *
3578 1.1 matt * Function:
3579 1.1  matt 	 *	Synchronize caches corresponding to [va, va+len) in p.
3580 1.1 matt *
3581 1.1 matt */
3582 1.1 matt void
3583 1.73 thorpej pmap_procwr(struct proc *p, vaddr_t va, int len)
3584 1.1 matt {
3585 1.1 matt /* We only need to do anything if it is the current process. */
3586 1.1 matt if (p == curproc)
3587 1.36 thorpej cpu_icache_sync_range(va, len);
3588 1.17 chris }
3589 1.17 chris /*
3590 1.17 chris * PTP functions
3591 1.17 chris */
3592 1.17 chris
3593 1.17 chris /*
3594 1.17 chris * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3595 1.17 chris *
3596 1.17 chris * => pmap should NOT be pmap_kernel()
3597 1.17 chris * => pmap should be locked
3598 1.17 chris */
3599 1.17 chris
3600 1.17 chris static struct vm_page *
3601 1.57 thorpej pmap_get_ptp(struct pmap *pmap, vaddr_t va)
3602 1.17 chris {
3603 1.57 thorpej struct vm_page *ptp;
3604 1.17 chris
3605 1.114 thorpej KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
3606 1.114 thorpej
3607 1.57 thorpej if (pmap_pde_page(pmap_pde(pmap, va))) {
3608 1.17 chris
3609 1.57 thorpej /* valid... check hint (saves us a PA->PG lookup) */
3610 1.57 thorpej if (pmap->pm_ptphint &&
3611 1.81 thorpej (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
3612 1.57 thorpej VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3613 1.57 thorpej return (pmap->pm_ptphint);
3614 1.57 thorpej ptp = uvm_pagelookup(&pmap->pm_obj, va);
3615 1.17 chris #ifdef DIAGNOSTIC
3616 1.57 thorpej if (ptp == NULL)
3617 1.57 thorpej panic("pmap_get_ptp: unmanaged user PTP");
3618 1.17 chris #endif
3619 1.70 thorpej pmap->pm_ptphint = ptp;
3620 1.57 thorpej return(ptp);
3621 1.57 thorpej }
3622 1.17 chris
3623 1.57 thorpej /* allocate a new PTP (updates ptphint) */
3624 1.114 thorpej return (pmap_alloc_ptp(pmap, va));
3625 1.17 chris }
3626 1.17 chris
3627 1.17 chris /*
3628 1.17 chris * pmap_alloc_ptp: allocate a PTP for a PMAP
3629 1.17 chris *
3630 1.17 chris * => pmap should already be locked by caller
3631 1.17 chris * => we use the ptp's wire_count to count the number of active mappings
3632 1.17 chris * in the PTP (we start it at one to prevent any chance this PTP
3633 1.17 chris * will ever leak onto the active/inactive queues)
3634 1.17 chris */
3635 1.17 chris
3636 1.17 chris /*__inline */ static struct vm_page *
3637 1.57 thorpej pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
3638 1.17 chris {
3639 1.17 chris struct vm_page *ptp;
3640 1.114 thorpej
3641 1.114 thorpej KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
3642 1.17 chris
3643 1.17 chris ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3644 1.17 chris UVM_PGA_USERESERVE|UVM_PGA_ZERO);
3645 1.57 thorpej if (ptp == NULL)
3646 1.17 chris return (NULL);
3647 1.17 chris
3648 1.17 chris /* got one! */
3649 1.17 chris ptp->flags &= ~PG_BUSY; /* never busy */
3650 1.17 chris ptp->wire_count = 1; /* no mappings yet */
3651 1.113 thorpej pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp),
3652 1.113 thorpej PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
3653 1.17 chris pmap->pm_stats.resident_count++; /* count PTP as resident */
3654 1.70 thorpej pmap->pm_ptphint = ptp;
3655 1.17 chris return (ptp);
3656 1.1 matt }
3657 1.48 chris
3658 1.48 chris vaddr_t
3659 1.73 thorpej pmap_growkernel(vaddr_t maxkvaddr)
3660 1.48 chris {
3661 1.48 chris struct pmap *kpm = pmap_kernel(), *pm;
3662 1.48 chris int s;
3663 1.48 chris paddr_t ptaddr;
3664 1.48 chris struct vm_page *ptp;
3665 1.48 chris
3666 1.48 chris if (maxkvaddr <= pmap_curmaxkvaddr)
3667 1.48 chris goto out; /* we are OK */
3668 1.48 chris NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
3669 1.48 chris pmap_curmaxkvaddr, maxkvaddr));
3670 1.48 chris
3671 1.48 chris /*
3672 1.48 chris * whoops! we need to add kernel PTPs
3673 1.48 chris */
3674 1.48 chris
3675 1.48 chris s = splhigh(); /* to be safe */
3676 1.48 chris simple_lock(&kpm->pm_obj.vmobjlock);
3677 1.48 chris /* due to the way the arm pmap works we map 4MB at a time */
3678 1.70 thorpej for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
3679 1.81 thorpej pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
3680 1.48 chris
3681 1.48 chris if (uvm.page_init_done == FALSE) {
3682 1.48 chris
3683 1.48 chris /*
3684 1.48 chris * we're growing the kernel pmap early (from
3685 1.48 chris * uvm_pageboot_alloc()). this case must be
3686 1.48 chris * handled a little differently.
3687 1.48 chris */
3688 1.48 chris
3689 1.48 chris if (uvm_page_physget(&ptaddr) == FALSE)
3690 1.48 chris panic("pmap_growkernel: out of memory");
3691 1.48 chris pmap_zero_page(ptaddr);
3692 1.48 chris
3693 1.48 chris /* map this page in */
3694 1.113 thorpej pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr,
3695 1.113 thorpej PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
3696 1.48 chris
3697 1.48 chris /* count PTP as resident */
3698 1.48 chris kpm->pm_stats.resident_count++;
3699 1.48 chris continue;
3700 1.48 chris }
3701 1.48 chris
3702 1.48 chris /*
3703 1.48 chris * THIS *MUST* BE CODED SO AS TO WORK IN THE
3704 1.48 chris * pmap_initialized == FALSE CASE! WE MAY BE
3705 1.48 chris * INVOKED WHILE pmap_init() IS RUNNING!
3706 1.48 chris */
3707 1.48 chris
3708 1.70 thorpej if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
3709 1.48 chris panic("pmap_growkernel: alloc ptp failed");
3710 1.48 chris
3711 1.48 chris /* distribute new kernel PTP to all active pmaps */
3712 1.48 chris simple_lock(&pmaps_lock);
3713 1.48 chris LIST_FOREACH(pm, &pmaps, pm_list) {
3714 1.70 thorpej pmap_map_in_l1(pm, pmap_curmaxkvaddr,
3715 1.113 thorpej VM_PAGE_TO_PHYS(ptp),
3716 1.113 thorpej PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
3717 1.48 chris }
3718 1.111 thorpej
3719 1.111 thorpej /* Invalidate the PTPT cache. */
3720 1.111 thorpej pool_cache_invalidate(&pmap_ptpt_cache);
3721 1.111 thorpej pmap_ptpt_cache_generation++;
3722 1.48 chris
3723 1.48 chris simple_unlock(&pmaps_lock);
3724 1.48 chris }
3725 1.48 chris
3726 1.48 chris /*
3727 1.48  chris 	 * flush out the cache; this is expensive, but pmap_growkernel()
3728 1.48  chris 	 * happens rarely
3729 1.48 chris */
3730 1.48 chris cpu_tlb_flushD();
3731 1.48 chris cpu_cpwait();
3732 1.48 chris
3733 1.48 chris simple_unlock(&kpm->pm_obj.vmobjlock);
3734 1.48 chris splx(s);
3735 1.48 chris
3736 1.48 chris out:
3737 1.48 chris return (pmap_curmaxkvaddr);
3738 1.48 chris }
3739 1.48 chris
3740 1.76 thorpej /************************ Utility routines ****************************/
3741 1.76 thorpej
3742 1.76 thorpej /*
3743 1.76 thorpej * vector_page_setprot:
3744 1.76 thorpej *
3745 1.76 thorpej * Manipulate the protection of the vector page.
3746 1.76 thorpej */
3747 1.76 thorpej void
3748 1.76 thorpej vector_page_setprot(int prot)
3749 1.76 thorpej {
3750 1.76 thorpej pt_entry_t *pte;
3751 1.76 thorpej
3752 1.76 thorpej pte = vtopte(vector_page);
3753 1.48 chris
3754 1.83  thorpej 	*pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
3755 1.112 thorpej PTE_SYNC(pte);
3756 1.76 thorpej cpu_tlb_flushD_SE(vector_page);
3757 1.76 thorpej cpu_cpwait();
3758 1.76 thorpej }
3759 1.1 matt
3760 1.40 thorpej /************************ Bootstrapping routines ****************************/
3761 1.40 thorpej
3762 1.40 thorpej /*
3763 1.46 thorpej * This list exists for the benefit of pmap_map_chunk(). It keeps track
3764 1.46 thorpej * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3765 1.46 thorpej * find them as necessary.
3766 1.46 thorpej *
3767 1.46 thorpej * Note that the data on this list is not valid after initarm() returns.
3768 1.46 thorpej */
3769 1.46 thorpej SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3770 1.46 thorpej
3771 1.46 thorpej static vaddr_t
3772 1.46 thorpej kernel_pt_lookup(paddr_t pa)
3773 1.46 thorpej {
3774 1.46 thorpej pv_addr_t *pv;
3775 1.46 thorpej
3776 1.46 thorpej SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3777 1.46 thorpej if (pv->pv_pa == pa)
3778 1.46 thorpej return (pv->pv_va);
3779 1.46 thorpej }
3780 1.46 thorpej return (0);
3781 1.46 thorpej }
3782 1.46 thorpej
3783 1.46 thorpej /*
3784 1.40 thorpej * pmap_map_section:
3785 1.40 thorpej *
3786 1.40 thorpej * Create a single section mapping.
3787 1.40 thorpej */
3788 1.40 thorpej void
3789 1.40 thorpej pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3790 1.40 thorpej {
3791 1.40 thorpej pd_entry_t *pde = (pd_entry_t *) l1pt;
3792 1.86 thorpej pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
3793 1.40 thorpej
3794 1.81 thorpej KASSERT(((va | pa) & L1_S_OFFSET) == 0);
3795 1.40 thorpej
3796 1.83 thorpej pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3797 1.83 thorpej L1_S_PROT(PTE_KERNEL, prot) | fl;
3798 1.41 thorpej }
3799 1.41 thorpej
3800 1.41 thorpej /*
3801 1.41 thorpej * pmap_map_entry:
3802 1.41 thorpej *
3803 1.41 thorpej * Create a single page mapping.
3804 1.41 thorpej */
3805 1.41 thorpej void
3806 1.47 thorpej pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3807 1.41 thorpej {
3808 1.47 thorpej pd_entry_t *pde = (pd_entry_t *) l1pt;
3809 1.86 thorpej pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
3810 1.47 thorpej pt_entry_t *pte;
3811 1.41 thorpej
3812 1.41 thorpej KASSERT(((va | pa) & PGOFSET) == 0);
3813 1.41 thorpej
3814 1.81 thorpej if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
3815 1.47 thorpej panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3816 1.47 thorpej
3817 1.47 thorpej pte = (pt_entry_t *)
3818 1.81 thorpej kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3819 1.47 thorpej if (pte == NULL)
3820 1.47 thorpej panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3821 1.47 thorpej
3822 1.83 thorpej pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3823 1.83 thorpej L2_S_PROT(PTE_KERNEL, prot) | fl;
3824 1.42 thorpej }
3825 1.42 thorpej
3826 1.42 thorpej /*
3827 1.42 thorpej * pmap_link_l2pt:
3828 1.42 thorpej *
3829 1.42 thorpej * Link the L2 page table specified by "pa" into the L1
3830 1.42 thorpej * page table at the slot for "va".
3831 1.42 thorpej */
3832 1.42 thorpej void
3833 1.46 thorpej pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
3834 1.42 thorpej {
3835 1.42 thorpej pd_entry_t *pde = (pd_entry_t *) l1pt;
3836 1.81 thorpej u_int slot = va >> L1_S_SHIFT;
3837 1.42 thorpej
3838 1.46 thorpej KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3839 1.46 thorpej
3840 1.83 thorpej pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
3841 1.83 thorpej pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
3842 1.83 thorpej pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
3843 1.83 thorpej pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
3844 1.42 thorpej
3845 1.46 thorpej SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
3846 1.43 thorpej }
3847 1.43 thorpej
3848 1.43 thorpej /*
3849 1.43 thorpej * pmap_map_chunk:
3850 1.43 thorpej *
3851 1.43 thorpej * Map a chunk of memory using the most efficient mappings
3852 1.43 thorpej * possible (section, large page, small page) into the
3853 1.43 thorpej * provided L1 and L2 tables at the specified virtual address.
3854 1.43 thorpej */
3855 1.43 thorpej vsize_t
3856 1.46 thorpej pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3857 1.46 thorpej int prot, int cache)
3858 1.43 thorpej {
3859 1.43 thorpej pd_entry_t *pde = (pd_entry_t *) l1pt;
3860 1.86 thorpej pt_entry_t *pte, fl;
3861 1.43 thorpej vsize_t resid;
3862 1.43 thorpej int i;
3863 1.43 thorpej
3864 1.43 thorpej resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3865 1.43 thorpej
3866 1.44 thorpej if (l1pt == 0)
3867 1.44 thorpej panic("pmap_map_chunk: no L1 table provided");
3868 1.44 thorpej
3869 1.43 thorpej #ifdef VERBOSE_INIT_ARM
3870 1.43 thorpej printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3871 1.43 thorpej "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3872 1.43 thorpej #endif
3873 1.43 thorpej
3874 1.43 thorpej size = resid;
3875 1.43 thorpej
3876 1.43 thorpej while (resid > 0) {
3877 1.43 thorpej /* See if we can use a section mapping. */
3878 1.81 thorpej if (((pa | va) & L1_S_OFFSET) == 0 &&
3879 1.81 thorpej resid >= L1_S_SIZE) {
3880 1.86 thorpej fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
3881 1.43 thorpej #ifdef VERBOSE_INIT_ARM
3882 1.43 thorpej printf("S");
3883 1.43 thorpej #endif
3884 1.83 thorpej pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3885 1.83 thorpej L1_S_PROT(PTE_KERNEL, prot) | fl;
3886 1.81 thorpej va += L1_S_SIZE;
3887 1.81 thorpej pa += L1_S_SIZE;
3888 1.81 thorpej resid -= L1_S_SIZE;
3889 1.43 thorpej continue;
3890 1.43 thorpej }
3891 1.45 thorpej
3892 1.45 thorpej /*
3893 1.45 thorpej * Ok, we're going to use an L2 table. Make sure
3894 1.45 thorpej * one is actually in the corresponding L1 slot
3895 1.45 thorpej * for the current VA.
3896 1.45 thorpej */
3897 1.81 thorpej if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
3898 1.46 thorpej panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3899 1.46 thorpej
3900 1.46 thorpej pte = (pt_entry_t *)
3901 1.81 thorpej kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3902 1.46 thorpej if (pte == NULL)
3903 1.46  thorpej 			panic("pmap_map_chunk: can't find L2 table for VA "
3904 1.46 thorpej "0x%08lx", va);
3905 1.43 thorpej
3906 1.43 thorpej /* See if we can use a L2 large page mapping. */
3907 1.81 thorpej if (((pa | va) & L2_L_OFFSET) == 0 &&
3908 1.81 thorpej resid >= L2_L_SIZE) {
3909 1.86 thorpej fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
3910 1.43 thorpej #ifdef VERBOSE_INIT_ARM
3911 1.43 thorpej printf("L");
3912 1.43 thorpej #endif
3913 1.43 thorpej for (i = 0; i < 16; i++) {
3914 1.43 thorpej pte[((va >> PGSHIFT) & 0x3f0) + i] =
3915 1.83 thorpej L2_L_PROTO | pa |
3916 1.83 thorpej L2_L_PROT(PTE_KERNEL, prot) | fl;
3917 1.43 thorpej }
3918 1.81 thorpej va += L2_L_SIZE;
3919 1.81 thorpej pa += L2_L_SIZE;
3920 1.81 thorpej resid -= L2_L_SIZE;
3921 1.43 thorpej continue;
3922 1.43 thorpej }
3923 1.43 thorpej
3924 1.43 thorpej /* Use a small page mapping. */
3925 1.86 thorpej fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
3926 1.43 thorpej #ifdef VERBOSE_INIT_ARM
3927 1.43 thorpej printf("P");
3928 1.43 thorpej #endif
3929 1.83 thorpej pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3930 1.83 thorpej L2_S_PROT(PTE_KERNEL, prot) | fl;
3931 1.43 thorpej va += NBPG;
3932 1.43 thorpej pa += NBPG;
3933 1.43 thorpej resid -= NBPG;
3934 1.43 thorpej }
3935 1.43 thorpej #ifdef VERBOSE_INIT_ARM
3936 1.43 thorpej printf("\n");
3937 1.43 thorpej #endif
3938 1.43 thorpej return (size);
3939 1.40 thorpej }
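
/*
 * Bootstrap usage sketch (hypothetical initarm() fragment, not from
 * this file): once the kernel L2 tables have been linked in with
 * pmap_link_l2pt(), the kernel image can be mapped with one call and
 * pmap_map_chunk() will choose section, large-page or small-page
 * mappings as alignment and size allow.
 *
 *	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start,
 *	    kernel_size, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 */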
3940 1.85 thorpej
3941 1.85 thorpej /********************** PTE initialization routines **************************/
3942 1.85 thorpej
3943 1.85 thorpej /*
3944 1.85 thorpej * These routines are called when the CPU type is identified to set up
3945 1.85 thorpej * the PTE prototypes, cache modes, etc.
3946 1.85 thorpej *
3947 1.85 thorpej * The variables are always here, just in case LKMs need to reference
3948 1.85 thorpej * them (though, they shouldn't).
3949 1.85 thorpej */
3950 1.85 thorpej
3951 1.86 thorpej pt_entry_t pte_l1_s_cache_mode;
3952 1.86 thorpej pt_entry_t pte_l1_s_cache_mask;
3953 1.86 thorpej
3954 1.86 thorpej pt_entry_t pte_l2_l_cache_mode;
3955 1.86 thorpej pt_entry_t pte_l2_l_cache_mask;
3956 1.86 thorpej
3957 1.86 thorpej pt_entry_t pte_l2_s_cache_mode;
3958 1.86 thorpej pt_entry_t pte_l2_s_cache_mask;
3959 1.85 thorpej
3960 1.85 thorpej pt_entry_t pte_l2_s_prot_u;
3961 1.85 thorpej pt_entry_t pte_l2_s_prot_w;
3962 1.85 thorpej pt_entry_t pte_l2_s_prot_mask;
3963 1.85 thorpej
3964 1.85 thorpej pt_entry_t pte_l1_s_proto;
3965 1.85 thorpej pt_entry_t pte_l1_c_proto;
3966 1.85 thorpej pt_entry_t pte_l2_s_proto;
3967 1.85 thorpej
3968 1.88 thorpej void (*pmap_copy_page_func)(paddr_t, paddr_t);
3969 1.88 thorpej void (*pmap_zero_page_func)(paddr_t);
3970 1.88 thorpej
3971 1.85 thorpej #if ARM_MMU_GENERIC == 1
3972 1.85 thorpej void
3973 1.85 thorpej pmap_pte_init_generic(void)
3974 1.85 thorpej {
3975 1.85 thorpej
3976 1.86 thorpej pte_l1_s_cache_mode = L1_S_B|L1_S_C;
3977 1.86 thorpej pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
3978 1.86 thorpej
3979 1.86 thorpej pte_l2_l_cache_mode = L2_B|L2_C;
3980 1.86 thorpej pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
3981 1.86 thorpej
3982 1.86 thorpej pte_l2_s_cache_mode = L2_B|L2_C;
3983 1.86 thorpej pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
3984 1.85 thorpej
3985 1.85 thorpej pte_l2_s_prot_u = L2_S_PROT_U_generic;
3986 1.85 thorpej pte_l2_s_prot_w = L2_S_PROT_W_generic;
3987 1.85 thorpej pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
3988 1.85 thorpej
3989 1.85 thorpej pte_l1_s_proto = L1_S_PROTO_generic;
3990 1.85 thorpej pte_l1_c_proto = L1_C_PROTO_generic;
3991 1.85 thorpej pte_l2_s_proto = L2_S_PROTO_generic;
3992 1.88 thorpej
3993 1.88 thorpej pmap_copy_page_func = pmap_copy_page_generic;
3994 1.88 thorpej pmap_zero_page_func = pmap_zero_page_generic;
3995 1.85 thorpej }
3996 1.85 thorpej
3997 1.85 thorpej #if defined(CPU_ARM9)
3998 1.85 thorpej void
3999 1.85 thorpej pmap_pte_init_arm9(void)
4000 1.85 thorpej {
4001 1.85 thorpej
4002 1.85 thorpej /*
4003 1.85 thorpej * ARM9 is compatible with generic, but we want to use
4004 1.85 thorpej * write-through caching for now.
4005 1.85 thorpej */
4006 1.85 thorpej pmap_pte_init_generic();
4007 1.86 thorpej
4008 1.86 thorpej pte_l1_s_cache_mode = L1_S_C;
4009 1.86 thorpej pte_l2_l_cache_mode = L2_C;
4010 1.86 thorpej pte_l2_s_cache_mode = L2_C;
4011 1.85 thorpej }
4012 1.85 thorpej #endif /* CPU_ARM9 */
4013 1.85 thorpej #endif /* ARM_MMU_GENERIC == 1 */
4014 1.85 thorpej
4015 1.85 thorpej #if ARM_MMU_XSCALE == 1
4016 1.85 thorpej void
4017 1.85 thorpej pmap_pte_init_xscale(void)
4018 1.85 thorpej {
4019 1.96 thorpej uint32_t auxctl;
4020 1.85 thorpej
4021 1.96 thorpej pte_l1_s_cache_mode = L1_S_B|L1_S_C;
4022 1.86 thorpej pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
4023 1.86 thorpej
4024 1.96 thorpej pte_l2_l_cache_mode = L2_B|L2_C;
4025 1.86 thorpej pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
4026 1.86 thorpej
4027 1.96 thorpej pte_l2_s_cache_mode = L2_B|L2_C;
4028 1.86 thorpej pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
4029 1.106 thorpej
4030 1.106 thorpej #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
4031 1.106 thorpej /*
4032 1.106 thorpej * The XScale core has an enhanced mode where writes that
4033 1.106 thorpej * miss the cache cause a cache line to be allocated. This
4034 1.106 thorpej 	 * is significantly faster than the traditional write-through
4035 1.106 thorpej 	 * behavior on a cache-miss write.
4036 1.106 thorpej *
4037 1.106 thorpej * However, there is a bug lurking in this pmap module, or in
4038 1.106 thorpej * other parts of the VM system, or both, which causes corruption
4039 1.106 thorpej * of NFS-backed files when this cache mode is used. We have
4040 1.106 thorpej * an ugly work-around for this problem (disable r/w-allocate
4041 1.106 thorpej * for managed kernel mappings), but the bug is still evil enough
4042 1.106 thorpej * to consider this cache mode "experimental".
4043 1.106 thorpej */
4044 1.106 thorpej pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
4045 1.106 thorpej pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
4046 1.106 thorpej pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
4047 1.106 thorpej #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
4048 1.85 thorpej
4049 1.95 thorpej #ifdef XSCALE_CACHE_WRITE_THROUGH
4050 1.95 thorpej /*
4051 1.95 thorpej * Some versions of the XScale core have various bugs in
4052 1.95 thorpej * their cache units, the work-around for which is to run
4053 1.95 thorpej * the cache in write-through mode. Unfortunately, this
4054 1.95 thorpej * has a major (negative) impact on performance. So, we
4055 1.95 thorpej * go ahead and run fast-and-loose, in the hopes that we
4056 1.95 thorpej * don't line up the planets in a way that will trip the
4057 1.95 thorpej * bugs.
4058 1.95 thorpej *
4059 1.95 thorpej * However, we give you the option to be slow-but-correct.
4060 1.95 thorpej */
4061 1.95 thorpej pte_l1_s_cache_mode = L1_S_C;
4062 1.95 thorpej pte_l2_l_cache_mode = L2_C;
4063 1.95 thorpej pte_l2_s_cache_mode = L2_C;
4064 1.95 thorpej #endif /* XSCALE_CACHE_WRITE_THROUGH */
4065 1.95 thorpej
4066 1.85 thorpej pte_l2_s_prot_u = L2_S_PROT_U_xscale;
4067 1.85 thorpej pte_l2_s_prot_w = L2_S_PROT_W_xscale;
4068 1.85 thorpej pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
4069 1.85 thorpej
4070 1.85 thorpej pte_l1_s_proto = L1_S_PROTO_xscale;
4071 1.85 thorpej pte_l1_c_proto = L1_C_PROTO_xscale;
4072 1.85 thorpej pte_l2_s_proto = L2_S_PROTO_xscale;
4073 1.88 thorpej
4074 1.88 thorpej pmap_copy_page_func = pmap_copy_page_xscale;
4075 1.88 thorpej pmap_zero_page_func = pmap_zero_page_xscale;
4076 1.96 thorpej
4077 1.96 thorpej /*
4078 1.96 thorpej * Disable ECC protection of page table access, for now.
4079 1.96 thorpej */
4080 1.96 thorpej __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
4081 1.96 thorpej : "=r" (auxctl));
4082 1.96 thorpej auxctl &= ~XSCALE_AUXCTL_P;
4083 1.96 thorpej __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
4084 1.96 thorpej :
4085 1.96 thorpej : "r" (auxctl));
4086 1.85 thorpej }
4087 1.87 thorpej
4088 1.87 thorpej /*
4089 1.87 thorpej * xscale_setup_minidata:
4090 1.87 thorpej *
4091 1.87 thorpej * Set up the mini-data cache clean area. We require the
4092 1.87 thorpej * caller to allocate the right amount of physically and
4093 1.87 thorpej * virtually contiguous space.
4094 1.87 thorpej */
4095 1.87 thorpej void
4096 1.87 thorpej xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
4097 1.87 thorpej {
4098 1.87 thorpej extern vaddr_t xscale_minidata_clean_addr;
4099 1.87 thorpej extern vsize_t xscale_minidata_clean_size; /* already initialized */
4100 1.87 thorpej pd_entry_t *pde = (pd_entry_t *) l1pt;
4101 1.87 thorpej pt_entry_t *pte;
4102 1.87 thorpej vsize_t size;
4103 1.96 thorpej uint32_t auxctl;
4104 1.87 thorpej
4105 1.87 thorpej xscale_minidata_clean_addr = va;
4106 1.87 thorpej
4107 1.87 thorpej /* Round it to page size. */
4108 1.87 thorpej size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
4109 1.87 thorpej
4110 1.87 thorpej for (; size != 0;
4111 1.87 thorpej va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
4112 1.87 thorpej pte = (pt_entry_t *)
4113 1.87 thorpej kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
4114 1.87 thorpej if (pte == NULL)
4115 1.87 thorpej panic("xscale_setup_minidata: can't find L2 table for "
4116 1.87 thorpej "VA 0x%08lx", va);
4117 1.87 thorpej pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
4118 1.87 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4119 1.87 thorpej L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
4120 1.87 thorpej }
4121 1.96 thorpej
4122 1.96 thorpej /*
4123 1.96 thorpej * Configure the mini-data cache for write-back with
4124 1.96 thorpej * read/write-allocate.
4125 1.96 thorpej *
4126 1.96 thorpej * NOTE: In order to reconfigure the mini-data cache, we must
4127 1.96 thorpej * make sure it contains no valid data! In order to do that,
4128 1.96 thorpej * we must issue a global data cache invalidate command!
4129 1.96 thorpej *
4130 1.96 thorpej * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
4131 1.96 thorpej * THIS IS VERY IMPORTANT!
4132 1.96 thorpej */
4133 1.96 thorpej
4134 1.96  thorpej 	/* Invalidate data and mini-data (the source register is ignored). */
4135 1.96  thorpej 	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
4136 1.96  thorpej 	    :
4137 1.96  thorpej 	    : "r" (0));
4138 1.96 thorpej
4139 1.96 thorpej
4140 1.96 thorpej __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
4141 1.96 thorpej : "=r" (auxctl));
4142 1.96 thorpej auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
4143 1.96 thorpej __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
4144 1.96 thorpej :
4145 1.96 thorpej : "r" (auxctl));
4146 1.87 thorpej }
4147 1.85 thorpej #endif /* ARM_MMU_XSCALE == 1 */
4148