/*	$NetBSD: pmap.c,v 1.113 2002/08/24 02:16:31 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG	- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.113 2002/08/24 02:16:31 thorpej Exp $");

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
                ((_stat_))
int pmap_debug_level = -2;
void pmap_dump_pvlist(vaddr_t phys, char *m);

/*
 * for switching to potentially finer grained debugging
 */
#define PDB_FOLLOW      0x0001
#define PDB_INIT        0x0002
#define PDB_ENTER       0x0004
#define PDB_REMOVE      0x0008
#define PDB_CREATE      0x0010
#define PDB_PTPAGE      0x0020
#define PDB_GROWKERN    0x0040
#define PDB_BITS        0x0080
#define PDB_COLLECT     0x0100
#define PDB_PROTECT     0x0200
#define PDB_MAP_L1      0x0400
#define PDB_BOOTSTRAP   0x1000
#define PDB_PARANOIA    0x2000
#define PDB_WIRING      0x4000
#define PDB_PVDUMP      0x8000

int debugmap = 0;
int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
#define NPDEBUG(_lev_,_stat_) \
        if (pmapdebug & (_lev_)) \
                ((_stat_))

#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define NPDEBUG(_lev_,_stat_) /* Nothing */
#endif  /* PMAP_DEBUG */

struct pmap     kernel_pmap_store;

/*
 * linked list of all non-kernel pmaps
 */

static LIST_HEAD(, pmap) pmaps;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

/*
 * pool/cache that PT-PT's are allocated from
 */

struct pool pmap_ptpt_pool;
struct pool_cache pmap_ptpt_cache;
u_int pmap_ptpt_cache_generation;

static void *pmap_ptpt_page_alloc(struct pool *, int);
static void pmap_ptpt_page_free(struct pool *, void *);

struct pool_allocator pmap_ptpt_allocator = {
        pmap_ptpt_page_alloc, pmap_ptpt_page_free,
};

static int pmap_ptpt_ctor(void *, void *, int);

static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;

char *memhook;
extern caddr_t msgbufaddr;

boolean_t pmap_initialized = FALSE;     /* Has pmap_init completed? */

/*
 * locking data structures
 */

static struct lock pmap_main_lock;
static struct simplelock pvalloc_lock;
static struct simplelock pmaps_lock;
#ifdef LOCKDEBUG
#define PMAP_MAP_TO_HEAD_LOCK() \
        (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define PMAP_MAP_TO_HEAD_UNLOCK() \
        (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)

#define PMAP_HEAD_TO_MAP_LOCK() \
        (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define PMAP_HEAD_TO_MAP_UNLOCK() \
        (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define PMAP_MAP_TO_HEAD_LOCK()         /* nothing */
#define PMAP_MAP_TO_HEAD_UNLOCK()       /* nothing */
#define PMAP_HEAD_TO_MAP_LOCK()         /* nothing */
#define PMAP_HEAD_TO_MAP_UNLOCK()       /* nothing */
#endif /* LOCKDEBUG */

/*
 * pv_page management structures: locked by pvalloc_lock
 */

TAILQ_HEAD(pv_pagelist, pv_page);
static struct pv_pagelist pv_freepages;  /* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs;  /* list of unused pv_pages */
static int pv_nfpvents;                  /* # of free pv entries */
static struct pv_page *pv_initpage;      /* bootstrap page from kernel_map */
static vaddr_t pv_cachedva;              /* cached VA for later use */

#define PVE_LOWAT (PVE_PER_PVPAGE / 2)  /* free pv_entry low water mark */
#define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
                                        /* high water mark */

/*
 * local prototypes
 */

static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define ALLOCPV_NEED    0       /* need PV now */
#define ALLOCPV_TRY     1       /* just try to allocate, don't steal */
#define ALLOCPV_NONEED  2       /* don't need PV, just growing cache */
static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
static void pmap_enter_pv __P((struct vm_page *,
                               struct pv_entry *, struct pmap *,
                               vaddr_t, struct vm_page *, int));
static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void pmap_free_pv_doit __P((struct pv_entry *));
static void pmap_free_pvpage __P((void));
static boolean_t pmap_is_curpmap __P((struct pmap *));
static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
                                            vaddr_t));
#define PMAP_REMOVE_ALL         0       /* remove all mappings */
#define PMAP_REMOVE_SKIPWIRED   1       /* skip wired mappings */

static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
                                 u_int, u_int));

/*
 * Structure that describes an L1 table.
 */
struct l1pt {
        SIMPLEQ_ENTRY(l1pt) pt_queue;   /* Queue pointers */
        struct pglist pt_plist;         /* Allocated page list */
        vaddr_t pt_va;                  /* Allocated virtual address */
        int pt_flags;                   /* Flags */
};
#define PTFLAG_STATIC   0x01    /* Statically allocated */
#define PTFLAG_KPT      0x02    /* Kernel pt's are mapped */
#define PTFLAG_CLEAN    0x04    /* L1 is clean */

static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
static void pmap_remove_all __P((struct vm_page *));

static struct vm_page *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t));
__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));

extern paddr_t physical_start;
extern paddr_t physical_end;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
static struct l1pt_queue l1pt_static_queue;  /* head of our static l1 queue */
static int l1pt_static_queue_count;          /* items in the static l1 queue */
static int l1pt_static_create_count;         /* static l1 items created */
static struct l1pt_queue l1pt_queue;         /* head of our l1 queue */
static int l1pt_queue_count;                 /* items in the l1 queue */
static int l1pt_create_count;                /* stat - L1's create count */
static int l1pt_reuse_count;                 /* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
                                         vaddr_t l2pa, int));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));

__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
                                             pt_entry_t *, boolean_t));
static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
                                   pt_entry_t *, boolean_t));
static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
                                  pt_entry_t *, boolean_t));

/*
 * real definition of pv_entry.
 */

struct pv_entry {
        struct pv_entry *pv_next;       /* next pv_entry */
        struct pmap     *pv_pmap;       /* pmap where mapping lies */
        vaddr_t         pv_va;          /* virtual address for mapping */
        int             pv_flags;       /* flags */
        struct vm_page  *pv_ptp;        /* vm_page for the ptp */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
        TAILQ_ENTRY(pv_page) pvpi_list;
        struct pv_entry *pvpi_pvfree;
        int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
                        sizeof(struct pv_entry))
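
/*
 * For a rough feel for the numbers (a sketch only, assuming an ILP32
 * ARM with 4KB pages, so NBPG == 4096, sizeof(struct pv_page_info) == 16
 * and sizeof(struct pv_entry) == 20):
 *
 *      PVE_PER_PVPAGE = (4096 - 16) / 20 = 204
 *      PVE_LOWAT      = 204 / 2          = 102
 *      PVE_HIWAT      = 102 + 204 * 2    = 510
 *
 * i.e. each pv_page yields about 200 pv_entrys, and the allocator
 * tries to keep between roughly 100 and 500 of them free.
 */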

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
        struct pv_page_info pvinfo;
        struct pv_entry pvents[PVE_PER_PVPAGE];
};

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
        pmap_debug_level = level;
        printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif  /* PMAP_DEBUG */

__inline static boolean_t
pmap_is_curpmap(struct pmap *pmap)
{

        if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
            pmap == pmap_kernel())
                return (TRUE);

        return (FALSE);
}

/*
 * PTE_SYNC_CURRENT:
 *
 *      Make sure the pte is flushed to RAM.  If the pmap is
 *      not the current pmap, then also evict the pte from
 *      any cache lines.
 */
#define PTE_SYNC_CURRENT(pmap, pte)                                     \
do {                                                                    \
        if (pmap_is_curpmap(pmap))                                      \
                PTE_SYNC(pte);                                          \
        else                                                            \
                PTE_FLUSH(pte);                                         \
} while (/*CONSTCOND*/0)

/*
 * PTE_FLUSH_ALT:
 *
 *      Make sure the pte is not in any cache lines.  We expect
 *      this to be used only when a pte has not been modified.
 */
#define PTE_FLUSH_ALT(pmap, pte)                                        \
do {                                                                    \
        if (pmap_is_curpmap(pmap) == 0)                                 \
                PTE_FLUSH(pte);                                         \
} while (/*CONSTCOND*/0)

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *                     one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(struct pmap *pmap, int mode)
{
        struct pv_page *pvpage;
        struct pv_entry *pv;

        simple_lock(&pvalloc_lock);

        pvpage = TAILQ_FIRST(&pv_freepages);

        if (pvpage != NULL) {
                pvpage->pvinfo.pvpi_nfree--;
                if (pvpage->pvinfo.pvpi_nfree == 0) {
                        /* nothing left in this one? */
                        TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
                }
                pv = pvpage->pvinfo.pvpi_pvfree;
                KASSERT(pv);
                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                pv_nfpvents--;  /* took one from pool */
        } else {
                pv = NULL;      /* need more of them */
        }

        /*
         * if below low water mark or we didn't get a pv_entry we try and
         * create more pv_entrys ...
         */

        if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
                if (pv == NULL)
                        pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
                            mode : ALLOCPV_NEED);
                else
                        (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
        }

        simple_unlock(&pvalloc_lock);
        return(pv);
}

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 * new pv_entry from it.  if we are unable to allocate a pv_page
 * we make a last ditch effort to steal a pv_page from some other
 * mapping.  if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(struct pmap *pmap, int mode)
{
        struct vm_page *pg;
        struct pv_page *pvpage;
        struct pv_entry *pv;
        int s;

        /*
         * if we need_entry and we've got unused pv_pages, allocate from there
         */

        pvpage = TAILQ_FIRST(&pv_unusedpgs);
        if (mode != ALLOCPV_NONEED && pvpage != NULL) {

                /* move it to pv_freepages list */
                TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
                TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

                /* allocate a pv_entry */
                pvpage->pvinfo.pvpi_nfree--;    /* can't go to zero */
                pv = pvpage->pvinfo.pvpi_pvfree;
                KASSERT(pv);
                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;

                pv_nfpvents--;  /* took one from pool */
                return(pv);
        }

        /*
         * see if we've got a cached unmapped VA that we can map a page in.
         * if not, try to allocate one.
         */

        if (pv_cachedva == 0) {
                s = splvm();
                pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
                    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
                splx(s);
                if (pv_cachedva == 0) {
                        return (NULL);
                }
        }

        pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
            UVM_PGA_USERESERVE);

        if (pg == NULL)
                return (NULL);
        pg->flags &= ~PG_BUSY;  /* never busy */

        /*
         * add a mapping for our new pv_page and free its entrys (save one!)
         *
         * NOTE: If we are allocating a PV page for the kernel pmap, the
         * pmap is already locked!  (...but entering the mapping is safe...)
         */

        pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
            VM_PROT_READ|VM_PROT_WRITE);
        pmap_update(pmap_kernel());
        pvpage = (struct pv_page *) pv_cachedva;
        pv_cachedva = 0;
        return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
{
        int tofree, lcv;

        /* do we need to return one? */
        tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;

        pvp->pvinfo.pvpi_pvfree = NULL;
        pvp->pvinfo.pvpi_nfree = tofree;
        for (lcv = 0 ; lcv < tofree ; lcv++) {
                pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
                pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
        }
        if (need_entry)
                TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
        else
                TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
        pv_nfpvents += tofree;
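
        /*
         * Note that when need_entry is true the loop above threads only
         * the first PVE_PER_PVPAGE - 1 entries onto the free list; the
         * final slot, pvents[lcv] (lcv == tofree at this point), was
         * deliberately left off the list and is handed back to the caller.
         */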
        return((need_entry) ? &pvp->pvents[lcv] : NULL);
}

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(struct pv_entry *pv)
{
        struct pv_page *pvp;

        pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
        pv_nfpvents++;
        pvp->pvinfo.pvpi_nfree++;

        /* nfree == 1 => fully allocated page just became partly allocated */
        if (pvp->pvinfo.pvpi_nfree == 1) {
                TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
        }

        /* free it */
        pv->pv_next = pvp->pvinfo.pvpi_pvfree;
        pvp->pvinfo.pvpi_pvfree = pv;

        /*
         * are all pv_page's pv_entry's free?  move it to unused queue.
         */

        if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
                TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
                TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
        }
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
{
        simple_lock(&pvalloc_lock);
        pmap_free_pv_doit(pv);

        /*
         * Can't free the PV page if the PV entries were associated with
         * the kernel pmap; the pmap is already locked.
         */
        if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
            pmap != pmap_kernel())
                pmap_free_pvpage();

        simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
{
        struct pv_entry *nextpv;

        simple_lock(&pvalloc_lock);

        for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
                nextpv = pvs->pv_next;
                pmap_free_pv_doit(pvs);
        }

        /*
         * Can't free the PV page if the PV entries were associated with
         * the kernel pmap; the pmap is already locked.
         */
        if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
            pmap != pmap_kernel())
                pmap_free_pvpage();

        simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *      there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 */

static void
pmap_free_pvpage(void)
{
        int s;
        struct vm_map *map;
        struct vm_map_entry *dead_entries;
        struct pv_page *pvp;

        s = splvm(); /* protect kmem_map */

        pvp = TAILQ_FIRST(&pv_unusedpgs);

        /*
         * note: watch out for pv_initpage which is allocated out of
         * kernel_map rather than kmem_map.
         */
        if (pvp == pv_initpage)
                map = kernel_map;
        else
                map = kmem_map;
        if (vm_map_lock_try(map)) {

                /* remove pvp from pv_unusedpgs */
                TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

                /* unmap the page */
                dead_entries = NULL;
                uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
                    &dead_entries);
                vm_map_unlock(map);

                if (dead_entries != NULL)
                        uvm_unmap_detach(dead_entries, 0);

                pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
        }
        if (pvp == pv_initpage)
                /* no more initpage, we've freed it */
                pv_initpage = NULL;

        splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */

__inline static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
    vaddr_t va, struct vm_page *ptp, int flags)
{
        pve->pv_pmap = pmap;
        pve->pv_va = va;
        pve->pv_ptp = ptp;                      /* NULL for kernel pmap */
        pve->pv_flags = flags;
        simple_lock(&pg->mdpage.pvh_slock);     /* lock vm_page */
        pve->pv_next = pg->mdpage.pvh_list;     /* add to ... */
        pg->mdpage.pvh_list = pve;              /* ... locked list */
        simple_unlock(&pg->mdpage.pvh_slock);   /* unlock, done! */
        if (pve->pv_flags & PVF_WIRED)
                ++pmap->pm_stats.wired_count;
#ifdef PMAP_ALIAS_DEBUG
    {
        int s = splhigh();
        if (pve->pv_flags & PVF_WRITE)
                pg->mdpage.rw_mappings++;
        else
                pg->mdpage.ro_mappings++;
        if (pg->mdpage.rw_mappings != 0 &&
            (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
                printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
                    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
                    pg->mdpage.krw_mappings);
        }
        splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
{
        struct pv_entry *pve, **prevptr;

        prevptr = &pg->mdpage.pvh_list;         /* previous pv_entry pointer */
        pve = *prevptr;
        while (pve) {
                if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
                        *prevptr = pve->pv_next;        /* remove it! */
                        if (pve->pv_flags & PVF_WIRED)
                                --pmap->pm_stats.wired_count;
#ifdef PMAP_ALIAS_DEBUG
    {
                        int s = splhigh();
                        if (pve->pv_flags & PVF_WRITE) {
                                KASSERT(pg->mdpage.rw_mappings != 0);
                                pg->mdpage.rw_mappings--;
                        } else {
                                KASSERT(pg->mdpage.ro_mappings != 0);
                                pg->mdpage.ro_mappings--;
                        }
                        splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */
                        break;
                }
                prevptr = &pve->pv_next;        /* previous pointer */
                pve = pve->pv_next;             /* advance */
        }
        return(pve);                            /* return removed pve */
}

/*
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */

static /* __inline */ u_int
pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
    u_int bic_mask, u_int eor_mask)
{
        struct pv_entry *npv;
        u_int flags, oflags;

        /*
         * There is at least one VA mapping this page.
         */

        for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
                if (pmap == npv->pv_pmap && va == npv->pv_va) {
                        oflags = npv->pv_flags;
                        npv->pv_flags = flags =
                            ((oflags & ~bic_mask) ^ eor_mask);
                        if ((flags ^ oflags) & PVF_WIRED) {
                                if (flags & PVF_WIRED)
                                        ++pmap->pm_stats.wired_count;
                                else
                                        --pmap->pm_stats.wired_count;
                        }
#ifdef PMAP_ALIAS_DEBUG
    {
                        int s = splhigh();
                        if ((flags ^ oflags) & PVF_WRITE) {
                                if (flags & PVF_WRITE) {
                                        pg->mdpage.rw_mappings++;
                                        pg->mdpage.ro_mappings--;
                                        if (pg->mdpage.rw_mappings != 0 &&
                                            (pg->mdpage.kro_mappings != 0 ||
                                             pg->mdpage.krw_mappings != 0)) {
                                                printf("pmap_modify_pv: rw %u, "
                                                    "kro %u, krw %u\n",
                                                    pg->mdpage.rw_mappings,
                                                    pg->mdpage.kro_mappings,
                                                    pg->mdpage.krw_mappings);
                                        }
                                } else {
                                        KASSERT(pg->mdpage.rw_mappings != 0);
                                        pg->mdpage.rw_mappings--;
                                        pg->mdpage.ro_mappings++;
                                }
                        }
                        splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */
                        return (oflags);
                }
        }
        return (0);
}

/*
 * Map the specified level 2 pagetable into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
#define PMAP_PTP_SELFREF        0x01
#define PMAP_PTP_CACHEABLE      0x02

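/*
 * A note on the geometry (a sketch of the usual arm32 configuration,
 * where an L1 section entry covers 1MB and a coarse L2 table is 1KB):
 * L2 tables are allocated a 4KB page at a time, so each page holds
 * four consecutive 1KB tables.  Masking the L1 index with ~3 below
 * aligns it to such a group of four, and the four L1 entries written
 * here therefore map a 4MB chunk of the address space in one go.
 */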
static __inline void
pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, int flags)
{
        vaddr_t ptva;

        /* Calculate the index into the L1 page table. */
        ptva = (va >> L1_S_SHIFT) & ~3;

        /* Map page table into the L1. */
        pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
        pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
        pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
        pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
        cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);

        /* Map the page table into the page table area. */
        if (flags & PMAP_PTP_SELFREF) {
                *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
                    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
                    ((flags & PMAP_PTP_CACHEABLE) ? pte_l2_s_cache_mode : 0);
                PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
        }
}
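
/*
 * PMAP_PTP_SELFREF also records the new L2 table in the pmap's PT-PT
 * (the page table that maps the page tables); pmap_allocpagedir()
 * below uses it when wiring the PT-PT itself into the L1 at PTE_BASE.
 */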

#if 0
static __inline void
pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
{
        vaddr_t ptva;

        /* Calculate the index into the L1 page table. */
        ptva = (va >> L1_S_SHIFT) & ~3;

        /* Unmap page table from the L1. */
        pmap->pm_pdir[ptva + 0] = 0;
        pmap->pm_pdir[ptva + 1] = 0;
        pmap->pm_pdir[ptva + 2] = 0;
        pmap->pm_pdir[ptva + 3] = 0;
        cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);

        /* Unmap the page table from the page table area. */
        *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
        PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
}
#endif

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 *
 * XXX This routine should eventually go away; it's only used
 * XXX by machine-dependent crash dump code.
 */
vaddr_t
pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
{
        pt_entry_t *pte;

        while (spa < epa) {
                pte = vtopte(va);

                *pte = L2_S_PROTO | spa |
                    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
                PTE_SYNC(pte);
                cpu_tlb_flushID_SE(va);
                va += NBPG;
                spa += NBPG;
        }
        pmap_update(pmap_kernel());
        return(va);
}
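
/*
 * For example, a hypothetical dump routine wanting a window onto the
 * physical range [dump_pa, dump_pa + dump_size) would do something like:
 *
 *      va = pmap_map(va, dump_pa, dump_pa + dump_size,
 *          VM_PROT_READ|VM_PROT_WRITE);
 *
 * and get back the first unused VA after the new mappings.
 */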

/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * bootstrap the pmap system.  This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated
 * and also allocates virtual addresses for certain page hooks.
 * Currently only one page hook is allocated; it is used
 * to zero physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */

char *boot_head;

void
pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
{
        pt_entry_t *pte;

        pmap_kernel()->pm_pdir = kernel_l1pt;
        pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
        pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
        simple_lock_init(&pmap_kernel()->pm_lock);
        pmap_kernel()->pm_obj.pgops = NULL;
        TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
        pmap_kernel()->pm_obj.uo_npages = 0;
        pmap_kernel()->pm_obj.uo_refs = 1;

        virtual_avail = KERNEL_VM_BASE;
        virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;

        /*
         * now we allocate the "special" VAs which are used for tmp mappings
         * by the pmap (and other modules).  we allocate the VAs by advancing
         * virtual_avail (note that there are no pages mapped at these VAs).
         * we find the PTE that maps the allocated VA via the linear PTE
         * mapping.
         */

        pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);

        csrcp = virtual_avail; csrc_pte = pte;
        virtual_avail += PAGE_SIZE; pte++;

        cdstp = virtual_avail; cdst_pte = pte;
        virtual_avail += PAGE_SIZE; pte++;

        memhook = (char *) virtual_avail;       /* don't need pte */
        virtual_avail += PAGE_SIZE; pte++;

        msgbufaddr = (caddr_t) virtual_avail;   /* don't need pte */
        virtual_avail += round_page(MSGBUFSIZE);
        pte += atop(round_page(MSGBUFSIZE));

        /*
         * init the static-global locks and global lists.
         */
        spinlockinit(&pmap_main_lock, "pmaplk", 0);
        simple_lock_init(&pvalloc_lock);
        simple_lock_init(&pmaps_lock);
        LIST_INIT(&pmaps);
        TAILQ_INIT(&pv_freepages);
        TAILQ_INIT(&pv_unusedpgs);

        /*
         * initialize the pmap pool.
         */

        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
            &pool_allocator_nointr);

        /*
         * initialize the PT-PT pool and cache.
         */

        pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
            &pmap_ptpt_allocator);
        pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
            pmap_ptpt_ctor, NULL, NULL);

        cpu_dcache_wbinv_all();
}

/*
 * void pmap_init(void)
 *
 * Initialize the pmap module.
 * Called by vm_init() in vm/vm_init.c in order to initialise
 * any structures that the pmap system needs to map virtual memory.
 */

extern int physmem;

void
pmap_init(void)
{

        /*
         * Set the available memory vars - These do not map to real memory
         * addresses and cannot as the physical memory is fragmented.
         * They are used by ps for %mem calculations.
         * One could argue whether this should be the entire memory or just
         * the memory that is useable in a user process.
         */
        avail_start = 0;
        avail_end = physmem * NBPG;

        /*
         * now we need to free enough pv_entry structures to allow us to get
         * the kmem_map/kmem_object allocated and inited (done after this
         * function is finished).  to do this we allocate one bootstrap page
         * out of kernel_map and use it to provide an initial pool of pv_entry
         * structures.  we never free this page.
         */

        pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
        if (pv_initpage == NULL)
                panic("pmap_init: pv_initpage");
        pv_cachedva = 0;        /* a VA we have allocated but not used yet */
        pv_nfpvents = 0;
        (void) pmap_add_pvpage(pv_initpage, FALSE);

        pmap_initialized = TRUE;

        /* Initialise our L1 page table queues and counters */
        SIMPLEQ_INIT(&l1pt_static_queue);
        l1pt_static_queue_count = 0;
        l1pt_static_create_count = 0;
        SIMPLEQ_INIT(&l1pt_queue);
        l1pt_queue_count = 0;
        l1pt_create_count = 0;
        l1pt_reuse_count = 0;
}

/*
 * pmap_postinit()
 *
 * This routine is called after the vm and kmem subsystems have been
 * initialised.  This allows the pmap code to perform any initialisation
 * that can only be done once the memory allocation is in place.
 */

void
pmap_postinit(void)
{
        int loop;
        struct l1pt *pt;

#ifdef PMAP_STATIC_L1S
        for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
#else   /* PMAP_STATIC_L1S */
        for (loop = 0; loop < max_processes; ++loop) {
#endif  /* PMAP_STATIC_L1S */
                /* Allocate an L1 page table */
                pt = pmap_alloc_l1pt();
                if (!pt)
                        panic("Cannot allocate static L1 page tables\n");

                /* Clean it */
                bzero((void *)pt->pt_va, L1_TABLE_SIZE);
                pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
                /* Add the page table to the queue */
                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
                ++l1pt_static_queue_count;
                ++l1pt_static_create_count;
        }
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map is zero, the map is an actual physical
 * map, and may be referenced by the hardware.
 *
 * If the size specified is non-zero, the map will be used in software only,
 * and is bounded by that size.
 */

pmap_t
pmap_create(void)
{
        struct pmap *pmap;

        /*
         * Fetch pmap entry from the pool
         */

        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
        /* XXX is this really needed! */
        memset(pmap, 0, sizeof(*pmap));

        simple_lock_init(&pmap->pm_obj.vmobjlock);
        pmap->pm_obj.pgops = NULL;      /* currently not a mappable object */
        TAILQ_INIT(&pmap->pm_obj.memq);
        pmap->pm_obj.uo_npages = 0;
        pmap->pm_obj.uo_refs = 1;
        pmap->pm_stats.wired_count = 0;
        pmap->pm_stats.resident_count = 1;
        pmap->pm_ptphint = NULL;

        /* Now init the machine part of the pmap */
        pmap_pinit(pmap);
        return(pmap);
}

/*
 * pmap_alloc_l1pt()
 *
 * This routine allocates physical and virtual memory for an L1 page table
 * and wires it.
 * An l1pt structure is returned to describe the allocated page table.
 *
 * This routine is allowed to fail if the required memory cannot be allocated.
 * In this case NULL is returned.
 */

struct l1pt *
pmap_alloc_l1pt(void)
{
        paddr_t pa;
        vaddr_t va;
        struct l1pt *pt;
        int error;
        struct vm_page *m;

        /* Allocate virtual address space for the L1 page table */
        va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
        if (va == 0) {
#ifdef DIAGNOSTIC
                PDEBUG(0,
                    printf("pmap: Cannot allocate pageable memory for L1\n"));
#endif  /* DIAGNOSTIC */
                return(NULL);
        }

        /* Allocate memory for the l1pt structure */
        pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);

        /*
         * Allocate pages from the VM system.
         */
        error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
            L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
        if (error) {
#ifdef DIAGNOSTIC
                PDEBUG(0,
                    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
                    error));
#endif  /* DIAGNOSTIC */
                /* Release the resources we already have claimed */
                free(pt, M_VMPMAP);
                uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
                return(NULL);
        }

        /* Map our physical pages into our virtual space */
        pt->pt_va = va;
        m = TAILQ_FIRST(&pt->pt_plist);
        while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
                pa = VM_PAGE_TO_PHYS(m);

                pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);

                va += NBPG;
                m = m->pageq.tqe_next;
        }

#ifdef DIAGNOSTIC
        if (m)
                panic("pmap_alloc_l1pt: pglist not empty\n");
#endif  /* DIAGNOSTIC */

        pt->pt_flags = 0;
        return(pt);
}

/*
 * Free an L1 page table previously allocated with pmap_alloc_l1pt().
 */
static void
pmap_free_l1pt(struct l1pt *pt)
{
        /* Unmap the physical memory from the virtual space */
        pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
        pmap_update(pmap_kernel());

        /* Return the physical memory */
        uvm_pglistfree(&pt->pt_plist);

        /* Free the virtual space */
        uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);

        /* Free the l1pt structure */
        free(pt, M_VMPMAP);
}

/*
 * pmap_ptpt_page_alloc:
 *
 *      Back-end page allocator for the PT-PT pool.
 */
static void *
pmap_ptpt_page_alloc(struct pool *pp, int flags)
{
        struct vm_page *pg;
        pt_entry_t *pte;
        vaddr_t va;

        /* XXX PR_WAITOK? */
        va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
        if (va == 0)
                return (NULL);

        for (;;) {
                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                if (pg != NULL)
                        break;
                if ((flags & PR_WAITOK) == 0) {
                        uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
                        return (NULL);
                }
                uvm_wait("pmap_ptpt");
        }

        pte = vtopte(va);
        KDASSERT(pmap_pte_v(pte) == 0);

        *pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
            L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
        PTE_SYNC(pte);
#ifdef PMAP_ALIAS_DEBUG
    {
        int s = splhigh();
        pg->mdpage.krw_mappings++;
        splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */

        return ((void *) va);
}

/*
 * pmap_ptpt_page_free:
 *
 *      Back-end page free'er for the PT-PT pool.
 */
static void
pmap_ptpt_page_free(struct pool *pp, void *v)
{
        vaddr_t va = (vaddr_t) v;
        paddr_t pa;

        pa = vtophys(va);

        pmap_kremove(va, L2_TABLE_SIZE);
        pmap_update(pmap_kernel());

        uvm_pagefree(PHYS_TO_VM_PAGE(pa));

        uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
}

/*
 * pmap_ptpt_ctor:
 *
 *      Constructor for the PT-PT cache.
 */
static int
pmap_ptpt_ctor(void *arg, void *object, int flags)
{
        caddr_t vptpt = object;

        /* Page is already zero'd. */

        /*
         * Map in kernel PTs.
         *
         * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
         */
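        /*
         * The source here is the kernel's own PT-PT, reached through
         * the recursive PTE mapping at PTE_BASE; only the tail of the
         * table is copied, i.e. the entries covering the kernel part
         * of the address space (KERNEL_PD_SIZE bytes' worth of L1).
         */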
        memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
               (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
                        ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
               (KERNEL_PD_SIZE >> 2));

        return (0);
}

/*
 * Allocate a page directory.
 * This routine will either allocate a new page directory from the pool
 * of L1 page tables currently held by the kernel or it will allocate
 * a new one via pmap_alloc_l1pt().
 * It will then initialise the l1 page table for use.
 */
static int
pmap_allocpagedir(struct pmap *pmap)
{
        vaddr_t vptpt;
        paddr_t pa;
        struct l1pt *pt;
        u_int gen;

        PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));

        /* Do we have any spare L1's lying around ? */
        if (l1pt_static_queue_count) {
                --l1pt_static_queue_count;
                pt = SIMPLEQ_FIRST(&l1pt_static_queue);
                SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
        } else if (l1pt_queue_count) {
                --l1pt_queue_count;
                pt = SIMPLEQ_FIRST(&l1pt_queue);
                SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
                ++l1pt_reuse_count;
        } else {
                pt = pmap_alloc_l1pt();
                if (!pt)
                        return(ENOMEM);
                ++l1pt_create_count;
        }

        /* Store the pointer to the l1 descriptor in the pmap. */
        pmap->pm_l1pt = pt;

        /* Get the physical address of the start of the l1 */
        pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));

        /* Store the virtual address of the l1 in the pmap. */
        pmap->pm_pdir = (pd_entry_t *)pt->pt_va;

        /* Clean the L1 if it is dirty */
        if (!(pt->pt_flags & PTFLAG_CLEAN)) {
                bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
                cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
                    (L1_TABLE_SIZE - KERNEL_PD_SIZE));
        }

        /* Allocate a page table to map all the page tables for this pmap */
        KASSERT(pmap->pm_vptpt == 0);

 try_again:
        gen = pmap_ptpt_cache_generation;
        vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
        if (vptpt == NULL) {
                PDEBUG(0, printf("pmap_alloc_pagedir: no KVA for PTPT\n"));
                pmap_freepagedir(pmap);
                return (ENOMEM);
        }

        /* need to lock this all up for growkernel */
        simple_lock(&pmaps_lock);

        if (gen != pmap_ptpt_cache_generation) {
                simple_unlock(&pmaps_lock);
                pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
                goto try_again;
        }

        pmap->pm_vptpt = vptpt;
        pmap->pm_pptpt = vtophys(vptpt);

        /* Duplicate the kernel mappings. */
        bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
              (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
              KERNEL_PD_SIZE);
        cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
            (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);

        /* Wire in this page table */
        pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, PMAP_PTP_SELFREF);

        pt->pt_flags &= ~PTFLAG_CLEAN;  /* L1 is dirty now */

        LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
        simple_unlock(&pmaps_lock);

        return(0);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */

void
pmap_pinit(struct pmap *pmap)
{
        int backoff = 6;
        int retry = 10;

        PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));

        /* Keep looping until we succeed in allocating a page directory */
        while (pmap_allocpagedir(pmap) != 0) {
                /*
                 * Ok we failed to allocate a suitable block of memory for an
                 * L1 page table.  This means that either:
                 * 1. 16KB of virtual address space could not be allocated
                 * 2. 16KB of physically contiguous memory on a 16KB boundary
                 *    could not be allocated.
                 *
                 * Since we cannot fail we will sleep for a while and try
                 * again.
                 *
                 * Searching for a suitable L1 PT is expensive:
                 * to avoid hogging the system when memory is really
                 * scarce, use an exponential back-off so that
                 * eventually we won't retry more than once every 8
                 * seconds.  This should allow other processes to run
                 * to completion and free up resources.
                 */
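                /*
                 * Concretely: backoff starts at 6, so the first sleeps
                 * last (hz << 3) >> 6 = hz/8 ticks; every 10 failures
                 * backoff drops by one and the sleep doubles, capping
                 * at (hz << 3) >> 0 = 8 seconds once backoff hits 0.
                 */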
                (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
                    NULL);
                if (--retry == 0) {
                        retry = 10;
                        if (backoff)
                                --backoff;
                }
        }

        if (vector_page < KERNEL_BASE) {
                /*
                 * Map the vector page.  This will also allocate and map
                 * an L2 table for it.
                 */
                pmap_enter(pmap, vector_page, systempage.pv_pa,
                    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
                pmap_update(pmap);
        }
}

void
pmap_freepagedir(struct pmap *pmap)
{
        /* Free the memory used for the page table mapping */
        if (pmap->pm_vptpt != 0) {
                /*
                 * XXX Objects freed to a pool cache must be in constructed
                 * XXX form when freed, but we don't free page tables as we
                 * XXX go, so we need to zap the mappings here.
                 *
                 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
                 */
                memset((caddr_t) pmap->pm_vptpt, 0,
                    ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
                pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
        }

        /* junk the L1 page table */
        if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
                /* Add the page table to the queue */
                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
                    pmap->pm_l1pt, pt_queue);
                ++l1pt_static_queue_count;
        } else if (l1pt_queue_count < 8) {
                /* Add the page table to the queue */
                SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
                ++l1pt_queue_count;
        } else
                pmap_free_l1pt(pmap->pm_l1pt);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains no valid mappings.
 */

void
pmap_destroy(struct pmap *pmap)
{
        struct vm_page *page;
        int count;

        if (pmap == NULL)
                return;

        PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));

        /*
         * Drop reference count
         */
        simple_lock(&pmap->pm_obj.vmobjlock);
        count = --pmap->pm_obj.uo_refs;
        simple_unlock(&pmap->pm_obj.vmobjlock);
        if (count > 0) {
                return;
        }

        /*
         * reference count is zero, free pmap resources and then free pmap.
         */

        /*
         * remove it from global list of pmaps
         */

        simple_lock(&pmaps_lock);
        LIST_REMOVE(pmap, pm_list);
        simple_unlock(&pmaps_lock);

        if (vector_page < KERNEL_BASE) {
                /* Remove the vector page mapping */
                pmap_remove(pmap, vector_page, vector_page + NBPG);
                pmap_update(pmap);
        }

        /*
         * Free any page tables still mapped.
         * This is only temporary until pmap_enter can count the number
         * of mappings made in a page table.  Then pmap_remove() can
         * reduce the count and free the pagetable when the count
         * reaches zero.  Note that entries in this list should match the
         * contents of the ptpt; however, this is faster than walking 1024
         * entries looking for pt's.
         * (taken from i386 pmap.c)
         */
        /*
         * vmobjlock must be held while freeing pages
         */
        simple_lock(&pmap->pm_obj.vmobjlock);
        while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
                KASSERT((page->flags & PG_BUSY) == 0);
                /* XXXJRT Clean this up. */
                cpu_dcache_inv_range(trunc_page((vaddr_t)vtopte(page->offset)),
                    PAGE_SIZE);
                page->wire_count = 0;
                uvm_pagefree(page);
        }
        simple_unlock(&pmap->pm_obj.vmobjlock);

        /* Free the page dir */
        pmap_freepagedir(pmap);

        /* return the pmap to the pool */
        pool_put(&pmap_pmap_pool, pmap);
}

/*
 * void pmap_reference(struct pmap *pmap)
 *
 * Add a reference to the specified pmap.
 */

void
pmap_reference(struct pmap *pmap)
{
        if (pmap == NULL)
                return;

        simple_lock(&pmap->pm_lock);
        pmap->pm_obj.uo_refs++;
        simple_unlock(&pmap->pm_lock);
}

/*
 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 *
 * Return the start and end addresses of the kernel's virtual space.
 * These values are setup in pmap_bootstrap and are updated as pages
 * are allocated.
 */

void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
        *start = virtual_avail;
        *end = virtual_end;
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct proc *p)
{
        struct pmap *pmap = p->p_vmspace->vm_map.pmap;
        struct pcb *pcb = &p->p_addr->u_pcb;

        (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
            (paddr_t *)&pcb->pcb_pagedir);

        PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
            p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));

        if (p == curproc) {
                PDEBUG(0, printf("pmap_activate: setting TTB\n"));
                setttb((u_int)pcb->pcb_pagedir);
        }
}

/*
 * Deactivate the address space of the specified process.
 */
void
pmap_deactivate(struct proc *p)
{
}

/*
 * Perform any deferred pmap operations.
 */
void
pmap_update(struct pmap *pmap)
{

        /*
         * We haven't deferred any pmap operations, but we do need to
         * make sure TLB/cache operations have completed.
         */
        cpu_cpwait();
}

/*
 * pmap_clean_page()
 *
 * This is a local function used to work out the best strategy to clean
 * a single page referenced by its entry in the PV table.  It's used by
 * pmap_copy_page, pmap_zero_page and maybe some others later on.
 *
 * Its policy is effectively:
 *  o If there are no mappings, we don't bother doing anything with the cache.
 *  o If there is one mapping, we clean just that page.
 *  o If there are multiple mappings, we clean the entire cache.
 *
 * So that some functions can be further optimised, it returns 0 if it didn't
 * clean the entire cache, or 1 if it did.
 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000 a whole cache clean will be performed rather than
 * just the 1 page.  Since this should not occur in everyday use, and if it
 * does it will just result in a less efficient clean for the page, this is
 * tolerable.
 */
static int
pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
{
        struct pmap *pmap;
        struct pv_entry *npv;
        int cache_needs_cleaning = 0;
        vaddr_t page_to_clean = 0;

        if (pv == NULL) {
                /* nothing mapped in so nothing to flush */
                return (0);
        }

        /*
         * Since we flush the cache each time we change curproc, we
         * only need to flush the page if it is in the current pmap.
         */
        if (curproc)
                pmap = curproc->p_vmspace->vm_map.pmap;
        else
                pmap = pmap_kernel();

        for (npv = pv; npv; npv = npv->pv_next) {
                if (npv->pv_pmap == pmap) {
                        /*
                         * The page is mapped non-cacheable in
                         * this map.  No need to flush the cache.
                         */
                        if (npv->pv_flags & PVF_NC) {
#ifdef DIAGNOSTIC
                                if (cache_needs_cleaning)
                                        panic("pmap_clean_page: "
                                            "cache inconsistency");
#endif
                                break;
                        } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
                                continue;
                        if (cache_needs_cleaning) {
                                page_to_clean = 0;
                                break;
                        } else
                                page_to_clean = npv->pv_va;
                        cache_needs_cleaning = 1;
                }
        }

        if (page_to_clean) {
                /*
                 * XXX If is_src, we really only need to write-back,
                 * XXX not invalidate, too.  Investigate further.
                 * XXX --thorpej@netbsd.org
                 */
                cpu_idcache_wbinv_range(page_to_clean, NBPG);
        } else if (cache_needs_cleaning) {
                cpu_idcache_wbinv_all();
                return (1);
        }
        return (0);
}

/*
 * pmap_zero_page()
 *
 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cacheable;
 * on StrongARM, accesses to non-cached pages are non-burst, which
 * makes writing _any_ bulk data very slow.
 */
#if ARM_MMU_GENERIC == 1
void
pmap_zero_page_generic(paddr_t phys)
{
#ifdef DEBUG
        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);

        if (pg->mdpage.pvh_list != NULL)
                panic("pmap_zero_page: page has mappings");
#endif

        KDASSERT((phys & PGOFSET) == 0);

        /*
         * Hook in the page, zero it, and purge the cache for that
         * zeroed page.  Invalidate the TLB as needed.
         */
        *cdst_pte = L2_S_PROTO | phys |
            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
        PTE_SYNC(cdst_pte);
        cpu_tlb_flushD_SE(cdstp);
        cpu_cpwait();
        bzero_page(cdstp);
        cpu_dcache_wbinv_range(cdstp, NBPG);
}
#endif /* ARM_MMU_GENERIC == 1 */

#if ARM_MMU_XSCALE == 1
void
pmap_zero_page_xscale(paddr_t phys)
{
#ifdef DEBUG
        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);

        if (pg->mdpage.pvh_list != NULL)
                panic("pmap_zero_page: page has mappings");
#endif

        KDASSERT((phys & PGOFSET) == 0);

        /*
         * Hook in the page, zero it, and purge the cache for that
         * zeroed page.  Invalidate the TLB as needed.
         */
        *cdst_pte = L2_S_PROTO | phys |
            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
        PTE_SYNC(cdst_pte);
        cpu_tlb_flushD_SE(cdstp);
        cpu_cpwait();
        bzero_page(cdstp);
        xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */
1871
1872 /* pmap_pageidlezero()
1873 *
1874 * The same as above, except that we assume that the page is not
1875 * mapped. This means we never have to flush the cache first. Called
1876 * from the idle loop.
1877 */
1878 boolean_t
1879 pmap_pageidlezero(paddr_t phys)
1880 {
1881 int i, *ptr;
1882 boolean_t rv = TRUE;
1883 #ifdef DEBUG
1884 struct vm_page *pg;
1885
1886 pg = PHYS_TO_VM_PAGE(phys);
1887 if (pg->mdpage.pvh_list != NULL)
1888 panic("pmap_pageidlezero: page has mappings");
1889 #endif
1890
1891 KDASSERT((phys & PGOFSET) == 0);
1892
1893 /*
1894 * Hook in the page, zero it, and purge the cache for that
1895 * zeroed page. Invalidate the TLB as needed.
1896 */
1897 *cdst_pte = L2_S_PROTO | phys |
1898 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1899 PTE_SYNC(cdst_pte);
1900 cpu_tlb_flushD_SE(cdstp);
1901 cpu_cpwait();
1902
1903 for (i = 0, ptr = (int *)cdstp;
1904 i < (NBPG / sizeof(int)); i++) {
1905 if (sched_whichqs != 0) {
1906 /*
1907 * A process has become ready. Abort now,
1908 * so we don't keep it waiting while we
1909 * do slow memory access to finish this
1910 * page.
1911 */
1912 rv = FALSE;
1913 break;
1914 }
1915 *ptr++ = 0;
1916 }
1917
1918 if (rv)
1919 /*
1920 		 * If we aborted, we'll rezero this page again later, so don't
1921 		 * purge it unless we finished it.
1922 */
1923 cpu_dcache_wbinv_range(cdstp, NBPG);
1924 return (rv);
1925 }
1926
1927 /*
1928 * pmap_copy_page()
1929 *
1930 * Copy one physical page into another, by mapping the pages into
1931  * hook points.  The same comment regarding cacheability as in
1932 * pmap_zero_page also applies here.
1933 */
1934 #if ARM_MMU_GENERIC == 1
1935 void
1936 pmap_copy_page_generic(paddr_t src, paddr_t dst)
1937 {
1938 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1939 #ifdef DEBUG
1940 struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1941
1942 if (dst_pg->mdpage.pvh_list != NULL)
1943 panic("pmap_copy_page: dst page has mappings");
1944 #endif
1945
1946 KDASSERT((src & PGOFSET) == 0);
1947 KDASSERT((dst & PGOFSET) == 0);
1948
1949 /*
1950 * Clean the source page. Hold the source page's lock for
1951 * the duration of the copy so that no other mappings can
1952 * be created while we have a potentially aliased mapping.
1953 */
1954 simple_lock(&src_pg->mdpage.pvh_slock);
1955 (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1956
1957 /*
1958 * Map the pages into the page hook points, copy them, and purge
1959 * the cache for the appropriate page. Invalidate the TLB
1960 * as required.
1961 */
1962 *csrc_pte = L2_S_PROTO | src |
1963 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
1964 PTE_SYNC(csrc_pte);
1965 *cdst_pte = L2_S_PROTO | dst |
1966 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1967 PTE_SYNC(cdst_pte);
1968 cpu_tlb_flushD_SE(csrcp);
1969 cpu_tlb_flushD_SE(cdstp);
1970 cpu_cpwait();
1971 bcopy_page(csrcp, cdstp);
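	/*
	 * The source hook mapping is read-only, so its cache lines hold
	 * no dirty data and can simply be invalidated; the destination
	 * is written back as well (wbinv) once the copy is complete.
	 */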
1972 cpu_dcache_inv_range(csrcp, NBPG);
1973 simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
1974 cpu_dcache_wbinv_range(cdstp, NBPG);
1975 }
1976 #endif /* ARM_MMU_GENERIC == 1 */
1977
1978 #if ARM_MMU_XSCALE == 1
1979 void
1980 pmap_copy_page_xscale(paddr_t src, paddr_t dst)
1981 {
1982 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1983 #ifdef DEBUG
1984 struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1985
1986 if (dst_pg->mdpage.pvh_list != NULL)
1987 panic("pmap_copy_page: dst page has mappings");
1988 #endif
1989
1990 KDASSERT((src & PGOFSET) == 0);
1991 KDASSERT((dst & PGOFSET) == 0);
1992
1993 /*
1994 * Clean the source page. Hold the source page's lock for
1995 * the duration of the copy so that no other mappings can
1996 * be created while we have a potentially aliased mapping.
1997 */
1998 simple_lock(&src_pg->mdpage.pvh_slock);
1999 (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
2000
2001 /*
2002 * Map the pages into the page hook points, copy them, and purge
2003 * the cache for the appropriate page. Invalidate the TLB
2004 * as required.
2005 */
2006 *csrc_pte = L2_S_PROTO | src |
2007 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
2008 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
2009 PTE_SYNC(csrc_pte);
2010 *cdst_pte = L2_S_PROTO | dst |
2011 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
2012 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
2013 PTE_SYNC(cdst_pte);
2014 cpu_tlb_flushD_SE(csrcp);
2015 cpu_tlb_flushD_SE(cdstp);
2016 cpu_cpwait();
2017 bcopy_page(csrcp, cdstp);
2018 simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
2019 xscale_cache_clean_minidata();
2020 }
2021 #endif /* ARM_MMU_XSCALE == 1 */
2022
2023 #if 0
2024 void
2025 pmap_pte_addref(struct pmap *pmap, vaddr_t va)
2026 {
2027 pd_entry_t *pde;
2028 paddr_t pa;
2029 struct vm_page *m;
2030
2031 if (pmap == pmap_kernel())
2032 return;
2033
2034 pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
2035 pa = pmap_pte_pa(pde);
2036 m = PHYS_TO_VM_PAGE(pa);
2037 ++m->wire_count;
2038 #ifdef MYCROFT_HACK
2039 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2040 pmap, va, pde, pa, m, m->wire_count);
2041 #endif
2042 }
2043
2044 void
2045 pmap_pte_delref(struct pmap *pmap, vaddr_t va)
2046 {
2047 pd_entry_t *pde;
2048 paddr_t pa;
2049 struct vm_page *m;
2050
2051 if (pmap == pmap_kernel())
2052 return;
2053
2054 pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
2055 pa = pmap_pte_pa(pde);
2056 m = PHYS_TO_VM_PAGE(pa);
2057 --m->wire_count;
2058 #ifdef MYCROFT_HACK
2059 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2060 pmap, va, pde, pa, m, m->wire_count);
2061 #endif
2062 if (m->wire_count == 0) {
2063 #ifdef MYCROFT_HACK
2064 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
2065 pmap, va, pde, pa, m);
2066 #endif
2067 pmap_unmap_in_l1(pmap, va);
2068 uvm_pagefree(m);
2069 --pmap->pm_stats.resident_count;
2070 }
2071 }
2072 #else
2073 #define pmap_pte_addref(pmap, va)
2074 #define pmap_pte_delref(pmap, va)
2075 #endif
2076
2077 /*
2078 * Since we have a virtually indexed cache, we may need to inhibit caching if
2079 * there is more than one mapping and at least one of them is writable.
2080 * Since we purge the cache on every context switch, we only need to check for
2081 * other mappings within the same pmap, or kernel_pmap.
2082 * This function is also called when a page is unmapped, to possibly reenable
2083 * caching on any remaining mappings.
2084 *
2085 * The code implements the following logic, where:
2086 *
2087 * KW = # of kernel read/write pages
2088 * KR = # of kernel read only pages
2089 * UW = # of user read/write pages
2090 * UR = # of user read only pages
2091 * OW = # of user read/write pages in another pmap, then
2092 *
2093 * KC = kernel mapping is cacheable
2094 * UC = user mapping is cacheable
2095 *
2096 * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
2097 * +---------------------------------------------
2098 * UW=0,UR=0,OW=0 | --- KC=1 KC=1 KC=0
2099 * UW=0,UR>0,OW=0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
2100 * UW=0,UR>0,OW>0 | UC=1 KC=0,UC=1 KC=0,UC=0 KC=0,UC=0
2101 * UW=1,UR=0,OW=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2102 * UW>1,UR>=0,OW>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2103 *
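 * For example, a page with a single kernel read/write mapping (KW=1,
 * KR=0) and a single user read-only mapping (UW=0, UR>0, OW=0) falls
 * in the KC=0,UC=0 cell: both mappings must be made non-cacheable to
 * avoid a stale alias in the virtually indexed cache.
 *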
2104  * Note that the pmap must have its PTEs mapped in, and passed via `ptes'.
2105 */
2106 __inline static void
2107 pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2108 boolean_t clear_cache)
2109 {
2110 if (pmap == pmap_kernel())
2111 pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
2112 else
2113 pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2114 }
2115
2116 static void
2117 pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2118 boolean_t clear_cache)
2119 {
2120 int user_entries = 0;
2121 int user_writable = 0;
2122 int user_cacheable = 0;
2123 int kernel_entries = 0;
2124 int kernel_writable = 0;
2125 int kernel_cacheable = 0;
2126 struct pv_entry *pv;
2127 struct pmap *last_pmap = pmap;
2128
2129 #ifdef DIAGNOSTIC
2130 if (pmap != pmap_kernel())
2131 panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2132 #endif
2133
2134 /*
2135 * Pass one, see if there are both kernel and user pmaps for
2136 * this page. Calculate whether there are user-writable or
2137 * kernel-writable pages.
2138 */
2139 for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
2140 if (pv->pv_pmap != pmap) {
2141 user_entries++;
2142 if (pv->pv_flags & PVF_WRITE)
2143 user_writable++;
2144 if ((pv->pv_flags & PVF_NC) == 0)
2145 user_cacheable++;
2146 } else {
2147 kernel_entries++;
2148 if (pv->pv_flags & PVF_WRITE)
2149 kernel_writable++;
2150 if ((pv->pv_flags & PVF_NC) == 0)
2151 kernel_cacheable++;
2152 }
2153 }
2154
2155 /*
2156 * We know we have just been updating a kernel entry, so if
2157 * all user pages are already cacheable, then there is nothing
2158 * further to do.
2159 */
2160 if (kernel_entries == 0 &&
2161 user_cacheable == user_entries)
2162 return;
2163
2164 if (user_entries) {
2165 /*
2166 * Scan over the list again, for each entry, if it
2167 * might not be set correctly, call pmap_vac_me_user
2168 * to recalculate the settings.
2169 */
2170 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
2171 /*
2172 * We know kernel mappings will get set
2173 * correctly in other calls. We also know
2174 * that if the pmap is the same as last_pmap
2175 * then we've just handled this entry.
2176 */
2177 if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2178 continue;
2179 /*
2180 * If there are kernel entries and this page
2181 * is writable but non-cacheable, then we can
2182 * skip this entry also.
2183 */
2184 if (kernel_entries > 0 &&
2185 (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
2186 (PVF_NC | PVF_WRITE))
2187 continue;
2188 /*
2189 * Similarly if there are no kernel-writable
2190 * entries and the page is already
2191 * read-only/cacheable.
2192 */
2193 if (kernel_writable == 0 &&
2194 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
2195 continue;
2196 /*
2197 * For some of the remaining cases, we know
2198 * that we must recalculate, but for others we
2199 * can't tell if they are correct or not, so
2200 * we recalculate anyway.
2201 */
2202 pmap_unmap_ptes(last_pmap);
2203 last_pmap = pv->pv_pmap;
2204 ptes = pmap_map_ptes(last_pmap);
2205 pmap_vac_me_user(last_pmap, pg, ptes,
2206 pmap_is_curpmap(last_pmap));
2207 }
2208 /* Restore the pte mapping that was passed to us. */
2209 if (last_pmap != pmap) {
2210 pmap_unmap_ptes(last_pmap);
2211 ptes = pmap_map_ptes(pmap);
2212 }
2213 if (kernel_entries == 0)
2214 return;
2215 }
2216
2217 pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2218 return;
2219 }
2220
2221 static void
2222 pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2223 boolean_t clear_cache)
2224 {
2225 struct pmap *kpmap = pmap_kernel();
2226 struct pv_entry *pv, *npv;
2227 int entries = 0;
2228 int writable = 0;
2229 int cacheable_entries = 0;
2230 int kern_cacheable = 0;
2231 int other_writable = 0;
2232
2233 pv = pg->mdpage.pvh_list;
2234 KASSERT(ptes != NULL);
2235
2236 /*
2237 * Count mappings and writable mappings in this pmap.
2238 * Include kernel mappings as part of our own.
2239 * Keep a pointer to the first one.
2240 */
2241 for (npv = pv; npv; npv = npv->pv_next) {
2242 /* Count mappings in the same pmap */
2243 if (pmap == npv->pv_pmap ||
2244 kpmap == npv->pv_pmap) {
2245 if (entries++ == 0)
2246 pv = npv;
2247 /* Cacheable mappings */
2248 if ((npv->pv_flags & PVF_NC) == 0) {
2249 cacheable_entries++;
2250 if (kpmap == npv->pv_pmap)
2251 kern_cacheable++;
2252 }
2253 /* Writable mappings */
2254 if (npv->pv_flags & PVF_WRITE)
2255 ++writable;
2256 } else if (npv->pv_flags & PVF_WRITE)
2257 other_writable = 1;
2258 }
2259
2260 	PDEBUG(3, printf("pmap_vac_me_harder: pmap %p Entries %d, "
2261 "writable %d cacheable %d %s\n", pmap, entries, writable,
2262 cacheable_entries, clear_cache ? "clean" : "no clean"));
2263
2264 /*
2265 * Enable or disable caching as necessary.
2266 * Note: the first entry might be part of the kernel pmap,
2267 * so we can't assume this is indicative of the state of the
2268 * other (maybe non-kpmap) entries.
2269 */
2270 if ((entries > 1 && writable) ||
2271 (entries > 0 && pmap == kpmap && other_writable)) {
2272 if (cacheable_entries == 0)
2273 return;
2274 for (npv = pv; npv; npv = npv->pv_next) {
2275 if ((pmap == npv->pv_pmap
2276 || kpmap == npv->pv_pmap) &&
2277 (npv->pv_flags & PVF_NC) == 0) {
2278 ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
2279 PTE_SYNC_CURRENT(pmap,
2280 &ptes[arm_btop(npv->pv_va)]);
2281 npv->pv_flags |= PVF_NC;
2282 /*
2283 * If this page needs flushing from the
2284 * cache, and we aren't going to do it
2285 * below, do it now.
2286 */
2287 if ((cacheable_entries < 4 &&
2288 (clear_cache || npv->pv_pmap == kpmap)) ||
2289 (npv->pv_pmap == kpmap &&
2290 !clear_cache && kern_cacheable < 4)) {
2291 cpu_idcache_wbinv_range(npv->pv_va,
2292 NBPG);
2293 cpu_tlb_flushID_SE(npv->pv_va);
2294 }
2295 }
2296 }
2297 if ((clear_cache && cacheable_entries >= 4) ||
2298 kern_cacheable >= 4) {
2299 cpu_idcache_wbinv_all();
2300 cpu_tlb_flushID();
2301 }
2302 cpu_cpwait();
2303 } else if (entries > 0) {
2304 /*
2305 		 * Turn caching back on for some pages.  If it is a kernel
2306 * page, only do so if there are no other writable pages.
2307 */
2308 for (npv = pv; npv; npv = npv->pv_next) {
2309 if ((pmap == npv->pv_pmap ||
2310 (kpmap == npv->pv_pmap && other_writable == 0)) &&
2311 (npv->pv_flags & PVF_NC)) {
2312 ptes[arm_btop(npv->pv_va)] |=
2313 pte_l2_s_cache_mode;
2314 PTE_SYNC_CURRENT(pmap,
2315 &ptes[arm_btop(npv->pv_va)]);
2316 npv->pv_flags &= ~PVF_NC;
2317 }
2318 }
2319 }
2320 }
2321
2322 /*
2323 * pmap_remove()
2324 *
2325 * pmap_remove is responsible for nuking a number of mappings for a range
2326 * of virtual address space in the current pmap. To do this efficiently
2327 * is interesting, because in a number of cases a wide virtual address
2328 * range may be supplied that contains few actual mappings. So, the
2329 * optimisations are:
2330 * 1. Try and skip over hunks of address space for which an L1 entry
2331 * does not exist.
2332 * 2. Build up a list of pages we've hit, up to a maximum, so we can
2333 * maybe do just a partial cache clean. This path of execution is
2334 * complicated by the fact that the cache must be flushed _before_
2335 * the PTE is nuked, being a VAC :-)
2336 * 3. Maybe later fast-case a single page, but I don't think this is
2337 * going to make _that_ much difference overall.
2338 */
2339
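/*
 * With at most PMAP_REMOVE_CLEAN_LIST_SIZE pages on the clean list we
 * clean the cache one page at a time below; once the list overflows,
 * pmap_remove() does a single wholesale cache/TLB flush and stops
 * maintaining the list, since the full penalty has already been paid.
 */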
2340 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
2341
2342 void
2343 pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
2344 {
2345 int cleanlist_idx = 0;
2346 struct pagelist {
2347 vaddr_t va;
2348 pt_entry_t *pte;
2349 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
2350 pt_entry_t *pte = 0, *ptes;
2351 paddr_t pa;
2352 int pmap_active;
2353 struct vm_page *pg;
2354
2355 	/* Exit quickly if there is no pmap */
2356 if (!pmap)
2357 return;
2358
2359 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
2360 pmap, sva, eva));
2361
2362 /*
2363 * we lock in the pmap => vm_page direction
2364 */
2365 PMAP_MAP_TO_HEAD_LOCK();
2366
2367 ptes = pmap_map_ptes(pmap);
2368 /* Get a page table pointer */
2369 while (sva < eva) {
2370 if (pmap_pde_page(pmap_pde(pmap, sva)))
2371 break;
2372 sva = (sva & L1_S_FRAME) + L1_S_SIZE;
2373 }
2374
2375 pte = &ptes[arm_btop(sva)];
2376 	/* Note whether the pmap is active; an active pmap requires cache and TLB cleans */
2377 pmap_active = pmap_is_curpmap(pmap);
2378
2379 /* Now loop along */
2380 while (sva < eva) {
2381 /* Check if we can move to the next PDE (l1 chunk) */
2382 if ((sva & L2_ADDR_BITS) == 0) {
2383 if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2384 sva += L1_S_SIZE;
2385 pte += arm_btop(L1_S_SIZE);
2386 continue;
2387 }
2388 }
2389
2390 		/* We've found a valid PTE, so this mapping has to go. */
2391 if (pmap_pte_v(pte)) {
2392 /* Update statistics */
2393 --pmap->pm_stats.resident_count;
2394
2395 /*
2396 * Add this page to our cache remove list, if we can.
2397 			 * If, however, the cache remove list is totally full,
2398 			 * then do a complete cache invalidation, first zeroing
2399 			 * the PTEs already on the list, and ignore
2400 * the lists in future because there's no longer any
2401 * point in bothering with them (we've paid the
2402 * penalty, so will carry on unhindered). Otherwise,
2403 * when we fall out, we just clean the list.
2404 */
2405 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2406 pa = pmap_pte_pa(pte);
2407
2408 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2409 /* Add to the clean list. */
2410 cleanlist[cleanlist_idx].pte = pte;
2411 cleanlist[cleanlist_idx].va = sva;
2412 cleanlist_idx++;
2413 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2414 int cnt;
2415
2416 /* Nuke everything if needed. */
2417 if (pmap_active) {
2418 cpu_idcache_wbinv_all();
2419 cpu_tlb_flushID();
2420 }
2421
2422 /*
2423 * Roll back the previous PTE list,
2424 * and zero out the current PTE.
2425 */
2426 for (cnt = 0;
2427 cnt < PMAP_REMOVE_CLEAN_LIST_SIZE;
2428 cnt++) {
2429 *cleanlist[cnt].pte = 0;
2430 if (pmap_active)
2431 PTE_SYNC(cleanlist[cnt].pte);
2432 else
2433 PTE_FLUSH(cleanlist[cnt].pte);
2434 pmap_pte_delref(pmap,
2435 cleanlist[cnt].va);
2436 }
2437 *pte = 0;
2438 if (pmap_active)
2439 PTE_SYNC(pte);
2440 else
2441 PTE_FLUSH(pte);
2442 pmap_pte_delref(pmap, sva);
2443 cleanlist_idx++;
2444 } else {
2445 /*
2446 * We've already nuked the cache and
2447 * TLB, so just carry on regardless,
2448 * and we won't need to do it again
2449 */
2450 *pte = 0;
2451 if (pmap_active)
2452 PTE_SYNC(pte);
2453 else
2454 PTE_FLUSH(pte);
2455 pmap_pte_delref(pmap, sva);
2456 }
2457
2458 /*
2459 * Update flags. In a number of circumstances,
2460 * we could cluster a lot of these and do a
2461 * number of sequential pages in one go.
2462 */
2463 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2464 struct pv_entry *pve;
2465 simple_lock(&pg->mdpage.pvh_slock);
2466 pve = pmap_remove_pv(pg, pmap, sva);
2467 pmap_free_pv(pmap, pve);
2468 pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2469 simple_unlock(&pg->mdpage.pvh_slock);
2470 }
2471 } else if (pmap_active == 0)
2472 PTE_FLUSH(pte);
2473 sva += NBPG;
2474 pte++;
2475 }
2476
2477 /*
2478 	 * Now, if we've fallen through to here, chances are that there
2479 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2480 */
2481 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2482 u_int cnt;
2483
2484 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2485 if (pmap_active) {
2486 cpu_idcache_wbinv_range(cleanlist[cnt].va,
2487 NBPG);
2488 *cleanlist[cnt].pte = 0;
2489 cpu_tlb_flushID_SE(cleanlist[cnt].va);
2490 PTE_SYNC(cleanlist[cnt].pte);
2491 } else {
2492 *cleanlist[cnt].pte = 0;
2493 PTE_FLUSH(cleanlist[cnt].pte);
2494 }
2495 pmap_pte_delref(pmap, cleanlist[cnt].va);
2496 }
2497 }
2498
2499 pmap_unmap_ptes(pmap);
2500
2501 PMAP_MAP_TO_HEAD_UNLOCK();
2502 }
2503
2504 /*
2505 * Routine: pmap_remove_all
2506 * Function:
2507 * Removes this physical page from
2508 * all physical maps in which it resides.
2509 * Reflects back modify bits to the pager.
2510 */
2511
2512 static void
2513 pmap_remove_all(struct vm_page *pg)
2514 {
2515 struct pv_entry *pv, *npv;
2516 struct pmap *pmap;
2517 pt_entry_t *pte, *ptes;
2518
2519 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
2520
2521 /* set vm_page => pmap locking */
2522 PMAP_HEAD_TO_MAP_LOCK();
2523
2524 simple_lock(&pg->mdpage.pvh_slock);
2525
2526 pv = pg->mdpage.pvh_list;
2527 if (pv == NULL) {
2528 PDEBUG(0, printf("free page\n"));
2529 simple_unlock(&pg->mdpage.pvh_slock);
2530 PMAP_HEAD_TO_MAP_UNLOCK();
2531 return;
2532 }
2533 pmap_clean_page(pv, FALSE);
2534
2535 while (pv) {
2536 pmap = pv->pv_pmap;
2537 ptes = pmap_map_ptes(pmap);
2538 pte = &ptes[arm_btop(pv->pv_va)];
2539
2540 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2541 pv->pv_va, pv->pv_flags));
2542 #ifdef DEBUG
2543 if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
2544 pmap_pte_v(pte) == 0 ||
2545 pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
2546 panic("pmap_remove_all: bad mapping");
2547 #endif /* DEBUG */
2548
2549 /*
2550 * Update statistics
2551 */
2552 --pmap->pm_stats.resident_count;
2553
2554 /* Wired bit */
2555 if (pv->pv_flags & PVF_WIRED)
2556 --pmap->pm_stats.wired_count;
2557
2558 /*
2559 * Invalidate the PTEs.
2560 * XXX: should cluster them up and invalidate as many
2561 * as possible at once.
2562 */
2563
2564 #ifdef needednotdone
2565 reduce wiring count on page table pages as references drop
2566 #endif
2567
2568 *pte = 0;
2569 PTE_SYNC_CURRENT(pmap, pte);
2570 pmap_pte_delref(pmap, pv->pv_va);
2571
2572 npv = pv->pv_next;
2573 pmap_free_pv(pmap, pv);
2574 pv = npv;
2575 pmap_unmap_ptes(pmap);
2576 }
2577 pg->mdpage.pvh_list = NULL;
2578 simple_unlock(&pg->mdpage.pvh_slock);
2579 PMAP_HEAD_TO_MAP_UNLOCK();
2580
2581 PDEBUG(0, printf("done\n"));
2582 cpu_tlb_flushID();
2583 cpu_cpwait();
2584 }
2585
2586
2587 /*
2588 * Set the physical protection on the specified range of this map as requested.
2589 */
2590
2591 void
2592 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
2593 {
2594 pt_entry_t *pte = NULL, *ptes;
2595 struct vm_page *pg;
2596 int flush = 0;
2597
2598 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2599 pmap, sva, eva, prot));
2600
2601 if (~prot & VM_PROT_READ) {
2602 /*
2603 * Just remove the mappings. pmap_update() is not required
2604 * here since the caller should do it.
2605 */
2606 pmap_remove(pmap, sva, eva);
2607 return;
2608 }
2609 if (prot & VM_PROT_WRITE) {
2610 /*
2611 * If this is a read->write transition, just ignore it and let
2612 * uvm_fault() take care of it later.
2613 */
2614 return;
2615 }
2616
2617 /* Need to lock map->head */
2618 PMAP_MAP_TO_HEAD_LOCK();
2619
2620 ptes = pmap_map_ptes(pmap);
2621
2622 /*
2623 	 * OK, at this point, we know we're doing a write-protect operation.
2624 * If the pmap is active, write-back the range.
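	 * A write-back without invalidate is sufficient here: the
	 * mappings remain valid, they are only losing write permission.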
2625 */
2626 if (pmap_is_curpmap(pmap))
2627 cpu_dcache_wb_range(sva, eva - sva);
2628
2629 /*
2630 * We need to acquire a pointer to a page table page before entering
2631 * the following loop.
2632 */
2633 while (sva < eva) {
2634 if (pmap_pde_page(pmap_pde(pmap, sva)))
2635 break;
2636 sva = (sva & L1_S_FRAME) + L1_S_SIZE;
2637 }
2638
2639 pte = &ptes[arm_btop(sva)];
2640
2641 while (sva < eva) {
2642 /* only check once in a while */
2643 if ((sva & L2_ADDR_BITS) == 0) {
2644 if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2645 /* We can race ahead here, to the next pde. */
2646 sva += L1_S_SIZE;
2647 pte += arm_btop(L1_S_SIZE);
2648 continue;
2649 }
2650 }
2651
2652 if (!pmap_pte_v(pte)) {
2653 PTE_FLUSH_ALT(pmap, pte);
2654 goto next;
2655 }
2656
2657 flush = 1;
2658
2659 pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
2660
2661 *pte &= ~L2_S_PROT_W; /* clear write bit */
2662 PTE_SYNC_CURRENT(pmap, pte); /* XXXJRT optimize */
2663
2664 /* Clear write flag */
2665 if (pg != NULL) {
2666 simple_lock(&pg->mdpage.pvh_slock);
2667 (void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
2668 pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2669 simple_unlock(&pg->mdpage.pvh_slock);
2670 }
2671
2672 next:
2673 sva += NBPG;
2674 pte++;
2675 }
2676 pmap_unmap_ptes(pmap);
2677 PMAP_MAP_TO_HEAD_UNLOCK();
2678 if (flush)
2679 cpu_tlb_flushID();
2680 }
2681
2682 /*
2683 * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2684 * int flags)
2685 *
2686 * Insert the given physical page (p) at
2687 * the specified virtual address (v) in the
2688 * target physical map with the protection requested.
2689 *
2690 * If specified, the page will be wired down, meaning
2691 * that the related pte can not be reclaimed.
2692 *
2693 * NB: This is the only routine which MAY NOT lazy-evaluate
2694 * or lose information. That is, this routine must actually
2695 * insert this page into the given map NOW.
2696 */
2697
2698 int
2699 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2700 int flags)
2701 {
2702 pt_entry_t *ptes, opte, npte;
2703 paddr_t opa;
2704 boolean_t wired = (flags & PMAP_WIRED) != 0;
2705 struct vm_page *pg;
2706 struct pv_entry *pve;
2707 int error, nflags;
2708
2709 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2710 va, pa, pmap, prot, wired));
2711
2712 #ifdef DIAGNOSTIC
2713 /* Valid address ? */
2714 if (va >= (pmap_curmaxkvaddr))
2715 panic("pmap_enter: too big");
2716 if (pmap != pmap_kernel() && va != 0) {
2717 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2718 panic("pmap_enter: kernel page in user map");
2719 } else {
2720 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2721 panic("pmap_enter: user page in kernel map");
2722 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2723 panic("pmap_enter: entering PT page");
2724 }
2725 #endif
2726
2727 KDASSERT(((va | pa) & PGOFSET) == 0);
2728
2729 /*
2730 * Get a pointer to the page. Later on in this function, we
2731 * test for a managed page by checking pg != NULL.
2732 */
2733 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2734
2735 /* get lock */
2736 PMAP_MAP_TO_HEAD_LOCK();
2737
2738 /*
2739 * map the ptes. If there's not already an L2 table for this
2740 * address, allocate one.
2741 */
2742 ptes = pmap_map_ptes(pmap); /* locks pmap */
2743 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
2744 struct vm_page *ptp;
2745
2746 /* kernel should be pre-grown */
2747 KASSERT(pmap != pmap_kernel());
2748
2749 /* if failure is allowed then don't try too hard */
2750 ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
2751 if (ptp == NULL) {
2752 if (flags & PMAP_CANFAIL) {
2753 error = ENOMEM;
2754 goto out;
2755 }
2756 panic("pmap_enter: get ptp failed");
2757 }
2758 }
2759 opte = ptes[arm_btop(va)];
2760
2761 nflags = 0;
2762 if (prot & VM_PROT_WRITE)
2763 nflags |= PVF_WRITE;
2764 if (wired)
2765 nflags |= PVF_WIRED;
2766
2767 	/* Is the PTE valid?  If so then this page is already mapped */
2768 if (l2pte_valid(opte)) {
2769 /* Get the physical address of the current page mapped */
2770 opa = l2pte_pa(opte);
2771
2772 /* Are we mapping the same page ? */
2773 if (opa == pa) {
2774 /* Check to see if we're doing rw->ro. */
2775 if ((opte & L2_S_PROT_W) != 0 &&
2776 (prot & VM_PROT_WRITE) == 0) {
2777 /* Yup, flush the cache if current pmap. */
2778 if (pmap_is_curpmap(pmap))
2779 cpu_dcache_wb_range(va, NBPG);
2780 }
2781
2782 /* Has the wiring changed ? */
2783 if (pg != NULL) {
2784 simple_lock(&pg->mdpage.pvh_slock);
2785 (void) pmap_modify_pv(pmap, va, pg,
2786 PVF_WRITE | PVF_WIRED, nflags);
2787 simple_unlock(&pg->mdpage.pvh_slock);
2788 }
2789 } else {
2790 struct vm_page *opg;
2791
2792 /* We are replacing the page with a new one. */
2793 cpu_idcache_wbinv_range(va, NBPG);
2794
2795 /*
2796 * If it is part of our managed memory then we
2797 * must remove it from the PV list
2798 */
2799 if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
2800 simple_lock(&opg->mdpage.pvh_slock);
2801 pve = pmap_remove_pv(opg, pmap, va);
2802 simple_unlock(&opg->mdpage.pvh_slock);
2803 } else {
2804 pve = NULL;
2805 }
2806
2807 goto enter;
2808 }
2809 } else {
2810 opa = 0;
2811 pve = NULL;
2812 pmap_pte_addref(pmap, va);
2813
2814 /* pte is not valid so we must be hooking in a new page */
2815 ++pmap->pm_stats.resident_count;
2816
2817 enter:
2818 /*
2819 * Enter on the PV list if part of our managed memory
2820 */
2821 if (pg != NULL) {
2822 if (pve == NULL) {
2823 pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2824 if (pve == NULL) {
2825 if (flags & PMAP_CANFAIL) {
2826 PTE_FLUSH_ALT(pmap,
2827 ptes[arm_btop(va)]);
2828 error = ENOMEM;
2829 goto out;
2830 }
2831 panic("pmap_enter: no pv entries "
2832 "available");
2833 }
2834 }
2835 /* enter_pv locks pvh when adding */
2836 pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
2837 } else {
2838 if (pve != NULL)
2839 pmap_free_pv(pmap, pve);
2840 }
2841 }
2842
2843 /* Construct the pte, giving the correct access. */
2844 npte = pa;
2845
2846 /* VA 0 is magic. */
2847 if (pmap != pmap_kernel() && va != vector_page)
2848 npte |= L2_S_PROT_U;
2849
2850 if (pg != NULL) {
2851 #ifdef DIAGNOSTIC
2852 if ((flags & VM_PROT_ALL) & ~prot)
2853 panic("pmap_enter: access_type exceeds prot");
2854 #endif
2855 npte |= pte_l2_s_cache_mode;
2856 if (flags & VM_PROT_WRITE) {
2857 npte |= L2_S_PROTO | L2_S_PROT_W;
2858 pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
2859 } else if (flags & VM_PROT_ALL) {
2860 npte |= L2_S_PROTO;
2861 pg->mdpage.pvh_attrs |= PVF_REF;
2862 } else
2863 npte |= L2_TYPE_INV;
2864 } else {
2865 if (prot & VM_PROT_WRITE)
2866 npte |= L2_S_PROTO | L2_S_PROT_W;
2867 else if (prot & VM_PROT_ALL)
2868 npte |= L2_S_PROTO;
2869 else
2870 npte |= L2_TYPE_INV;
2871 }
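
	/*
	 * Note that a PTE left as L2_TYPE_INV is installed invalid, so
	 * the first reference will fault and pmap_handled_emulation() /
	 * pmap_modified_emulation() can record the referenced/modified
	 * attributes before upgrading the PTE.
	 */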
2872
2873 #if ARM_MMU_XSCALE == 1 && defined(XSCALE_CACHE_READ_WRITE_ALLOCATE)
2874 #if ARM_NMMUS > 1
2875 # error "XXX Unable to use read/write-allocate and configure non-XScale"
2876 #endif
2877 /*
2878 * XXX BRUTAL HACK! This allows us to limp along with
2879 * XXX the read/write-allocate cache mode.
2880 */
2881 if (pmap == pmap_kernel())
2882 npte &= ~L2_XSCALE_T_TEX(TEX_XSCALE_X);
2883 #endif
2884 ptes[arm_btop(va)] = npte;
2885 PTE_SYNC_CURRENT(pmap, &ptes[arm_btop(va)]);
2886
2887 if (pg != NULL) {
2888 simple_lock(&pg->mdpage.pvh_slock);
2889 pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
2890 simple_unlock(&pg->mdpage.pvh_slock);
2891 }
2892
2893 /* Better flush the TLB ... */
2894 cpu_tlb_flushID_SE(va);
2895 error = 0;
2896 out:
2897 pmap_unmap_ptes(pmap); /* unlocks pmap */
2898 PMAP_MAP_TO_HEAD_UNLOCK();
2899
2900 return error;
2901 }
2902
2903 /*
2904 * pmap_kenter_pa: enter a kernel mapping
2905 *
2906  * => no need to lock anything; assume va is already allocated
2907 * => should be faster than normal pmap enter function
2908 */
2909 void
2910 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2911 {
2912 pt_entry_t *pte;
2913
2914 pte = vtopte(va);
2915 KASSERT(!pmap_pte_v(pte));
2916
2917 #ifdef PMAP_ALIAS_DEBUG
2918 {
2919 struct vm_page *pg;
2920 int s;
2921
2922 pg = PHYS_TO_VM_PAGE(pa);
2923 if (pg != NULL) {
2924 s = splhigh();
2925 if (pg->mdpage.ro_mappings == 0 &&
2926 pg->mdpage.rw_mappings == 0 &&
2927 pg->mdpage.kro_mappings == 0 &&
2928 pg->mdpage.krw_mappings == 0) {
2929 /* This case is okay. */
2930 } else if (pg->mdpage.rw_mappings == 0 &&
2931 pg->mdpage.krw_mappings == 0 &&
2932 (prot & VM_PROT_WRITE) == 0) {
2933 /* This case is okay. */
2934 } else {
2935 /* Something is awry. */
2936 printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
2937 "prot 0x%x\n", pg->mdpage.ro_mappings,
2938 pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
2939 pg->mdpage.krw_mappings, prot);
2940 Debugger();
2941 }
2942 if (prot & VM_PROT_WRITE)
2943 pg->mdpage.krw_mappings++;
2944 else
2945 pg->mdpage.kro_mappings++;
2946 splx(s);
2947 }
2948 }
2949 #endif /* PMAP_ALIAS_DEBUG */
2950
2951 *pte = L2_S_PROTO | pa |
2952 L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
2953 PTE_SYNC(pte);
2954 }
2955
2956 void
2957 pmap_kremove(vaddr_t va, vsize_t len)
2958 {
2959 pt_entry_t *pte;
2960 vaddr_t ova = va;
2961 vaddr_t olen = len;
2962
2963 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2964
2965 /*
2966 * We assume that we will only be called with small
2967 * regions of memory.
2968 */
2969
2970 KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
2971 pte = vtopte(va);
2972 #ifdef PMAP_ALIAS_DEBUG
2973 {
2974 struct vm_page *pg;
2975 int s;
2976
2977 if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
2978 (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
2979 s = splhigh();
2980 if (*pte & L2_S_PROT_W) {
2981 KASSERT(pg->mdpage.krw_mappings != 0);
2982 pg->mdpage.krw_mappings--;
2983 } else {
2984 KASSERT(pg->mdpage.kro_mappings != 0);
2985 pg->mdpage.kro_mappings--;
2986 }
2987 splx(s);
2988 }
2989 }
2990 #endif /* PMAP_ALIAS_DEBUG */
2991 cpu_idcache_wbinv_range(va, PAGE_SIZE);
2992 *pte = 0;
2993 cpu_tlb_flushID_SE(va);
2994 }
2995 PTE_SYNC_RANGE(vtopte(ova), olen >> PAGE_SHIFT);
2996 }
2997
2998 /*
2999 * pmap_page_protect:
3000 *
3001 * Lower the permission for all mappings to a given page.
3002 */
3003
3004 void
3005 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
3006 {
3007
3008 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
3009 VM_PAGE_TO_PHYS(pg), prot));
3010
3011 	switch (prot) {
3012 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
3013 case VM_PROT_READ|VM_PROT_WRITE:
3014 return;
3015
3016 case VM_PROT_READ:
3017 case VM_PROT_READ|VM_PROT_EXECUTE:
3018 pmap_clearbit(pg, PVF_WRITE);
3019 break;
3020
3021 default:
3022 pmap_remove_all(pg);
3023 break;
3024 }
3025 }
3026
3027
3028 /*
3029 * Routine: pmap_unwire
3030 * Function: Clear the wired attribute for a map/virtual-address
3031 * pair.
3032 * In/out conditions:
3033 * The mapping must already exist in the pmap.
3034 */
3035
3036 void
3037 pmap_unwire(struct pmap *pmap, vaddr_t va)
3038 {
3039 pt_entry_t *ptes;
3040 struct vm_page *pg;
3041 paddr_t pa;
3042
3043 PMAP_MAP_TO_HEAD_LOCK();
3044 ptes = pmap_map_ptes(pmap); /* locks pmap */
3045
3046 if (pmap_pde_v(pmap_pde(pmap, va))) {
3047 #ifdef DIAGNOSTIC
3048 if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3049 panic("pmap_unwire: invalid L2 PTE");
3050 #endif
3051 /* Extract the physical address of the page */
3052 pa = l2pte_pa(ptes[arm_btop(va)]);
3053 PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
3054
3055 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3056 goto out;
3057
3058 /* Update the wired bit in the pv entry for this page. */
3059 simple_lock(&pg->mdpage.pvh_slock);
3060 (void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
3061 simple_unlock(&pg->mdpage.pvh_slock);
3062 }
3063 #ifdef DIAGNOSTIC
3064 else {
3065 panic("pmap_unwire: invalid L1 PTE");
3066 }
3067 #endif
3068 out:
3069 pmap_unmap_ptes(pmap); /* unlocks pmap */
3070 PMAP_MAP_TO_HEAD_UNLOCK();
3071 }
3072
3073 /*
3074 * Routine: pmap_extract
3075 * Function:
3076 * Extract the physical page address associated
3077 * with the given map/virtual_address pair.
3078 */
3079 boolean_t
3080 pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
3081 {
3082 pd_entry_t *pde;
3083 pt_entry_t *pte, *ptes;
3084 paddr_t pa;
3085
3086 PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
3087
3088 ptes = pmap_map_ptes(pmap); /* locks pmap */
3089
3090 pde = pmap_pde(pmap, va);
3091 pte = &ptes[arm_btop(va)];
3092
3093 if (pmap_pde_section(pde)) {
3094 pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
3095 PDEBUG(5, printf("section pa=0x%08lx\n", pa));
3096 goto out;
3097 } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
3098 PDEBUG(5, printf("no mapping\n"));
3099 goto failed;
3100 }
3101
3102 if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
3103 pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3104 PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
3105 goto out;
3106 }
3107
3108 pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3109 PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
3110
3111 out:
3112 if (pap != NULL)
3113 *pap = pa;
3114
3115 PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
3116 pmap_unmap_ptes(pmap); /* unlocks pmap */
3117 return (TRUE);
3118
3119 failed:
3120 PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
3121 pmap_unmap_ptes(pmap); /* unlocks pmap */
3122 return (FALSE);
3123 }
3124
3125
3126 /*
3127 * pmap_copy:
3128 *
3129 * Copy the range specified by src_addr/len from the source map to the
3130 * range dst_addr/len in the destination map.
3131 *
3132 * This routine is only advisory and need not do anything.
3133 */
3134 /* Call deleted in <arm/arm32/pmap.h> */
3135
3136 #if defined(PMAP_DEBUG)
3137 void
3138 pmap_dump_pvlist(phys, m)
3139 vaddr_t phys;
3140 char *m;
3141 {
3142 struct vm_page *pg;
3143 struct pv_entry *pv;
3144
3145 if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
3146 printf("INVALID PA\n");
3147 return;
3148 }
3149 simple_lock(&pg->mdpage.pvh_slock);
3150 printf("%s %08lx:", m, phys);
3151 if (pg->mdpage.pvh_list == NULL) {
3152 simple_unlock(&pg->mdpage.pvh_slock);
3153 printf(" no mappings\n");
3154 return;
3155 }
3156
3157 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
3158 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3159 pv->pv_va, pv->pv_flags);
3160
3161 printf("\n");
3162 simple_unlock(&pg->mdpage.pvh_slock);
3163 }
3164
3165 #endif /* PMAP_DEBUG */
3166
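/*
 * pmap_map_ptes: map a pmap's page tables into KVA and lock the pmap
 *
 * => returns the VA at which the pmap's PTEs are mapped
 */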
3167 static pt_entry_t *
3168 pmap_map_ptes(struct pmap *pmap)
3169 {
3170 struct proc *p;
3171
3172 /* the kernel's pmap is always accessible */
3173 if (pmap == pmap_kernel()) {
3174 return (pt_entry_t *)PTE_BASE;
3175 }
3176
3177 if (pmap_is_curpmap(pmap)) {
3178 simple_lock(&pmap->pm_obj.vmobjlock);
3179 return (pt_entry_t *)PTE_BASE;
3180 }
3181
3182 p = curproc;
3183 KDASSERT(p != NULL);
3184
3185 /* need to lock both curpmap and pmap: use ordered locking */
3186 if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
3187 simple_lock(&pmap->pm_obj.vmobjlock);
3188 simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3189 } else {
3190 simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3191 simple_lock(&pmap->pm_obj.vmobjlock);
3192 }
3193
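	/*
	 * Map the target pmap's page tables at the alternate recursive
	 * mapping window (APTE_BASE) in the current address space, then
	 * flush the data TLB so no stale entries cover that window.
	 */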
3194 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE,
3195 pmap->pm_pptpt, 0);
3196 cpu_tlb_flushD();
3197 cpu_cpwait();
3198 return (pt_entry_t *)APTE_BASE;
3199 }
3200
3201 /*
3202 * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3203 */
3204
3205 static void
3206 pmap_unmap_ptes(struct pmap *pmap)
3207 {
3208
3209 if (pmap == pmap_kernel()) {
3210 return;
3211 }
3212 if (pmap_is_curpmap(pmap)) {
3213 simple_unlock(&pmap->pm_obj.vmobjlock);
3214 } else {
3215 KDASSERT(curproc != NULL);
3216 simple_unlock(&pmap->pm_obj.vmobjlock);
3217 simple_unlock(
3218 &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3219 }
3220 }
3221
3222 /*
3223 * Modify pte bits for all ptes corresponding to the given physical address.
3224 * We use `maskbits' rather than `clearbits' because we're always passing
3225 * constants and the latter would require an extra inversion at run-time.
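 * E.g. pmap_clearbit(pg, PVF_WRITE) write-protects every mapping of pg.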
3226 */
3227
3228 static void
3229 pmap_clearbit(struct vm_page *pg, u_int maskbits)
3230 {
3231 struct pv_entry *pv;
3232 pt_entry_t *ptes, npte, opte;
3233 vaddr_t va;
3234
3235 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
3236 VM_PAGE_TO_PHYS(pg), maskbits));
3237
3238 PMAP_HEAD_TO_MAP_LOCK();
3239 simple_lock(&pg->mdpage.pvh_slock);
3240
3241 /*
3242 * Clear saved attributes (modify, reference)
3243 */
3244 pg->mdpage.pvh_attrs &= ~maskbits;
3245
3246 if (pg->mdpage.pvh_list == NULL) {
3247 simple_unlock(&pg->mdpage.pvh_slock);
3248 PMAP_HEAD_TO_MAP_UNLOCK();
3249 return;
3250 }
3251
3252 /*
3253 	 * Loop over all current mappings, setting/clearing as appropriate.
3254 */
3255 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
3256 #ifdef PMAP_ALIAS_DEBUG
3257 {
3258 int s = splhigh();
3259 if ((maskbits & PVF_WRITE) != 0 &&
3260 (pv->pv_flags & PVF_WRITE) != 0) {
3261 KASSERT(pg->mdpage.rw_mappings != 0);
3262 pg->mdpage.rw_mappings--;
3263 pg->mdpage.ro_mappings++;
3264 }
3265 splx(s);
3266 }
3267 #endif /* PMAP_ALIAS_DEBUG */
3268 va = pv->pv_va;
3269 pv->pv_flags &= ~maskbits;
3270 ptes = pmap_map_ptes(pv->pv_pmap); /* locks pmap */
3271 KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
3272 npte = opte = ptes[arm_btop(va)];
3273 if (maskbits & (PVF_WRITE|PVF_MOD)) {
3274 if ((pv->pv_flags & PVF_NC)) {
3275 /*
3276 				 * Entry is not cacheable: re-enable
3277 				 * the cache; nothing to flush.
3278 				 *
3279 				 * Don't turn caching on again if this
3280 				 * is a modified emulation.  That
3281 				 * would be inconsistent with the
3282 				 * settings created by
3283 				 * pmap_vac_me_harder().
3284 				 *
3285 				 * There's no need to call
3286 				 * pmap_vac_me_harder() here: all
3287 				 * pages are losing their write
3288 				 * permission.
3289 *
3290 */
3291 if (maskbits & PVF_WRITE) {
3292 npte |= pte_l2_s_cache_mode;
3293 pv->pv_flags &= ~PVF_NC;
3294 }
3295 } else if (pmap_is_curpmap(pv->pv_pmap)) {
3296 /*
3297 * Entry is cacheable: check if pmap is
3298 * current if it is flush it,
3299 * otherwise it won't be in the cache
3300 */
3301 cpu_idcache_wbinv_range(pv->pv_va, NBPG);
3302 }
3303
3304 /* make the pte read only */
3305 npte &= ~L2_S_PROT_W;
3306 }
3307
3308 if (maskbits & PVF_REF) {
3309 if (pmap_is_curpmap(pv->pv_pmap) &&
3310 (pv->pv_flags & PVF_NC) == 0) {
3311 /*
3312 * Check npte here; we may have already
3313 * done the wbinv above, and the validity
3314 * of the PTE is the same for opte and
3315 * npte.
3316 */
3317 if (npte & L2_S_PROT_W) {
3318 cpu_idcache_wbinv_range(pv->pv_va,
3319 NBPG);
3320 } else if ((npte & L2_TYPE_MASK)
3321 != L2_TYPE_INV) {
3322 /* XXXJRT need idcache_inv_range */
3323 cpu_idcache_wbinv_range(pv->pv_va,
3324 NBPG);
3325 }
3326 }
3327
3328 /* make the pte invalid */
3329 npte = (npte & ~L2_TYPE_MASK) | L2_TYPE_INV;
3330 }
3331
3332 if (npte != opte) {
3333 ptes[arm_btop(va)] = npte;
3334 PTE_SYNC_CURRENT(pv->pv_pmap, &ptes[arm_btop(va)]);
3335 /* Flush the TLB entry if a current pmap. */
3336 if (pmap_is_curpmap(pv->pv_pmap))
3337 cpu_tlb_flushID_SE(pv->pv_va);
3338 } else
3339 PTE_FLUSH_ALT(pv->pv_pmap, &ptes[arm_btop(va)]);
3340
3341 pmap_unmap_ptes(pv->pv_pmap); /* unlocks pmap */
3342 }
3343 cpu_cpwait();
3344
3345 simple_unlock(&pg->mdpage.pvh_slock);
3346 PMAP_HEAD_TO_MAP_UNLOCK();
3347 }
3348
3349 /*
3350 * pmap_clear_modify:
3351 *
3352 * Clear the "modified" attribute for a page.
3353 */
3354 boolean_t
3355 pmap_clear_modify(struct vm_page *pg)
3356 {
3357 boolean_t rv;
3358
3359 if (pg->mdpage.pvh_attrs & PVF_MOD) {
3360 rv = TRUE;
3361 pmap_clearbit(pg, PVF_MOD);
3362 } else
3363 rv = FALSE;
3364
3365 PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3366 VM_PAGE_TO_PHYS(pg), rv));
3367
3368 return (rv);
3369 }
3370
3371 /*
3372 * pmap_clear_reference:
3373 *
3374 * Clear the "referenced" attribute for a page.
3375 */
3376 boolean_t
3377 pmap_clear_reference(struct vm_page *pg)
3378 {
3379 boolean_t rv;
3380
3381 if (pg->mdpage.pvh_attrs & PVF_REF) {
3382 rv = TRUE;
3383 pmap_clearbit(pg, PVF_REF);
3384 } else
3385 rv = FALSE;
3386
3387 PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
3388 VM_PAGE_TO_PHYS(pg), rv));
3389
3390 return (rv);
3391 }
3392
3393 /*
3394 * pmap_is_modified:
3395 *
3396 * Test if a page has the "modified" attribute.
3397 */
3398 /* See <arm/arm32/pmap.h> */
3399
3400 /*
3401 * pmap_is_referenced:
3402 *
3403 * Test if a page has the "referenced" attribute.
3404 */
3405 /* See <arm/arm32/pmap.h> */
3406
3407 int
3408 pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
3409 {
3410 pt_entry_t *ptes;
3411 struct vm_page *pg;
3412 paddr_t pa;
3413 u_int flags;
3414 int rv = 0;
3415
3416 PDEBUG(2, printf("pmap_modified_emulation\n"));
3417
3418 PMAP_MAP_TO_HEAD_LOCK();
3419 ptes = pmap_map_ptes(pmap); /* locks pmap */
3420
3421 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3422 PDEBUG(2, printf("L1 PTE invalid\n"));
3423 goto out;
3424 }
3425
3426 PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3427
3428 /*
3429 * Don't need to PTE_FLUSH_ALT() here; this is always done
3430 * with the current pmap.
3431 */
3432
3433 	/* Check for an invalid PTE */
3434 if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3435 goto out;
3436
3437 /* This can happen if user code tries to access kernel memory. */
3438 if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
3439 goto out;
3440
3441 /* Extract the physical address of the page */
3442 pa = l2pte_pa(ptes[arm_btop(va)]);
3443 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3444 goto out;
3445
3446 /* Get the current flags for this page. */
3447 simple_lock(&pg->mdpage.pvh_slock);
3448
3449 flags = pmap_modify_pv(pmap, va, pg, 0, 0);
3450 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3451
3452 /*
3453 	 * Do the flags say this page is writable?  If not then it is a
3454 	 * genuine write fault.  If yes then the write fault is our fault,
3455 	 * as we did not reflect the write access in the PTE.  Now that we
3456 	 * know a write has occurred, we can correct this and also set
3457 	 * the modified bit.
3458 */
3459 if (~flags & PVF_WRITE) {
3460 simple_unlock(&pg->mdpage.pvh_slock);
3461 goto out;
3462 }
3463
3464 PDEBUG(0,
3465 printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
3466 va, ptes[arm_btop(va)]));
3467 pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
3468
3469 /*
3470 * Re-enable write permissions for the page. No need to call
3471 * pmap_vac_me_harder(), since this is just a
3472 * modified-emulation fault, and the PVF_WRITE bit isn't changing.
3473 * We've already set the cacheable bits based on the assumption
3474 * that we can write to this page.
3475 */
3476 ptes[arm_btop(va)] =
3477 (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
3478 PTE_SYNC(&ptes[arm_btop(va)]);
3479 PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3480
3481 simple_unlock(&pg->mdpage.pvh_slock);
3482
3483 cpu_tlb_flushID_SE(va);
3484 cpu_cpwait();
3485 rv = 1;
3486 out:
3487 pmap_unmap_ptes(pmap); /* unlocks pmap */
3488 PMAP_MAP_TO_HEAD_UNLOCK();
3489 return (rv);
3490 }
3491
3492 int
3493 pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
3494 {
3495 pt_entry_t *ptes;
3496 struct vm_page *pg;
3497 paddr_t pa;
3498 int rv = 0;
3499
3500 PDEBUG(2, printf("pmap_handled_emulation\n"));
3501
3502 PMAP_MAP_TO_HEAD_LOCK();
3503 ptes = pmap_map_ptes(pmap); /* locks pmap */
3504
3505 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3506 PDEBUG(2, printf("L1 PTE invalid\n"));
3507 goto out;
3508 }
3509
3510 PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3511
3512 /*
3513 * Don't need to PTE_FLUSH_ALT() here; this is always done
3514 * with the current pmap.
3515 */
3516
3517 /* Check for invalid pte */
3518 if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3519 goto out;
3520
3521 /* This can happen if user code tries to access kernel memory. */
3522 if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
3523 goto out;
3524
3525 /* Extract the physical address of the page */
3526 pa = l2pte_pa(ptes[arm_btop(va)]);
3527 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3528 goto out;
3529
3530 simple_lock(&pg->mdpage.pvh_slock);
3531
3532 /*
3533 	 * OK, we just enable the PTE and mark the attributes as handled.
3534 * XXX Should we traverse the PV list and enable all PTEs?
3535 */
3536 PDEBUG(0,
3537 printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
3538 va, ptes[arm_btop(va)]));
3539 pg->mdpage.pvh_attrs |= PVF_REF;
3540
3541 ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
3542 PTE_SYNC(&ptes[arm_btop(va)]);
3543 PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3544
3545 simple_unlock(&pg->mdpage.pvh_slock);
3546
3547 cpu_tlb_flushID_SE(va);
3548 cpu_cpwait();
3549 rv = 1;
3550 out:
3551 pmap_unmap_ptes(pmap); /* unlocks pmap */
3552 PMAP_MAP_TO_HEAD_UNLOCK();
3553 return (rv);
3554 }
3555
3556 /*
3557 * pmap_collect: free resources held by a pmap
3558 *
3559 * => optional function.
3560 * => called when a process is swapped out to free memory.
3561 */
3562
3563 void
3564 pmap_collect(struct pmap *pmap)
3565 {
3566 }
3567
3568 /*
3569 * Routine: pmap_procwr
3570 *
3571 * Function:
3572 * Synchronize caches corresponding to [addr, addr+len) in p.
3573 *
3574 */
3575 void
3576 pmap_procwr(struct proc *p, vaddr_t va, int len)
3577 {
3578 /* We only need to do anything if it is the current process. */
3579 if (p == curproc)
3580 cpu_icache_sync_range(va, len);
3581 }
3582 /*
3583 * PTP functions
3584 */
3585
3586 /*
3587 * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3588 *
3589 * => pmap should NOT be pmap_kernel()
3590 * => pmap should be locked
3591 */
3592
3593 static struct vm_page *
3594 pmap_get_ptp(struct pmap *pmap, vaddr_t va)
3595 {
3596 struct vm_page *ptp;
3597
3598 if (pmap_pde_page(pmap_pde(pmap, va))) {
3599
3600 /* valid... check hint (saves us a PA->PG lookup) */
3601 if (pmap->pm_ptphint &&
3602 (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
3603 VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3604 return (pmap->pm_ptphint);
3605 ptp = uvm_pagelookup(&pmap->pm_obj, va);
3606 #ifdef DIAGNOSTIC
3607 if (ptp == NULL)
3608 panic("pmap_get_ptp: unmanaged user PTP");
3609 #endif
3610 pmap->pm_ptphint = ptp;
3611 		return (ptp);
3612 }
3613
3614 /* allocate a new PTP (updates ptphint) */
3615 	return (pmap_alloc_ptp(pmap, va));
3616 }
3617
3618 /*
3619 * pmap_alloc_ptp: allocate a PTP for a PMAP
3620 *
3621 * => pmap should already be locked by caller
3622 * => we use the ptp's wire_count to count the number of active mappings
3623 * in the PTP (we start it at one to prevent any chance this PTP
3624 * will ever leak onto the active/inactive queues)
3625 */
3626
3627 /*__inline */ static struct vm_page *
3628 pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
3629 {
3630 struct vm_page *ptp;
3631
3632 ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3633 UVM_PGA_USERESERVE|UVM_PGA_ZERO);
3634 if (ptp == NULL)
3635 return (NULL);
3636
3637 /* got one! */
3638 ptp->flags &= ~PG_BUSY; /* never busy */
3639 ptp->wire_count = 1; /* no mappings yet */
3640 pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp),
3641 PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
3642 pmap->pm_stats.resident_count++; /* count PTP as resident */
3643 pmap->pm_ptphint = ptp;
3644 return (ptp);
3645 }
3646
3647 vaddr_t
3648 pmap_growkernel(vaddr_t maxkvaddr)
3649 {
3650 struct pmap *kpm = pmap_kernel(), *pm;
3651 int s;
3652 paddr_t ptaddr;
3653 struct vm_page *ptp;
3654
3655 if (maxkvaddr <= pmap_curmaxkvaddr)
3656 goto out; /* we are OK */
3657 NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
3658 pmap_curmaxkvaddr, maxkvaddr));
3659
3660 /*
3661 * whoops! we need to add kernel PTPs
3662 */
3663
3664 s = splhigh(); /* to be safe */
3665 simple_lock(&kpm->pm_obj.vmobjlock);
3666 	/* Due to the way the ARM pmap works, we map 4MB at a time. */
3667 for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
3668 pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
3669
3670 if (uvm.page_init_done == FALSE) {
3671
3672 /*
3673 * we're growing the kernel pmap early (from
3674 * uvm_pageboot_alloc()). this case must be
3675 * handled a little differently.
3676 */
3677
3678 if (uvm_page_physget(&ptaddr) == FALSE)
3679 panic("pmap_growkernel: out of memory");
3680 pmap_zero_page(ptaddr);
3681
3682 /* map this page in */
3683 pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr,
3684 PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
3685
3686 /* count PTP as resident */
3687 kpm->pm_stats.resident_count++;
3688 continue;
3689 }
3690
3691 /*
3692 * THIS *MUST* BE CODED SO AS TO WORK IN THE
3693 * pmap_initialized == FALSE CASE! WE MAY BE
3694 * INVOKED WHILE pmap_init() IS RUNNING!
3695 */
3696
3697 if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
3698 panic("pmap_growkernel: alloc ptp failed");
3699
3700 /* distribute new kernel PTP to all active pmaps */
3701 simple_lock(&pmaps_lock);
3702 LIST_FOREACH(pm, &pmaps, pm_list) {
3703 pmap_map_in_l1(pm, pmap_curmaxkvaddr,
3704 VM_PAGE_TO_PHYS(ptp),
3705 PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
3706 }
3707
3708 /* Invalidate the PTPT cache. */
3709 pool_cache_invalidate(&pmap_ptpt_cache);
3710 pmap_ptpt_cache_generation++;
3711
3712 simple_unlock(&pmaps_lock);
3713 }
3714
3715 /*
3716 	 * Flush out the cache: expensive, but growkernel will happen
3717 	 * only rarely.
3718 */
3719 cpu_tlb_flushD();
3720 cpu_cpwait();
3721
3722 simple_unlock(&kpm->pm_obj.vmobjlock);
3723 splx(s);
3724
3725 out:
3726 return (pmap_curmaxkvaddr);
3727 }
3728
3729 /************************ Utility routines ****************************/
3730
3731 /*
3732 * vector_page_setprot:
3733 *
3734 * Manipulate the protection of the vector page.
3735 */
3736 void
3737 vector_page_setprot(int prot)
3738 {
3739 pt_entry_t *pte;
3740
3741 pte = vtopte(vector_page);
3742
3743 *pte = (*pte & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
3744 PTE_SYNC(pte);
3745 cpu_tlb_flushD_SE(vector_page);
3746 cpu_cpwait();
3747 }
3748
3749 /************************ Bootstrapping routines ****************************/
3750
3751 /*
3752 * This list exists for the benefit of pmap_map_chunk(). It keeps track
3753 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3754 * find them as necessary.
3755 *
3756 * Note that the data on this list is not valid after initarm() returns.
3757 */
3758 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3759
3760 static vaddr_t
3761 kernel_pt_lookup(paddr_t pa)
3762 {
3763 pv_addr_t *pv;
3764
3765 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3766 if (pv->pv_pa == pa)
3767 return (pv->pv_va);
3768 }
3769 return (0);
3770 }
3771
3772 /*
3773 * pmap_map_section:
3774 *
3775 * Create a single section mapping.
3776 */
3777 void
3778 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3779 {
3780 pd_entry_t *pde = (pd_entry_t *) l1pt;
3781 pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
3782
3783 KASSERT(((va | pa) & L1_S_OFFSET) == 0);
3784
3785 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3786 L1_S_PROT(PTE_KERNEL, prot) | fl;
3787 }
3788
3789 /*
3790 * pmap_map_entry:
3791 *
3792 * Create a single page mapping.
3793 */
3794 void
3795 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3796 {
3797 pd_entry_t *pde = (pd_entry_t *) l1pt;
3798 pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
3799 pt_entry_t *pte;
3800
3801 KASSERT(((va | pa) & PGOFSET) == 0);
3802
3803 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
3804 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3805
3806 pte = (pt_entry_t *)
3807 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3808 if (pte == NULL)
3809 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3810
3811 pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3812 L2_S_PROT(PTE_KERNEL, prot) | fl;
3813 }
3814
3815 /*
3816 * pmap_link_l2pt:
3817 *
3818 * Link the L2 page table specified by "pa" into the L1
3819 * page table at the slot for "va".
3820 */
3821 void
3822 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
3823 {
3824 pd_entry_t *pde = (pd_entry_t *) l1pt;
3825 u_int slot = va >> L1_S_SHIFT;
3826
3827 KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3828
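	/*
	 * Each coarse L2 table is 1KB and maps 1MB of VA, so the 4KB
	 * page at l2pv->pv_pa supplies four L2 tables and fills four
	 * consecutive L1 slots (4MB of VA in total).
	 */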
3829 pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
3830 pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
3831 pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
3832 pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
3833
3834 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
3835 }
3836
3837 /*
3838 * pmap_map_chunk:
3839 *
3840 * Map a chunk of memory using the most efficient mappings
3841 * possible (section, large page, small page) into the
3842 * provided L1 and L2 tables at the specified virtual address.
3843 */
3844 vsize_t
3845 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3846 int prot, int cache)
3847 {
3848 pd_entry_t *pde = (pd_entry_t *) l1pt;
3849 pt_entry_t *pte, fl;
3850 vsize_t resid;
3851 int i;
3852
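	/* Round the size up to a whole number of pages. */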
3853 resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3854
3855 if (l1pt == 0)
3856 panic("pmap_map_chunk: no L1 table provided");
3857
3858 #ifdef VERBOSE_INIT_ARM
3859 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3860 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3861 #endif
3862
3863 size = resid;
3864
3865 while (resid > 0) {
3866 /* See if we can use a section mapping. */
3867 if (((pa | va) & L1_S_OFFSET) == 0 &&
3868 resid >= L1_S_SIZE) {
3869 fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
3870 #ifdef VERBOSE_INIT_ARM
3871 printf("S");
3872 #endif
3873 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3874 L1_S_PROT(PTE_KERNEL, prot) | fl;
3875 va += L1_S_SIZE;
3876 pa += L1_S_SIZE;
3877 resid -= L1_S_SIZE;
3878 continue;
3879 }
3880
3881 /*
3882 * Ok, we're going to use an L2 table. Make sure
3883 * one is actually in the corresponding L1 slot
3884 * for the current VA.
3885 */
3886 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
3887 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3888
3889 pte = (pt_entry_t *)
3890 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
3891 if (pte == NULL)
3892 panic("pmap_map_chunk: can't find L2 table for VA"
3893 "0x%08lx", va);
3894
3895 /* See if we can use a L2 large page mapping. */
3896 if (((pa | va) & L2_L_OFFSET) == 0 &&
3897 resid >= L2_L_SIZE) {
3898 fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
3899 #ifdef VERBOSE_INIT_ARM
3900 printf("L");
3901 #endif
3902 for (i = 0; i < 16; i++) {
3903 pte[((va >> PGSHIFT) & 0x3f0) + i] =
3904 L2_L_PROTO | pa |
3905 L2_L_PROT(PTE_KERNEL, prot) | fl;
3906 }
3907 va += L2_L_SIZE;
3908 pa += L2_L_SIZE;
3909 resid -= L2_L_SIZE;
3910 continue;
3911 }
3912
3913 /* Use a small page mapping. */
3914 fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
3915 #ifdef VERBOSE_INIT_ARM
3916 printf("P");
3917 #endif
3918 pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3919 L2_S_PROT(PTE_KERNEL, prot) | fl;
3920 va += NBPG;
3921 pa += NBPG;
3922 resid -= NBPG;
3923 }
3924 #ifdef VERBOSE_INIT_ARM
3925 printf("\n");
3926 #endif
3927 return (size);
3928 }

/********************** PTE initialization routines **************************/

/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though they shouldn't).
 */

pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mask;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mask;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mask;

pt_entry_t	pte_l2_s_prot_u;
pt_entry_t	pte_l2_s_prot_w;
pt_entry_t	pte_l2_s_prot_mask;

pt_entry_t	pte_l1_s_proto;
pt_entry_t	pte_l1_c_proto;
pt_entry_t	pte_l2_s_proto;

void		(*pmap_copy_page_func)(paddr_t, paddr_t);
void		(*pmap_zero_page_func)(paddr_t);
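
/*
 * Illustrative sketch (not compiled): when a kernel is configured for
 * more than one MMU class, the L2_S_PROTO/L2_S_PROT/cache-mode style
 * macros resolve to the variables above, so a small-page PTE for a
 * writable, cacheable kernel mapping is composed roughly like this:
 */
#if 0
	paddr_t pa = 0x10000000;	/* hypothetical page frame address */
	pt_entry_t npte;

	npte = pte_l2_s_proto | pa |	/* small-page descriptor + frame */
	    pte_l2_s_prot_w |		/* kernel read/write permission */
	    pte_l2_s_cache_mode;	/* CPU-class cacheability bits */
#endif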

#if ARM_MMU_GENERIC == 1
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;

	pte_l1_s_proto = L1_S_PROTO_generic;
	pte_l1_c_proto = L1_C_PROTO_generic;
	pte_l2_s_proto = L2_S_PROTO_generic;

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
}

#if defined(CPU_ARM9)
void
pmap_pte_init_arm9(void)
{

	/*
	 * ARM9 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */

#if ARM_MMU_XSCALE == 1
void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional write-through
	 * behavior in this case.
	 *
	 * However, there is a bug lurking in this pmap module, or in
	 * other parts of the VM system, or both, which causes corruption
	 * of NFS-backed files when this cache mode is used.  We have
	 * an ugly work-around for this problem (disable r/w-allocate
	 * for managed kernel mappings), but the bug is still evil enough
	 * to consider this cache mode "experimental".
	 */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */

#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		:
		: "r" (auxctl));
}
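
/*
 * Illustrative helper sketch (not compiled): the read-modify-write of
 * the XScale auxiliary control register (CP15 register 1, opcode_2 1)
 * done above and in xscale_setup_minidata() below, factored into one
 * hypothetical routine, e.g. xscale_auxctl_rmw(XSCALE_AUXCTL_P, 0).
 */
#if 0
static __inline void
xscale_auxctl_rmw(uint32_t clear, uint32_t set)
{
	uint32_t auxctl;

	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~clear) | set;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif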

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
void
xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
{
	extern vaddr_t xscale_minidata_clean_addr;
	extern vsize_t xscale_minidata_clean_size; /* already initialized */
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vsize_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round the size up to a page boundary. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08lx", va);
		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/*
	 * Invalidate data and mini-data.  The source register for this
	 * operation is ignored, but must not be read uninitialized.
	 */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
		:
		: "r" (0));

	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		:
		: "r" (auxctl));
}
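
/*
 * Illustrative sketch (not compiled): the clean area mapped above is
 * consumed by reading through it, which displaces (and thereby writes
 * back) any dirty mini-data cache lines.  This assumes the XScale's
 * 32-byte cache line size; the real clean routine lives in
 * CPU-specific assembly.
 */
#if 0
	volatile uint32_t *p;
	vsize_t off;

	for (off = 0; off < xscale_minidata_clean_size; off += 32) {
		p = (volatile uint32_t *)(xscale_minidata_clean_addr + off);
		(void)*p;	/* load one word per cache line */
	}
#endif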
#endif /* ARM_MMU_XSCALE == 1 */
