/*	$NetBSD: pmap.c,v 1.62 2002/03/24 05:52:10 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.62 2002/03/24 05:52:10 thorpej Exp $");

#ifdef PMAP_DEBUG
#define	PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
int pmap_debug_level = -2;
void pmap_dump_pvlist(vaddr_t phys, char *m);

/*
 * for switching to potentially finer grained debugging
 */
#define	PDB_FOLLOW	0x0001
#define	PDB_INIT	0x0002
#define	PDB_ENTER	0x0004
#define	PDB_REMOVE	0x0008
#define	PDB_CREATE	0x0010
#define	PDB_PTPAGE	0x0020
#define	PDB_GROWKERN	0x0040
#define	PDB_BITS	0x0080
#define	PDB_COLLECT	0x0100
#define	PDB_PROTECT	0x0200
#define	PDB_MAP_L1	0x0400
#define	PDB_BOOTSTRAP	0x1000
#define	PDB_PARANOIA	0x2000
#define	PDB_WIRING	0x4000
#define	PDB_PVDUMP	0x8000

int debugmap = 0;
int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
#define	NPDEBUG(_lev_,_stat_) \
	if (pmapdebug & (_lev_)) \
		((_stat_))

#else	/* PMAP_DEBUG */
#define	PDEBUG(_lev_,_stat_)	/* Nothing */
#define	NPDEBUG(_lev_,_stat_)	/* Nothing */
#endif	/* PMAP_DEBUG */

struct pmap kernel_pmap_store;

/*
 * linked list of all non-kernel pmaps
 */

static struct pmap_head pmaps;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;

char *memhook;
extern caddr_t msgbufaddr;

boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
/*
 * locking data structures
 */

static struct lock pmap_main_lock;
static struct simplelock pvalloc_lock;
static struct simplelock pmaps_lock;
#ifdef LOCKDEBUG
#define	PMAP_MAP_TO_HEAD_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define	PMAP_MAP_TO_HEAD_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)

#define	PMAP_HEAD_TO_MAP_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define	PMAP_HEAD_TO_MAP_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
#define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
#define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
#define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
#endif	/* LOCKDEBUG */
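
/*
 * Illustrative note (not in the original): the reader/writer spinlock
 * above gives the two locking directions.  A sketch of a pmap-side
 * operation, e.g. entering a mapping, would look like:
 *
 *	PMAP_MAP_TO_HEAD_LOCK();		   (shared: map -> pv head)
 *	simple_lock(&pmap->pm_obj.vmobjlock);	   (then lock the pmap)
 *	... modify PTEs and pv lists ...
 *	simple_unlock(&pmap->pm_obj.vmobjlock);
 *	PMAP_MAP_TO_HEAD_UNLOCK();
 *
 * whereas page-side operations take the exclusive PMAP_HEAD_TO_MAP
 * direction first, so the two directions order cleanly against each
 * other.
 */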

/*
 * pv_page management structures: locked by pvalloc_lock
 */

TAILQ_HEAD(pv_pagelist, pv_page);
static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs;	/* list of unused pv_pages */
static int pv_nfpvents;			/* # of free pv entries */
static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
static vaddr_t pv_cachedva;		/* cached VA for later use */

#define	PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
#define	PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
					/* high water mark */

/*
 * local prototypes
 */

static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define	ALLOCPV_NEED	0	/* need PV now */
#define	ALLOCPV_TRY	1	/* just try to allocate, don't steal */
#define	ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
static void		 pmap_enter_pv __P((struct vm_page *,
					    struct pv_entry *, struct pmap *,
					    vaddr_t, struct vm_page *, int));
static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pv_doit __P((struct pv_entry *));
static void		 pmap_free_pvpage __P((void));
static boolean_t	 pmap_is_curpmap __P((struct pmap *));
static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
			    vaddr_t));
#define	PMAP_REMOVE_ALL		0	/* remove all mappings */
#define	PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */

static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
	u_int, u_int));

static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
static void pmap_remove_all __P((struct vm_page *));


vsize_t npages;

static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t));
static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t));
__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_end;
extern paddr_t physical_freeend;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
struct l1pt_queue l1pt_static_queue;	/* head of our static l1 queue */
int l1pt_static_queue_count;		/* items in the static l1 queue */
int l1pt_static_create_count;		/* static l1 items created */
struct l1pt_queue l1pt_queue;		/* head of our l1 queue */
int l1pt_queue_count;			/* items in the l1 queue */
int l1pt_create_count;			/* stat - L1's create count */
int l1pt_reuse_count;			/* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
void pmap_copy_on_write __P((struct vm_page *));
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
    vaddr_t l2pa, boolean_t));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));

__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
/*
 * Cache enable bits in PTE to use on pages that are cacheable.
 * On most machines this is cacheable/bufferable, but on some, e.g. arm10,
 * we can choose between write-through and write-back caching.
 */
pt_entry_t pte_cache_mode = (PT_C | PT_B);
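
/*
 * Illustrative example (not in the original): on a core whose data
 * cache supports both policies, early boot code could select
 * write-through by clearing the bufferable bit:
 *
 *	pte_cache_mode = PT_C;		(C=1, B=0: write-through)
 *
 * while the default (PT_C | PT_B) requests write-back where available.
 */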

/*
 * real definition of pv_entry.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		 pv_va;		/* virtual address for mapping */
	int		 pv_flags;	/* flags */
	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};
/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define	PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
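
/*
 * Worked example (illustrative, not in the original): with 4KB pages
 * and 32-bit pointers, sizeof(struct pv_page_info) is 16 bytes (two
 * TAILQ pointers, one free-list pointer, one int) and
 * sizeof(struct pv_entry) is 20 bytes, so
 *
 *	PVE_PER_PVPAGE = (4096 - 16) / 20 = 204
 *
 * giving PVE_LOWAT = 102 and PVE_HIWAT = 102 + 408 = 510 free entries.
 * The exact figures depend on the ABI's padding rules.
 */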

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(level)
	int level;
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

__inline static boolean_t
pmap_is_curpmap(struct pmap *pmap)
{

	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
	    pmap == pmap_kernel())
		return (TRUE);

	return (FALSE);
}

#include "isadma.h"

#if NISADMA > 0
/*
 * Used to protect memory for ISA DMA bounce buffers.  If, when loading
 * pages into the system, memory intersects with any of these ranges,
 * the intersecting memory will be loaded into a lower-priority free list.
 */
bus_dma_segment_t *pmap_isa_dma_ranges;
int pmap_isa_dma_nranges;

boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
	    paddr_t *, psize_t *));

/*
 * Check if a memory range intersects with an ISA DMA range, and
 * return the page-rounded intersection if it does.  The intersection
 * will be placed on a lower-priority free list.
 */
boolean_t
pmap_isa_dma_range_intersect(pa, size, pap, sizep)
	paddr_t pa;
	psize_t size;
	paddr_t *pap;
	psize_t *sizep;
{
	bus_dma_segment_t *ds;
	int i;

	if (pmap_isa_dma_ranges == NULL)
		return (FALSE);

	for (i = 0, ds = pmap_isa_dma_ranges;
	     i < pmap_isa_dma_nranges; i++, ds++) {
		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    ds->ds_addr + ds->ds_len) - pa);
			return (TRUE);
		}
		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(ds->ds_addr);
			*sizep = round_page(min((pa + size) - ds->ds_addr,
			    ds->ds_len));
			return (TRUE);
		}
	}

	/*
	 * No intersection found.
	 */
	return (FALSE);
}
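
/*
 * Worked example (illustrative, not in the original): with a DMA range
 * of ds_addr = 0 and ds_len = 0x100000, a call with pa = 0xf8000 and
 * size = 0x10000 takes the first branch above and yields
 *
 *	*pap   = trunc_page(0xf8000) = 0xf8000
 *	*sizep = round_page(min(0x108000, 0x100000) - 0xf8000) = 0x8000
 *
 * i.e. only the first eight 4KB pages fall inside the ISA DMA range.
 */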
#endif	/* NISADMA > 0 */

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *			one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(pmap, mode)
	struct pmap *pmap;
	int mode;
{
	struct pv_page *pvpage;
	struct pv_entry *pv;

	simple_lock(&pvalloc_lock);

	pvpage = TAILQ_FIRST(&pv_freepages);

	if (pvpage != NULL) {
		pvpage->pvinfo.pvpi_nfree--;
		if (pvpage->pvinfo.pvpi_nfree == 0) {
			/* nothing left in this one? */
			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
		}
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;	/* took one from pool */
	} else {
		pv = NULL;	/* need more of them */
	}

	/*
	 * if below low water mark or we didn't get a pv_entry we try and
	 * create more pv_entrys ...
	 */

	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
		if (pv == NULL)
			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
					       mode : ALLOCPV_NEED);
		else
			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
	}

	simple_unlock(&pvalloc_lock);
	return(pv);
}
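
/*
 * Usage sketch (illustrative, not in the original): a caller that must
 * not fail, such as a mandatory mapping operation, would do
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
 *
 * while an optional operation would pass ALLOCPV_TRY and simply skip
 * its work if NULL comes back.
 */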

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 *	new pv_entry from it.  if we are unable to allocate a pv_page
 *	we make a last ditch effort to steal a pv_page from some other
 *	mapping.  if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(pmap, mode)
	struct pmap *pmap;
	int mode;
{
	struct vm_page *pg;
	struct pv_page *pvpage;
	struct pv_entry *pv;
	int s;

	/*
	 * if we need_entry and we've got unused pv_pages, allocate from there
	 */

	pvpage = TAILQ_FIRST(&pv_unusedpgs);
	if (mode != ALLOCPV_NONEED && pvpage != NULL) {

		/* move it to pv_freepages list */
		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

		/* allocate a pv_entry */
		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;

		pv_nfpvents--;	/* took one from pool */
		return(pv);
	}

	/*
	 * see if we've got a cached unmapped VA that we can map a page in.
	 * if not, try to allocate one.
	 */


	if (pv_cachedva == 0) {
		s = splvm();
		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
		splx(s);
		if (pv_cachedva == 0) {
			return (NULL);
		}
	}

	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
	    UVM_PGA_USERESERVE);

	if (pg == NULL)
		return (NULL);
	pg->flags &= ~PG_BUSY;	/* never busy */

	/*
	 * add a mapping for our new pv_page and free its entrys (save one!)
	 *
	 * NOTE: If we are allocating a PV page for the kernel pmap, the
	 * pmap is already locked!  (...but entering the mapping is safe...)
	 */

	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	pvpage = (struct pv_page *) pv_cachedva;
	pv_cachedva = 0;
	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(pvp, need_entry)
	struct pv_page *pvp;
	boolean_t need_entry;
{
	int tofree, lcv;

	/* do we need to return one? */
	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;

	pvp->pvinfo.pvpi_pvfree = NULL;
	pvp->pvinfo.pvpi_nfree = tofree;
	for (lcv = 0 ; lcv < tofree ; lcv++) {
		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
	}
	if (need_entry)
		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
	else
		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	pv_nfpvents += tofree;
	return((need_entry) ? &pvp->pvents[lcv] : NULL);
}
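
/*
 * Note (illustrative, not in the original): when need_entry is true the
 * loop above links only the first PVE_PER_PVPAGE - 1 entries onto the
 * free list, so &pvp->pvents[lcv] (lcv == tofree after the loop) is the
 * one slot left out, and that is the entry handed back to the caller.
 */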

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(pv)
	struct pv_entry *pv;
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
	pv_nfpvents++;
	pvp->pvinfo.pvpi_nfree++;

	/* nfree == 1 => fully allocated page just became partly allocated */
	if (pvp->pvinfo.pvpi_nfree == 1) {
		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
	}

	/* free it */
	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
	pvp->pvinfo.pvpi_pvfree = pv;

	/*
	 * are all pv_page's pv_entry's free?  move it to unused queue.
	 */

	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	}
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(pmap, pv)
	struct pmap *pmap;
	struct pv_entry *pv;
{
	simple_lock(&pvalloc_lock);
	pmap_free_pv_doit(pv);

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(pmap, pvs)
	struct pmap *pmap;
	struct pv_entry *pvs;
{
	struct pv_entry *nextpv;

	simple_lock(&pvalloc_lock);

	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
		nextpv = pvs->pv_next;
		pmap_free_pv_doit(pvs);
	}

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}


/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *	there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 */

static void
pmap_free_pvpage()
{
	int s;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	struct pv_page *pvp;

	s = splvm(); /* protect kmem_map */

	pvp = TAILQ_FIRST(&pv_unusedpgs);

	/*
	 * note: watch out for pv_initpage which is allocated out of
	 * kernel_map rather than kmem_map.
	 */
	if (pvp == pv_initpage)
		map = kernel_map;
	else
		map = kmem_map;
	if (vm_map_lock_try(map)) {

		/* remove pvp from pv_unusedpgs */
		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

		/* unmap the page */
		dead_entries = NULL;
		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
		    &dead_entries);
		vm_map_unlock(map);

		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, 0);

		pv_nfpvents -= PVE_PER_PVPAGE;	/* update free count */
	}
	if (pvp == pv_initpage)
		/* no more initpage, we've freed it */
		pv_initpage = NULL;

	splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */

__inline static void
pmap_enter_pv(pg, pve, pmap, va, ptp, flags)
	struct vm_page *pg;
	struct pv_entry *pve;	/* preallocated pve for us to use */
	struct pmap *pmap;
	vaddr_t va;
	struct vm_page *ptp;	/* PTP in pmap that maps this VA */
	int flags;
{
	pve->pv_pmap = pmap;
	pve->pv_va = va;
	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
	pve->pv_flags = flags;
	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
	pg->mdpage.pvh_list = pve;		/* ... locked list */
	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
	if (pve->pv_flags & PT_W)
		++pmap->pm_stats.wired_count;
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(pg, pmap, va)
	struct vm_page *pg;
	struct pmap *pmap;
	vaddr_t va;
{
	struct pv_entry *pve, **prevptr;

	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
	pve = *prevptr;
	while (pve) {
		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
			*prevptr = pve->pv_next;		/* remove it! */
			if (pve->pv_flags & PT_W)
				--pmap->pm_stats.wired_count;
			break;
		}
		prevptr = &pve->pv_next;		/* previous pointer */
		pve = pve->pv_next;			/* advance */
	}
	return(pve);				/* return removed pve */
}

/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */

/*__inline */
static u_int
pmap_modify_pv(pmap, va, pg, bic_mask, eor_mask)
	struct pmap *pmap;
	vaddr_t va;
	struct vm_page *pg;
	u_int bic_mask;
	u_int eor_mask;
{
	struct pv_entry *npv;
	u_int flags, oflags;

	/*
	 * There is at least one VA mapping this page.
	 */

	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
		if (pmap == npv->pv_pmap && va == npv->pv_va) {
			oflags = npv->pv_flags;
			npv->pv_flags = flags =
			    ((oflags & ~bic_mask) ^ eor_mask);
			if ((flags ^ oflags) & PT_W) {
				if (flags & PT_W)
					++pmap->pm_stats.wired_count;
				else
					--pmap->pm_stats.wired_count;
			}
			return (oflags);
		}
	}
	return (0);
}
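
/*
 * Usage sketch (illustrative, not in the original): the bic/eor pair
 * lets one call clear, set or toggle bits.  To force the wired bit to
 * a given state:
 *
 *	oflags = pmap_modify_pv(pmap, va, pg, PT_W, wired ? PT_W : 0);
 *
 * first clears PT_W and then XORs it back in only when wired is true.
 */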

/*
 * Map the specified level 2 pagetable into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
static /*__inline*/ void
pmap_map_in_l1(pmap, va, l2pa, selfref)
	struct pmap *pmap;
	vaddr_t va, l2pa;
	boolean_t selfref;
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> PDSHIFT) & ~3;

	NPDEBUG(PDB_MAP_L1, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
	    pmap->pm_pdir, L1_PTE(l2pa), ptva));

	/* Map page table into the L1. */
	pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
	pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
	pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);

	/* Map the page table into the page table area. */
	if (selfref) {
		NPDEBUG(PDB_MAP_L1, printf("pt self reference %lx in %lx\n",
		    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
		    L2_PTE_NC_NB(l2pa, AP_KRW);
	}
	/* XXX should be a purge */
/*	cpu_tlb_flushD();*/
}
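
/*
 * Worked example (illustrative, not in the original): on arm32
 * PDSHIFT is 20, so for va = 0x12345678
 *
 *	ptva = (0x12345678 >> 20) & ~3 = 0x123 & ~3 = 0x120
 *
 * and the four consecutive L1 entries at 0x120..0x123 (each covering
 * 1MB) are pointed at the four 1KB quarters of the L2 page at l2pa.
 */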

#if 0
static /*__inline*/ void
pmap_unmap_in_l1(pmap, va)
	struct pmap *pmap;
	vaddr_t va;
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> PDSHIFT) & ~3;

	/* Unmap page table from the L1. */
	pmap->pm_pdir[ptva + 0] = 0;
	pmap->pm_pdir[ptva + 1] = 0;
	pmap->pm_pdir[ptva + 2] = 0;
	pmap->pm_pdir[ptva + 3] = 0;

	/* Unmap the page table from the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;

	/* XXX should be a purge */
/*	cpu_tlb_flushD();*/
}
#endif

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vaddr_t
pmap_map(va, spa, epa, prot)
	vaddr_t va, spa, epa;
	int prot;
{
	while (spa < epa) {
		pmap_kenter_pa(va, spa, prot);
		va += NBPG;
		spa += NBPG;
	}
	pmap_update(pmap_kernel());
	return(va);
}
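
/*
 * Usage sketch (illustrative, not in the original): start-up code can
 * use pmap_map() to wire a physically contiguous object, e.g.
 *
 *	va = pmap_map(va, msgbufphys, msgbufphys + MSGBUFSIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * (msgbufphys here is a hypothetical name).  The return value is the
 * first VA past the new mapping, ready for the next allocation.
 */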


/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * bootstrap the pmap system. This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated
 * and also allocates virtual addresses for certain page hooks.
 * Currently only one page hook is allocated, which is used
 * to zero physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;

char *boot_head;

void
pmap_bootstrap(kernel_l1pt, kernel_ptpt)
	pd_entry_t *kernel_l1pt;
	pv_addr_t kernel_ptpt;
{
	pt_entry_t *pte;
	int loop;
	paddr_t start, end;
#if NISADMA > 0
	paddr_t istart;
	psize_t isize;
#endif

	pmap_kernel()->pm_pdir = kernel_l1pt;
	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_obj.pgops = NULL;
	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
	pmap_kernel()->pm_obj.uo_npages = 0;
	pmap_kernel()->pm_obj.uo_refs = 1;

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

	npages = 0;
	loop = 0;
	while (loop < bootconfig.dramblocks) {
		start = (paddr_t)bootconfig.dram[loop].address;
		end = start + (bootconfig.dram[loop].pages * NBPG);
		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;
#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif
#if NISADMA > 0
		if (pmap_isa_dma_range_intersect(start, end - start,
		    &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);
			npages += atop(istart + isize) - atop(istart);

			/*
			 * Load the pieces that come before
			 * the intersection into the default
			 * free list.
			 */
			if (start < istart) {
#if 0
				printf("    BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
				npages += atop(istart) - atop(start);
			}

			/*
			 * Load the pieces that come after
			 * the intersection into the default
			 * free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf("     AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
				npages += atop(end) - atop(istart + isize);
			}
		} else {
			uvm_page_physload(atop(start), atop(end),
			    atop(start), atop(end), VM_FREELIST_DEFAULT);
			npages += atop(end) - atop(start);
		}
#else	/* NISADMA > 0 */
		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
		npages += atop(end) - atop(start);
#endif /* NISADMA > 0 */
		++loop;
	}

#ifdef MYCROFT_HACK
	printf("npages = %ld\n", npages);
#endif

	virtual_avail = KERNEL_VM_BASE;
	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE - 1;

	/*
	 * now we allocate the "special" VAs which are used for tmp mappings
	 * by the pmap (and other modules).  we allocate the VAs by advancing
	 * virtual_avail (note that there are no pages mapped at these VAs).
	 * we find the PTE that maps the allocated VA via the linear PTE
	 * mapping.
	 */

	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);

	csrcp = virtual_avail; csrc_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;

	cdstp = virtual_avail; cdst_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;

	memhook = (char *) virtual_avail;	/* don't need pte */
	virtual_avail += PAGE_SIZE; pte++;

	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
	virtual_avail += round_page(MSGBUFSIZE);
	pte += atop(round_page(MSGBUFSIZE));

	/*
	 * init the static-global locks and global lists.
	 */
	spinlockinit(&pmap_main_lock, "pmaplk", 0);
	simple_lock_init(&pvalloc_lock);
	simple_lock_init(&pmaps_lock);
	LIST_INIT(&pmaps);
	TAILQ_INIT(&pv_freepages);
	TAILQ_INIT(&pv_unusedpgs);

	/*
	 * initialize the pmap pool.
	 */

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);

	cpu_dcache_wbinv_all();
}

/*
 * void pmap_init(void)
 *
 * Initialize the pmap module.
 * Called by vm_init() in vm/vm_init.c in order to initialise
 * any structures that the pmap system needs to map virtual memory.
 */

extern int physmem;

void
pmap_init()
{

	/*
	 * Set the available memory vars - These do not map to real memory
	 * addresses, and cannot, as the physical memory is fragmented.
	 * They are used by ps for %mem calculations.
	 * One could argue whether this should be the entire memory or just
	 * the memory that is usable in a user process.
	 */
	avail_start = 0;
	avail_end = physmem * NBPG;

	/*
	 * now we need to free enough pv_entry structures to allow us to get
	 * the kmem_map/kmem_object allocated and inited (done after this
	 * function is finished).  to do this we allocate one bootstrap page out
	 * of kernel_map and use it to provide an initial pool of pv_entry
	 * structures.  we never free this page.
	 */

	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
	if (pv_initpage == NULL)
		panic("pmap_init: pv_initpage");
	pv_cachedva = 0;	/* a VA we have allocated but not used yet */
	pv_nfpvents = 0;
	(void) pmap_add_pvpage(pv_initpage, FALSE);

	pmap_initialized = TRUE;

	/* Initialise our L1 page table queues and counters */
	SIMPLEQ_INIT(&l1pt_static_queue);
	l1pt_static_queue_count = 0;
	l1pt_static_create_count = 0;
	SIMPLEQ_INIT(&l1pt_queue);
	l1pt_queue_count = 0;
	l1pt_create_count = 0;
	l1pt_reuse_count = 0;
}

/*
 * pmap_postinit()
 *
 * This routine is called after the vm and kmem subsystems have been
 * initialised. This allows the pmap code to perform any initialisation
 * that can only be done once the memory allocation is in place.
 */

void
pmap_postinit()
{
	int loop;
	struct l1pt *pt;

#ifdef PMAP_STATIC_L1S
	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
#else	/* PMAP_STATIC_L1S */
	for (loop = 0; loop < max_processes; ++loop) {
#endif	/* PMAP_STATIC_L1S */
		/* Allocate a L1 page table */
		pt = pmap_alloc_l1pt();
		if (!pt)
			panic("Cannot allocate static L1 page tables\n");

		/* Clean it */
		bzero((void *)pt->pt_va, PD_SIZE);
		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
		++l1pt_static_queue_count;
		++l1pt_static_create_count;
	}
}


/*
 * Create and return a physical map.
 *
 * If the size specified for the map is zero, the map is an actual physical
 * map, and may be referenced by the hardware.
 *
 * If the size specified is non-zero, the map will be used in software only,
 * and is bounded by that size.
 */

pmap_t
pmap_create()
{
	struct pmap *pmap;

	/*
	 * Fetch pmap entry from the pool
	 */

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	/* XXX is this really needed! */
	memset(pmap, 0, sizeof(*pmap));

	simple_lock_init(&pmap->pm_obj.vmobjlock);
	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
	TAILQ_INIT(&pmap->pm_obj.memq);
	pmap->pm_obj.uo_npages = 0;
	pmap->pm_obj.uo_refs = 1;
	pmap->pm_stats.wired_count = 0;
	pmap->pm_stats.resident_count = 1;

	/* Now init the machine part of the pmap */
	pmap_pinit(pmap);
	return(pmap);
}

/*
 * pmap_alloc_l1pt()
 *
 * This routine allocates physical and virtual memory for a L1 page table
 * and wires it.
 * A l1pt structure is returned to describe the allocated page table.
 *
 * This routine is allowed to fail if the required memory cannot be allocated.
 * In this case NULL is returned.
 */

struct l1pt *
pmap_alloc_l1pt(void)
{
	paddr_t pa;
	vaddr_t va;
	struct l1pt *pt;
	int error;
	struct vm_page *m;
	pt_entry_t *ptes;

	/* Allocate virtual address space for the L1 page table */
	va = uvm_km_valloc(kernel_map, PD_SIZE);
	if (va == 0) {
#ifdef DIAGNOSTIC
		PDEBUG(0,
		    printf("pmap: Cannot allocate pageable memory for L1\n"));
#endif	/* DIAGNOSTIC */
		return(NULL);
	}

	/* Allocate memory for the l1pt structure */
	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);

	/*
	 * Allocate pages from the VM system.
	 */
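	/*
	 * Note (illustrative, not in the original): the second PD_SIZE
	 * argument below is the alignment; the ARM MMU requires its
	 * 16KB L1 translation table to sit on a 16KB boundary, so the
	 * pages must be physically contiguous and suitably aligned.
	 */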
	TAILQ_INIT(&pt->pt_plist);
	error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
	    PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
	if (error) {
#ifdef DIAGNOSTIC
		PDEBUG(0,
		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
		    error));
#endif	/* DIAGNOSTIC */
		/* Release the resources we already have claimed */
		free(pt, M_VMPMAP);
		uvm_km_free(kernel_map, va, PD_SIZE);
		return(NULL);
	}

	/* Map our physical pages into our virtual space */
	pt->pt_va = va;
	m = TAILQ_FIRST(&pt->pt_plist);
	ptes = pmap_map_ptes(pmap_kernel());
	while (m && va < (pt->pt_va + PD_SIZE)) {
		pa = VM_PAGE_TO_PHYS(m);

		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);

		/* Revoke cacheability and bufferability */
		/* XXX should be done better than this */
		ptes[arm_btop(va)] &= ~(PT_C | PT_B);

		va += NBPG;
		m = m->pageq.tqe_next;
	}
	pmap_unmap_ptes(pmap_kernel());
	pmap_update(pmap_kernel());

#ifdef DIAGNOSTIC
	if (m)
		panic("pmap_alloc_l1pt: pglist not empty\n");
#endif	/* DIAGNOSTIC */

	pt->pt_flags = 0;
	return(pt);
}

/*
 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
 */
static void
pmap_free_l1pt(pt)
	struct l1pt *pt;
{
	/* Separate the physical memory for the virtual space */
	pmap_kremove(pt->pt_va, PD_SIZE);
	pmap_update(pmap_kernel());

	/* Return the physical memory */
	uvm_pglistfree(&pt->pt_plist);

	/* Free the virtual space */
	uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);

	/* Free the l1pt structure */
	free(pt, M_VMPMAP);
}

/*
 * Allocate a page directory.
 * This routine will either allocate a new page directory from the pool
 * of L1 page tables currently held by the kernel or it will allocate
 * a new one via pmap_alloc_l1pt().
 * It will then initialise the l1 page table for use.
 *
 * XXX must tidy up and fix this code, not happy about how it does the pmaps_locking
 */
static int
pmap_allocpagedir(pmap)
	struct pmap *pmap;
{
	paddr_t pa;
	struct l1pt *pt;
	pt_entry_t *pte;

	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));

	/* Do we have any spare L1's lying around ? */
	if (l1pt_static_queue_count) {
		--l1pt_static_queue_count;
		pt = l1pt_static_queue.sqh_first;
		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
	} else if (l1pt_queue_count) {
		--l1pt_queue_count;
		pt = l1pt_queue.sqh_first;
		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
		++l1pt_reuse_count;
	} else {
		pt = pmap_alloc_l1pt();
		if (!pt)
			return(ENOMEM);
		++l1pt_create_count;
	}

	/* Store the pointer to the l1 descriptor in the pmap. */
	pmap->pm_l1pt = pt;

	/* Get the physical address of the start of the l1 */
	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));

	/* Store the virtual address of the l1 in the pmap. */
	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;

	/* Clean the L1 if it is dirty */
	if (!(pt->pt_flags & PTFLAG_CLEAN))
		bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));

	/* Allocate a page table to map all the page tables for this pmap */

#ifdef DIAGNOSTIC
	if (pmap->pm_vptpt) {
		/* XXX What if we have one already ? */
		panic("pmap_allocpagedir: have pt already\n");
	}
#endif	/* DIAGNOSTIC */
	pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
	if (pmap->pm_vptpt == 0) {
		pmap_freepagedir(pmap);
		return(ENOMEM);
	}

	/* need to lock this all up for growkernel */
	simple_lock(&pmaps_lock);
	/* wish we didn't have to keep this locked... */

	/* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
	bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
	    (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
	    KERNEL_PD_SIZE);

	(void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
	pmap->pm_pptpt &= PG_FRAME;
	/* Revoke cacheability and bufferability */
	/* XXX should be done better than this */
	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
	*pte = *pte & ~(PT_C | PT_B);

	/* Wire in this page table */
	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);

	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */

	/*
	 * Map the kernel page tables for 0xf0000000 +
	 * into the page table used to map the
	 * pmap's page tables
	 */
	bcopy((char *)(PTE_BASE
	    + (PTE_BASE >> (PGSHIFT - 2))
	    + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
	    (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
	    (KERNEL_PD_SIZE >> 2));

	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
	simple_unlock(&pmaps_lock);

	return(0);
}


/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */

void
pmap_pinit(pmap)
	struct pmap *pmap;
{
	int backoff = 6;
	int retry = 10;

	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));

	/* Keep looping until we succeed in allocating a page directory */
	while (pmap_allocpagedir(pmap) != 0) {
		/*
		 * Ok we failed to allocate a suitable block of memory for an
		 * L1 page table.  This means that either:
		 * 1. 16KB of virtual address space could not be allocated
		 * 2. 16KB of physically contiguous memory on a 16KB boundary
		 *    could not be allocated.
		 *
		 * Since we cannot fail we will sleep for a while and try
		 * again.
		 *
		 * Searching for a suitable L1 PT is expensive:
		 * to avoid hogging the system when memory is really
		 * scarce, use an exponential back-off so that
		 * eventually we won't retry more than once every 8
		 * seconds.  This should allow other processes to run
		 * to completion and free up resources.
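		 *
		 * For example (illustrative, not in the original): with
		 * hz = 100 the sleep starts at (hz << 3) >> 6 = 12 ticks
		 * (about 0.12s) and roughly doubles every ten failed
		 * attempts, through 25, 50, 100, 200 and 400 ticks,
		 * reaching 800 ticks (8 seconds) once backoff has
		 * decayed to zero.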
		 */
		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
		    NULL);
		if (--retry == 0) {
			retry = 10;
			if (backoff)
				--backoff;
		}
	}

	/* Map zero page for the pmap. This will also map the L2 for it */
	pmap_enter(pmap, 0x00000000, systempage.pv_pa,
	    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
	pmap_update(pmap);
}


void
pmap_freepagedir(pmap)
	struct pmap *pmap;
{
	/* Free the memory used for the page table mapping */
	if (pmap->pm_vptpt != 0)
		uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);

	/* junk the L1 page table */
	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
		++l1pt_static_queue_count;
	} else if (l1pt_queue_count < 8) {
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
		++l1pt_queue_count;
	} else
		pmap_free_l1pt(pmap->pm_l1pt);
}


/*
 * Retire the given physical map from service.
 * Should only be called if the map contains no valid mappings.
 */

void
pmap_destroy(pmap)
	struct pmap *pmap;
{
	struct vm_page *page;
	int count;

	if (pmap == NULL)
		return;

	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));

	/*
	 * Drop reference count
	 */
	simple_lock(&pmap->pm_obj.vmobjlock);
	count = --pmap->pm_obj.uo_refs;
	simple_unlock(&pmap->pm_obj.vmobjlock);
	if (count > 0) {
		return;
	}

	/*
	 * reference count is zero, free pmap resources and then free pmap.
	 */

	/*
	 * remove it from global list of pmaps
	 */

	simple_lock(&pmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	simple_unlock(&pmaps_lock);

	/* Remove the zero page mapping */
	pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
	pmap_update(pmap);

	/*
	 * Free any page tables still mapped.
	 * This is only temporary until pmap_enter can count the number
	 * of mappings made in a page table.  Then pmap_remove() can
	 * reduce the count and free the pagetable when the count
	 * reaches zero.  Note that entries in this list should match the
	 * contents of the ptpt, however this is faster than walking the
	 * 1024 entries looking for pt's.
	 * taken from i386 pmap.c
	 */
	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
		KASSERT((page->flags & PG_BUSY) == 0);
		page->wire_count = 0;
		uvm_pagefree(page);
	}

	/* Free the page dir */
	pmap_freepagedir(pmap);

	/* return the pmap to the pool */
	pool_put(&pmap_pmap_pool, pmap);
}


/*
 * void pmap_reference(struct pmap *pmap)
 *
 * Add a reference to the specified pmap.
 */

void
pmap_reference(pmap)
	struct pmap *pmap;
{
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	pmap->pm_obj.uo_refs++;
	simple_unlock(&pmap->pm_lock);
}

/*
 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 *
 * Return the start and end addresses of the kernel's virtual space.
 * These values are setup in pmap_bootstrap and are updated as pages
 * are allocated.
 */

void
pmap_virtual_space(start, end)
	vaddr_t *start;
	vaddr_t *end;
{
	*start = virtual_avail;
	*end = virtual_end;
}


/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(p)
	struct proc *p;
{
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	struct pcb *pcb = &p->p_addr->u_pcb;

	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
	    (paddr_t *)&pcb->pcb_pagedir);

	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));

	if (p == curproc) {
		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
		setttb((u_int)pcb->pcb_pagedir);
	}
#if 0
	pmap->pm_pdchanged = FALSE;
#endif
}


/*
 * Deactivate the address space of the specified process.
 */
void
pmap_deactivate(p)
	struct proc *p;
{
}

/*
 * Perform any deferred pmap operations.
 */
void
pmap_update(struct pmap *pmap)
{

	/*
	 * We haven't deferred any pmap operations, but we do need to
	 * make sure TLB/cache operations have completed.
	 */
	cpu_cpwait();
}

/*
 * pmap_clean_page()
 *
 * This is a local function used to work out the best strategy to clean
 * a single page referenced by its entry in the PV table. It's used by
 * pmap_copy_page, pmap_zero_page and maybe some others later on.
 *
 * Its policy is effectively:
 * o If there are no mappings, we don't bother doing anything with the cache.
 * o If there is one mapping, we clean just that page.
 * o If there are multiple mappings, we clean the entire cache.
 *
 * So that some functions can be further optimised, it returns 0 if it didn't
 * clean the entire cache, or 1 if it did.
 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000 a whole cache clean will be performed rather than
 * just the 1 page.  This should not occur in everyday use, and if it does
 * it merely results in a less efficient clean for that page.
 */
static int
pmap_clean_page(pv, is_src)
	struct pv_entry *pv;
	boolean_t is_src;
{
	struct pmap *pmap;
	struct pv_entry *npv;
	int cache_needs_cleaning = 0;
	vaddr_t page_to_clean = 0;

	if (pv == NULL)
		/* nothing mapped in so nothing to flush */
		return (0);

	/* Since we flush the cache each time we change curproc, we
	 * only need to flush the page if it is in the current pmap.
	 */
	if (curproc)
		pmap = curproc->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (npv = pv; npv; npv = npv->pv_next) {
		if (npv->pv_pmap == pmap) {
			/* The page is mapped non-cacheable in
			 * this map.  No need to flush the cache.
			 */
			if (npv->pv_flags & PT_NC) {
#ifdef DIAGNOSTIC
				if (cache_needs_cleaning)
					panic("pmap_clean_page: "
					    "cache inconsistency");
#endif
				break;
			}
#if 0
			/* This doesn't work, because pmap_protect
			   doesn't flush changes on pages that it
			   has write-protected.  */

			/* If the page is not writable and this
			   is the source, then there is no need
			   to flush it from the cache.  */
			else if (is_src && ! (npv->pv_flags & PT_Wr))
				continue;
#endif
			if (cache_needs_cleaning) {
				page_to_clean = 0;
				break;
			} else
				page_to_clean = npv->pv_va;
			cache_needs_cleaning = 1;
		}
	}

	if (page_to_clean)
		cpu_idcache_wbinv_range(page_to_clean, NBPG);
	else if (cache_needs_cleaning) {
		cpu_idcache_wbinv_all();
		return (1);
	}
	return (0);
}

/*
 * pmap_zero_page()
 *
 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cacheable, since on
 * the StrongARM accesses to non-cached pages are non-burst, making writing
 * _any_ bulk data very slow.
 */
void
pmap_zero_page(phys)
	paddr_t phys;
{
	struct vm_page *pg;

	/* Get an entry for this page, and clean it if necessary. */
	pg = PHYS_TO_VM_PAGE(phys);
	simple_lock(&pg->mdpage.pvh_slock);
	pmap_clean_page(pg->mdpage.pvh_list, FALSE);
	simple_unlock(&pg->mdpage.pvh_slock);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page.  Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_PTE(phys & PG_FRAME, AP_KRW);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bzero_page(cdstp);
	cpu_dcache_wbinv_range(cdstp, NBPG);
}

/* pmap_pageidlezero()
 *
 * The same as above, except that we assume that the page is not
 * mapped.  This means we never have to flush the cache first.  Called
 * from the idle loop.
 */
boolean_t
pmap_pageidlezero(phys)
	paddr_t phys;
{
	int i, *ptr;
	boolean_t rv = TRUE;

#ifdef DIAGNOSTIC
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(phys);
	if (pg->mdpage.pvh_list != NULL)
		panic("pmap_pageidlezero: zeroing mapped page\n");
#endif

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page.  Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_PTE(phys & PG_FRAME, AP_KRW);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();

	for (i = 0, ptr = (int *)cdstp;
			i < (NBPG / sizeof(int)); i++) {
		if (sched_whichqs != 0) {
			/*
			 * A process has become ready.  Abort now,
			 * so we don't keep it waiting while we
			 * do slow memory access to finish this
			 * page.
			 */
			rv = FALSE;
			break;
		}
		*ptr++ = 0;
	}

	if (rv)
		/*
		 * if we aborted we'll rezero this page again later so don't
		 * purge it unless we finished it
		 */
		cpu_dcache_wbinv_range(cdstp, NBPG);
	return (rv);
}

/*
 * pmap_copy_page()
 *
 * Copy one physical page into another, by mapping the pages into
 * hook points.  The same comment regarding cacheability as in
 * pmap_zero_page also applies here.
 */
void
pmap_copy_page(src, dest)
	paddr_t src;
	paddr_t dest;
{
	struct vm_page *src_pg, *dest_pg;
	boolean_t cleanedcache;

	/* Get PV entries for the pages, and clean them if needed. */
	src_pg = PHYS_TO_VM_PAGE(src);

	simple_lock(&src_pg->mdpage.pvh_slock);
	cleanedcache = pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
	simple_unlock(&src_pg->mdpage.pvh_slock);

	if (cleanedcache == 0) {
		dest_pg = PHYS_TO_VM_PAGE(dest);
		simple_lock(&dest_pg->mdpage.pvh_slock);
		pmap_clean_page(dest_pg->mdpage.pvh_list, FALSE);
		simple_unlock(&dest_pg->mdpage.pvh_slock);
	}
	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page.  Invalidate the TLB
	 * as required.
	 */
	*csrc_pte = L2_PTE(src & PG_FRAME, AP_KRW);
	*cdst_pte = L2_PTE(dest & PG_FRAME, AP_KRW);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	cpu_dcache_wbinv_range(csrcp, NBPG);
	cpu_dcache_wbinv_range(cdstp, NBPG);
}

#if 0
void
pmap_pte_addref(pmap, va)
	struct pmap *pmap;
	vaddr_t va;
{
	pd_entry_t *pde;
	paddr_t pa;
	struct vm_page *m;

	if (pmap == pmap_kernel())
		return;

	pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
	pa = pmap_pte_pa(pde);
	m = PHYS_TO_VM_PAGE(pa);
	++m->wire_count;
#ifdef MYCROFT_HACK
	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
	    pmap, va, pde, pa, m, m->wire_count);
#endif
}

void
pmap_pte_delref(pmap, va)
	struct pmap *pmap;
	vaddr_t va;
{
	pd_entry_t *pde;
	paddr_t pa;
	struct vm_page *m;

	if (pmap == pmap_kernel())
		return;

	pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
	pa = pmap_pte_pa(pde);
	m = PHYS_TO_VM_PAGE(pa);
	--m->wire_count;
#ifdef MYCROFT_HACK
	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
	    pmap, va, pde, pa, m, m->wire_count);
#endif
	if (m->wire_count == 0) {
#ifdef MYCROFT_HACK
		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
		    pmap, va, pde, pa, m);
#endif
		pmap_unmap_in_l1(pmap, va);
		uvm_pagefree(m);
		--pmap->pm_stats.resident_count;
	}
}
#else
#define	pmap_pte_addref(pmap, va)
#define	pmap_pte_delref(pmap, va)
#endif

/*
 * Since we have a virtually indexed cache, we may need to inhibit caching if
 * there is more than one mapping and at least one of them is writable.
 * Since we purge the cache on every context switch, we only need to check for
 * other mappings within the same pmap, or kernel_pmap.
 * This function is also called when a page is unmapped, to possibly reenable
 * caching on any remaining mappings.
 *
 * The code implements the following logic, where:
 *
 * KW = # of kernel read/write pages
 * KR = # of kernel read only pages
 * UW = # of user read/write pages
 * UR = # of user read only pages
 * OW = # of user read/write pages in another pmap, then
 *
 * KC = kernel mapping is cacheable
 * UC = user mapping is cacheable
 *
 *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
 *                   +---------------------------------------------
 * UW=0,UR=0,OW=0    |    ---       KC=1       KC=1        KC=0
 * UW=0,UR>0,OW=0    |    UC=1      KC=1,UC=1  KC=0,UC=0   KC=0,UC=0
 * UW=0,UR>0,OW>0    |    UC=1      KC=0,UC=1  KC=0,UC=0   KC=0,UC=0
 * UW=1,UR=0,OW=0    |    UC=1      KC=0,UC=0  KC=0,UC=0   KC=0,UC=0
 * UW>1,UR>=0,OW>=0  |    UC=0      KC=0,UC=0  KC=0,UC=0   KC=0,UC=0
2012  * Note that the pmap must have its ptes mapped in, and passed with ptes.
2013 */
2014 __inline static void
2015 pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2016 boolean_t clear_cache)
2017 {
2018 if (pmap == pmap_kernel())
2019 pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
2020 else
2021 pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2022 }
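/*
 * Illustrative sketch (not compiled): the table above boils down to the
 * predicate used by pmap_vac_me_user() below.  Considering only mappings
 * in the given pmap plus the kernel pmap ("entries"/"writable"), caching
 * must be inhibited when those alias each other with a writer present,
 * or when the kernel maps the page while some other pmap holds a
 * writable mapping ("other_writable").  The helper name is hypothetical.
 */
#if 0
static boolean_t
pmap_vac_must_inhibit_example(struct pmap *pmap, int entries, int writable,
    int other_writable)
{

	return ((entries > 1 && writable) ||
	    (entries > 0 && pmap == pmap_kernel() && other_writable));
}
#endif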
2023
2024 static void
2025 pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2026 boolean_t clear_cache)
2027 {
2028 int user_entries = 0;
2029 int user_writable = 0;
2030 int user_cacheable = 0;
2031 int kernel_entries = 0;
2032 int kernel_writable = 0;
2033 int kernel_cacheable = 0;
2034 struct pv_entry *pv;
2035 struct pmap *last_pmap = pmap;
2036
2037 #ifdef DIAGNOSTIC
2038 if (pmap != pmap_kernel())
2039 panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2040 #endif
2041
2042 /*
2043 * Pass one, see if there are both kernel and user pmaps for
2044 * this page. Calculate whether there are user-writable or
2045 * kernel-writable pages.
2046 */
2047 for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
2048 if (pv->pv_pmap != pmap) {
2049 user_entries++;
2050 if (pv->pv_flags & PT_Wr)
2051 user_writable++;
2052 if ((pv->pv_flags & PT_NC) == 0)
2053 user_cacheable++;
2054 } else {
2055 kernel_entries++;
2056 if (pv->pv_flags & PT_Wr)
2057 kernel_writable++;
2058 if ((pv->pv_flags & PT_NC) == 0)
2059 kernel_cacheable++;
2060 }
2061 }
2062
2063 /*
2064 	 * We know we have just been updating a kernel entry, so if there
2065 	 * are now no kernel entries and all user pages are already
2066 	 * cacheable, then there is nothing further to do.
2067 */
2068 if (kernel_entries == 0 &&
2069 user_cacheable == user_entries)
2070 return;
2071
2072 if (user_entries) {
2073 /*
2074 		 * Scan over the list again; for each entry that
2075 		 * might not be set correctly, call pmap_vac_me_user
2076 		 * to recalculate the settings.
2077 */
2078 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
2079 /*
2080 * We know kernel mappings will get set
2081 * correctly in other calls. We also know
2082 * that if the pmap is the same as last_pmap
2083 * then we've just handled this entry.
2084 */
2085 if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2086 continue;
2087 /*
2088 * If there are kernel entries and this page
2089 * is writable but non-cacheable, then we can
2090 * skip this entry also.
2091 */
2092 if (kernel_entries > 0 &&
2093 (pv->pv_flags & (PT_NC | PT_Wr)) ==
2094 (PT_NC | PT_Wr))
2095 continue;
2096 /*
2097 * Similarly if there are no kernel-writable
2098 * entries and the page is already
2099 * read-only/cacheable.
2100 */
2101 if (kernel_writable == 0 &&
2102 (pv->pv_flags & (PT_NC | PT_Wr)) == 0)
2103 continue;
2104 /*
2105 * For some of the remaining cases, we know
2106 * that we must recalculate, but for others we
2107 * can't tell if they are correct or not, so
2108 * we recalculate anyway.
2109 */
2110 pmap_unmap_ptes(last_pmap);
2111 last_pmap = pv->pv_pmap;
2112 ptes = pmap_map_ptes(last_pmap);
2113 pmap_vac_me_user(last_pmap, pg, ptes,
2114 pmap_is_curpmap(last_pmap));
2115 }
2116 /* Restore the pte mapping that was passed to us. */
2117 if (last_pmap != pmap) {
2118 pmap_unmap_ptes(last_pmap);
2119 ptes = pmap_map_ptes(pmap);
2120 }
2121 if (kernel_entries == 0)
2122 return;
2123 }
2124
2125 pmap_vac_me_user(pmap, pg, ptes, clear_cache);
2126 return;
2127 }
2128
2129 static void
2130 pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
2131 boolean_t clear_cache)
2132 {
2133 struct pmap *kpmap = pmap_kernel();
2134 struct pv_entry *pv, *npv;
2135 int entries = 0;
2136 int writable = 0;
2137 int cacheable_entries = 0;
2138 int kern_cacheable = 0;
2139 int other_writable = 0;
2140
2141 pv = pg->mdpage.pvh_list;
2142 KASSERT(ptes != NULL);
2143
2144 /*
2145 * Count mappings and writable mappings in this pmap.
2146 * Include kernel mappings as part of our own.
2147 * Keep a pointer to the first one.
2148 */
2149 for (npv = pv; npv; npv = npv->pv_next) {
2150 /* Count mappings in the same pmap */
2151 if (pmap == npv->pv_pmap ||
2152 kpmap == npv->pv_pmap) {
2153 if (entries++ == 0)
2154 pv = npv;
2155 /* Cacheable mappings */
2156 if ((npv->pv_flags & PT_NC) == 0) {
2157 cacheable_entries++;
2158 if (kpmap == npv->pv_pmap)
2159 kern_cacheable++;
2160 }
2161 /* Writable mappings */
2162 if (npv->pv_flags & PT_Wr)
2163 ++writable;
2164 } else if (npv->pv_flags & PT_Wr)
2165 other_writable = 1;
2166 }
2167
2168 	PDEBUG(3, printf("pmap_vac_me_harder: pmap %p Entries %d, "
2169 "writable %d cacheable %d %s\n", pmap, entries, writable,
2170 cacheable_entries, clear_cache ? "clean" : "no clean"));
2171
2172 /*
2173 * Enable or disable caching as necessary.
2174 * Note: the first entry might be part of the kernel pmap,
2175 * so we can't assume this is indicative of the state of the
2176 * other (maybe non-kpmap) entries.
2177 */
2178 if ((entries > 1 && writable) ||
2179 (entries > 0 && pmap == kpmap && other_writable)) {
2180 if (cacheable_entries == 0)
2181 return;
2182 for (npv = pv; npv; npv = npv->pv_next) {
2183 if ((pmap == npv->pv_pmap
2184 || kpmap == npv->pv_pmap) &&
2185 (npv->pv_flags & PT_NC) == 0) {
2186 ptes[arm_btop(npv->pv_va)] &= ~(PT_C | PT_B);
2187 npv->pv_flags |= PT_NC;
2188 /*
2189 * If this page needs flushing from the
2190 * cache, and we aren't going to do it
2191 * below, do it now.
2192 */
2193 if ((cacheable_entries < 4 &&
2194 (clear_cache || npv->pv_pmap == kpmap)) ||
2195 (npv->pv_pmap == kpmap &&
2196 !clear_cache && kern_cacheable < 4)) {
2197 cpu_idcache_wbinv_range(npv->pv_va,
2198 NBPG);
2199 cpu_tlb_flushID_SE(npv->pv_va);
2200 }
2201 }
2202 }
2203 if ((clear_cache && cacheable_entries >= 4) ||
2204 kern_cacheable >= 4) {
2205 cpu_idcache_wbinv_all();
2206 cpu_tlb_flushID();
2207 }
2208 cpu_cpwait();
2209 } else if (entries > 0) {
2210 /*
2211 		 * Turn caching back on for some pages.  If it is a kernel
2212 * page, only do so if there are no other writable pages.
2213 */
2214 for (npv = pv; npv; npv = npv->pv_next) {
2215 if ((pmap == npv->pv_pmap ||
2216 (kpmap == npv->pv_pmap && other_writable == 0)) &&
2217 (npv->pv_flags & PT_NC)) {
2218 ptes[arm_btop(npv->pv_va)] |= pte_cache_mode;
2219 npv->pv_flags &= ~PT_NC;
2220 }
2221 }
2222 }
2223 }
2224
2225 /*
2226 * pmap_remove()
2227 *
2228 * pmap_remove is responsible for nuking a number of mappings for a range
2229 * of virtual address space in the current pmap. To do this efficiently
2230 * is interesting, because in a number of cases a wide virtual address
2231 * range may be supplied that contains few actual mappings. So, the
2232 * optimisations are:
2233 * 1. Try and skip over hunks of address space for which an L1 entry
2234 * does not exist.
2235 * 2. Build up a list of pages we've hit, up to a maximum, so we can
2236 * maybe do just a partial cache clean. This path of execution is
2237 * complicated by the fact that the cache must be flushed _before_
2238 * the PTE is nuked, being a VAC :-)
2239 * 3. Maybe later fast-case a single page, but I don't think this is
2240 * going to make _that_ much difference overall.
2241 */
2242
2243 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
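/*
 * Illustrative note: within pmap_remove() below, cleanlist_idx encodes
 * three states.  While it is below PMAP_REMOVE_CLEAN_LIST_SIZE, entries
 * are queued for individual cache cleans; when it reaches that size the
 * list has just overflowed, so the whole cache/TLB is invalidated and
 * the queued PTEs are rolled back; above it, everything has already been
 * invalidated and further PTEs are simply zapped.
 */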
2244
2245 void
2246 pmap_remove(pmap, sva, eva)
2247 struct pmap *pmap;
2248 vaddr_t sva;
2249 vaddr_t eva;
2250 {
2251 int cleanlist_idx = 0;
2252 struct pagelist {
2253 vaddr_t va;
2254 pt_entry_t *pte;
2255 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
2256 pt_entry_t *pte = 0, *ptes;
2257 paddr_t pa;
2258 int pmap_active;
2259 struct vm_page *pg;
2260
2261 	/* Exit quickly if there is no pmap */
2262 if (!pmap)
2263 return;
2264
2265 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
2266
2267 sva &= PG_FRAME;
2268 eva &= PG_FRAME;
2269
2270 /*
2271 * we lock in the pmap => vm_page direction
2272 */
2273 PMAP_MAP_TO_HEAD_LOCK();
2274
2275 ptes = pmap_map_ptes(pmap);
2276 /* Get a page table pointer */
2277 while (sva < eva) {
2278 if (pmap_pde_page(pmap_pde(pmap, sva)))
2279 break;
2280 sva = (sva & PD_MASK) + NBPD;
2281 }
2282
2283 pte = &ptes[arm_btop(sva)];
2284 	/* Note whether the pmap is active; if so, cache and TLB cleans are needed */
2285 pmap_active = pmap_is_curpmap(pmap);
2286
2287 /* Now loop along */
2288 while (sva < eva) {
2289 /* Check if we can move to the next PDE (l1 chunk) */
2290 if (!(sva & PT_MASK))
2291 if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2292 sva += NBPD;
2293 pte += arm_btop(NBPD);
2294 continue;
2295 }
2296
2297 		/* We've found a valid PTE, so this mapping has to go. */
2298 if (pmap_pte_v(pte)) {
2299 /* Update statistics */
2300 --pmap->pm_stats.resident_count;
2301
2302 /*
2303 * Add this page to our cache remove list, if we can.
2304 			 * If, however, the cache remove list is totally full,
2305 			 * then do a complete cache invalidation, taking care
2306 			 * to backtrack the PTE table beforehand, and ignore
2307 			 * the list in future because there's no longer any
2308 * point in bothering with them (we've paid the
2309 * penalty, so will carry on unhindered). Otherwise,
2310 * when we fall out, we just clean the list.
2311 */
2312 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2313 pa = pmap_pte_pa(pte);
2314
2315 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2316 /* Add to the clean list. */
2317 cleanlist[cleanlist_idx].pte = pte;
2318 cleanlist[cleanlist_idx].va = sva;
2319 cleanlist_idx++;
2320 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2321 int cnt;
2322
2323 /* Nuke everything if needed. */
2324 if (pmap_active) {
2325 cpu_idcache_wbinv_all();
2326 cpu_tlb_flushID();
2327 }
2328
2329 /*
2330 * Roll back the previous PTE list,
2331 * and zero out the current PTE.
2332 */
2333 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
2334 *cleanlist[cnt].pte = 0;
2335 pmap_pte_delref(pmap, cleanlist[cnt].va);
2336 }
2337 *pte = 0;
2338 pmap_pte_delref(pmap, sva);
2339 cleanlist_idx++;
2340 } else {
2341 /*
2342 * We've already nuked the cache and
2343 * TLB, so just carry on regardless,
2344 * and we won't need to do it again
2345 */
2346 *pte = 0;
2347 pmap_pte_delref(pmap, sva);
2348 }
2349
2350 /*
2351 * Update flags. In a number of circumstances,
2352 * we could cluster a lot of these and do a
2353 * number of sequential pages in one go.
2354 */
2355 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2356 struct pv_entry *pve;
2357 simple_lock(&pg->mdpage.pvh_slock);
2358 pve = pmap_remove_pv(pg, pmap, sva);
2359 pmap_free_pv(pmap, pve);
2360 pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2361 simple_unlock(&pg->mdpage.pvh_slock);
2362 }
2363 }
2364 sva += NBPG;
2365 pte++;
2366 }
2367
2368 pmap_unmap_ptes(pmap);
2369 /*
2370 	 * Now, if we've fallen through to here, chances are that there
2371 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2372 */
2373 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2374 u_int cnt;
2375
2376 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2377 if (pmap_active) {
2378 cpu_idcache_wbinv_range(cleanlist[cnt].va,
2379 NBPG);
2380 *cleanlist[cnt].pte = 0;
2381 cpu_tlb_flushID_SE(cleanlist[cnt].va);
2382 } else
2383 *cleanlist[cnt].pte = 0;
2384 pmap_pte_delref(pmap, cleanlist[cnt].va);
2385 }
2386 }
2387 PMAP_MAP_TO_HEAD_UNLOCK();
2388 }
2389
2390 /*
2391 * Routine: pmap_remove_all
2392 * Function:
2393 * Removes this physical page from
2394 * all physical maps in which it resides.
2395 * Reflects back modify bits to the pager.
2396 */
2397
2398 static void
2399 pmap_remove_all(pg)
2400 struct vm_page *pg;
2401 {
2402 struct pv_entry *pv, *npv;
2403 struct pmap *pmap;
2404 pt_entry_t *pte, *ptes;
2405
2406 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
2407
2408 /* set vm_page => pmap locking */
2409 PMAP_HEAD_TO_MAP_LOCK();
2410
2411 simple_lock(&pg->mdpage.pvh_slock);
2412
2413 pv = pg->mdpage.pvh_list;
2414 if (pv == NULL) {
2415 PDEBUG(0, printf("free page\n"));
2416 simple_unlock(&pg->mdpage.pvh_slock);
2417 PMAP_HEAD_TO_MAP_UNLOCK();
2418 return;
2419 }
2420 pmap_clean_page(pv, FALSE);
2421
2422 while (pv) {
2423 pmap = pv->pv_pmap;
2424 ptes = pmap_map_ptes(pmap);
2425 pte = &ptes[arm_btop(pv->pv_va)];
2426
2427 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2428 pv->pv_va, pv->pv_flags));
2429 #ifdef DEBUG
2430 if (!pmap_pde_page(pmap_pde(pmap, pv->pv_va)) ||
2431 		    !pmap_pte_v(pte) || pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
2432 panic("pmap_remove_all: bad mapping");
2433 #endif /* DEBUG */
2434
2435 /*
2436 * Update statistics
2437 */
2438 --pmap->pm_stats.resident_count;
2439
2440 /* Wired bit */
2441 if (pv->pv_flags & PT_W)
2442 --pmap->pm_stats.wired_count;
2443
2444 /*
2445 * Invalidate the PTEs.
2446 * XXX: should cluster them up and invalidate as many
2447 * as possible at once.
2448 */
2449
2450 #ifdef needednotdone
2451 reduce wiring count on page table pages as references drop
2452 #endif
2453
2454 *pte = 0;
2455 pmap_pte_delref(pmap, pv->pv_va);
2456
2457 npv = pv->pv_next;
2458 pmap_free_pv(pmap, pv);
2459 pv = npv;
2460 pmap_unmap_ptes(pmap);
2461 }
2462 pg->mdpage.pvh_list = NULL;
2463 simple_unlock(&pg->mdpage.pvh_slock);
2464 PMAP_HEAD_TO_MAP_UNLOCK();
2465
2466 PDEBUG(0, printf("done\n"));
2467 cpu_tlb_flushID();
2468 cpu_cpwait();
2469 }
2470
2471
2472 /*
2473 * Set the physical protection on the specified range of this map as requested.
2474 */
2475
2476 void
2477 pmap_protect(pmap, sva, eva, prot)
2478 struct pmap *pmap;
2479 vaddr_t sva;
2480 vaddr_t eva;
2481 vm_prot_t prot;
2482 {
2483 pt_entry_t *pte = NULL, *ptes;
2484 struct vm_page *pg;
2485 int armprot;
2486 int flush = 0;
2487 paddr_t pa;
2488
2489 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2490 pmap, sva, eva, prot));
2491
2492 if (~prot & VM_PROT_READ) {
2493 /* Just remove the mappings. */
2494 pmap_remove(pmap, sva, eva);
2495 /* pmap_update not needed as it should be called by the caller
2496 * of pmap_protect */
2497 return;
2498 }
2499 if (prot & VM_PROT_WRITE) {
2500 /*
2501 * If this is a read->write transition, just ignore it and let
2502 * uvm_fault() take care of it later.
2503 */
2504 return;
2505 }
2506
2507 sva &= PG_FRAME;
2508 eva &= PG_FRAME;
2509
2510 /* Need to lock map->head */
2511 PMAP_MAP_TO_HEAD_LOCK();
2512
2513 ptes = pmap_map_ptes(pmap);
2514 /*
2515 * We need to acquire a pointer to a page table page before entering
2516 * the following loop.
2517 */
2518 while (sva < eva) {
2519 if (pmap_pde_page(pmap_pde(pmap, sva)))
2520 break;
2521 sva = (sva & PD_MASK) + NBPD;
2522 }
2523
2524 pte = &ptes[arm_btop(sva)];
2525
2526 while (sva < eva) {
2527 /* only check once in a while */
2528 if ((sva & PT_MASK) == 0) {
2529 if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2530 /* We can race ahead here, to the next pde. */
2531 sva += NBPD;
2532 pte += arm_btop(NBPD);
2533 continue;
2534 }
2535 }
2536
2537 if (!pmap_pte_v(pte))
2538 goto next;
2539
2540 flush = 1;
2541
2542 armprot = 0;
2543 if (sva < VM_MAXUSER_ADDRESS)
2544 armprot |= PT_AP(AP_U);
2545 else if (sva < VM_MAX_ADDRESS)
2546 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
2547 *pte = (*pte & 0xfffff00f) | armprot;
2548
2549 pa = pmap_pte_pa(pte);
2550
2551 /* Get the physical page index */
2552
2553 /* Clear write flag */
2554 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2555 simple_lock(&pg->mdpage.pvh_slock);
2556 (void) pmap_modify_pv(pmap, sva, pg, PT_Wr, 0);
2557 pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2558 simple_unlock(&pg->mdpage.pvh_slock);
2559 }
2560
2561 next:
2562 sva += NBPG;
2563 pte++;
2564 }
2565 pmap_unmap_ptes(pmap);
2566 PMAP_MAP_TO_HEAD_UNLOCK();
2567 if (flush)
2568 cpu_tlb_flushID();
2569 }
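/*
 * Illustrative note: the mask 0xfffff00f used in the loop above clears
 * the four AP[3:0] access-permission subfields of a small-page PTE (bits
 * 4-11, one 2-bit field per 1KB subpage) before the downgraded
 * permissions are or'ed back in.
 */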
2570
2571 /*
2572 * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2573 * int flags)
2574 *
2575 * Insert the given physical page (p) at
2576 * the specified virtual address (v) in the
2577 * target physical map with the protection requested.
2578 *
2579 * If specified, the page will be wired down, meaning
2580  * that the related pte cannot be reclaimed.
2581 *
2582 * NB: This is the only routine which MAY NOT lazy-evaluate
2583 * or lose information. That is, this routine must actually
2584 * insert this page into the given map NOW.
2585 */
2586
2587 int
2588 pmap_enter(pmap, va, pa, prot, flags)
2589 struct pmap *pmap;
2590 vaddr_t va;
2591 paddr_t pa;
2592 vm_prot_t prot;
2593 int flags;
2594 {
2595 pt_entry_t *pte, *ptes;
2596 u_int npte;
2597 paddr_t opa;
2598 int nflags;
2599 boolean_t wired = (flags & PMAP_WIRED) != 0;
2600 struct vm_page *pg;
2601 struct pv_entry *pve;
2602 int error;
2603
2604 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2605 va, pa, pmap, prot, wired));
2606
2607 #ifdef DIAGNOSTIC
2608 /* Valid address ? */
2609 if (va >= (pmap_curmaxkvaddr))
2610 panic("pmap_enter: too big");
2611 if (pmap != pmap_kernel() && va != 0) {
2612 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2613 panic("pmap_enter: kernel page in user map");
2614 } else {
2615 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2616 panic("pmap_enter: user page in kernel map");
2617 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2618 panic("pmap_enter: entering PT page");
2619 }
2620 #endif
2621 /*
2622 * Get a pointer to the page. Later on in this function, we
2623 * test for a managed page by checking pg != NULL.
2624 */
2625 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2626
2627 /* get lock */
2628 PMAP_MAP_TO_HEAD_LOCK();
2629 /*
2630 * Get a pointer to the pte for this virtual address. If the
2631 * pte pointer is NULL then we are missing the L2 page table
2632 * so we need to create one.
2633 */
2634 /* XXX horrible hack to get us working with lockdebug */
2635 simple_lock(&pmap->pm_obj.vmobjlock);
2636 pte = pmap_pte(pmap, va);
2637 if (!pte) {
2638 struct vm_page *ptp;
2639
2640 /* kernel should be pre-grown */
2641 KASSERT(pmap != pmap_kernel());
2642
2643 /* if failure is allowed then don't try too hard */
2644 ptp = pmap_get_ptp(pmap, va);
2645 if (ptp == NULL) {
2646 if (flags & PMAP_CANFAIL) {
2647 error = ENOMEM;
2648 goto out;
2649 }
2650 panic("pmap_enter: get ptp failed");
2651 }
2652
2653 pte = pmap_pte(pmap, va);
2654 #ifdef DIAGNOSTIC
2655 if (!pte)
2656 panic("pmap_enter: no pte");
2657 #endif
2658 }
2659
2660 nflags = 0;
2661 if (prot & VM_PROT_WRITE)
2662 nflags |= PT_Wr;
2663 if (wired)
2664 nflags |= PT_W;
2665
2666 /* More debugging info */
2667 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2668 *pte));
2669
2670 /* Is the pte valid ? If so then this page is already mapped */
2671 if (pmap_pte_v(pte)) {
2672 /* Get the physical address of the current page mapped */
2673 opa = pmap_pte_pa(pte);
2674
2675 #ifdef MYCROFT_HACK
2676 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2677 #endif
2678
2679 /* Are we mapping the same page ? */
2680 if (opa == pa) {
2681 /* All we must be doing is changing the protection */
2682 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2683 va, pa));
2684
2685 /* Has the wiring changed ? */
2686 if (pg != NULL) {
2687 simple_lock(&pg->mdpage.pvh_slock);
2688 (void) pmap_modify_pv(pmap, va, pg,
2689 PT_Wr | PT_W, nflags);
2690 simple_unlock(&pg->mdpage.pvh_slock);
2691 }
2692 } else {
2693 struct vm_page *opg;
2694
2695 /* We are replacing the page with a new one. */
2696 cpu_idcache_wbinv_range(va, NBPG);
2697
2698 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2699 va, pa, opa));
2700
2701 /*
2702 * If it is part of our managed memory then we
2703 * must remove it from the PV list
2704 */
2705 if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
2706 simple_lock(&opg->mdpage.pvh_slock);
2707 pve = pmap_remove_pv(opg, pmap, va);
2708 simple_unlock(&opg->mdpage.pvh_slock);
2709 } else {
2710 pve = NULL;
2711 }
2712
2713 goto enter;
2714 }
2715 } else {
2716 opa = 0;
2717 pve = NULL;
2718 pmap_pte_addref(pmap, va);
2719
2720 /* pte is not valid so we must be hooking in a new page */
2721 ++pmap->pm_stats.resident_count;
2722
2723 enter:
2724 /*
2725 * Enter on the PV list if part of our managed memory
2726 */
2727 if (pg != NULL) {
2728 if (pve == NULL) {
2729 pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2730 if (pve == NULL) {
2731 if (flags & PMAP_CANFAIL) {
2732 error = ENOMEM;
2733 goto out;
2734 }
2735 panic("pmap_enter: no pv entries available");
2736 }
2737 }
2738 /* enter_pv locks pvh when adding */
2739 pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
2740 } else {
2741 pg = NULL;
2742 if (pve != NULL)
2743 pmap_free_pv(pmap, pve);
2744 }
2745 }
2746
2747 #ifdef MYCROFT_HACK
2748 if (mycroft_hack)
2749 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx pg=%p\n", pmap, va, pa, opa, pg);
2750 #endif
2751
2752 /* Construct the pte, giving the correct access. */
2753 npte = (pa & PG_FRAME);
2754
2755 /* VA 0 is magic. */
2756 if (pmap != pmap_kernel() && va != 0)
2757 npte |= PT_AP(AP_U);
2758
2759 if (pg != NULL) {
2760 #ifdef DIAGNOSTIC
2761 if ((flags & VM_PROT_ALL) & ~prot)
2762 panic("pmap_enter: access_type exceeds prot");
2763 #endif
2764 npte |= pte_cache_mode;
2765 if (flags & VM_PROT_WRITE) {
2766 npte |= L2_SPAGE | PT_AP(AP_W);
2767 pg->mdpage.pvh_attrs |= PT_H | PT_M;
2768 } else if (flags & VM_PROT_ALL) {
2769 npte |= L2_SPAGE;
2770 pg->mdpage.pvh_attrs |= PT_H;
2771 } else
2772 npte |= L2_INVAL;
2773 } else {
2774 if (prot & VM_PROT_WRITE)
2775 npte |= L2_SPAGE | PT_AP(AP_W);
2776 else if (prot & VM_PROT_ALL)
2777 npte |= L2_SPAGE;
2778 else
2779 npte |= L2_INVAL;
2780 }
2781
2782 #ifdef MYCROFT_HACK
2783 if (mycroft_hack)
2784 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2785 #endif
2786
2787 *pte = npte;
2788
2789 if (pg != NULL) {
2790 /* XXX this will change once the whole of pmap_enter uses
2791 * map_ptes
2792 */
2793 ptes = pmap_map_ptes(pmap);
2794 simple_lock(&pg->mdpage.pvh_slock);
2795 pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
2796 simple_unlock(&pg->mdpage.pvh_slock);
2797 pmap_unmap_ptes(pmap);
2798 }
2799
2800 /* Better flush the TLB ... */
2801 cpu_tlb_flushID_SE(va);
2802 error = 0;
2803 out:
2804 simple_unlock(&pmap->pm_obj.vmobjlock);
2805 PMAP_MAP_TO_HEAD_UNLOCK();
2806 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2807
2808 return error;
2809 }
2810
2811 /*
2812 * pmap_kenter_pa: enter a kernel mapping
2813 *
2814  * => no need to lock anything; assume va is already allocated
2815 * => should be faster than normal pmap enter function
2816 */
2817 void
2818 pmap_kenter_pa(va, pa, prot)
2819 vaddr_t va;
2820 paddr_t pa;
2821 vm_prot_t prot;
2822 {
2823 pt_entry_t *pte;
2824
2825 pte = vtopte(va);
2826 KASSERT(!pmap_pte_v(pte));
2827 *pte = L2_PTE(pa, AP_KRW);
2828 }
2829
2830 void
2831 pmap_kremove(va, len)
2832 vaddr_t va;
2833 vsize_t len;
2834 {
2835 pt_entry_t *pte;
2836
2837 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2838
2839 /*
2840 * We assume that we will only be called with small
2841 * regions of memory.
2842 */
2843
2844 KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
2845 pte = vtopte(va);
2846 cpu_idcache_wbinv_range(va, PAGE_SIZE);
2847 *pte = 0;
2848 cpu_tlb_flushID_SE(va);
2849 }
2850 }
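/*
 * Illustrative sketch (not compiled): the usual pairing of the fast
 * kernel enter/remove primitives above.  The uvm_km_valloc() call is a
 * hypothetical way of obtaining a free kernel va for the example.
 */
#if 0
	vaddr_t va;

	va = uvm_km_valloc(kernel_map, NBPG);		/* hypothetical */
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
	/* ... access the page through va ... */
	pmap_kremove(va, NBPG);
	pmap_update(pmap_kernel());
#endif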
2851
2852 /*
2853 * pmap_page_protect:
2854 *
2855 * Lower the permission for all mappings to a given page.
2856 */
2857
2858 void
2859 pmap_page_protect(pg, prot)
2860 struct vm_page *pg;
2861 vm_prot_t prot;
2862 {
2863
2864 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
2865 VM_PAGE_TO_PHYS(pg), prot));
2866
2867 	switch (prot) {
2868 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
2869 case VM_PROT_READ|VM_PROT_WRITE:
2870 return;
2871
2872 case VM_PROT_READ:
2873 case VM_PROT_READ|VM_PROT_EXECUTE:
2874 pmap_copy_on_write(pg);
2875 break;
2876
2877 default:
2878 pmap_remove_all(pg);
2879 break;
2880 }
2881 }
2882
2883
2884 /*
2885 * Routine: pmap_unwire
2886 * Function: Clear the wired attribute for a map/virtual-address
2887 * pair.
2888 * In/out conditions:
2889 * The mapping must already exist in the pmap.
2890 */
2891
2892 void
2893 pmap_unwire(pmap, va)
2894 struct pmap *pmap;
2895 vaddr_t va;
2896 {
2897 pt_entry_t *ptes;
2898 struct vm_page *pg;
2899 paddr_t pa;
2900
2901 PMAP_MAP_TO_HEAD_LOCK();
2902 ptes = pmap_map_ptes(pmap); /* locks pmap */
2903
2904 if (pmap_pde_v(pmap_pde(pmap, va))) {
2905 #ifdef DIAGNOSTIC
2906 if (l2pte_valid(ptes[arm_btop(va)]) == 0)
2907 panic("pmap_unwire: invalid L2 PTE");
2908 #endif
2909 /* Extract the physical address of the page */
2910 pa = l2pte_pa(ptes[arm_btop(va)]);
2911
2912 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
2913 goto out;
2914
2915 /* Update the wired bit in the pv entry for this page. */
2916 simple_lock(&pg->mdpage.pvh_slock);
2917 (void) pmap_modify_pv(pmap, va, pg, PT_W, 0);
2918 simple_unlock(&pg->mdpage.pvh_slock);
2919 }
2920 #ifdef DIAGNOSTIC
2921 else {
2922 panic("pmap_unwire: invalid L1 PTE");
2923 }
2924 #endif
2925 out:
2926 pmap_unmap_ptes(pmap); /* unlocks pmap */
2927 PMAP_MAP_TO_HEAD_UNLOCK();
2928 }
2929
2930 /*
2931 * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
2932 *
2933 * Return the pointer to a page table entry corresponding to the supplied
2934 * virtual address.
2935 *
2936 * The page directory is first checked to make sure that a page table
2937 * for the address in question exists and if it does a pointer to the
2938 * entry is returned.
2939 *
2940  * The way this works is that the kernel page tables are mapped
2941 * into the memory map at APTE_BASE to APTE_BASE+4MB. This allows
2942 * page tables to be located quickly.
2943 */
2944 pt_entry_t *
2945 pmap_pte(pmap, va)
2946 struct pmap *pmap;
2947 vaddr_t va;
2948 {
2949 pt_entry_t *ptp;
2950 pt_entry_t *result;
2951
2952 /* The pmap must be valid */
2953 if (!pmap)
2954 return(NULL);
2955
2956 /* Return the address of the pte */
2957 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2958 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2959
2960 /* Do we have a valid pde ? If not we don't have a page table */
2961 if (!pmap_pde_page(pmap_pde(pmap, va))) {
2962 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2963 pmap_pde(pmap, va)));
2964 return(NULL);
2965 }
2966
2967 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2968 pmap->pm_pptpt, (*((pt_entry_t *)(PTE_BASE
2969 + (PTE_BASE >> (PGSHIFT - 2)) +
2970 (PTE_BASE >> PDSHIFT))) & PG_FRAME)));
2971
2972 /*
2973 * If the pmap is the kernel pmap or the pmap is the active one
2974 	 * then we can just return a pointer to the entry relative to
2975 * PTE_BASE.
2976 * Otherwise we need to map the page tables to an alternative
2977 * address and reference them there.
2978 */
2979 if (pmap == pmap_kernel() || pmap->pm_pptpt
2980 == (*((pt_entry_t *)(PTE_BASE
2981 + ((PTE_BASE >> (PGSHIFT - 2)) &
2982 ~3) + (PTE_BASE >> PDSHIFT))) & PG_FRAME)) {
2983 ptp = (pt_entry_t *)PTE_BASE;
2984 } else {
2985 struct proc *p = curproc;
2986
2987 /* If we don't have a valid curproc use proc0 */
2988 /* Perhaps we should just use kernel_pmap instead */
2989 if (p == NULL)
2990 p = &proc0;
2991 #ifdef DIAGNOSTIC
2992 /*
2993 * The pmap should always be valid for the process so
2994 * panic if it is not.
2995 */
2996 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2997 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2998 va, p, p->p_vmspace);
2999 console_debugger();
3000 }
3001 /*
3002 * The pmap for the current process should be mapped. If it
3003 * is not then we have a problem.
3004 */
3005 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
3006 (*((pt_entry_t *)(PTE_BASE
3007 + (PTE_BASE >> (PGSHIFT - 2)) +
3008 (PTE_BASE >> PDSHIFT))) & PG_FRAME)) {
3009 printf("pmap pagetable = P%08lx current = P%08x ",
3010 pmap->pm_pptpt, (*((pt_entry_t *)(PTE_BASE
3011 + (PTE_BASE >> (PGSHIFT - 2)) +
3012 (PTE_BASE >> PDSHIFT))) &
3013 PG_FRAME));
3014 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
3015 			panic("pmap_pte: current and pmap mismatch");
3016 }
3017 #endif
3018
3019 ptp = (pt_entry_t *)APTE_BASE;
3020 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE,
3021 pmap->pm_pptpt, FALSE);
3022 cpu_tlb_flushD();
3023 cpu_cpwait();
3024 }
3025 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
3026 ((va >> (PGSHIFT-2)) & ~3)));
3027 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
3028 return(result);
3029 }
3030
3031 /*
3032 * Routine: pmap_extract
3033 * Function:
3034 * Extract the physical page address associated
3035 * with the given map/virtual_address pair.
3036 */
3037 boolean_t
3038 pmap_extract(pmap, va, pap)
3039 struct pmap *pmap;
3040 vaddr_t va;
3041 paddr_t *pap;
3042 {
3043 pd_entry_t *pde;
3044 pt_entry_t *pte, *ptes;
3045 paddr_t pa;
3046 boolean_t rv = TRUE;
3047
3048 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
3049
3050 /*
3051 * Get the pte for this virtual address.
3052 */
3053 pde = pmap_pde(pmap, va);
3054 ptes = pmap_map_ptes(pmap);
3055 pte = &ptes[arm_btop(va)];
3056
3057 if (pmap_pde_section(pde)) {
3058 		pa = (*pde & PD_MASK) | (va & (L1_SEC_SIZE - 1));
		if (pap != NULL)
			*pap = pa;
3059 		goto out;
3060 } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
3061 rv = FALSE;
3062 goto out;
3063 }
3064
3065 if ((*pte & L2_MASK) == L2_LPAGE) {
3066 /* Extract the physical address from the pte */
3067 pa = *pte & ~(L2_LPAGE_SIZE - 1);
3068
3069 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
3070 (pa | (va & (L2_LPAGE_SIZE - 1)))));
3071
3072 if (pap != NULL)
3073 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
3074 goto out;
3075 }
3076
3077 /* Extract the physical address from the pte */
3078 pa = pmap_pte_pa(pte);
3079
3080 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
3081 (pa | (va & ~PG_FRAME))));
3082
3083 if (pap != NULL)
3084 *pap = pa | (va & ~PG_FRAME);
3085 out:
3086 pmap_unmap_ptes(pmap);
3087 return (rv);
3088 }
3089
3090
3091 /*
3092 * Copy the range specified by src_addr/len from the source map to the
3093 * range dst_addr/len in the destination map.
3094 *
3095 * This routine is only advisory and need not do anything.
3096 */
3097
3098 void
3099 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
3100 struct pmap *dst_pmap;
3101 struct pmap *src_pmap;
3102 vaddr_t dst_addr;
3103 vsize_t len;
3104 vaddr_t src_addr;
3105 {
3106 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
3107 dst_pmap, src_pmap, dst_addr, len, src_addr));
3108 }
3109
3110 #if defined(PMAP_DEBUG)
3111 void
3112 pmap_dump_pvlist(phys, m)
3113 vaddr_t phys;
3114 char *m;
3115 {
3116 struct vm_page *pg;
3117 struct pv_entry *pv;
3118
3119 if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
3120 printf("INVALID PA\n");
3121 return;
3122 }
3123 simple_lock(&pg->mdpage.pvh_slock);
3124 printf("%s %08lx:", m, phys);
3125 if (pg->mdpage.pvh_list == NULL) {
3126 		printf(" no mappings\n");
		simple_unlock(&pg->mdpage.pvh_slock);
3127 		return;
3128 	}
3129
3130 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
3131 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3132 pv->pv_va, pv->pv_flags);
3133
3134 printf("\n");
3135 simple_unlock(&pg->mdpage.pvh_slock);
3136 }
3137
3138 #endif /* PMAP_DEBUG */
3139
3140 static pt_entry_t *
3141 pmap_map_ptes(struct pmap *pmap)
3142 {
3143 struct proc *p;
3144
3145 /* the kernel's pmap is always accessible */
3146 if (pmap == pmap_kernel()) {
3147 		return (pt_entry_t *)PTE_BASE;
3148 }
3149
3150 if (pmap_is_curpmap(pmap)) {
3151 simple_lock(&pmap->pm_obj.vmobjlock);
3152 return (pt_entry_t *)PTE_BASE;
3153 }
3154
3155 p = curproc;
3156
3157 if (p == NULL)
3158 p = &proc0;
3159
3160 /* need to lock both curpmap and pmap: use ordered locking */
3161 	if ((unsigned) pmap < (unsigned) p->p_vmspace->vm_map.pmap) {
3162 		simple_lock(&pmap->pm_obj.vmobjlock);
3163 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3164 	} else {
3165 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3166 simple_lock(&pmap->pm_obj.vmobjlock);
3167 }
3168
3169 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE,
3170 pmap->pm_pptpt, FALSE);
3171 cpu_tlb_flushD();
3172 cpu_cpwait();
3173 return (pt_entry_t *)APTE_BASE;
3174 }
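/*
 * Illustrative note: the pointer comparison in pmap_map_ptes() above
 * imposes a fixed total order on pmap locks, so any two pmaps are always
 * locked in the same sequence regardless of which one is "ours".  This
 * is the standard trick for avoiding an AB/BA deadlock when two locks
 * must be held at once.
 */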
3175
3176 /*
3177 * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3178 */
3179
3180 static void
3181 pmap_unmap_ptes(pmap)
3182 struct pmap *pmap;
3183 {
3184 if (pmap == pmap_kernel()) {
3185 return;
3186 }
3187 if (pmap_is_curpmap(pmap)) {
3188 simple_unlock(&pmap->pm_obj.vmobjlock);
3189 } else {
3190 simple_unlock(&pmap->pm_obj.vmobjlock);
3191 simple_unlock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3192 }
3193 }
3194
3195 /*
3196 * Modify pte bits for all ptes corresponding to the given physical address.
3197 * We use `maskbits' rather than `clearbits' because we're always passing
3198 * constants and the latter would require an extra inversion at run-time.
3199 */
3200
3201 static void
3202 pmap_clearbit(pg, maskbits)
3203 struct vm_page *pg;
3204 unsigned int maskbits;
3205 {
3206 struct pv_entry *pv;
3207 pt_entry_t *ptes;
3208 vaddr_t va;
3209 int tlbentry;
3210
3211 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
3212 VM_PAGE_TO_PHYS(pg), maskbits));
3213
3214 tlbentry = 0;
3215
3216 PMAP_HEAD_TO_MAP_LOCK();
3217 simple_lock(&pg->mdpage.pvh_slock);
3218
3219 /*
3220 * Clear saved attributes (modify, reference)
3221 */
3222 pg->mdpage.pvh_attrs &= ~maskbits;
3223
3224 if (pg->mdpage.pvh_list == NULL) {
3225 simple_unlock(&pg->mdpage.pvh_slock);
3226 PMAP_HEAD_TO_MAP_UNLOCK();
3227 return;
3228 }
3229
3230 /*
3231 	 * Loop over all current mappings, setting/clearing as appropriate.
3232 */
3233 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
3234 va = pv->pv_va;
3235 pv->pv_flags &= ~maskbits;
3236 ptes = pmap_map_ptes(pv->pv_pmap); /* locks pmap */
3237 KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
3238 if (maskbits & (PT_Wr|PT_M)) {
3239 if ((pv->pv_flags & PT_NC)) {
3240 /*
3241 * Entry is not cacheable: reenable
3242 * the cache, nothing to flush
3243 *
3244 * Don't turn caching on again if this
3245 * is a modified emulation. This
3246 				 * would be inconsistent with the
3247 * settings created by
3248 * pmap_vac_me_harder().
3249 *
3250 * There's no need to call
3251 * pmap_vac_me_harder() here: all
3252 				 * pages are losing their write
3253 * permission.
3254 *
3255 */
3256 if (maskbits & PT_Wr) {
3257 ptes[arm_btop(va)] |= pte_cache_mode;
3258 pv->pv_flags &= ~PT_NC;
3259 }
3260 } else if (pmap_is_curpmap(pv->pv_pmap)) {
3261 /*
3262 				 * Entry is cacheable: the pmap is
3263 				 * current, so the page may be in the
3264 				 * cache and must be flushed.
3265 */
3266 cpu_idcache_wbinv_range(pv->pv_va, NBPG);
3267 }
3268
3269 /* make the pte read only */
3270 ptes[arm_btop(va)] &= ~PT_AP(AP_W);
3271 }
3272
3273 if (maskbits & PT_H)
3274 ptes[arm_btop(va)] =
3275 (ptes[arm_btop(va)] & ~L2_MASK) | L2_INVAL;
3276
3277 if (pmap_is_curpmap(pv->pv_pmap)) {
3278 /*
3279 * if we had cacheable pte's we'd clean the
3280 * pte out to memory here
3281 *
3282 * flush tlb entry as it's in the current pmap
3283 */
3284 cpu_tlb_flushID_SE(pv->pv_va);
3285 }
3286 pmap_unmap_ptes(pv->pv_pmap); /* unlocks pmap */
3287 }
3288 cpu_cpwait();
3289
3290 simple_unlock(&pg->mdpage.pvh_slock);
3291 PMAP_HEAD_TO_MAP_UNLOCK();
3292 }
3293
3294 /*
3295 * pmap_clear_modify:
3296 *
3297 * Clear the "modified" attribute for a page.
3298 */
3299 boolean_t
3300 pmap_clear_modify(pg)
3301 struct vm_page *pg;
3302 {
3303 boolean_t rv;
3304
3305 if (pg->mdpage.pvh_attrs & PT_M) {
3306 rv = TRUE;
3307 pmap_clearbit(pg, PT_M);
3308 } else
3309 rv = FALSE;
3310
3311 PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3312 VM_PAGE_TO_PHYS(pg), rv));
3313
3314 return (rv);
3315 }
3316
3317 /*
3318 * pmap_clear_reference:
3319 *
3320 * Clear the "referenced" attribute for a page.
3321 */
3322 boolean_t
3323 pmap_clear_reference(pg)
3324 struct vm_page *pg;
3325 {
3326 boolean_t rv;
3327
3328 if (pg->mdpage.pvh_attrs & PT_H) {
3329 rv = TRUE;
3330 pmap_clearbit(pg, PT_H);
3331 } else
3332 rv = FALSE;
3333
3334 PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
3335 VM_PAGE_TO_PHYS(pg), rv));
3336
3337 return (rv);
3338 }
3339
3340
3341 void
3342 pmap_copy_on_write(pg)
3343 struct vm_page *pg;
3344 {
3345 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", VM_PAGE_TO_PHYS(pg)));
3346 pmap_clearbit(pg, PT_Wr);
3347 }
3348
3349 /*
3350 * pmap_is_modified:
3351 *
3352 * Test if a page has the "modified" attribute.
3353 */
3354 /* See <arm/arm32/pmap.h> */
3355
3356 /*
3357 * pmap_is_referenced:
3358 *
3359 * Test if a page has the "referenced" attribute.
3360 */
3361 /* See <arm/arm32/pmap.h> */
3362
3363 int
3364 pmap_modified_emulation(pmap, va)
3365 struct pmap *pmap;
3366 vaddr_t va;
3367 {
3368 pt_entry_t *ptes;
3369 struct vm_page *pg;
3370 paddr_t pa;
3371 u_int flags;
3372 int rv = 0;
3373
3374 PDEBUG(2, printf("pmap_modified_emulation\n"));
3375
3376 PMAP_MAP_TO_HEAD_LOCK();
3377 ptes = pmap_map_ptes(pmap); /* locks pmap */
3378
3379 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3380 PDEBUG(2, printf("L1 PTE invalid\n"));
3381 goto out;
3382 }
3383
3384 PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3385
3386 	/* Check for an invalid pte */
3387 if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3388 goto out;
3389
3390 /* This can happen if user code tries to access kernel memory. */
3391 if ((ptes[arm_btop(va)] & PT_AP(AP_W)) != 0)
3392 goto out;
3393
3394 /* Extract the physical address of the page */
3395 pa = l2pte_pa(ptes[arm_btop(va)]);
3396 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3397 goto out;
3398
3399 /* Get the current flags for this page. */
3400 simple_lock(&pg->mdpage.pvh_slock);
3401
3402 flags = pmap_modify_pv(pmap, va, pg, 0, 0);
3403 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3404
3405 /*
3406 * Do the flags say this page is writable ? If not then it is a
3407 * genuine write fault. If yes then the write fault is our fault
3408 	 * as we did not reflect the write access in the PTE. Now that we
3409 	 * know a write has occurred, we can correct this and also set the
3410 * modified bit
3411 */
3412 if (~flags & PT_Wr) {
3413 simple_unlock(&pg->mdpage.pvh_slock);
3414 goto out;
3415 }
3416
3417 PDEBUG(0,
3418 printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
3419 va, ptes[arm_btop(va)]));
3420 pg->mdpage.pvh_attrs |= PT_H | PT_M;
3421
3422 /*
3423 * Re-enable write permissions for the page. No need to call
3424 * pmap_vac_me_harder(), since this is just a
3425 * modified-emulation fault, and the PT_Wr bit isn't changing. We've
3426 * already set the cacheable bits based on the assumption that we
3427 * can write to this page.
3428 */
3429 ptes[arm_btop(va)] =
3430 (ptes[arm_btop(va)] & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
3431 PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3432
3433 simple_unlock(&pg->mdpage.pvh_slock);
3434
3435 cpu_tlb_flushID_SE(va);
3436 cpu_cpwait();
3437 rv = 1;
3438 out:
3439 pmap_unmap_ptes(pmap); /* unlocks pmap */
3440 PMAP_MAP_TO_HEAD_UNLOCK();
3441 return (rv);
3442 }
3443
3444 int
3445 pmap_handled_emulation(pmap, va)
3446 struct pmap *pmap;
3447 vaddr_t va;
3448 {
3449 pt_entry_t *ptes;
3450 struct vm_page *pg;
3451 paddr_t pa;
3452 int rv = 0;
3453
3454 PDEBUG(2, printf("pmap_handled_emulation\n"));
3455
3456 ptes = pmap_map_ptes(pmap); /* locks pmap */
3457
3458 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3459 PDEBUG(2, printf("L1 PTE invalid\n"));
3460 goto out;
3461 }
3462
3463 PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3464
3465 /* Check for invalid pte */
3466 if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3467 goto out;
3468
3469 /* This can happen if user code tries to access kernel memory. */
3470 if ((ptes[arm_btop(va)] & L2_MASK) != L2_INVAL)
3471 goto out;
3472
3473 /* Extract the physical address of the page */
3474 pa = l2pte_pa(ptes[arm_btop(va)]);
3475 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3476 goto out;
3477
3478 /*
3479 	 * OK, we just enable the PTE and mark the attributes as handled.
3480 */
3481 PDEBUG(0,
3482 printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
3483 va, ptes[arm_btop(va)]));
3484 pg->mdpage.pvh_attrs |= PT_H;
3485
3486 ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_MASK) | L2_SPAGE;
3487 PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3488
3489 cpu_tlb_flushID_SE(va);
3490 cpu_cpwait();
3491 rv = 1;
3492 out:
3493 pmap_unmap_ptes(pmap); /* unlocks pmap */
3494 return (rv);
3495 }
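/*
 * Illustrative note: pmap_clearbit() and the two emulation handlers
 * above together implement referenced/modified emulation.  Clearing PT_H
 * downgrades the PTE to L2_INVAL so the next access faults into
 * pmap_handled_emulation(); clearing PT_Wr removes AP_W so the next
 * write faults into pmap_modified_emulation(); each handler then records
 * PT_H/PT_M in pvh_attrs and upgrades the PTE again.
 */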
3496
3497 /*
3498 * pmap_collect: free resources held by a pmap
3499 *
3500 * => optional function.
3501 * => called when a process is swapped out to free memory.
3502 */
3503
3504 void
3505 pmap_collect(pmap)
3506 struct pmap *pmap;
3507 {
3508 }
3509
3510 /*
3511 * Routine: pmap_procwr
3512 *
3513 * Function:
3514 * Synchronize caches corresponding to [addr, addr+len) in p.
3515 *
3516 */
3517 void
3518 pmap_procwr(p, va, len)
3519 struct proc *p;
3520 vaddr_t va;
3521 int len;
3522 {
3523 /* We only need to do anything if it is the current process. */
3524 if (p == curproc)
3525 cpu_icache_sync_range(va, len);
3526 }
3527 /*
3528 * PTP functions
3529 */
3530
3531 /*
3532 * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3533 *
3534 * => pmap should NOT be pmap_kernel()
3535 * => pmap should be locked
3536 */
3537
3538 static struct vm_page *
3539 pmap_get_ptp(struct pmap *pmap, vaddr_t va)
3540 {
3541 struct vm_page *ptp;
3542
3543 if (pmap_pde_page(pmap_pde(pmap, va))) {
3544
3545 /* valid... check hint (saves us a PA->PG lookup) */
3546 #if 0
3547 if (pmap->pm_ptphint &&
3548 ((unsigned)pmap_pde(pmap, va) & PG_FRAME) ==
3549 VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3550 return (pmap->pm_ptphint);
3551 #endif
3552 ptp = uvm_pagelookup(&pmap->pm_obj, va);
3553 #ifdef DIAGNOSTIC
3554 if (ptp == NULL)
3555 panic("pmap_get_ptp: unmanaged user PTP");
3556 #endif
3557 		/* pmap->pm_ptphint = ptp; */
3558 return(ptp);
3559 }
3560
3561 /* allocate a new PTP (updates ptphint) */
3562 return(pmap_alloc_ptp(pmap, va));
3563 }
3564
3565 /*
3566 * pmap_alloc_ptp: allocate a PTP for a PMAP
3567 *
3568 * => pmap should already be locked by caller
3569 * => we use the ptp's wire_count to count the number of active mappings
3570 * in the PTP (we start it at one to prevent any chance this PTP
3571 * will ever leak onto the active/inactive queues)
3572 */
3573
3574 /*__inline */ static struct vm_page *
3575 pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
3576 {
3577 struct vm_page *ptp;
3578
3579 ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3580 UVM_PGA_USERESERVE|UVM_PGA_ZERO);
3581 if (ptp == NULL)
3582 return (NULL);
3583
3584 /* got one! */
3585 ptp->flags &= ~PG_BUSY; /* never busy */
3586 ptp->wire_count = 1; /* no mappings yet */
3587 pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
3588 pmap->pm_stats.resident_count++; /* count PTP as resident */
3589 	/* pmap->pm_ptphint = ptp; */
3590 return (ptp);
3591 }
3592
3593 vaddr_t
3594 pmap_growkernel(maxkvaddr)
3595 vaddr_t maxkvaddr;
3596 {
3597 struct pmap *kpm = pmap_kernel(), *pm;
3598 int s;
3599 paddr_t ptaddr;
3600 struct vm_page *ptp;
3601
3602 if (maxkvaddr <= pmap_curmaxkvaddr)
3603 goto out; /* we are OK */
3604 NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
3605 pmap_curmaxkvaddr, maxkvaddr));
3606
3607 /*
3608 * whoops! we need to add kernel PTPs
3609 */
3610
3611 s = splhigh(); /* to be safe */
3612 simple_lock(&kpm->pm_obj.vmobjlock);
3613 	/* Due to the way the ARM pmap works, we map 4MB at a time. */
3614 for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr ; pmap_curmaxkvaddr += 4 * NBPD) {
3615
3616 if (uvm.page_init_done == FALSE) {
3617
3618 /*
3619 * we're growing the kernel pmap early (from
3620 * uvm_pageboot_alloc()). this case must be
3621 * handled a little differently.
3622 */
3623
3624 if (uvm_page_physget(&ptaddr) == FALSE)
3625 panic("pmap_growkernel: out of memory");
3626 pmap_zero_page(ptaddr);
3627
3628 /* map this page in */
3629 pmap_map_in_l1(kpm, (pmap_curmaxkvaddr + 1), ptaddr, TRUE);
3630
3631 /* count PTP as resident */
3632 kpm->pm_stats.resident_count++;
3633 continue;
3634 }
3635
3636 /*
3637 * THIS *MUST* BE CODED SO AS TO WORK IN THE
3638 * pmap_initialized == FALSE CASE! WE MAY BE
3639 * INVOKED WHILE pmap_init() IS RUNNING!
3640 */
3641
3642 if ((ptp = pmap_alloc_ptp(kpm, (pmap_curmaxkvaddr + 1))) == NULL) {
3643 panic("pmap_growkernel: alloc ptp failed");
3644 }
3645
3646 /* distribute new kernel PTP to all active pmaps */
3647 simple_lock(&pmaps_lock);
3648 LIST_FOREACH(pm, &pmaps, pm_list) {
3649 pmap_map_in_l1(pm, (pmap_curmaxkvaddr + 1), VM_PAGE_TO_PHYS(ptp), TRUE);
3650 }
3651
3652 simple_unlock(&pmaps_lock);
3653 }
3654
3655 /*
3656 	 * flush out the TLB; expensive, but growkernel will happen so
3657 * rarely
3658 */
3659 cpu_tlb_flushD();
3660 cpu_cpwait();
3661
3662 simple_unlock(&kpm->pm_obj.vmobjlock);
3663 splx(s);
3664
3665 out:
3666 return (pmap_curmaxkvaddr);
3667 }
3668
3669
3670
3671 /************************ Bootstrapping routines ****************************/
3672
3673 /*
3674 * This list exists for the benefit of pmap_map_chunk(). It keeps track
3675 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3676 * find them as necessary.
3677 *
3678 * Note that the data on this list is not valid after initarm() returns.
3679 */
3680 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3681
3682 static vaddr_t
3683 kernel_pt_lookup(paddr_t pa)
3684 {
3685 pv_addr_t *pv;
3686
3687 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3688 if (pv->pv_pa == pa)
3689 return (pv->pv_va);
3690 }
3691 return (0);
3692 }
3693
3694 /*
3695 * pmap_map_section:
3696 *
3697 * Create a single section mapping.
3698 */
3699 void
3700 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3701 {
3702 pd_entry_t *pde = (pd_entry_t *) l1pt;
3703 pd_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3704 pd_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
3705
3706 KASSERT(((va | pa) & (L1_SEC_SIZE - 1)) == 0);
3707
3708 pde[va >> PDSHIFT] = L1_SECPTE(pa & PD_MASK, ap, fl);
3709 }
3710
3711 /*
3712 * pmap_map_entry:
3713 *
3714 * Create a single page mapping.
3715 */
3716 void
3717 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3718 {
3719 pd_entry_t *pde = (pd_entry_t *) l1pt;
3720 pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3721 pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
3722 pt_entry_t *pte;
3723
3724 KASSERT(((va | pa) & PGOFSET) == 0);
3725
3726 if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
3727 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3728
3729 pte = (pt_entry_t *)
3730 kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
3731 if (pte == NULL)
3732 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3733
3734 pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
3735 }
3736
3737 /*
3738 * pmap_link_l2pt:
3739 *
3740  *	Link the L2 page table specified by "l2pv" into the L1
3741 * page table at the slot for "va".
3742 */
3743 void
3744 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
3745 {
3746 pd_entry_t *pde = (pd_entry_t *) l1pt;
3747 u_int slot = va >> PDSHIFT;
3748
3749 KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3750
3751 pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
3752 pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
3753 pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
3754 pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
3755
3756 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
3757 }
3758
3759 /*
3760 * pmap_map_chunk:
3761 *
3762 * Map a chunk of memory using the most efficient mappings
3763 * possible (section, large page, small page) into the
3764 * provided L1 and L2 tables at the specified virtual address.
3765 */
3766 vsize_t
3767 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3768 int prot, int cache)
3769 {
3770 pd_entry_t *pde = (pd_entry_t *) l1pt;
3771 pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3772 pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
3773 pt_entry_t *pte;
3774 vsize_t resid;
3775 int i;
3776
3777 resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3778
3779 if (l1pt == 0)
3780 panic("pmap_map_chunk: no L1 table provided");
3781
3782 #ifdef VERBOSE_INIT_ARM
3783 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3784 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3785 #endif
3786
3787 size = resid;
3788
3789 while (resid > 0) {
3790 /* See if we can use a section mapping. */
3791 if (((pa | va) & (L1_SEC_SIZE - 1)) == 0 &&
3792 resid >= L1_SEC_SIZE) {
3793 #ifdef VERBOSE_INIT_ARM
3794 printf("S");
3795 #endif
3796 pde[va >> PDSHIFT] = L1_SECPTE(pa, ap, fl);
3797 va += L1_SEC_SIZE;
3798 pa += L1_SEC_SIZE;
3799 resid -= L1_SEC_SIZE;
3800 continue;
3801 }
3802
3803 /*
3804 * Ok, we're going to use an L2 table. Make sure
3805 * one is actually in the corresponding L1 slot
3806 * for the current VA.
3807 */
3808 if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
3809 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3810
3811 pte = (pt_entry_t *)
3812 kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
3813 if (pte == NULL)
3814 			panic("pmap_map_chunk: can't find L2 table for VA "
3815 "0x%08lx", va);
3816
3817 /* See if we can use a L2 large page mapping. */
3818 if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
3819 resid >= L2_LPAGE_SIZE) {
3820 #ifdef VERBOSE_INIT_ARM
3821 printf("L");
3822 #endif
3823 for (i = 0; i < 16; i++) {
3824 pte[((va >> PGSHIFT) & 0x3f0) + i] =
3825 L2_LPTE(pa, ap, fl);
3826 }
3827 va += L2_LPAGE_SIZE;
3828 pa += L2_LPAGE_SIZE;
3829 resid -= L2_LPAGE_SIZE;
3830 continue;
3831 }
3832
3833 /* Use a small page mapping. */
3834 #ifdef VERBOSE_INIT_ARM
3835 printf("P");
3836 #endif
3837 pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa, ap, fl);
3838 va += NBPG;
3839 pa += NBPG;
3840 resid -= NBPG;
3841 }
3842 #ifdef VERBOSE_INIT_ARM
3843 printf("\n");
3844 #endif
3845 return (size);
3846 }
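/*
 * Illustrative sketch (not compiled): how a port's initarm() might use
 * the bootstrap helpers above.  l1pt_va, physaddr, the va 0xf0000000 and
 * the kernel_pt variable are all made up for the example.
 */
#if 0
	extern pv_addr_t kernel_pt;	/* hypothetical, allocated earlier */

	/* Hook the L2 table into the four L1 slots covering 0xf0000000. */
	pmap_link_l2pt(l1pt_va, 0xf0000000, &kernel_pt);

	/* Map 4MB of kernel memory, cached, using the best page sizes. */
	pmap_map_chunk(l1pt_va, 0xf0000000, physaddr, 0x00400000,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
#endif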
3847