1 /* $NetBSD: pmap.c,v 1.16 2001/07/29 12:45:27 chris Exp $ */
2
3 /*
4 * Copyright (c) 2001 Richard Earnshaw
5 * Copyright (c) 2001 Christopher Gilbert
6 * All rights reserved.
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the company nor the name of the author may be used to
14 * endorse or promote products derived from this software without specific
15 * prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*-
31 * Copyright (c) 1999 The NetBSD Foundation, Inc.
32 * All rights reserved.
33 *
34 * This code is derived from software contributed to The NetBSD Foundation
35 * by Charles M. Hannum.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the NetBSD
48 * Foundation, Inc. and its contributors.
49 * 4. Neither the name of The NetBSD Foundation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * Copyright (c) 1994-1998 Mark Brinicombe.
68 * Copyright (c) 1994 Brini.
69 * All rights reserved.
70 *
71 * This code is derived from software written for Brini by Mark Brinicombe
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. All advertising materials mentioning features or use of this software
82 * must display the following acknowledgement:
83 * This product includes software developed by Mark Brinicombe.
84 * 4. The name of the author may not be used to endorse or promote products
85 * derived from this software without specific prior written permission.
86 *
87 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
88 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
89 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
90 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
91 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
92 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
93 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
94 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 *
97 * RiscBSD kernel project
98 *
99 * pmap.c
100 *
101 * Machine dependent vm stuff
102 *
103 * Created : 20/09/94
104 */
105
106 /*
107 * Performance improvements, UVM changes, overhauls and part-rewrites
108 * were contributed by Neil A. Carson <neil (at) causality.com>.
109 */
110
111 /*
112 * The dram block info is currently referenced from the bootconfig.
113 * This should be placed in a separate structure.
114 */
115
116 /*
117 * Special compilation symbols
118 * PMAP_DEBUG - Build in pmap_debug_level code
119 */
120
121 /* Include header files */
122
123 #include "opt_pmap_debug.h"
124 #include "opt_ddb.h"
125
126 #include <sys/types.h>
127 #include <sys/param.h>
128 #include <sys/kernel.h>
129 #include <sys/systm.h>
130 #include <sys/proc.h>
131 #include <sys/malloc.h>
132 #include <sys/user.h>
133 #include <sys/pool.h>
134 #include <sys/cdefs.h>
135
136 #include <uvm/uvm.h>
137
138 #include <machine/bootconfig.h>
139 #include <machine/bus.h>
140 #include <machine/pmap.h>
141 #include <machine/pcb.h>
142 #include <machine/param.h>
143 #include <machine/katelib.h>
144
145 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.16 2001/07/29 12:45:27 chris Exp $");
146
147 #ifdef PMAP_DEBUG
148 #define PDEBUG(_lev_,_stat_) \
149 if (pmap_debug_level >= (_lev_)) \
150 ((_stat_))
151 int pmap_debug_level = -2;
152 #else /* PMAP_DEBUG */
153 #define PDEBUG(_lev_,_stat_) /* Nothing */
154 #endif /* PMAP_DEBUG */
155
156 struct pmap kernel_pmap_store;
157
158 /*
159 * pool that pmap structures are allocated from
160 */
161
162 struct pool pmap_pmap_pool;
163
164 pagehook_t page_hook0;
165 pagehook_t page_hook1;
166 char *memhook;
167 pt_entry_t msgbufpte;
168 extern caddr_t msgbufaddr;
169
170 #ifdef DIAGNOSTIC
171 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
172 #endif
173
174 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
175
176 int pv_nfree = 0;
177
178 vsize_t npages;
179
180 extern paddr_t physical_start;
181 extern paddr_t physical_freestart;
182 extern paddr_t physical_end;
183 extern paddr_t physical_freeend;
184 extern unsigned int free_pages;
185 extern int max_processes;
186
187 vaddr_t virtual_start;
188 vaddr_t virtual_end;
189
190 vaddr_t avail_start;
191 vaddr_t avail_end;
192
193 extern pv_addr_t systempage;
194
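/*
 * ALLOC_PAGE_HOOK: carve 's' bytes of kernel virtual address space out
 * of virtual_start for a page hook, recording both the chosen virtual
 * address and a pointer to the PTE that maps it.  Used by
 * pmap_bootstrap() to set up the hooks that pmap_zero_page() and
 * pmap_copy_page() map pages into.
 */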
195 #define ALLOC_PAGE_HOOK(x, s) \
196 x.va = virtual_start; \
197 x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
198 virtual_start += s;
199
200 /* Variables used by the L1 page table queue code */
201 SIMPLEQ_HEAD(l1pt_queue, l1pt);
202 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
203 int l1pt_static_queue_count; /* items in the static l1 queue */
204 int l1pt_static_create_count; /* static l1 items created */
205 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
206 int l1pt_queue_count; /* items in the l1 queue */
207 int l1pt_create_count; /* stat - L1's create count */
208 int l1pt_reuse_count; /* stat - L1's reused count */
209
210 /* Local function prototypes (not used outside this file) */
211 pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
212 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
213 paddr_t pa, unsigned int flags));
214 void pmap_copy_on_write __P((paddr_t pa));
215 void pmap_pinit __P((struct pmap *));
216 void pmap_freepagedir __P((struct pmap *));
217 void pmap_release __P((struct pmap *));
218
219 /* Other function prototypes */
220 extern void bzero_page __P((vaddr_t));
221 extern void bcopy_page __P((vaddr_t, vaddr_t));
222
223 struct l1pt *pmap_alloc_l1pt __P((void));
224 static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
225 vaddr_t l2pa));
226
227 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
228 /* eventually this will be a function */
229 #define pmap_unmap_ptes(a)
230
231 void pmap_vac_me_harder __P((struct pmap *, struct pv_entry *,
232 pt_entry_t *, boolean_t));
233
234 #ifdef MYCROFT_HACK
235 int mycroft_hack = 0;
236 #endif
237
238 /* Function to set the debug level of the pmap code */
239
240 #ifdef PMAP_DEBUG
241 void
242 pmap_debug(level)
243 int level;
244 {
245 pmap_debug_level = level;
246 printf("pmap_debug: level=%d\n", pmap_debug_level);
247 }
248 #endif /* PMAP_DEBUG */
249
250 #include "isadma.h"
251
252 #if NISADMA > 0
253 /*
254 * Used to protect memory for ISA DMA bounce buffers. If, when loading
255 * pages into the system, memory intersects with any of these ranges,
256 * the intersecting memory will be loaded into a lower-priority free list.
257 */
258 bus_dma_segment_t *pmap_isa_dma_ranges;
259 int pmap_isa_dma_nranges;
260
261 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
262 paddr_t *, psize_t *));
263
264 /*
265 * Check if a memory range intersects with an ISA DMA range, and
266 * return the page-rounded intersection if it does. The intersection
267 * will be placed on a lower-priority free list.
268 */
269 boolean_t
270 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
271 paddr_t pa;
272 psize_t size;
273 paddr_t *pap;
274 psize_t *sizep;
275 {
276 bus_dma_segment_t *ds;
277 int i;
278
279 if (pmap_isa_dma_ranges == NULL)
280 return (FALSE);
281
282 for (i = 0, ds = pmap_isa_dma_ranges;
283 i < pmap_isa_dma_nranges; i++, ds++) {
284 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
285 /*
286 * Beginning of region intersects with this range.
287 */
288 *pap = trunc_page(pa);
289 *sizep = round_page(min(pa + size,
290 ds->ds_addr + ds->ds_len) - pa);
291 return (TRUE);
292 }
293 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
294 /*
295 * End of region intersects with this range.
296 */
297 *pap = trunc_page(ds->ds_addr);
298 *sizep = round_page(min((pa + size) - ds->ds_addr,
299 ds->ds_len));
300 return (TRUE);
301 }
302 }
303
304 /*
305 * No intersection found.
306 */
307 return (FALSE);
308 }
309 #endif /* NISADMA > 0 */
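/*
 * Illustrative sketch (not part of the original code) of the
 * intersection arithmetic above; the range and addresses are made up
 * purely for illustration.
 */
#if 0
void
pmap_isa_dma_example(void)
{
	paddr_t ipa;
	psize_t isize;

	/*
	 * Suppose pmap_isa_dma_ranges describes a single range
	 * [0x00800000, 0x00900000).  A block starting at 0x008ff000 with
	 * size 0x4000 begins inside that range, so the first case above
	 * applies: the intersection returned is trunc_page(0x008ff000) =
	 * 0x008ff000 and round_page(min(0x00903000, 0x00900000) -
	 * 0x008ff000) = 0x1000 bytes.
	 */
	if (pmap_isa_dma_range_intersect(0x008ff000, 0x4000, &ipa, &isize))
		printf("intersects at 0x%lx for 0x%lx bytes\n", ipa, isize);
}
#endif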
310
311 /*
312 * Functions for manipulating pv_entry structures. These are used to keep a
313 * record of the mappings of virtual addresses and the associated physical
314 * pages.
315 */
316
317 /*
318 * Allocate a new pv_entry structure from the freelist. If the list is
319 * empty allocate a new page and fill the freelist.
320 */
321 struct pv_entry *
322 pmap_alloc_pv()
323 {
324 struct pv_page *pvp;
325 struct pv_entry *pv;
326 int i;
327
328 /*
329 * Do we have any free pv_entry structures left ?
330 * If not allocate a page of them
331 */
332
333 if (pv_nfree == 0) {
334 /* NOTE: can't lock kernel_map here */
335 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
336 if (pvp == 0)
337 panic("pmap_alloc_pv: kmem_alloc() failed");
338 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
339 for (i = NPVPPG - 2; i; i--, pv++)
340 pv->pv_next = pv + 1;
341 pv->pv_next = 0;
342 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
343 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
344 pv = &pvp->pvp_pv[0];
345 } else {
346 --pv_nfree;
347 pvp = pv_page_freelist.tqh_first;
348 if (--pvp->pvp_pgi.pgi_nfree == 0) {
349 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
350 }
351 pv = pvp->pvp_pgi.pgi_freelist;
352 #ifdef DIAGNOSTIC
353 if (pv == 0)
354 panic("pmap_alloc_pv: pgi_nfree inconsistent");
355 #endif /* DIAGNOSTIC */
356 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
357 }
358 return pv;
359 }
360
361 /*
362 * Release a pv_entry structure putting it back on the freelist.
363 */
364
365 void
366 pmap_free_pv(pv)
367 struct pv_entry *pv;
368 {
369 struct pv_page *pvp;
370
371 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
372 switch (++pvp->pvp_pgi.pgi_nfree) {
373 case 1:
374 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
375 default:
376 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
377 pvp->pvp_pgi.pgi_freelist = pv;
378 ++pv_nfree;
379 break;
380 case NPVPPG:
381 pv_nfree -= NPVPPG - 1;
382 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
383 FREE((vaddr_t)pvp, M_VMPVENT);
384 break;
385 }
386 }
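/*
 * Note on the allocator above: pv_entry structures are carved NPVPPG at
 * a time out of whole pages.  The struct pv_page header at the start of
 * each page holds that page's free count and freelist, which is why
 * pmap_free_pv() can recover the owning page with trunc_page() on the
 * entry's address.
 */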
387
388 #if 0
389 void
390 pmap_collect_pv()
391 {
392 struct pv_page_list pv_page_collectlist;
393 struct pv_page *pvp, *npvp;
394 struct pv_entry *ph, *ppv, *pv, *npv;
395 int s;
396
397 TAILQ_INIT(&pv_page_collectlist);
398
399 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
400 if (pv_nfree < NPVPPG)
401 break;
402 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
403 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
404 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
405 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
406 pvp_pgi.pgi_list);
407 pv_nfree -= NPVPPG;
408 pvp->pvp_pgi.pgi_nfree = -1;
409 }
410 }
411
412 if (pv_page_collectlist.tqh_first == 0)
413 return;
414
415 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
416 if (ph->pv_pmap == 0)
417 continue;
418 s = splvm();
419 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
420 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
421 if (pvp->pvp_pgi.pgi_nfree == -1) {
422 pvp = pv_page_freelist.tqh_first;
423 if (--pvp->pvp_pgi.pgi_nfree == 0) {
424 TAILQ_REMOVE(&pv_page_freelist,
425 pvp, pvp_pgi.pgi_list);
426 }
427 npv = pvp->pvp_pgi.pgi_freelist;
428 #ifdef DIAGNOSTIC
429 if (npv == 0)
430 panic("pmap_collect_pv: pgi_nfree inconsistent");
431 #endif /* DIAGNOSTIC */
432 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
433 *npv = *pv;
434 ppv->pv_next = npv;
435 ppv = npv;
436 } else
437 ppv = pv;
438 }
439 splx(s);
440 }
441
442 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
443 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
444 FREE((vaddr_t)pvp, M_VMPVENT);
445 }
446 }
447 #endif
448
449 /*
450 * Enter a new physical-virtual mapping into the pv table
451 */
452
453 /*__inline*/ void
454 pmap_enter_pv(pmap, va, pv, flags)
455 struct pmap *pmap;
456 vaddr_t va;
457 struct pv_entry *pv;
458 u_int flags;
459 {
460 struct pv_entry *npv;
461 u_int s;
462
463 #ifdef DIAGNOSTIC
464 if (!pmap_initialized)
465 panic("pmap_enter_pv: !pmap_initialized");
466 #endif
467
468 s = splvm();
469
470 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
471 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
472
473 if (pv->pv_pmap == NULL) {
474 /*
475 * No entries yet, use header as the first entry
476 */
477 pv->pv_va = va;
478 pv->pv_pmap = pmap;
479 pv->pv_next = NULL;
480 pv->pv_flags = flags;
481 } else {
482 /*
483 * There is at least one other VA mapping this page.
484 * Place this entry after the header.
485 */
486 #ifdef PMAP_DEBUG
487 for (npv = pv; npv; npv = npv->pv_next)
488 if (pmap == npv->pv_pmap && va == npv->pv_va)
489 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
490 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
491 #endif
492 npv = pmap_alloc_pv();
493 /* Must make sure that the new entry is before any others
494 * for the same pmap. Otherwise the vac handling code
495 * will get confused.
496 * XXX this would be better if we used lists like the i386 (in fact
497 * this would be a lot simpler)
498 */
499 *npv = *pv;
500 pv->pv_va = va;
501 pv->pv_pmap = pmap;
502 pv->pv_flags = flags;
503 pv->pv_next = npv;
504 }
505
506 if (flags & PT_W)
507 ++pmap->pm_stats.wired_count;
508
509 splx(s);
510 }
511
512
513 /*
514 * Remove a physical-virtual mapping from the pv table
515 */
516
517 /*__inline*/ void
518 pmap_remove_pv(pmap, va, pv)
519 struct pmap *pmap;
520 vaddr_t va;
521 struct pv_entry *pv;
522 {
523 struct pv_entry *npv;
524 u_int s;
525 u_int flags = 0;
526
527 #ifdef DIAGNOSTIC
528 if (!pmap_initialized)
529 panic("pmap_remove_pv: !pmap_initialized");
530 #endif
531
532 s = splvm();
533
534 /*
535 * If it is the first entry on the list, it is actually
536 * in the header and we must copy the following entry up
537 * to the header. Otherwise we must search the list for
538 * the entry. In either case we free the now unused entry.
539 */
540
541 if (pmap == pv->pv_pmap && va == pv->pv_va) {
542 npv = pv->pv_next;
543 if (npv) {
544 *pv = *npv;
545 flags = npv->pv_flags;
546 pmap_free_pv(npv);
547 } else {
548 flags = pv->pv_flags;
549 pv->pv_pmap = NULL;
550 }
551 } else {
552 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
553 if (pmap == npv->pv_pmap && va == npv->pv_va)
554 break;
555 }
556 if (npv) {
557 pv->pv_next = npv->pv_next;
558 flags = npv->pv_flags;
559 pmap_free_pv(npv);
560 } else
561 panic("pmap_remove_pv: lost entry");
562 }
563
564 if (flags & PT_W)
565 --pmap->pm_stats.wired_count;
566
567 splx(s);
568 }
569
570 /*
571 * Modify a physical-virtual mapping in the pv table
572 */
573
574 /*__inline */ u_int
575 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
576 struct pmap *pmap;
577 vaddr_t va;
578 struct pv_entry *pv;
579 u_int bic_mask;
580 u_int eor_mask;
581 {
582 struct pv_entry *npv;
583 u_int s;
584 u_int flags, oflags;
585
586 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
587 pmap, va, pv, bic_mask, eor_mask));
588
589 #ifdef DIAGNOSTIC
590 if (!pmap_initialized)
591 panic("pmap_modify_pv: !pmap_initialized");
592 #endif
593
594 s = splvm();
595
596 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
597 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
598
599 /*
600 * There is at least one VA mapping this page.
601 */
602
603 for (npv = pv; npv; npv = npv->pv_next) {
604 if (pmap == npv->pv_pmap && va == npv->pv_va) {
605 oflags = npv->pv_flags;
606 npv->pv_flags = flags =
607 ((oflags & ~bic_mask) ^ eor_mask);
608 if ((flags ^ oflags) & PT_W) {
609 if (flags & PT_W)
610 ++pmap->pm_stats.wired_count;
611 else
612 --pmap->pm_stats.wired_count;
613 }
614 PDEBUG(0, printf("done flags=%08x\n", flags));
615 splx(s);
616 return (oflags);
617 }
618 }
619
620 PDEBUG(0, printf("done.\n"));
621 splx(s);
622 return (0);
623 }
624
625
626 /*
627 * Map the specified level 2 pagetable into the level 1 page table for
628 * the given pmap to cover a chunk of virtual address space starting from the
629 * address specified.
630 */
631 static /*__inline*/ void
632 pmap_map_in_l1(pmap, va, l2pa)
633 struct pmap *pmap;
634 vaddr_t va, l2pa;
635 {
636 vaddr_t ptva;
637
638 /* Calculate the index into the L1 page table. */
639 ptva = (va >> PDSHIFT) & ~3;
640
641 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
642 pmap->pm_pdir, L1_PTE(l2pa), ptva));
643
644 /* Map page table into the L1. */
645 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
646 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
647 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
648 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
649
650 PDEBUG(0, printf("pt self reference %lx in %lx\n",
651 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
652
653 /* Map the page table into the page table area. */
654 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
655
656 /* XXX should be a purge */
657 /* cpu_tlb_flushD();*/
658 }
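/*
 * Illustrative sketch (not part of the original code) of the index
 * arithmetic used by pmap_map_in_l1() above, assuming the usual arm32
 * values: PDSHIFT == 20 (each L1 entry covers 1MB) and 1KB L2 tables,
 * so one 4KB page of L2 tables fills four consecutive L1 slots.
 */
#if 0
void
pmap_map_in_l1_example(void)
{
	vaddr_t va = 0x12345678;	/* arbitrary example address */
	vaddr_t ptva;

	/* 0x12345678 >> 20 == 0x123, rounded down to a multiple of 4: 0x120 */
	ptva = (va >> PDSHIFT) & ~3;

	/*
	 * L1 slots 0x120..0x123 are then pointed at the four 1KB L2
	 * tables held in the single page at l2pa (offsets 0x000, 0x400,
	 * 0x800 and 0xc00), exactly as pmap_map_in_l1() does above.
	 */
	printf("L1 group index for va 0x%08lx is 0x%lx\n", va, ptva);
}
#endif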
659
660 #if 0
661 static /*__inline*/ void
662 pmap_unmap_in_l1(pmap, va)
663 struct pmap *pmap;
664 vaddr_t va;
665 {
666 vaddr_t ptva;
667
668 /* Calculate the index into the L1 page table. */
669 ptva = (va >> PDSHIFT) & ~3;
670
671 /* Unmap page table from the L1. */
672 pmap->pm_pdir[ptva + 0] = 0;
673 pmap->pm_pdir[ptva + 1] = 0;
674 pmap->pm_pdir[ptva + 2] = 0;
675 pmap->pm_pdir[ptva + 3] = 0;
676
677 /* Unmap the page table from the page table area. */
678 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
679
680 /* XXX should be a purge */
681 /* cpu_tlb_flushD();*/
682 }
683 #endif
684
685
686 /*
687 * Used to map a range of physical addresses into kernel
688 * virtual address space.
689 *
690 * For now, VM is already on, we only need to map the
691 * specified memory.
692 */
693 vaddr_t
694 pmap_map(va, spa, epa, prot)
695 vaddr_t va, spa, epa;
696 int prot;
697 {
698 while (spa < epa) {
699 pmap_enter(pmap_kernel(), va, spa, prot, 0);
700 va += NBPG;
701 spa += NBPG;
702 }
703 pmap_update();
704 return(va);
705 }
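/*
 * Illustrative sketch (not part of the original code) of how pmap_map()
 * above is typically called; the virtual and physical addresses below
 * are made up and only show how the arguments line up.
 */
#if 0
void
pmap_map_example(void)
{
	vaddr_t va = 0xf1000000;	/* hypothetical reserved KVA */

	/*
	 * Map four hypothetical physical pages starting at 0x00200000
	 * read/write; the return value is the first unused virtual
	 * address after the new mappings.
	 */
	va = pmap_map(va, 0x00200000, 0x00204000,
	    VM_PROT_READ | VM_PROT_WRITE);
}
#endif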
706
707
708 /*
709 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
710 *
711 * bootstrap the pmap system. This is called from initarm and allows
712 * the pmap system to initialise any structures it requires.
713 *
714 * Currently this sets up the kernel_pmap that is statically allocated
715 * and also allocates virtual addresses for certain page hooks.
716 * Currently page hooks are allocated that are used
717 * to zero and copy physical pages of memory.
718 * It also initialises the start and end address of the kernel data space.
719 */
720 extern paddr_t physical_freestart;
721 extern paddr_t physical_freeend;
722
723 struct pv_entry *boot_pvent;
724 char *boot_attrs;
725
726 void
727 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
728 pd_entry_t *kernel_l1pt;
729 pv_addr_t kernel_ptpt;
730 {
731 int loop;
732 paddr_t start, end;
733 #if NISADMA > 0
734 paddr_t istart;
735 psize_t isize;
736 #endif
737 vsize_t size;
738
739 pmap_kernel()->pm_pdir = kernel_l1pt;
740 pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
741 pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
742 simple_lock_init(&pmap_kernel()->pm_lock);
743 pmap_kernel()->pm_obj.pgops = NULL;
744 TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
745 pmap_kernel()->pm_obj.uo_npages = 0;
746 pmap_kernel()->pm_obj.uo_refs = 1;
747
748 /*
749 * Initialize PAGE_SIZE-dependent variables.
750 */
751 uvm_setpagesize();
752
753 npages = 0;
754 loop = 0;
755 while (loop < bootconfig.dramblocks) {
756 start = (paddr_t)bootconfig.dram[loop].address;
757 end = start + (bootconfig.dram[loop].pages * NBPG);
758 if (start < physical_freestart)
759 start = physical_freestart;
760 if (end > physical_freeend)
761 end = physical_freeend;
762 #if 0
763 printf("%d: %lx -> %lx\n", loop, start, end - 1);
764 #endif
765 #if NISADMA > 0
766 if (pmap_isa_dma_range_intersect(start, end - start,
767 &istart, &isize)) {
768 /*
769 * Place the pages that intersect with the
770 * ISA DMA range onto the ISA DMA free list.
771 */
772 #if 0
773 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
774 istart + isize - 1);
775 #endif
776 uvm_page_physload(atop(istart),
777 atop(istart + isize), atop(istart),
778 atop(istart + isize), VM_FREELIST_ISADMA);
779 npages += atop(istart + isize) - atop(istart);
780
781 /*
782 * Load the pieces that come before
783 * the intersection into the default
784 * free list.
785 */
786 if (start < istart) {
787 #if 0
788 printf(" BEFORE 0x%lx -> 0x%lx\n",
789 start, istart - 1);
790 #endif
791 uvm_page_physload(atop(start),
792 atop(istart), atop(start),
793 atop(istart), VM_FREELIST_DEFAULT);
794 npages += atop(istart) - atop(start);
795 }
796
797 /*
798 * Load the pieces that come after
799 * the intersection into the default
800 * free list.
801 */
802 if ((istart + isize) < end) {
803 #if 0
804 printf(" AFTER 0x%lx -> 0x%lx\n",
805 (istart + isize), end - 1);
806 #endif
807 uvm_page_physload(atop(istart + isize),
808 atop(end), atop(istart + isize),
809 atop(end), VM_FREELIST_DEFAULT);
810 npages += atop(end) - atop(istart + isize);
811 }
812 } else {
813 uvm_page_physload(atop(start), atop(end),
814 atop(start), atop(end), VM_FREELIST_DEFAULT);
815 npages += atop(end) - atop(start);
816 }
817 #else /* NISADMA > 0 */
818 uvm_page_physload(atop(start), atop(end),
819 atop(start), atop(end), VM_FREELIST_DEFAULT);
820 npages += atop(end) - atop(start);
821 #endif /* NISADMA > 0 */
822 ++loop;
823 }
824
825 #ifdef MYCROFT_HACK
826 printf("npages = %ld\n", npages);
827 #endif
828
829 virtual_start = KERNEL_VM_BASE;
830 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
831
832 ALLOC_PAGE_HOOK(page_hook0, NBPG);
833 ALLOC_PAGE_HOOK(page_hook1, NBPG);
834
835 /*
836 * The mem special device needs a virtual hook but we don't
837 * need a pte
838 */
839 memhook = (char *)virtual_start;
840 virtual_start += NBPG;
841
842 msgbufaddr = (caddr_t)virtual_start;
843 msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
844 virtual_start += round_page(MSGBUFSIZE);
845
846 size = npages * sizeof(struct pv_entry);
847 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
848 bzero(boot_pvent, size);
849 size = npages * sizeof(char);
850 boot_attrs = (char *)uvm_pageboot_alloc(size);
851 bzero(boot_attrs, size);
852
853 /*
854 * initialize the pmap pool.
855 */
856
857 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
858 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
859
860 cpu_cache_cleanD();
861 }
862
863 /*
864 * void pmap_init(void)
865 *
866 * Initialize the pmap module.
867 * Called by vm_init() in vm/vm_init.c in order to initialise
868 * any structures that the pmap system needs to map virtual memory.
869 */
870
871 extern int physmem;
872
873 void
874 pmap_init()
875 {
876 int lcv;
877
878 #ifdef MYCROFT_HACK
879 printf("physmem = %d\n", physmem);
880 #endif
881
882 /*
883 * Set the available memory vars - These do not map to real memory
884 * addresses, and cannot, as the physical memory is fragmented.
885 * They are used by ps for %mem calculations.
886 * One could argue whether this should be the entire memory or just
887 * the memory that is useable in a user process.
888 */
889 avail_start = 0;
890 avail_end = physmem * NBPG;
891
892 /* Set up pmap info for physsegs. */
893 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
894 vm_physmem[lcv].pmseg.pvent = boot_pvent;
895 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
896 vm_physmem[lcv].pmseg.attrs = boot_attrs;
897 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
898 }
899 #ifdef MYCROFT_HACK
900 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
901 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
902 lcv,
903 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
904 vm_physmem[lcv].start, vm_physmem[lcv].end);
905 }
906 #endif
907 TAILQ_INIT(&pv_page_freelist);
908
909 #ifdef DIAGNOSTIC
910 /* Now it is safe to enable pv_entry recording. */
911 pmap_initialized = TRUE;
912 #endif
913
914 /* Initialise our L1 page table queues and counters */
915 SIMPLEQ_INIT(&l1pt_static_queue);
916 l1pt_static_queue_count = 0;
917 l1pt_static_create_count = 0;
918 SIMPLEQ_INIT(&l1pt_queue);
919 l1pt_queue_count = 0;
920 l1pt_create_count = 0;
921 l1pt_reuse_count = 0;
922 }
923
924 /*
925 * pmap_postinit()
926 *
927 * This routine is called after the vm and kmem subsystems have been
928 * initialised. This allows the pmap code to perform any initialisation
929 * that can only be done once the memory allocation is in place.
930 */
931
932 void
933 pmap_postinit()
934 {
935 int loop;
936 struct l1pt *pt;
937
938 #ifdef PMAP_STATIC_L1S
939 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
940 #else /* PMAP_STATIC_L1S */
941 for (loop = 0; loop < max_processes; ++loop) {
942 #endif /* PMAP_STATIC_L1S */
943 /* Allocate a L1 page table */
944 pt = pmap_alloc_l1pt();
945 if (!pt)
946 panic("Cannot allocate static L1 page tables\n");
947
948 /* Clean it */
949 bzero((void *)pt->pt_va, PD_SIZE);
950 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
951 /* Add the page table to the queue */
952 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
953 ++l1pt_static_queue_count;
954 ++l1pt_static_create_count;
955 }
956 }
957
958
959 /*
960 * Create and return a physical map.
961 *
962 * If the size specified for the map is zero, the map is an actual physical
963 * map, and may be referenced by the hardware.
964 *
965 * If the size specified is non-zero, the map will be used in software only,
966 * and is bounded by that size.
967 */
968
969 pmap_t
970 pmap_create()
971 {
972 struct pmap *pmap;
973
974 /*
975 * Fetch pmap entry from the pool
976 */
977
978 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
979 bzero(pmap, sizeof(*pmap));
980
981 simple_lock_init(&pmap->pm_obj.vmobjlock);
982 pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
983 TAILQ_INIT(&pmap->pm_obj.memq);
984 pmap->pm_obj.uo_npages = 0;
985 pmap->pm_obj.uo_refs = 1;
986 pmap->pm_stats.wired_count = 0;
987 pmap->pm_stats.resident_count = 1;
988
989 /* Now init the machine part of the pmap */
990 pmap_pinit(pmap);
991 return(pmap);
992 }
993
994 /*
995 * pmap_alloc_l1pt()
996 *
997 * This routine allocates physical and virtual memory for a L1 page table
998 * and wires it.
999 * A l1pt structure is returned to describe the allocated page table.
1000 *
1001 * This routine is allowed to fail if the required memory cannot be allocated.
1002 * In this case NULL is returned.
1003 */
1004
1005 struct l1pt *
1006 pmap_alloc_l1pt(void)
1007 {
1008 paddr_t pa;
1009 vaddr_t va;
1010 struct l1pt *pt;
1011 int error;
1012 struct vm_page *m;
1013 pt_entry_t *ptes;
1014
1015 /* Allocate virtual address space for the L1 page table */
1016 va = uvm_km_valloc(kernel_map, PD_SIZE);
1017 if (va == 0) {
1018 #ifdef DIAGNOSTIC
1019 printf("pmap: Cannot allocate pageable memory for L1\n");
1020 #endif /* DIAGNOSTIC */
1021 return(NULL);
1022 }
1023
1024 /* Allocate memory for the l1pt structure */
1025 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1026
1027 /*
1028 * Allocate pages from the VM system.
1029 */
1030 TAILQ_INIT(&pt->pt_plist);
1031 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
1032 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1033 if (error) {
1034 #ifdef DIAGNOSTIC
1035 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
1036 error);
1037 #endif /* DIAGNOSTIC */
1038 /* Release the resources we already have claimed */
1039 free(pt, M_VMPMAP);
1040 uvm_km_free(kernel_map, va, PD_SIZE);
1041 return(NULL);
1042 }
1043
1044 /* Map our physical pages into our virtual space */
1045 pt->pt_va = va;
1046 m = pt->pt_plist.tqh_first;
1047 ptes = pmap_map_ptes(pmap_kernel());
1048 while (m && va < (pt->pt_va + PD_SIZE)) {
1049 pa = VM_PAGE_TO_PHYS(m);
1050
1051 pmap_enter(pmap_kernel(), va, pa,
1052 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
1053
1054 /* Revoke cacheability and bufferability */
1055 /* XXX should be done better than this */
1056 ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
1057
1058 va += NBPG;
1059 m = m->pageq.tqe_next;
1060 }
1061 pmap_unmap_ptes(pmap_kernel());
1062 pmap_update();
1063
1064 #ifdef DIAGNOSTIC
1065 if (m)
1066 panic("pmap_alloc_l1pt: pglist not empty\n");
1067 #endif /* DIAGNOSTIC */
1068
1069 pt->pt_flags = 0;
1070 return(pt);
1071 }
1072
1073 /*
1074 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1075 */
1076 void
1077 pmap_free_l1pt(pt)
1078 struct l1pt *pt;
1079 {
1080 /* Separate the physical memory for the virtual space */
1081 pmap_remove(pmap_kernel(), pt->pt_va, pt->pt_va + PD_SIZE);
1082 pmap_update();
1083
1084 /* Return the physical memory */
1085 uvm_pglistfree(&pt->pt_plist);
1086
1087 /* Free the virtual space */
1088 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1089
1090 /* Free the l1pt structure */
1091 free(pt, M_VMPMAP);
1092 }
1093
1094 /*
1095 * Allocate a page directory.
1096 * This routine will either allocate a new page directory from the pool
1097 * of L1 page tables currently held by the kernel or it will allocate
1098 * a new one via pmap_alloc_l1pt().
1099 * It will then initialise the l1 page table for use.
1100 */
1101 int
1102 pmap_allocpagedir(pmap)
1103 struct pmap *pmap;
1104 {
1105 paddr_t pa;
1106 struct l1pt *pt;
1107 pt_entry_t *pte;
1108
1109 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1110
1111 /* Do we have any spare L1's lying around ? */
1112 if (l1pt_static_queue_count) {
1113 --l1pt_static_queue_count;
1114 pt = l1pt_static_queue.sqh_first;
1115 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1116 } else if (l1pt_queue_count) {
1117 --l1pt_queue_count;
1118 pt = l1pt_queue.sqh_first;
1119 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1120 ++l1pt_reuse_count;
1121 } else {
1122 pt = pmap_alloc_l1pt();
1123 if (!pt)
1124 return(ENOMEM);
1125 ++l1pt_create_count;
1126 }
1127
1128 /* Store the pointer to the l1 descriptor in the pmap. */
1129 pmap->pm_l1pt = pt;
1130
1131 /* Get the physical address of the start of the l1 */
1132 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1133
1134 /* Store the virtual address of the l1 in the pmap. */
1135 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1136
1137 /* Clean the L1 if it is dirty */
1138 if (!(pt->pt_flags & PTFLAG_CLEAN))
1139 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1140
1141 /* Do we already have the kernel mappings ? */
1142 if (!(pt->pt_flags & PTFLAG_KPT)) {
1143 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1144
1145 bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1146 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1147 KERNEL_PD_SIZE);
1148 pt->pt_flags |= PTFLAG_KPT;
1149 }
1150
1151 /* Allocate a page table to map all the page tables for this pmap */
1152
1153 #ifdef DIAGNOSTIC
1154 if (pmap->pm_vptpt) {
1155 /* XXX What if we have one already ? */
1156 panic("pmap_allocpagedir: have pt already\n");
1157 }
1158 #endif /* DIAGNOSTIC */
1159 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1160 if (pmap->pm_vptpt == 0) {
1161 pmap_freepagedir(pmap);
1162 return(ENOMEM);
1163 }
1164
1165 (void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
1166 pmap->pm_pptpt &= PG_FRAME;
1167 /* Revoke cacheability and bufferability */
1168 /* XXX should be done better than this */
1169 pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
1170 *pte = *pte & ~(PT_C | PT_B);
1171
1172 /* Wire in this page table */
1173 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1174
1175 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1176
1177 /*
1178 * Map the kernel page tables for 0xf0000000 +
1179 * into the page table used to map the
1180 * pmap's page tables
1181 */
1182 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1183 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1184 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1185 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1186 (KERNEL_PD_SIZE >> 2));
1187
1188 return(0);
1189 }
1190
1191
1192 /*
1193 * Initialize a preallocated and zeroed pmap structure,
1194 * such as one in a vmspace structure.
1195 */
1196
1197 static int pmap_pagedir_ident; /* tsleep() ident */
1198
1199 void
1200 pmap_pinit(pmap)
1201 struct pmap *pmap;
1202 {
1203 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1204
1205 /* Keep looping until we succeed in allocating a page directory */
1206 while (pmap_allocpagedir(pmap) != 0) {
1207 /*
1208 * Ok we failed to allocate a suitable block of memory for an
1209 * L1 page table. This means that either:
1210 * 1. 16KB of virtual address space could not be allocated
1211 * 2. 16KB of physically contiguous memory on a 16KB boundary
1212 * could not be allocated.
1213 *
1214 * Since we cannot fail we will sleep for a while and try
1215 * again. Although we will be woken when another page table
1216 * is freed, other memory releasing and swapping may occur
1217 * that will mean we can succeed, so we will keep trying
1218 * regularly just in case.
1219 */
1220
1221 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1222 "l1ptwait", 1000) == EWOULDBLOCK)
1223 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1224 }
1225
1226 /* Map zero page for the pmap. This will also map the L2 for it */
1227 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1228 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1229 pmap_update();
1230 }
1231
1232
1233 void
1234 pmap_freepagedir(pmap)
1235 struct pmap *pmap;
1236 {
1237 /* Free the memory used for the page table mapping */
1238 if (pmap->pm_vptpt != 0)
1239 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1240
1241 /* junk the L1 page table */
1242 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1243 /* Add the page table to the queue */
1244 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1245 ++l1pt_static_queue_count;
1246 /* Wake up any sleeping processes waiting for a l1 page table */
1247 wakeup((caddr_t)&pmap_pagedir_ident);
1248 } else if (l1pt_queue_count < 8) {
1249 /* Add the page table to the queue */
1250 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1251 ++l1pt_queue_count;
1252 /* Wake up any sleeping processes waiting for a l1 page table */
1253 wakeup((caddr_t)&pmap_pagedir_ident);
1254 } else
1255 pmap_free_l1pt(pmap->pm_l1pt);
1256 }
1257
1258
1259 /*
1260 * Retire the given physical map from service.
1261 * Should only be called if the map contains no valid mappings.
1262 */
1263
1264 void
1265 pmap_destroy(pmap)
1266 struct pmap *pmap;
1267 {
1268 int count;
1269
1270 if (pmap == NULL)
1271 return;
1272
1273 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1274 simple_lock(&pmap->pm_lock);
1275 count = --pmap->pm_obj.uo_refs;
1276 simple_unlock(&pmap->pm_lock);
1277 if (count == 0) {
1278 pmap_release(pmap);
1279 pool_put(&pmap_pmap_pool, pmap);
1280 }
1281 }
1282
1283
1284 /*
1285 * Release any resources held by the given physical map.
1286 * Called when a pmap initialized by pmap_pinit is being released.
1287 * Should only be called if the map contains no valid mappings.
1288 */
1289
1290 void
1291 pmap_release(pmap)
1292 struct pmap *pmap;
1293 {
1294 struct vm_page *page;
1295
1296 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1297
1298 /* Remove the zero page mapping */
1299 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1300 pmap_update();
1301
1302 /*
1303 * Free any page tables still mapped
1304 * This is only temporary until pmap_enter can count the number
1305 * of mappings made in a page table. Then pmap_remove() can
1306 * reduce the count and free the pagetable when the count
1307 * reaches zero. Note that entries in this list should match the
1308 * contents of the ptpt, however this is faster than walking 1024
1309 * entries looking for pt's.
1310 * taken from i386 pmap.c
1311 */
1312 while (pmap->pm_obj.memq.tqh_first != NULL) {
1313 page = pmap->pm_obj.memq.tqh_first;
1314 #ifdef DIAGNOSTIC
1315 if (page->flags & PG_BUSY)
1316 panic("pmap_release: busy page table page");
1317 #endif
1318 /* pmap_page_protect? currently no need for it. */
1319
1320 page->wire_count = 0;
1321 uvm_pagefree(page);
1322 }
1323
1324 /* Free the page dir */
1325 pmap_freepagedir(pmap);
1326 }
1327
1328
1329 /*
1330 * void pmap_reference(struct pmap *pmap)
1331 *
1332 * Add a reference to the specified pmap.
1333 */
1334
1335 void
1336 pmap_reference(pmap)
1337 struct pmap *pmap;
1338 {
1339 if (pmap == NULL)
1340 return;
1341
1342 simple_lock(&pmap->pm_lock);
1343 pmap->pm_obj.uo_refs++;
1344 simple_unlock(&pmap->pm_lock);
1345 }
1346
1347 /*
1348 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1349 *
1350 * Return the start and end addresses of the kernel's virtual space.
1351 * These values are set up in pmap_bootstrap and are updated as pages
1352 * are allocated.
1353 */
1354
1355 void
1356 pmap_virtual_space(start, end)
1357 vaddr_t *start;
1358 vaddr_t *end;
1359 {
1360 *start = virtual_start;
1361 *end = virtual_end;
1362 }
1363
1364
1365 /*
1366 * Activate the address space for the specified process. If the process
1367 * is the current process, load the new MMU context.
1368 */
1369 void
1370 pmap_activate(p)
1371 struct proc *p;
1372 {
1373 struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1374 struct pcb *pcb = &p->p_addr->u_pcb;
1375
1376 (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1377 (paddr_t *)&pcb->pcb_pagedir);
1378
1379 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1380 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1381
1382 if (p == curproc) {
1383 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1384 setttb((u_int)pcb->pcb_pagedir);
1385 }
1386 #if 0
1387 pmap->pm_pdchanged = FALSE;
1388 #endif
1389 }
1390
1391
1392 /*
1393 * Deactivate the address space of the specified process.
1394 */
1395 void
1396 pmap_deactivate(p)
1397 struct proc *p;
1398 {
1399 }
1400
1401
1402 /*
1403 * pmap_clean_page()
1404 *
1405 * This is a local function used to work out the best strategy to clean
1406 * a single page referenced by its entry in the PV table. It's used by
1407 * pmap_copy_page, pmap_zero_page and maybe some others later on.
1408 *
1409 * Its policy is effectively:
1410 * o If there are no mappings, we don't bother doing anything with the cache.
1411 * o If there is one mapping, we clean just that page.
1412 * o If there are multiple mappings, we clean the entire cache.
1413 *
1414 * So that some functions can be further optimised, it returns 0 if it didn't
1415 * clean the entire cache, or 1 if it did.
1416 *
1417 * XXX One bug in this routine is that if the pv_entry has a single page
1418 * mapped at 0x00000000 a whole cache clean will be performed rather than
1419 * just the 1 page. This should not occur in everyday use, and if it does
1420 * it will just result in a less efficient clean for the page.
1421 */
1422 static int
1423 pmap_clean_page(pv)
1424 struct pv_entry *pv;
1425 {
1426 int s;
1427 int cache_needs_cleaning = 0;
1428 vaddr_t page_to_clean = 0;
1429
1430 /* Go to splvm() so we get exclusive lock for a mo */
1431 s = splvm();
1432 if (pv->pv_pmap) {
1433 cache_needs_cleaning = 1;
1434 if (!pv->pv_next)
1435 page_to_clean = pv->pv_va;
1436 }
1437 splx(s);
1438
1439 /* Do cache ops outside the splvm. */
1440 if (page_to_clean)
1441 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1442 else if (cache_needs_cleaning) {
1443 cpu_cache_purgeID();
1444 return (1);
1445 }
1446 return (0);
1447 }
1448
1449 /*
1450 * pmap_find_pv()
1451 *
1452 * This is a local function that finds a PV entry for a given physical page.
1453 * This is a common op, and this function removes loads of ifdefs in the code.
1454 */
1455 static __inline struct pv_entry *
1456 pmap_find_pv(phys)
1457 paddr_t phys;
1458 {
1459 int bank, off;
1460 struct pv_entry *pv;
1461
1462 #ifdef DIAGNOSTIC
1463 if (!pmap_initialized)
1464 panic("pmap_find_pv: !pmap_initialized");
1465 #endif
1466
1467 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1468 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1469 pv = &vm_physmem[bank].pmseg.pvent[off];
1470 return (pv);
1471 }
1472
1473 /*
1474 * pmap_zero_page()
1475 *
1476 * Zero a given physical page by mapping it at a page hook point.
1477 * In doing the zero page op, the page we zero is mapped cacheable, since
1478 * on the StrongARM accesses to non-cached pages are non-burst, making writing
1479 * _any_ bulk data very slow.
1480 */
1481 void
1482 pmap_zero_page(phys)
1483 paddr_t phys;
1484 {
1485 struct pv_entry *pv;
1486
1487 /* Get an entry for this page, and clean it. */
1488 pv = pmap_find_pv(phys);
1489 pmap_clean_page(pv);
1490
1491 /*
1492 * Hook in the page, zero it, and purge the cache for that
1493 * zeroed page. Invalidate the TLB as needed.
1494 */
1495 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1496 cpu_tlb_flushD_SE(page_hook0.va);
1497 bzero_page(page_hook0.va);
1498 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1499 }
1500
1501 /*
1502 * pmap_copy_page()
1503 *
1504 * Copy one physical page into another, by mapping the pages into
1505 * hook points. The same comment regarding cachability as in
1506 * pmap_zero_page also applies here.
1507 */
1508 void
1509 pmap_copy_page(src, dest)
1510 paddr_t src;
1511 paddr_t dest;
1512 {
1513 struct pv_entry *src_pv, *dest_pv;
1514
1515 /* Get PV entries for the pages, and clean them if needed. */
1516 src_pv = pmap_find_pv(src);
1517 dest_pv = pmap_find_pv(dest);
1518 if (!pmap_clean_page(src_pv))
1519 pmap_clean_page(dest_pv);
1520
1521 /*
1522 * Map the pages into the page hook points, copy them, and purge
1523 * the cache for the appropriate page. Invalidate the TLB
1524 * as required.
1525 */
1526 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1527 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1528 cpu_tlb_flushD_SE(page_hook0.va);
1529 cpu_tlb_flushD_SE(page_hook1.va);
1530 bcopy_page(page_hook0.va, page_hook1.va);
1531 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1532 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1533 }
1534
1535 /*
1536 * paddr_t pmap_next_phys_page(paddr_t addr)
1537 *
1538 * Return the physical address of the next page of memory after 'addr',
1539 * or 0 if there are no further physical pages.
1540 */
1541
1542 paddr_t
1543 pmap_next_phys_page(addr)
1544 paddr_t addr;
1545
1546 {
1547 int loop;
1548
1549 if (addr < bootconfig.dram[0].address)
1550 return(bootconfig.dram[0].address);
1551
1552 loop = 0;
1553
1554 while (bootconfig.dram[loop].address != 0
1555 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1556 ++loop;
1557
1558 if (bootconfig.dram[loop].address == 0)
1559 return(0);
1560
1561 addr += NBPG;
1562
1563 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1564 if (bootconfig.dram[loop + 1].address == 0)
1565 return(0);
1566 addr = bootconfig.dram[loop + 1].address;
1567 }
1568
1569 return(addr);
1570 }
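/*
 * Illustrative sketch (not part of the original code): using
 * pmap_next_phys_page() above to visit every page of physical memory
 * described by bootconfig.  This assumes DRAM does not start at
 * physical address 0, so that a starting address of 0 yields the first
 * page; a return value of 0 marks the end.
 */
#if 0
void
pmap_walk_phys_pages(void)
{
	paddr_t pa = 0;
	int count = 0;

	while ((pa = pmap_next_phys_page(pa)) != 0)
		++count;

	printf("visited %d physical pages\n", count);
}
#endif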
1571
1572 #if 0
1573 void
1574 pmap_pte_addref(pmap, va)
1575 struct pmap *pmap;
1576 vaddr_t va;
1577 {
1578 pd_entry_t *pde;
1579 paddr_t pa;
1580 struct vm_page *m;
1581
1582 if (pmap == pmap_kernel())
1583 return;
1584
1585 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1586 pa = pmap_pte_pa(pde);
1587 m = PHYS_TO_VM_PAGE(pa);
1588 ++m->wire_count;
1589 #ifdef MYCROFT_HACK
1590 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1591 pmap, va, pde, pa, m, m->wire_count);
1592 #endif
1593 }
1594
1595 void
1596 pmap_pte_delref(pmap, va)
1597 struct pmap *pmap;
1598 vaddr_t va;
1599 {
1600 pd_entry_t *pde;
1601 paddr_t pa;
1602 struct vm_page *m;
1603
1604 if (pmap == pmap_kernel())
1605 return;
1606
1607 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1608 pa = pmap_pte_pa(pde);
1609 m = PHYS_TO_VM_PAGE(pa);
1610 --m->wire_count;
1611 #ifdef MYCROFT_HACK
1612 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1613 pmap, va, pde, pa, m, m->wire_count);
1614 #endif
1615 if (m->wire_count == 0) {
1616 #ifdef MYCROFT_HACK
1617 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1618 pmap, va, pde, pa, m);
1619 #endif
1620 pmap_unmap_in_l1(pmap, va);
1621 uvm_pagefree(m);
1622 --pmap->pm_stats.resident_count;
1623 }
1624 }
1625 #else
1626 #define pmap_pte_addref(pmap, va)
1627 #define pmap_pte_delref(pmap, va)
1628 #endif
1629
1630 /*
1631 * Since we have a virtually indexed cache, we may need to inhibit caching if
1632 * there is more than one mapping and at least one of them is writable.
1633 * Since we purge the cache on every context switch, we only need to check for
1634 * other mappings within the same pmap, or kernel_pmap.
1635 * This function is also called when a page is unmapped, to possibly reenable
1636 * caching on any remaining mappings.
1637 *
1638 * Note that the pmap must have its PTEs mapped in, and passed in via 'ptes'.
1639 */
1640 void
1641 pmap_vac_me_harder(struct pmap *pmap, struct pv_entry *pv, pt_entry_t *ptes,
1642 boolean_t clear_cache)
1643 {
1644 struct pv_entry *npv;
1645 pt_entry_t *pte;
1646 int entries = 0;
1647 int writeable = 0;
1648 int cacheable_entries = 0;
1649
1650 if (pv->pv_pmap == NULL)
1651 return;
1652 KASSERT(ptes != NULL);
1653
1654 /*
1655 * Count mappings and writable mappings in this pmap.
1656 * Keep a pointer to the first one.
1657 */
1658 for (npv = pv; npv; npv = npv->pv_next) {
1659 /* Count mappings in the same pmap */
1660 if (pmap == npv->pv_pmap) {
1661 if (entries++ == 0)
1662 pv = npv;
1663 /* Cacheable mappings */
1664 if ((npv->pv_flags & PT_NC) == 0)
1665 cacheable_entries++;
1666 /* Writeable mappings */
1667 if (npv->pv_flags & PT_Wr)
1668 ++writeable;
1669 }
1670 }
1671
1672 PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
1673 "writeable %d cacheable %d %s\n", pmap, entries, writeable,
1674 cacheable_entries, clear_cache ? "clean" : "no clean"));
1675
1676 /*
1677 * Enable or disable caching as necessary.
1678 * We do a quick check of the first PTE to avoid walking the list if
1679 * we're already in the right state.
1680 */
1681 if (entries > 1 && writeable) {
1682 if (cacheable_entries == 0)
1683 return;
1684 if (pv->pv_flags & PT_NC) {
1685 #ifdef DIAGNOSTIC
1686 /* We have cacheable entries, but the first one
1687 isn't among them. Something is wrong. */
1688 if (cacheable_entries)
1689 panic("pmap_vac_me_harder: "
1690 "cacheable inconsistent");
1691 #endif
1692 return;
1693 }
1694 pte = &ptes[arm_byte_to_page(pv->pv_va)];
1695 *pte &= ~(PT_C | PT_B);
1696 pv->pv_flags |= PT_NC;
1697 if (clear_cache && cacheable_entries < 4) {
1698 cpu_cache_purgeID_rng(pv->pv_va, NBPG);
1699 cpu_tlb_flushID_SE(pv->pv_va);
1700 }
1701 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1702 if (pmap == npv->pv_pmap &&
1703 (npv->pv_flags & PT_NC) == 0) {
1704 ptes[arm_byte_to_page(npv->pv_va)] &=
1705 ~(PT_C | PT_B);
1706 npv->pv_flags |= PT_NC;
1707 if (clear_cache && cacheable_entries < 4) {
1708 cpu_cache_purgeID_rng(npv->pv_va,
1709 NBPG);
1710 cpu_tlb_flushID_SE(npv->pv_va);
1711 }
1712 }
1713 }
1714 if (clear_cache && cacheable_entries >= 4) {
1715 cpu_cache_purgeID();
1716 cpu_tlb_flushID();
1717 }
1718 } else if (entries > 0) {
1719 if ((pv->pv_flags & PT_NC) == 0)
1720 return;
1721 pte = &ptes[arm_byte_to_page(pv->pv_va)];
1722 *pte |= (PT_C | PT_B);
1723 pv->pv_flags &= ~PT_NC;
1724 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1725 if (pmap == npv->pv_pmap &&
1726 (npv->pv_flags & PT_NC)) {
1727 ptes[arm_byte_to_page(npv->pv_va)] |=
1728 (PT_C | PT_B);
1729 npv->pv_flags &= ~PT_NC;
1730 }
1731 }
1732 }
1733 }
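/*
 * Concrete example of the problem handled above (descriptive only): if
 * one pmap maps the same physical page at two different virtual
 * addresses and at least one mapping is writable, the virtually indexed
 * cache can hold the page's data under two different indices, so a
 * write made through one mapping need not be visible through the other.
 * pmap_vac_me_harder() therefore clears PT_C and PT_B in every such PTE
 * and marks the pv entries PT_NC; once the extra mappings disappear it
 * re-enables caching.
 */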
1734
1735 /*
1736 * pmap_remove()
1737 *
1738 * pmap_remove is responsible for nuking a number of mappings for a range
1739 * of virtual address space in the current pmap. To do this efficiently
1740 * is interesting, because in a number of cases a wide virtual address
1741 * range may be supplied that contains few actual mappings. So, the
1742 * optimisations are:
1743 * 1. Try and skip over hunks of address space for which an L1 entry
1744 * does not exist.
1745 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1746 * maybe do just a partial cache clean. This path of execution is
1747 * complicated by the fact that the cache must be flushed _before_
1748 * the PTE is nuked, being a VAC :-)
1749 * 3. Maybe later fast-case a single page, but I don't think this is
1750 * going to make _that_ much difference overall.
1751 */
1752
1753 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
1754
1755 void
1756 pmap_remove(pmap, sva, eva)
1757 struct pmap *pmap;
1758 vaddr_t sva;
1759 vaddr_t eva;
1760 {
1761 int cleanlist_idx = 0;
1762 struct pagelist {
1763 vaddr_t va;
1764 pt_entry_t *pte;
1765 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1766 pt_entry_t *pte = 0, *ptes;
1767 paddr_t pa;
1768 int pmap_active;
1769 struct pv_entry *pv;
1770
1771 /* Exit quick if there is no pmap */
1772 if (!pmap)
1773 return;
1774
1775 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1776
1777 sva &= PG_FRAME;
1778 eva &= PG_FRAME;
1779
1780 ptes = pmap_map_ptes(pmap);
1781 /* Get a page table pointer */
1782 while (sva < eva) {
1783 if (pmap_pde_v(pmap_pde(pmap, sva)))
1784 break;
1785 sva = (sva & PD_MASK) + NBPD;
1786 }
1787
1788 pte = &ptes[arm_byte_to_page(sva)];
1789 /* Note if the pmap is active thus require cache and tlb cleans */
1790 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1791 || (pmap == pmap_kernel()))
1792 pmap_active = 1;
1793 else
1794 pmap_active = 0;
1795
1796 /* Now loop along */
1797 while (sva < eva) {
1798 /* Check if we can move to the next PDE (l1 chunk) */
1799 if (!(sva & PT_MASK))
1800 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1801 sva += NBPD;
1802 pte += arm_byte_to_page(NBPD);
1803 continue;
1804 }
1805
1806 /* We've found a valid PTE, so this page of PTEs has to go. */
1807 if (pmap_pte_v(pte)) {
1808 int bank, off;
1809
1810 /* Update statistics */
1811 --pmap->pm_stats.resident_count;
1812
1813 /*
1814 * Add this page to our cache remove list, if we can.
1815 * If, however the cache remove list is totally full,
1816 * then do a complete cache invalidation taking note
1817 * to backtrack the PTE table beforehand, and ignore
1818 * the lists in future because there's no longer any
1819 * point in bothering with them (we've paid the
1820 * penalty, so will carry on unhindered). Otherwise,
1821 * when we fall out, we just clean the list.
1822 */
1823 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1824 pa = pmap_pte_pa(pte);
1825
1826 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1827 /* Add to the clean list. */
1828 cleanlist[cleanlist_idx].pte = pte;
1829 cleanlist[cleanlist_idx].va = sva;
1830 cleanlist_idx++;
1831 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1832 int cnt;
1833
1834 /* Nuke everything if needed. */
1835 if (pmap_active) {
1836 cpu_cache_purgeID();
1837 cpu_tlb_flushID();
1838 }
1839
1840 /*
1841 * Roll back the previous PTE list,
1842 * and zero out the current PTE.
1843 */
1844 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1845 *cleanlist[cnt].pte = 0;
1846 pmap_pte_delref(pmap, cleanlist[cnt].va);
1847 }
1848 *pte = 0;
1849 pmap_pte_delref(pmap, sva);
1850 cleanlist_idx++;
1851 } else {
1852 /*
1853 * We've already nuked the cache and
1854 * TLB, so just carry on regardless,
1855 * and we won't need to do it again
1856 */
1857 *pte = 0;
1858 pmap_pte_delref(pmap, sva);
1859 }
1860
1861 /*
1862 * Update flags. In a number of circumstances,
1863 * we could cluster a lot of these and do a
1864 * number of sequential pages in one go.
1865 */
1866 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1867 pv = &vm_physmem[bank].pmseg.pvent[off];
1868 pmap_remove_pv(pmap, sva, pv);
1869 pmap_vac_me_harder(pmap, pv, ptes, FALSE);
1870 }
1871 }
1872 sva += NBPG;
1873 pte++;
1874 }
1875
1876 pmap_unmap_ptes(pmap);
1877 /*
1878 * Now, if we've fallen through down to here, chances are that there
1879 * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
1880 */
1881 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1882 u_int cnt;
1883
1884 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1885 if (pmap_active) {
1886 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1887 *cleanlist[cnt].pte = 0;
1888 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1889 } else
1890 *cleanlist[cnt].pte = 0;
1891 pmap_pte_delref(pmap, cleanlist[cnt].va);
1892 }
1893 }
1894 }
1895
1896 /*
1897 * Routine: pmap_remove_all
1898 * Function:
1899 * Removes this physical page from
1900 * all physical maps in which it resides.
1901 * Reflects back modify bits to the pager.
1902 */
1903
1904 void
1905 pmap_remove_all(pa)
1906 paddr_t pa;
1907 {
1908 struct pv_entry *ph, *pv, *npv;
1909 struct pmap *pmap;
1910 pt_entry_t *pte, *ptes;
1911 int s;
1912
1913 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1914
1915 pv = ph = pmap_find_pv(pa);
1916 pmap_clean_page(pv);
1917
1918 s = splvm();
1919
1920 if (ph->pv_pmap == NULL) {
1921 PDEBUG(0, printf("free page\n"));
1922 splx(s);
1923 return;
1924 }
1925
1928 while (pv) {
1929 pmap = pv->pv_pmap;
1930 ptes = pmap_map_ptes(pmap);
1931 pte = &ptes[arm_byte_to_page(pv->pv_va)];
1932
1933 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1934 pv->pv_va, pv->pv_flags));
1935 #ifdef DEBUG
1936 if (!pmap_pde_v(pmap_pde(pmap, pv->pv_va)) || !pmap_pte_v(pte)
1937 || pmap_pte_pa(pte) != pa)
1938 panic("pmap_remove_all: bad mapping");
1939 #endif /* DEBUG */
1940
1941 /*
1942 * Update statistics
1943 */
1944 --pmap->pm_stats.resident_count;
1945
1946 /* Wired bit */
1947 if (pv->pv_flags & PT_W)
1948 --pmap->pm_stats.wired_count;
1949
1950 /*
1951 * Invalidate the PTEs.
1952 * XXX: should cluster them up and invalidate as many
1953 * as possible at once.
1954 */
1955
1956 #ifdef needednotdone
1957 reduce wiring count on page table pages as references drop
1958 #endif
1959
1960 *pte = 0;
1961 pmap_pte_delref(pmap, pv->pv_va);
1962
1963 npv = pv->pv_next;
1964 if (pv == ph)
1965 ph->pv_pmap = NULL;
1966 else
1967 pmap_free_pv(pv);
1968 pv = npv;
1969 pmap_unmap_ptes(pmap);
1970 }
1971
1972 splx(s);
1973
1974 PDEBUG(0, printf("done\n"));
1975 cpu_tlb_flushID();
1976 }
1977
1978
1979 /*
1980 * Set the physical protection on the specified range of this map as requested.
1981 */
1982
1983 void
1984 pmap_protect(pmap, sva, eva, prot)
1985 struct pmap *pmap;
1986 vaddr_t sva;
1987 vaddr_t eva;
1988 vm_prot_t prot;
1989 {
1990 pt_entry_t *pte = NULL, *ptes;
1991 int armprot;
1992 int flush = 0;
1993 paddr_t pa;
1994 int bank, off;
1995 struct pv_entry *pv;
1996
1997 /*
1998 * Make sure pmap is valid. -dct
1999 */
2000 if (pmap == NULL)
2001 return;
2002 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2003 pmap, sva, eva, prot));
2004
2005 if (~prot & VM_PROT_READ) {
2006 /* Just remove the mappings. */
2007 pmap_remove(pmap, sva, eva);
2008 return;
2009 }
2010 if (prot & VM_PROT_WRITE) {
2011 /*
2012 * If this is a read->write transition, just ignore it and let
2013 * uvm_fault() take care of it later.
2014 */
2015 return;
2016 }
2017
2018 sva &= PG_FRAME;
2019 eva &= PG_FRAME;
2020
2021 ptes = pmap_map_ptes(pmap);
2022 /*
2023 * Skip forward to the first valid page table so that we have a
2024 * usable pte pointer before entering the main loop below.
2025 */
2026 while (sva < eva) {
2027 if (pmap_pde_v(pmap_pde(pmap, sva)))
2028 break;
2029 sva = (sva & PD_MASK) + NBPD;
2030 }
2031
2032 pte = &ptes[arm_byte_to_page(sva)];
2033
2034 while (sva < eva) {
2035 /* only need to recheck the PDE when we cross a PDE boundary */
2036 if ((sva & PT_MASK) == 0) {
2037 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
2038 /* We can race ahead here, to the next pde. */
2039 sva += NBPD;
2040 pte += arm_byte_to_page(NBPD);
2041 continue;
2042 }
2043 }
2044
2045 if (!pmap_pte_v(pte))
2046 goto next;
2047
2048 flush = 1;
2049
2050 armprot = 0;
2051 if (sva < VM_MAXUSER_ADDRESS)
2052 armprot |= PT_AP(AP_U);
2053 else if (sva < VM_MAX_ADDRESS)
2054 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
2055 *pte = (*pte & 0xfffff00f) | armprot;
2056
2057 pa = pmap_pte_pa(pte);
2058
2059 /* Get the physical page index */
2060
2061 /* Clear write flag */
2062 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2063 pv = &vm_physmem[bank].pmseg.pvent[off];
2064 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
2065 pmap_vac_me_harder(pmap, pv, ptes, FALSE);
2066 }
2067
2068 next:
2069 sva += NBPG;
2070 pte++;
2071 }
2072 pmap_unmap_ptes(pmap);
2073 if (flush)
2074 cpu_tlb_flushID();
2075 }
2076
2077 /*
2078 * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2079 * int flags)
2080 *
2081 * Insert the given physical page (p) at
2082 * the specified virtual address (v) in the
2083 * target physical map with the protection requested.
2084 *
2085 * If specified, the page will be wired down, meaning
2086 * that the related pte can not be reclaimed.
2087 *
2088 * NB: This is the only routine which MAY NOT lazy-evaluate
2089 * or lose information. That is, this routine must actually
2090 * insert this page into the given map NOW.
2091 */
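
/*
 * A minimal usage sketch (not compiled; `va', `pa' and `error' are
 * hypothetical locals): wire a page read/write into the kernel pmap.
 * The access-type bits passed in `flags' must not exceed `prot'.
 */
#if 0
	int error;

	error = pmap_enter(pmap_kernel(), va, pa,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
#endif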
2092
2093 int
2094 pmap_enter(pmap, va, pa, prot, flags)
2095 struct pmap *pmap;
2096 vaddr_t va;
2097 paddr_t pa;
2098 vm_prot_t prot;
2099 int flags;
2100 {
2101 pt_entry_t *pte, *ptes;
2102 u_int npte;
2103 int bank, off;
2104 struct pv_entry *pv = NULL;
2105 paddr_t opa;
2106 int nflags;
2107 boolean_t wired = (flags & PMAP_WIRED) != 0;
2108
2109 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2110 va, pa, pmap, prot, wired));
2111
2112 #ifdef DIAGNOSTIC
2113 /* Valid address ? */
2114 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
2115 panic("pmap_enter: too big");
2116 if (pmap != pmap_kernel() && va != 0) {
2117 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2118 panic("pmap_enter: kernel page in user map");
2119 } else {
2120 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2121 panic("pmap_enter: user page in kernel map");
2122 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2123 panic("pmap_enter: entering PT page");
2124 }
2125 #endif
2126
2127 /*
2128 * Get a pointer to the pte for this virtual address. If the
2129 * pte pointer is NULL then we are missing the L2 page table
2130 * so we need to create one.
2131 */
2132 pte = pmap_pte(pmap, va);
2133 if (!pte) {
2134 paddr_t l2pa;
2135 struct vm_page *m;
2136
2137 /* Allocate a page table */
2138 for (;;) {
2139 m = uvm_pagealloc(&(pmap->pm_obj), 0, NULL,
2140 UVM_PGA_USERESERVE);
2141 if (m != NULL)
2142 break;
2143
2144 /*
2145 * No page available. If we're the kernel
2146 * pmap, we die, since we might not have
2147 * a valid thread context. For user pmaps,
2148 * we assume that we _do_ have a valid thread
2149 * context, so we wait here for the pagedaemon
2150 * to free up some pages.
2151 *
2152 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2153 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2154 * XXX SO THIS IS PROBABLY SAFE. In any case,
2155 * XXX other pmap modules claim it is safe to
2156 * XXX sleep here if it's a user pmap.
2157 */
2158 if (pmap == pmap_kernel())
2159 panic("pmap_enter: no free pages");
2160 else
2161 uvm_wait("pmap_enter");
2162 }
2163
2164 /* Wire this page table into the L1. */
2165 l2pa = VM_PAGE_TO_PHYS(m);
2166 pmap_zero_page(l2pa);
2167 pmap_map_in_l1(pmap, va, l2pa);
2168 ++pmap->pm_stats.resident_count;
2169 m->flags &= ~PG_BUSY; /* never busy */
2170 m->wire_count = 1; /* no mappings yet */
2171
2172 pte = pmap_pte(pmap, va);
2173 #ifdef DIAGNOSTIC
2174 if (!pte)
2175 panic("pmap_enter: no pte");
2176 #endif
2177 }
2178
2179 nflags = 0;
2180 if (prot & VM_PROT_WRITE)
2181 nflags |= PT_Wr;
2182 if (wired)
2183 nflags |= PT_W;
2184
2185 /* More debugging info */
2186 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2187 *pte));
2188
2189 /* Is the pte valid ? If so then this page is already mapped */
2190 if (pmap_pte_v(pte)) {
2191 /* Get the physical address of the current page mapped */
2192 opa = pmap_pte_pa(pte);
2193
2194 #ifdef MYCROFT_HACK
2195 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2196 #endif
2197
2198 /* Are we mapping the same page ? */
2199 if (opa == pa) {
2200 /* All we must be doing is changing the protection */
2201 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2202 va, pa));
2203
2204 /* Has the wiring changed ? */
2205 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2206 pv = &vm_physmem[bank].pmseg.pvent[off];
2207 (void) pmap_modify_pv(pmap, va, pv,
2208 PT_Wr | PT_W, nflags);
2209 }
2210 } else {
2211 /* We are replacing the page with a new one. */
2212 cpu_cache_purgeID_rng(va, NBPG);
2213
2214 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2215 va, pa, opa));
2216
2217 /*
2218 * If it is part of our managed memory then we
2219 * must remove it from the PV list
2220 */
2221 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2222 pv = &vm_physmem[bank].pmseg.pvent[off];
2223 pmap_remove_pv(pmap, va, pv);
2224 }
2225
2226 goto enter;
2227 }
2228 } else {
2229 opa = 0;
2230 pmap_pte_addref(pmap, va);
2231
2232 /* pte is not valid so we must be hooking in a new page */
2233 ++pmap->pm_stats.resident_count;
2234
2235 enter:
2236 /*
2237 * Enter on the PV list if part of our managed memory
2238 */
2239 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2240 pv = &vm_physmem[bank].pmseg.pvent[off];
2241 pmap_enter_pv(pmap, va, pv, nflags);
2242 }
2243 }
2244
2245 #ifdef MYCROFT_HACK
2246 if (mycroft_hack)
2247 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2248 #endif
2249
2250 /* Construct the pte, giving the correct access. */
2251 npte = (pa & PG_FRAME);
2252
2253 /* VA 0 is magic. */
2254 if (pmap != pmap_kernel() && va != 0)
2255 npte |= PT_AP(AP_U);
2256
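/*
 * Managed pages (bank != -1) are mapped cacheable/bufferable and use
 * referenced/modified emulation: a page not yet referenced is entered
 * with the invalid small-page type (L2_INVAL) so the first access
 * faults into pmap_handled_emulation(), and a clean page is entered
 * without write permission so the first write faults into
 * pmap_modified_emulation().
 */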
2257 if (bank != -1) {
2258 #ifdef DIAGNOSTIC
2259 if ((flags & VM_PROT_ALL) & ~prot)
2260 panic("pmap_enter: access_type exceeds prot");
2261 #endif
2262 npte |= PT_C | PT_B;
2263 if (flags & VM_PROT_WRITE) {
2264 npte |= L2_SPAGE | PT_AP(AP_W);
2265 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2266 } else if (flags & VM_PROT_ALL) {
2267 npte |= L2_SPAGE;
2268 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2269 } else
2270 npte |= L2_INVAL;
2271 } else {
2272 if (prot & VM_PROT_WRITE)
2273 npte |= L2_SPAGE | PT_AP(AP_W);
2274 else if (prot & VM_PROT_ALL)
2275 npte |= L2_SPAGE;
2276 else
2277 npte |= L2_INVAL;
2278 }
2279
2280 #ifdef MYCROFT_HACK
2281 if (mycroft_hack)
2282 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2283 #endif
2284
2285 *pte = npte;
2286
2287 if (bank != -1)
2288 {
2289 boolean_t pmap_active = FALSE;
2290 /* XXX this will change once the whole of pmap_enter uses
2291 * map_ptes
2292 */
2293 ptes = pmap_map_ptes(pmap);
2294 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
2295 || (pmap == pmap_kernel()))
2296 pmap_active = TRUE;
2297 pmap_vac_me_harder(pmap, pv, ptes, pmap_active);
2298 pmap_unmap_ptes(pmap);
2299 }
2300
2301 /* Better flush the TLB ... */
2302 cpu_tlb_flushID_SE(va);
2303
2304 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2305
2306 return 0;
2307 }
2308
2309 void
2310 pmap_kenter_pa(va, pa, prot)
2311 vaddr_t va;
2312 paddr_t pa;
2313 vm_prot_t prot;
2314 {
2315 struct pmap *pmap = pmap_kernel();
2316 pt_entry_t *pte;
2317 struct vm_page *pg;
2318
2319 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2320
2321 /*
2322 * For the kernel pmap it would be better to ensure
2323 * that its page tables are always present, and to grow
2324 * the kernel as required.
2325 */
2326
2327 /* Allocate a page table */
2328 pg = uvm_pagealloc(&(pmap_kernel()->pm_obj), 0, NULL,
2329 UVM_PGA_USERESERVE | UVM_PGA_ZERO);
2330 if (pg == NULL) {
2331 panic("pmap_kenter_pa: no free pages");
2332 }
2333 pg->flags &= ~PG_BUSY; /* never busy */
2334
2335 /* Wire this page table into the L1. */
2336 pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(pg));
2337 }
2338 pte = vtopte(va);
2339 KASSERT(!pmap_pte_v(pte));
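/*
 * Note that the mapping is entered kernel read/write (AP_KRW)
 * regardless of `prot', and that kenter mappings are not tracked
 * on the PV lists.
 */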
2340 *pte = L2_PTE(pa, AP_KRW);
2341 }
2342
2343 void
2344 pmap_kremove(va, len)
2345 vaddr_t va;
2346 vsize_t len;
2347 {
2348 pt_entry_t *pte;
2349
2350 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2351
2352 /*
2353 * We assume that we will only be called with small
2354 * regions of memory.
2355 */
2356
2357 KASSERT(pmap_pde_v(pmap_pde(pmap_kernel(), va)));
2358 pte = vtopte(va);
2359 cpu_cache_purgeID_rng(va, PAGE_SIZE);
2360 *pte = 0;
2361 cpu_tlb_flushID_SE(va);
2362 }
2363 }
2364
2365 /*
2366 * pmap_page_protect:
2367 *
2368 * Lower the permission for all mappings to a given page.
2369 */
2370
2371 void
2372 pmap_page_protect(pg, prot)
2373 struct vm_page *pg;
2374 vm_prot_t prot;
2375 {
2376 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2377
2378 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2379
2380 switch(prot) {
2381 case VM_PROT_READ:
2382 case VM_PROT_READ|VM_PROT_EXECUTE:
2383 pmap_copy_on_write(pa);
2384 break;
2385
2386 case VM_PROT_ALL:
2387 break;
2388
2389 default:
2390 pmap_remove_all(pa);
2391 break;
2392 }
2393 }
2394
2395
2396 /*
2397 * Routine: pmap_unwire
2398 * Function: Clear the wired attribute for a map/virtual-address
2399 * pair.
2400 * In/out conditions:
2401 * The mapping must already exist in the pmap.
2402 */
2403
2404 void
2405 pmap_unwire(pmap, va)
2406 struct pmap *pmap;
2407 vaddr_t va;
2408 {
2409 pt_entry_t *pte;
2410 paddr_t pa;
2411 int bank, off;
2412 struct pv_entry *pv;
2413
2414 /*
2415 * Make sure pmap is valid. -dct
2416 */
2417 if (pmap == NULL)
2418 return;
2419
2420 /* Get the pte */
2421 pte = pmap_pte(pmap, va);
2422 if (!pte)
2423 return;
2424
2425 /* Extract the physical address of the page */
2426 pa = pmap_pte_pa(pte);
2427
2428 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2429 return;
2430 pv = &vm_physmem[bank].pmseg.pvent[off];
2431 /* Update the wired bit in the pv entry for this page. */
2432 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2433 }
2434
2435 /*
2436 * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
2437 *
2438 * Return the pointer to a page table entry corresponding to the supplied
2439 * virtual address.
2440 *
2441 * The page directory is first checked to make sure that a page table
2442 * for the address in question exists and if it does a pointer to the
2443 * entry is returned.
2444 *
2445 * The way this works is that the kernel page tables are mapped
2446 * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
2447 * This allows page tables to be located quickly.
2448 */
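/*
 * Illustrative sketch (not compiled): for the current or kernel pmap,
 * the pte for a hypothetical `va' is located at
 *
 *	(pt_entry_t *)((char *)PROCESS_PAGE_TBLS_BASE +
 *	    ((va >> (PGSHIFT - 2)) & ~3))
 *
 * i.e. one 4-byte entry per page of virtual address space.
 */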
2449 pt_entry_t *
2450 pmap_pte(pmap, va)
2451 struct pmap *pmap;
2452 vaddr_t va;
2453 {
2454 pt_entry_t *ptp;
2455 pt_entry_t *result;
2456
2457 /* The pmap must be valid */
2458 if (!pmap)
2459 return(NULL);
2460
2461 /* Return the address of the pte */
2462 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2463 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2464
2465 /* Do we have a valid pde ? If not we don't have a page table */
2466 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2467 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2468 pmap_pde(pmap, va)));
2469 return(NULL);
2470 }
2471
2472 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2473 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2474 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2475 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2476
2477 /*
2478 * If the pmap is the kernel pmap or the pmap is the active one
2479 * then we can just return a pointer to the entry relative to
2480 * PROCESS_PAGE_TBLS_BASE.
2481 * Otherwise we need to map the page tables to an alternative
2482 * address and reference them there.
2483 */
2484 if (pmap == pmap_kernel() || pmap->pm_pptpt
2485 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2486 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2487 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2488 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2489 } else {
2490 struct proc *p = curproc;
2491
2492 /* If we don't have a valid curproc use proc0 */
2493 /* Perhaps we should just use kernel_pmap instead */
2494 if (p == NULL)
2495 p = &proc0;
2496 #ifdef DIAGNOSTIC
2497 /*
2498 * The pmap should always be valid for the process so
2499 * panic if it is not.
2500 */
2501 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2502 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2503 va, p, p->p_vmspace);
2504 console_debugger();
2505 }
2506 /*
2507 * The pmap for the current process should be mapped. If it
2508 * is not then we have a problem.
2509 */
2510 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2511 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2512 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2513 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2514 printf("pmap pagetable = P%08lx current = P%08x ",
2515 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2516 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2517 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2518 PG_FRAME));
2519 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2520 panic("pmap_pte: current and pmap mismatch\n");
2521 }
2522 #endif
2523
2524 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2525 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2526 pmap->pm_pptpt);
2527 cpu_tlb_flushD();
2528 }
2529 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2530 ((va >> (PGSHIFT-2)) & ~3)));
2531 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2532 return(result);
2533 }
2534
2535 /*
2536 * Routine: pmap_extract
2537 * Function:
2538 * Extract the physical page address associated
2539 * with the given map/virtual_address pair.
2540 */
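/*
 * A minimal usage sketch (not compiled; `va' is a hypothetical local):
 */
#if 0
	paddr_t pa;

	if (!pmap_extract(pmap_kernel(), va, &pa))
		printf("no mapping for va %08lx\n", va);
#endif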
2541 boolean_t
2542 pmap_extract(pmap, va, pap)
2543 struct pmap *pmap;
2544 vaddr_t va;
2545 paddr_t *pap;
2546 {
2547 pt_entry_t *pte, *ptes;
2548 paddr_t pa;
2549
2550 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2551
2552 /*
2553 * Get the pte for this virtual address.
2554 */
2555 ptes = pmap_map_ptes(pmap);
2556 pte = &ptes[arm_byte_to_page(va)];
2557
2558 /*
2559 * If there is no pte then there is no page table, etc.
2560 * Is the pte valid? If not then no page is actually mapped here.
2561 */
2562 if (!pmap_pde_v(pmap_pde(pmap, va)) || !pmap_pte_v(pte)){
2563 pmap_unmap_ptes(pmap);
2564 return (FALSE);
2565 }
2566
2567 /* Return the physical address depending on the PTE type */
2568 /* XXX What about L1 section mappings ? */
2569 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2570 /* Extract the physical address from the pte */
2571 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2572
2573 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2574 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2575
2576 if (pap != NULL)
2577 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2578 } else {
2579 /* Extract the physical address from the pte */
2580 pa = pmap_pte_pa(pte);
2581
2582 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2583 (pa | (va & ~PG_FRAME))));
2584
2585 if (pap != NULL)
2586 *pap = pa | (va & ~PG_FRAME);
2587 }
2588 pmap_unmap_ptes(pmap);
2589 return (TRUE);
2590 }
2591
2592
2593 /*
2594 * Copy the range specified by src_addr/len from the source map to the
2595 * range dst_addr/len in the destination map.
2596 *
2597 * This routine is only advisory and need not do anything.
2598 */
2599
2600 void
2601 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2602 struct pmap *dst_pmap;
2603 struct pmap *src_pmap;
2604 vaddr_t dst_addr;
2605 vsize_t len;
2606 vaddr_t src_addr;
2607 {
2608 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2609 dst_pmap, src_pmap, dst_addr, len, src_addr));
2610 }
2611
2612 #if defined(PMAP_DEBUG)
2613 void
2614 pmap_dump_pvlist(phys, m)
2615 vaddr_t phys;
2616 char *m;
2617 {
2618 struct pv_entry *pv;
2619 int bank, off;
2620
2621 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2622 printf("INVALID PA\n");
2623 return;
2624 }
2625 pv = &vm_physmem[bank].pmseg.pvent[off];
2626 printf("%s %08lx:", m, phys);
2627 if (pv->pv_pmap == NULL) {
2628 printf(" no mappings\n");
2629 return;
2630 }
2631
2632 for (; pv; pv = pv->pv_next)
2633 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2634 pv->pv_va, pv->pv_flags);
2635
2636 printf("\n");
2637 }
2638
2639 #endif /* PMAP_DEBUG */
2640
2641 boolean_t
2642 pmap_testbit(pa, setbits)
2643 paddr_t pa;
2644 int setbits;
2645 {
2646 int bank, off;
2647
2648 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2649
2650 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2651 return(FALSE);
2652
2653 /*
2654 * Check saved info only
2655 */
2656 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2657 PDEBUG(0, printf("pmap_attributes = %02x\n",
2658 vm_physmem[bank].pmseg.attrs[off]));
2659 return(TRUE);
2660 }
2661
2662 return(FALSE);
2663 }
2664
2665 static pt_entry_t *
2666 pmap_map_ptes(struct pmap *pmap)
2667 {
2668 struct proc *p;
2669
2670 /* the kernel's pmap is always accessible */
2671 if (pmap == pmap_kernel()) {
2672 return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2673 }
2674
2675 if (curproc &&
2676 curproc->p_vmspace->vm_map.pmap == pmap)
2677 return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2678
2679 p = curproc;
2680
2681 if (p == NULL)
2682 p = &proc0;
2683
2684 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2685 pmap->pm_pptpt);
2686 cpu_tlb_flushD();
2687 return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2688 }
2689
2690 /*
2691 * Modify pte bits for all ptes corresponding to the given physical address.
2692 * We use `maskbits' rather than `clearbits' because we're always passing
2693 * constants and the latter would require an extra inversion at run-time.
2694 */
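/*
 * For example, pmap_clear_modify() below passes PT_M,
 * pmap_clear_reference() passes PT_H, and pmap_copy_on_write()
 * passes PT_Wr.
 */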
2695
2696 void
2697 pmap_clearbit(pa, maskbits)
2698 paddr_t pa;
2699 int maskbits;
2700 {
2701 struct pv_entry *pv;
2702 pt_entry_t *pte;
2703 vaddr_t va;
2704 int bank, off;
2705 int s;
2706
2707 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2708 pa, maskbits));
2709 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2710 return;
2711 pv = &vm_physmem[bank].pmseg.pvent[off];
2712 s = splvm();
2713
2714 /*
2715 * Clear saved attributes (modify, reference)
2716 */
2717 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2718
2719 if (pv->pv_pmap == NULL) {
2720 splx(s);
2721 return;
2722 }
2723
2724 /*
2725 * Loop over all current mappings, setting/clearing as appropriate.
2726 */
2727 for (; pv; pv = pv->pv_next) {
2728 va = pv->pv_va;
2729
2730 /*
2731 * XXX don't write protect pager mappings
2732 */
2733 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2734 printf("pmap_clearbit: found page VA on pv_list\n");
2735 continue;
2736 }
2737
2738 pv->pv_flags &= ~maskbits;
2739 pte = pmap_pte(pv->pv_pmap, va);
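/*
 * Clearing PT_Wr or PT_M removes the hardware write permission, so
 * the next write faults into pmap_modified_emulation(); clearing
 * PT_H invalidates the PTE type, so the next access faults into
 * pmap_handled_emulation().
 */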
2740 if (maskbits & (PT_Wr|PT_M))
2741 *pte = *pte & ~PT_AP(AP_W);
2742 if (maskbits & PT_H)
2743 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2744 }
2745 cpu_tlb_flushID();
2746
2747 splx(s);
2748 }
2749
2750
2751 boolean_t
2752 pmap_clear_modify(pg)
2753 struct vm_page *pg;
2754 {
2755 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2756 boolean_t rv;
2757
2758 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2759 rv = pmap_testbit(pa, PT_M);
2760 pmap_clearbit(pa, PT_M);
2761 return rv;
2762 }
2763
2764
2765 boolean_t
2766 pmap_clear_reference(pg)
2767 struct vm_page *pg;
2768 {
2769 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2770 boolean_t rv;
2771
2772 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2773 rv = pmap_testbit(pa, PT_H);
2774 pmap_clearbit(pa, PT_H);
2775 return rv;
2776 }
2777
2778
2779 void
2780 pmap_copy_on_write(pa)
2781 paddr_t pa;
2782 {
2783 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2784 pmap_clearbit(pa, PT_Wr);
2785 }
2786
2787
2788 boolean_t
2789 pmap_is_modified(pg)
2790 struct vm_page *pg;
2791 {
2792 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2793 boolean_t result;
2794
2795 result = pmap_testbit(pa, PT_M);
2796 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2797 return (result);
2798 }
2799
2800
2801 boolean_t
2802 pmap_is_referenced(pg)
2803 struct vm_page *pg;
2804 {
2805 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2806 boolean_t result;
2807
2808 result = pmap_testbit(pa, PT_H);
2809 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2810 return (result);
2811 }
2812
2813
2814 int
2815 pmap_modified_emulation(pmap, va)
2816 struct pmap *pmap;
2817 vaddr_t va;
2818 {
2819 pt_entry_t *pte;
2820 paddr_t pa;
2821 int bank, off;
2822 struct pv_entry *pv;
2823 u_int flags;
2824
2825 PDEBUG(2, printf("pmap_modified_emulation\n"));
2826
2827 /* Get the pte */
2828 pte = pmap_pte(pmap, va);
2829 if (!pte) {
2830 PDEBUG(2, printf("no pte\n"));
2831 return(0);
2832 }
2833
2834 PDEBUG(1, printf("*pte=%08x\n", *pte));
2835
2836 /* Check for a zero pte */
2837 if (*pte == 0)
2838 return(0);
2839
2840 /* This can happen if user code tries to access kernel memory. */
2841 if ((*pte & PT_AP(AP_W)) != 0)
2842 return (0);
2843
2844 /* Extract the physical address of the page */
2845 pa = pmap_pte_pa(pte);
2846 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2847 return(0);
2848
2849 /* Get the current flags for this page. */
2850 pv = &vm_physmem[bank].pmseg.pvent[off];
2851 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2852 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2853
2854 /*
2855 * Do the flags say this page is writable? If not then it is a
2856 * genuine write fault. If yes then the write fault is our fault,
2857 * since we did not reflect the write access in the PTE. Now that
2858 * we know a write has occurred, we can correct this and also set
2859 * the modified bit.
2860 */
2861 if (~flags & PT_Wr)
2862 return(0);
2863
2864 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2865 va, pte, *pte));
2866 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2867 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2868 PDEBUG(0, printf("->(%08x)\n", *pte));
2869
2870 /* Return, indicating the problem has been dealt with */
2871 cpu_tlb_flushID_SE(va);
2872 return(1);
2873 }
2874
2875
2876 int
2877 pmap_handled_emulation(pmap, va)
2878 struct pmap *pmap;
2879 vaddr_t va;
2880 {
2881 pt_entry_t *pte;
2882 paddr_t pa;
2883 int bank, off;
2884
2885 PDEBUG(2, printf("pmap_handled_emulation\n"));
2886
2887 /* Get the pte */
2888 pte = pmap_pte(pmap, va);
2889 if (!pte) {
2890 PDEBUG(2, printf("no pte\n"));
2891 return(0);
2892 }
2893
2894 PDEBUG(1, printf("*pte=%08x\n", *pte));
2895
2896 /* Check for a zero pte */
2897 if (*pte == 0)
2898 return(0);
2899
2900 /* This can happen if user code tries to access kernel memory. */
2901 if ((*pte & L2_MASK) != L2_INVAL)
2902 return (0);
2903
2904 /* Extract the physical address of the page */
2905 pa = pmap_pte_pa(pte);
2906 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2907 return(0);
2908
2909 /*
2910 * OK, we just enable the pte and mark the attributes as handled.
2911 */
2912 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2913 va, pte, *pte));
2914 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2915 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2916 PDEBUG(0, printf("->(%08x)\n", *pte));
2917
2918 /* Return, indicating the problem has been dealt with */
2919 cpu_tlb_flushID_SE(va);
2920 return(1);
2921 }
2922
2923 /*
2924 * pmap_collect: free resources held by a pmap
2925 *
2926 * => optional function.
2927 * => called when a process is swapped out to free memory.
2928 */
2929
2930 void
2931 pmap_collect(pmap)
2932 struct pmap *pmap;
2933 {
2934 }
2935
2936 /*
2937 * Routine: pmap_procwr
2938 *
2939 * Function:
2940 * Synchronize caches corresponding to [va, va+len) in process p.
2941 *
2942 */
2943 void
2944 pmap_procwr(p, va, len)
2945 struct proc *p;
2946 vaddr_t va;
2947 int len;
2948 {
2949 /* We only need to do anything if it is the current process. */
2950 if (p == curproc)
2951 cpu_cache_syncI_rng(va, len);
2952 }
2953
2954 /* End of pmap.c */
2955