1 /*	$NetBSD: pmap.c,v 1.13 2001/07/06 20:15:13 chris Exp $	*/
2
3 /*
4 * Copyright (c) 2001 Richard Earnshaw
5 * Copyright (c) 2001 Christopher Gilbert
6 * All rights reserved.
7 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the company nor the name of the author may be used to
14 * endorse or promote products derived from this software without specific
15 * prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*-
31 * Copyright (c) 1999 The NetBSD Foundation, Inc.
32 * All rights reserved.
33 *
34 * This code is derived from software contributed to The NetBSD Foundation
35 * by Charles M. Hannum.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the NetBSD
48 * Foundation, Inc. and its contributors.
49 * 4. Neither the name of The NetBSD Foundation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * Copyright (c) 1994-1998 Mark Brinicombe.
68 * Copyright (c) 1994 Brini.
69 * All rights reserved.
70 *
71 * This code is derived from software written for Brini by Mark Brinicombe
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. All advertising materials mentioning features or use of this software
82 * must display the following acknowledgement:
83 * This product includes software developed by Mark Brinicombe.
84 * 4. The name of the author may not be used to endorse or promote products
85 * derived from this software without specific prior written permission.
86 *
87 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
88 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
89 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
90 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
91 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
92 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
93 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
94 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 *
97 * RiscBSD kernel project
98 *
99 * pmap.c
100 *
101 * Machine-dependent vm stuff
102 *
103 * Created : 20/09/94
104 */
105
106 /*
107 * Performance improvements, UVM changes, overhauls and part-rewrites
108 * were contributed by Neil A. Carson <neil (at) causality.com>.
109 */
110
111 /*
112 * The dram block info is currently referenced from the bootconfig.
113 * This should be placed in a separate structure.
114 */
115
116 /*
117 * Special compilation symbols
118 * PMAP_DEBUG - Build in pmap_debug_level code
119 */
120
121 /* Include header files */
122
123 #include "opt_pmap_debug.h"
124 #include "opt_ddb.h"
125
126 #include <sys/types.h>
127 #include <sys/param.h>
128 #include <sys/kernel.h>
129 #include <sys/systm.h>
130 #include <sys/proc.h>
131 #include <sys/malloc.h>
132 #include <sys/user.h>
133 #include <sys/pool.h>
134
135 #include <uvm/uvm.h>
136
137 #include <machine/bootconfig.h>
138 #include <machine/bus.h>
139 #include <machine/pmap.h>
140 #include <machine/pcb.h>
141 #include <machine/param.h>
142 #include <machine/katelib.h>
143
144 #ifdef PMAP_DEBUG
145 #define PDEBUG(_lev_,_stat_) \
146 if (pmap_debug_level >= (_lev_)) \
147 ((_stat_))
148 int pmap_debug_level = -2;
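/*
 * Note: with the default level of -2, even PDEBUG(0, ...) statements
 * produce no output; the level must be raised at run time (e.g. via
 * pmap_debug() below) before any of the diagnostic printfs fire.
 */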
149 #else /* PMAP_DEBUG */
150 #define PDEBUG(_lev_,_stat_) /* Nothing */
151 #endif /* PMAP_DEBUG */
152
153 struct pmap kernel_pmap_store;
154 pmap_t kernel_pmap;
155
156 /*
157 * pool that pmap structures are allocated from
158 */
159
160 struct pool pmap_pmap_pool;
161
162 pagehook_t page_hook0;
163 pagehook_t page_hook1;
164 char *memhook;
165 pt_entry_t msgbufpte;
166 extern caddr_t msgbufaddr;
167
168 #ifdef DIAGNOSTIC
169 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
170 #endif
171
172 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
173
174 int pv_nfree = 0;
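/*
 * pv_entry structures are carved out of whole pages (struct pv_page);
 * pv_page_freelist links the pages that still contain free entries and
 * pv_nfree counts the free entries across all of them.  See
 * pmap_alloc_pv() and pmap_free_pv() below.
 */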
175
176 vsize_t npages;
177
178 extern paddr_t physical_start;
179 extern paddr_t physical_freestart;
180 extern paddr_t physical_end;
181 extern paddr_t physical_freeend;
182 extern unsigned int free_pages;
183 extern int max_processes;
184
185 vaddr_t virtual_start;
186 vaddr_t virtual_end;
187
188 vaddr_t avail_start;
189 vaddr_t avail_end;
190
191 extern pv_addr_t systempage;
192
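/*
 * ALLOC_PAGE_HOOK carves 's' bytes of kernel virtual space out of
 * virtual_start for a page hook, recording both the virtual address and
 * a pointer to the corresponding kernel PTE so that the hook can later
 * be pointed at an arbitrary physical page (see pmap_zero_page() and
 * pmap_copy_page()).
 */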
193 #define ALLOC_PAGE_HOOK(x, s) \
194 x.va = virtual_start; \
195 x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
196 virtual_start += s;
197
198 /* Variables used by the L1 page table queue code */
199 SIMPLEQ_HEAD(l1pt_queue, l1pt);
200 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
201 int l1pt_static_queue_count; /* items in the static l1 queue */
202 int l1pt_static_create_count; /* static l1 items created */
203 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
204 int l1pt_queue_count; /* items in the l1 queue */
205 int l1pt_create_count; /* stat - L1's create count */
206 int l1pt_reuse_count; /* stat - L1's reused count */
207
208 /* Local function prototypes (not used outside this file) */
209 pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
210 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
211 paddr_t pa, unsigned int flags));
212 void pmap_copy_on_write __P((paddr_t pa));
213 void pmap_pinit __P((pmap_t));
214 void pmap_freepagedir __P((pmap_t));
215 void pmap_release __P((pmap_t));
216
217 /* Other function prototypes */
218 extern void bzero_page __P((vaddr_t));
219 extern void bcopy_page __P((vaddr_t, vaddr_t));
220
221 struct l1pt *pmap_alloc_l1pt __P((void));
222 static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
223 vaddr_t l2pa));
224
225 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
226 /* eventually this will be a function */
227 #define pmap_unmap_ptes(a)
228
229 void pmap_vac_me_harder __P((struct pmap *, struct pv_entry *,
230 pt_entry_t *, boolean_t));
231
232 #ifdef MYCROFT_HACK
233 int mycroft_hack = 0;
234 #endif
235
236 /* Function to set the debug level of the pmap code */
237
238 #ifdef PMAP_DEBUG
239 void
240 pmap_debug(level)
241 int level;
242 {
243 pmap_debug_level = level;
244 printf("pmap_debug: level=%d\n", pmap_debug_level);
245 }
246 #endif /* PMAP_DEBUG */
247
248 #include "isadma.h"
249
250 #if NISADMA > 0
251 /*
252 * Used to protect memory for ISA DMA bounce buffers. If, when loading
253 * pages into the system, memory intersects with any of these ranges,
254 * the intersecting memory will be loaded into a lower-priority free list.
255 */
256 bus_dma_segment_t *pmap_isa_dma_ranges;
257 int pmap_isa_dma_nranges;
258
259 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
260 paddr_t *, psize_t *));
261
262 /*
263 * Check if a memory range intersects with an ISA DMA range, and
264 * return the page-rounded intersection if it does. The intersection
265 * will be placed on a lower-priority free list.
266 */
267 boolean_t
268 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
269 paddr_t pa;
270 psize_t size;
271 paddr_t *pap;
272 psize_t *sizep;
273 {
274 bus_dma_segment_t *ds;
275 int i;
276
277 if (pmap_isa_dma_ranges == NULL)
278 return (FALSE);
279
280 for (i = 0, ds = pmap_isa_dma_ranges;
281 i < pmap_isa_dma_nranges; i++, ds++) {
282 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
283 /*
284 * Beginning of region intersects with this range.
285 */
286 *pap = trunc_page(pa);
287 *sizep = round_page(min(pa + size,
288 ds->ds_addr + ds->ds_len) - pa);
289 return (TRUE);
290 }
291 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
292 /*
293 * End of region intersects with this range.
294 */
295 *pap = trunc_page(ds->ds_addr);
296 *sizep = round_page(min((pa + size) - ds->ds_addr,
297 ds->ds_len));
298 return (TRUE);
299 }
300 }
301
302 /*
303 * No intersection found.
304 */
305 return (FALSE);
306 }
307 #endif /* NISADMA > 0 */
308
309 /*
310 * Functions for manipulating pv_entry structures. These are used to keep a
311 * record of the mappings of virtual addresses and the associated physical
312 * pages.
313 */
314
315 /*
316 * Allocate a new pv_entry structure from the freelist. If the list is
317 * empty allocate a new page and fill the freelist.
318 */
319 struct pv_entry *
320 pmap_alloc_pv()
321 {
322 struct pv_page *pvp;
323 struct pv_entry *pv;
324 int i;
325
326 /*
327 * Do we have any free pv_entry structures left ?
328 * If not allocate a page of them
329 */
330
331 if (pv_nfree == 0) {
332 /* NOTE: can't lock kernel_map here */
333 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
334 if (pvp == 0)
335 panic("pmap_alloc_pv: kmem_alloc() failed");
336 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
337 for (i = NPVPPG - 2; i; i--, pv++)
338 pv->pv_next = pv + 1;
339 pv->pv_next = 0;
340 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
341 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
342 pv = &pvp->pvp_pv[0];
343 } else {
344 --pv_nfree;
345 pvp = pv_page_freelist.tqh_first;
346 if (--pvp->pvp_pgi.pgi_nfree == 0) {
347 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
348 }
349 pv = pvp->pvp_pgi.pgi_freelist;
350 #ifdef DIAGNOSTIC
351 if (pv == 0)
352 panic("pmap_alloc_pv: pgi_nfree inconsistent");
353 #endif /* DIAGNOSTIC */
354 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
355 }
356 return pv;
357 }
358
359 /*
360 * Release a pv_entry structure putting it back on the freelist.
361 */
362
363 void
364 pmap_free_pv(pv)
365 struct pv_entry *pv;
366 {
367 struct pv_page *pvp;
368
369 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
370 switch (++pvp->pvp_pgi.pgi_nfree) {
371 case 1:
372 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
373 default:
374 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
375 pvp->pvp_pgi.pgi_freelist = pv;
376 ++pv_nfree;
377 break;
378 case NPVPPG:
379 pv_nfree -= NPVPPG - 1;
380 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
381 FREE((vaddr_t)pvp, M_VMPVENT);
382 break;
383 }
384 }
385
386 #if 0
387 void
388 pmap_collect_pv()
389 {
390 struct pv_page_list pv_page_collectlist;
391 struct pv_page *pvp, *npvp;
392 struct pv_entry *ph, *ppv, *pv, *npv;
393 int s;
394
395 TAILQ_INIT(&pv_page_collectlist);
396
397 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
398 if (pv_nfree < NPVPPG)
399 break;
400 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
401 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
402 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
403 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
404 pvp_pgi.pgi_list);
405 pv_nfree -= NPVPPG;
406 pvp->pvp_pgi.pgi_nfree = -1;
407 }
408 }
409
410 if (pv_page_collectlist.tqh_first == 0)
411 return;
412
413 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
414 if (ph->pv_pmap == 0)
415 continue;
416 s = splvm();
417 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
418 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
419 if (pvp->pvp_pgi.pgi_nfree == -1) {
420 pvp = pv_page_freelist.tqh_first;
421 if (--pvp->pvp_pgi.pgi_nfree == 0) {
422 TAILQ_REMOVE(&pv_page_freelist,
423 pvp, pvp_pgi.pgi_list);
424 }
425 npv = pvp->pvp_pgi.pgi_freelist;
426 #ifdef DIAGNOSTIC
427 if (npv == 0)
428 panic("pmap_collect_pv: pgi_nfree inconsistent");
429 #endif /* DIAGNOSTIC */
430 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
431 *npv = *pv;
432 ppv->pv_next = npv;
433 ppv = npv;
434 } else
435 ppv = pv;
436 }
437 splx(s);
438 }
439
440 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
441 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
442 FREE((vaddr_t)pvp, M_VMPVENT);
443 }
444 }
445 #endif
446
447 /*
448 * Enter a new physical-virtual mapping into the pv table
449 */
450
451 /*__inline*/ void
452 pmap_enter_pv(pmap, va, pv, flags)
453 pmap_t pmap;
454 vaddr_t va;
455 struct pv_entry *pv;
456 u_int flags;
457 {
458 struct pv_entry *npv;
459 u_int s;
460
461 #ifdef DIAGNOSTIC
462 if (!pmap_initialized)
463 panic("pmap_enter_pv: !pmap_initialized");
464 #endif
465
466 s = splvm();
467
468 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
469 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
470
471 if (pv->pv_pmap == NULL) {
472 /*
473 * No entries yet, use header as the first entry
474 */
475 pv->pv_va = va;
476 pv->pv_pmap = pmap;
477 pv->pv_next = NULL;
478 pv->pv_flags = flags;
479 } else {
480 /*
481 * There is at least one other VA mapping this page.
482 * Place this entry after the header.
483 */
484 #ifdef PMAP_DEBUG
485 for (npv = pv; npv; npv = npv->pv_next)
486 if (pmap == npv->pv_pmap && va == npv->pv_va)
487 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
488 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
489 #endif
490 npv = pmap_alloc_pv();
491 /* Must make sure that the new entry is before any others
492 * for the same pmap. Otherwise the vac handling code
493 * will get confused.
494 * XXX this would be better if we used lists like i386 (in fact
495 * this would be a lot simpler)
496 */
497 *npv = *pv;
498 pv->pv_va = va;
499 pv->pv_pmap = pmap;
500 pv->pv_flags = flags;
501 pv->pv_next = npv;
502 }
503
504 if (flags & PT_W)
505 ++pmap->pm_stats.wired_count;
506
507 splx(s);
508 }
509
510
511 /*
512 * Remove a physical-virtual mapping from the pv table
513 */
514
515 /*__inline*/ void
516 pmap_remove_pv(pmap, va, pv)
517 pmap_t pmap;
518 vaddr_t va;
519 struct pv_entry *pv;
520 {
521 struct pv_entry *npv;
522 u_int s;
523 u_int flags = 0;
524
525 #ifdef DIAGNOSTIC
526 if (!pmap_initialized)
527 panic("pmap_remove_pv: !pmap_initialized");
528 #endif
529
530 s = splvm();
531
532 /*
533 * If it is the first entry on the list, it is actually
534 * in the header and we must copy the following entry up
535 * to the header. Otherwise we must search the list for
536 * the entry. In either case we free the now unused entry.
537 */
538
539 if (pmap == pv->pv_pmap && va == pv->pv_va) {
540 npv = pv->pv_next;
541 if (npv) {
542 *pv = *npv;
543 flags = npv->pv_flags;
544 pmap_free_pv(npv);
545 } else {
546 flags = pv->pv_flags;
547 pv->pv_pmap = NULL;
548 }
549 } else {
550 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
551 if (pmap == npv->pv_pmap && va == npv->pv_va)
552 break;
553 }
554 if (npv) {
555 pv->pv_next = npv->pv_next;
556 flags = npv->pv_flags;
557 pmap_free_pv(npv);
558 } else
559 panic("pmap_remove_pv: lost entry");
560 }
561
562 if (flags & PT_W)
563 --pmap->pm_stats.wired_count;
564
565 splx(s);
566 }
567
568 /*
569 * Modify a physical-virtual mapping in the pv table
570 */
571
572 /*__inline */ u_int
573 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
574 pmap_t pmap;
575 vaddr_t va;
576 struct pv_entry *pv;
577 u_int bic_mask;
578 u_int eor_mask;
579 {
580 struct pv_entry *npv;
581 u_int s;
582 u_int flags, oflags;
583
584 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
585 pmap, va, pv, bic_mask, eor_mask));
586
587 #ifdef DIAGNOSTIC
588 if (!pmap_initialized)
589 panic("pmap_modify_pv: !pmap_initialized");
590 #endif
591
592 s = splvm();
593
594 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
595 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
596
597 /*
598 * There is at least one VA mapping this page.
599 */
600
601 for (npv = pv; npv; npv = npv->pv_next) {
602 if (pmap == npv->pv_pmap && va == npv->pv_va) {
603 oflags = npv->pv_flags;
604 npv->pv_flags = flags =
605 ((oflags & ~bic_mask) ^ eor_mask);
606 if ((flags ^ oflags) & PT_W) {
607 if (flags & PT_W)
608 ++pmap->pm_stats.wired_count;
609 else
610 --pmap->pm_stats.wired_count;
611 }
612 PDEBUG(0, printf("done flags=%08x\n", flags));
613 splx(s);
614 return (oflags);
615 }
616 }
617
618 PDEBUG(0, printf("done.\n"));
619 splx(s);
620 return (0);
621 }
622
623
624 /*
625 * Map the specified level 2 pagetable into the level 1 page table for
626 * the given pmap to cover a chunk of virtual address space starting from the
627 * address specified.
628 */
629 static /*__inline*/ void
630 pmap_map_in_l1(pmap, va, l2pa)
631 pmap_t pmap;
632 vaddr_t va, l2pa;
633 {
634 vaddr_t ptva;
635
636 /* Calculate the index into the L1 page table. */
637 ptva = (va >> PDSHIFT) & ~3;
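	/*
	 * Each L1 descriptor maps 1MB and each L2 page table occupies 1KB,
	 * so the 4KB page of L2 tables wired in below backs four consecutive
	 * L1 slots (4MB of address space); hence the index is rounded down
	 * to a multiple of four and four L1 entries are written.
	 */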
638
639 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
640 pmap->pm_pdir, L1_PTE(l2pa), ptva));
641
642 /* Map page table into the L1. */
643 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
644 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
645 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
646 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
647
648 PDEBUG(0, printf("pt self reference %lx in %lx\n",
649 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
650
651 /* Map the page table into the page table area. */
652 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
653
654 /* XXX should be a purge */
655 /* cpu_tlb_flushD();*/
656 }
657
658 #if 0
659 static /*__inline*/ void
660 pmap_unmap_in_l1(pmap, va)
661 pmap_t pmap;
662 vaddr_t va;
663 {
664 vaddr_t ptva;
665
666 /* Calculate the index into the L1 page table. */
667 ptva = (va >> PDSHIFT) & ~3;
668
669 /* Unmap page table from the L1. */
670 pmap->pm_pdir[ptva + 0] = 0;
671 pmap->pm_pdir[ptva + 1] = 0;
672 pmap->pm_pdir[ptva + 2] = 0;
673 pmap->pm_pdir[ptva + 3] = 0;
674
675 /* Unmap the page table from the page table area. */
676 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
677
678 /* XXX should be a purge */
679 /* cpu_tlb_flushD();*/
680 }
681 #endif
682
683
684 /*
685 * Used to map a range of physical addresses into kernel
686 * virtual address space.
687 *
688 * For now, VM is already on, we only need to map the
689 * specified memory.
690 */
691 vaddr_t
692 pmap_map(va, spa, epa, prot)
693 vaddr_t va, spa, epa;
694 int prot;
695 {
696 while (spa < epa) {
697 pmap_enter(pmap_kernel(), va, spa, prot, 0);
698 va += NBPG;
699 spa += NBPG;
700 }
701 pmap_update();
702 return(va);
703 }
704
705
706 /*
707 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
708 *
709 * bootstrap the pmap system. This is called from initarm and allows
710 * the pmap system to initialise any structures it requires.
711 *
712 * Currently this sets up the kernel_pmap that is statically allocated
713 * and also allocates virtual addresses for certain page hooks.
714 * These page hooks are used to zero and copy physical pages of
715 * memory.
716 * It also initialises the start and end addresses of the kernel data space.
717 */
718 extern paddr_t physical_freestart;
719 extern paddr_t physical_freeend;
720
721 struct pv_entry *boot_pvent;
722 char *boot_attrs;
723
724 void
725 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
726 pd_entry_t *kernel_l1pt;
727 pv_addr_t kernel_ptpt;
728 {
729 int loop;
730 paddr_t start, end;
731 #if NISADMA > 0
732 paddr_t istart;
733 psize_t isize;
734 #endif
735 vsize_t size;
736
737 kernel_pmap = &kernel_pmap_store;
738
739 kernel_pmap->pm_pdir = kernel_l1pt;
740 kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
741 kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
742 simple_lock_init(&kernel_pmap->pm_lock);
743 kernel_pmap->pm_count = 1;
744
745 /*
746 * Initialize PAGE_SIZE-dependent variables.
747 */
748 uvm_setpagesize();
749
750 npages = 0;
751 loop = 0;
752 while (loop < bootconfig.dramblocks) {
753 start = (paddr_t)bootconfig.dram[loop].address;
754 end = start + (bootconfig.dram[loop].pages * NBPG);
755 if (start < physical_freestart)
756 start = physical_freestart;
757 if (end > physical_freeend)
758 end = physical_freeend;
759 #if 0
760 printf("%d: %lx -> %lx\n", loop, start, end - 1);
761 #endif
762 #if NISADMA > 0
763 if (pmap_isa_dma_range_intersect(start, end - start,
764 &istart, &isize)) {
765 /*
766 * Place the pages that intersect with the
767 * ISA DMA range onto the ISA DMA free list.
768 */
769 #if 0
770 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
771 istart + isize - 1);
772 #endif
773 uvm_page_physload(atop(istart),
774 atop(istart + isize), atop(istart),
775 atop(istart + isize), VM_FREELIST_ISADMA);
776 npages += atop(istart + isize) - atop(istart);
777
778 /*
779 * Load the pieces that come before
780 * the intersection into the default
781 * free list.
782 */
783 if (start < istart) {
784 #if 0
785 printf(" BEFORE 0x%lx -> 0x%lx\n",
786 start, istart - 1);
787 #endif
788 uvm_page_physload(atop(start),
789 atop(istart), atop(start),
790 atop(istart), VM_FREELIST_DEFAULT);
791 npages += atop(istart) - atop(start);
792 }
793
794 /*
795 * Load the pieces that come after
796 * the intersection into the default
797 * free list.
798 */
799 if ((istart + isize) < end) {
800 #if 0
801 printf(" AFTER 0x%lx -> 0x%lx\n",
802 (istart + isize), end - 1);
803 #endif
804 uvm_page_physload(atop(istart + isize),
805 atop(end), atop(istart + isize),
806 atop(end), VM_FREELIST_DEFAULT);
807 npages += atop(end) - atop(istart + isize);
808 }
809 } else {
810 uvm_page_physload(atop(start), atop(end),
811 atop(start), atop(end), VM_FREELIST_DEFAULT);
812 npages += atop(end) - atop(start);
813 }
814 #else /* NISADMA > 0 */
815 uvm_page_physload(atop(start), atop(end),
816 atop(start), atop(end), VM_FREELIST_DEFAULT);
817 npages += atop(end) - atop(start);
818 #endif /* NISADMA > 0 */
819 ++loop;
820 }
821
822 #ifdef MYCROFT_HACK
823 printf("npages = %ld\n", npages);
824 #endif
825
826 virtual_start = KERNEL_VM_BASE;
827 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
828
829 ALLOC_PAGE_HOOK(page_hook0, NBPG);
830 ALLOC_PAGE_HOOK(page_hook1, NBPG);
831
832 /*
833 * The mem special device needs a virtual hook but we don't
834 * need a pte
835 */
836 memhook = (char *)virtual_start;
837 virtual_start += NBPG;
838
839 msgbufaddr = (caddr_t)virtual_start;
840 msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
841 virtual_start += round_page(MSGBUFSIZE);
842
843 size = npages * sizeof(struct pv_entry);
844 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
845 bzero(boot_pvent, size);
846 size = npages * sizeof(char);
847 boot_attrs = (char *)uvm_pageboot_alloc(size);
848 bzero(boot_attrs, size);
849
850 /*
851 * initialize the pmap pool.
852 */
853
854 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
855 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
856
857 cpu_cache_cleanD();
858 }
859
860 /*
861 * void pmap_init(void)
862 *
863 * Initialize the pmap module.
864 * Called by vm_init() in vm/vm_init.c in order to initialise
865 * any structures that the pmap system needs to map virtual memory.
866 */
867
868 extern int physmem;
869
870 void
871 pmap_init()
872 {
873 int lcv;
874
875 #ifdef MYCROFT_HACK
876 printf("physmem = %d\n", physmem);
877 #endif
878
879 /*
880 * Set the available memory vars - These do not map to real memory
881 * addresses and cannot as the physical memory is fragmented.
882 * They are used by ps for %mem calculations.
883 * One could argue whether this should be the entire memory or just
884 * the memory that is useable in a user process.
885 */
886 avail_start = 0;
887 avail_end = physmem * NBPG;
888
889 /* Set up pmap info for physsegs. */
890 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
891 vm_physmem[lcv].pmseg.pvent = boot_pvent;
892 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
893 vm_physmem[lcv].pmseg.attrs = boot_attrs;
894 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
895 }
896 #ifdef MYCROFT_HACK
897 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
898 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
899 lcv,
900 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
901 vm_physmem[lcv].start, vm_physmem[lcv].end);
902 }
903 #endif
904 TAILQ_INIT(&pv_page_freelist);
905
906 #ifdef DIAGNOSTIC
907 /* Now it is safe to enable pv_entry recording. */
908 pmap_initialized = TRUE;
909 #endif
910
911 /* Initialise our L1 page table queues and counters */
912 SIMPLEQ_INIT(&l1pt_static_queue);
913 l1pt_static_queue_count = 0;
914 l1pt_static_create_count = 0;
915 SIMPLEQ_INIT(&l1pt_queue);
916 l1pt_queue_count = 0;
917 l1pt_create_count = 0;
918 l1pt_reuse_count = 0;
919 }
920
921 /*
922 * pmap_postinit()
923 *
924 * This routine is called after the vm and kmem subsystems have been
925 * initialised. This allows the pmap code to perform any initialisation
926 * that can only be done once the memory allocation is in place.
927 */
928
929 void
930 pmap_postinit()
931 {
932 int loop;
933 struct l1pt *pt;
934
935 #ifdef PMAP_STATIC_L1S
936 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
937 #else /* PMAP_STATIC_L1S */
938 for (loop = 0; loop < max_processes; ++loop) {
939 #endif /* PMAP_STATIC_L1S */
940 /* Allocate a L1 page table */
941 pt = pmap_alloc_l1pt();
942 if (!pt)
943 panic("Cannot allocate static L1 page tables\n");
944
945 /* Clean it */
946 bzero((void *)pt->pt_va, PD_SIZE);
947 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
948 /* Add the page table to the queue */
949 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
950 ++l1pt_static_queue_count;
951 ++l1pt_static_create_count;
952 }
953 }
954
955
956 /*
957 * Create and return a physical map.
958 *
959 * If the size specified for the map is zero, the map is an actual physical
960 * map, and may be referenced by the hardware.
961 *
962 * If the size specified is non-zero, the map will be used in software only,
963 * and is bounded by that size.
964 */
965
966 pmap_t
967 pmap_create()
968 {
969 pmap_t pmap;
970
971 /*
972 * Fetch pmap entry from the pool
973 */
974
975 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
976 bzero(pmap, sizeof(*pmap));
977
978 /* Now init the machine part of the pmap */
979 pmap_pinit(pmap);
980 return(pmap);
981 }
982
983 /*
984 * pmap_alloc_l1pt()
985 *
986 * This routine allocates physical and virtual memory for an L1 page table
987 * and wires it.
988 * An l1pt structure is returned to describe the allocated page table.
989 *
990 * This routine is allowed to fail if the required memory cannot be allocated.
991 * In this case NULL is returned.
992 */
993
994 struct l1pt *
995 pmap_alloc_l1pt(void)
996 {
997 paddr_t pa;
998 vaddr_t va;
999 struct l1pt *pt;
1000 int error;
1001 struct vm_page *m;
1002 pt_entry_t *ptes;
1003
1004 /* Allocate virtual address space for the L1 page table */
1005 va = uvm_km_valloc(kernel_map, PD_SIZE);
1006 if (va == 0) {
1007 #ifdef DIAGNOSTIC
1008 printf("pmap: Cannot allocate pageable memory for L1\n");
1009 #endif /* DIAGNOSTIC */
1010 return(NULL);
1011 }
1012
1013 /* Allocate memory for the l1pt structure */
1014 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1015
1016 /*
1017 * Allocate pages from the VM system.
1018 */
1019 TAILQ_INIT(&pt->pt_plist);
1020 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
1021 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1022 if (error) {
1023 #ifdef DIAGNOSTIC
1024 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
1025 error);
1026 #endif /* DIAGNOSTIC */
1027 /* Release the resources we already have claimed */
1028 free(pt, M_VMPMAP);
1029 uvm_km_free(kernel_map, va, PD_SIZE);
1030 return(NULL);
1031 }
1032
1033 /* Map our physical pages into our virtual space */
1034 pt->pt_va = va;
1035 m = pt->pt_plist.tqh_first;
1036 ptes = pmap_map_ptes(pmap_kernel());
1037 while (m && va < (pt->pt_va + PD_SIZE)) {
1038 pa = VM_PAGE_TO_PHYS(m);
1039
1040 pmap_enter(pmap_kernel(), va, pa,
1041 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
1042
1043 /* Revoke cacheability and bufferability */
1044 /* XXX should be done better than this */
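		/*
		 * Note: the L1 table is read by the MMU's hardware table
		 * walker, which on the StrongARM does not snoop the
		 * (virtually indexed, write-back) data cache, so these pages
		 * must be mapped non-cacheable and non-bufferable to keep
		 * the walker's view of the table coherent with the CPU's.
		 */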
1045 ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
1046
1047 va += NBPG;
1048 m = m->pageq.tqe_next;
1049 }
1050 pmap_unmap_ptes(pmap_kernel());
1051 pmap_update();
1052
1053 #ifdef DIAGNOSTIC
1054 if (m)
1055 panic("pmap_alloc_l1pt: pglist not empty\n");
1056 #endif /* DIAGNOSTIC */
1057
1058 pt->pt_flags = 0;
1059 return(pt);
1060 }
1061
1062 /*
1063 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1064 */
1065 void
1066 pmap_free_l1pt(pt)
1067 struct l1pt *pt;
1068 {
1069 /* Separate the physical memory from the virtual space */
1070 pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
1071 pmap_update();
1072
1073 /* Return the physical memory */
1074 uvm_pglistfree(&pt->pt_plist);
1075
1076 /* Free the virtual space */
1077 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1078
1079 /* Free the l1pt structure */
1080 free(pt, M_VMPMAP);
1081 }
1082
1083 /*
1084 * Allocate a page directory.
1085 * This routine will either allocate a new page directory from the pool
1086 * of L1 page tables currently held by the kernel or it will allocate
1087 * a new one via pmap_alloc_l1pt().
1088 * It will then initialise the l1 page table for use.
1089 */
1090 int
1091 pmap_allocpagedir(pmap)
1092 struct pmap *pmap;
1093 {
1094 paddr_t pa;
1095 struct l1pt *pt;
1096 pt_entry_t *pte;
1097
1098 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1099
1100 /* Do we have any spare L1's lying around ? */
1101 if (l1pt_static_queue_count) {
1102 --l1pt_static_queue_count;
1103 pt = l1pt_static_queue.sqh_first;
1104 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1105 } else if (l1pt_queue_count) {
1106 --l1pt_queue_count;
1107 pt = l1pt_queue.sqh_first;
1108 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1109 ++l1pt_reuse_count;
1110 } else {
1111 pt = pmap_alloc_l1pt();
1112 if (!pt)
1113 return(ENOMEM);
1114 ++l1pt_create_count;
1115 }
1116
1117 /* Store the pointer to the l1 descriptor in the pmap. */
1118 pmap->pm_l1pt = pt;
1119
1120 /* Get the physical address of the start of the l1 */
1121 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1122
1123 /* Store the virtual address of the l1 in the pmap. */
1124 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1125
1126 /* Clean the L1 if it is dirty */
1127 if (!(pt->pt_flags & PTFLAG_CLEAN))
1128 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1129
1130 /* Do we already have the kernel mappings ? */
1131 if (!(pt->pt_flags & PTFLAG_KPT)) {
1132 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1133
1134 bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1135 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1136 KERNEL_PD_SIZE);
1137 pt->pt_flags |= PTFLAG_KPT;
1138 }
1139
1140 /* Allocate a page table to map all the page tables for this pmap */
1141
1142 #ifdef DIAGNOSTIC
1143 if (pmap->pm_vptpt) {
1144 /* XXX What if we have one already ? */
1145 panic("pmap_allocpagedir: have pt already\n");
1146 }
1147 #endif /* DIAGNOSTIC */
1148 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1149 if (pmap->pm_vptpt == 0) {
1150 pmap_freepagedir(pmap);
1151 return(ENOMEM);
1152 }
1153
1154 (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
1155 pmap->pm_pptpt &= PG_FRAME;
1156 /* Revoke cacheability and bufferability */
1157 /* XXX should be done better than this */
1158 pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
1159 *pte = *pte & ~(PT_C | PT_B);
1160
1161 /* Wire in this page table */
1162 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1163
1164 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1165
1166 /*
1167 * Map the kernel page tables for 0xf0000000 +
1168 * into the page table used to map the
1169 * pmap's page tables
1170 */
1171 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1172 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1173 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1174 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1175 (KERNEL_PD_SIZE >> 2));
1176
1177 pmap->pm_count = 1;
1178 simple_lock_init(&pmap->pm_lock);
1179
1180 return(0);
1181 }
1182
1183
1184 /*
1185 * Initialize a preallocated and zeroed pmap structure,
1186 * such as one in a vmspace structure.
1187 */
1188
1189 static int pmap_pagedir_ident; /* tsleep() ident */
1190
1191 void
1192 pmap_pinit(pmap)
1193 struct pmap *pmap;
1194 {
1195 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1196
1197 /* Keep looping until we succeed in allocating a page directory */
1198 while (pmap_allocpagedir(pmap) != 0) {
1199 /*
1200 * Ok we failed to allocate a suitable block of memory for an
1201 * L1 page table. This means that either:
1202 * 1. 16KB of virtual address space could not be allocated
1203 * 2. 16KB of physically contiguous memory on a 16KB boundary
1204 * could not be allocated.
1205 *
1206 * Since we cannot fail, we will sleep for a while and try
1207 * again. Although we will be woken when another page table
1208 * is freed, other memory releases and swapping may occur in the
1209 * meantime that allow us to succeed, so we keep retrying
1210 * regularly just in case.
1211 */
1212
1213 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1214 "l1ptwait", 1000) == EWOULDBLOCK)
1215 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1216 }
1217
1218 /* Map zero page for the pmap. This will also map the L2 for it */
1219 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1220 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1221 pmap_update();
1222 }
1223
1224
1225 void
1226 pmap_freepagedir(pmap)
1227 pmap_t pmap;
1228 {
1229 /* Free the memory used for the page table mapping */
1230 if (pmap->pm_vptpt != 0)
1231 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1232
1233 /* junk the L1 page table */
1234 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1235 /* Add the page table to the queue */
1236 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1237 ++l1pt_static_queue_count;
1238 /* Wake up any sleeping processes waiting for a l1 page table */
1239 wakeup((caddr_t)&pmap_pagedir_ident);
1240 } else if (l1pt_queue_count < 8) {
1241 /* Add the page table to the queue */
1242 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1243 ++l1pt_queue_count;
1244 /* Wake up any sleeping processes waiting for a l1 page table */
1245 wakeup((caddr_t)&pmap_pagedir_ident);
1246 } else
1247 pmap_free_l1pt(pmap->pm_l1pt);
1248 }
1249
1250
1251 /*
1252 * Retire the given physical map from service.
1253 * Should only be called if the map contains no valid mappings.
1254 */
1255
1256 void
1257 pmap_destroy(pmap)
1258 pmap_t pmap;
1259 {
1260 int count;
1261
1262 if (pmap == NULL)
1263 return;
1264
1265 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1266 simple_lock(&pmap->pm_lock);
1267 count = --pmap->pm_count;
1268 simple_unlock(&pmap->pm_lock);
1269 if (count == 0) {
1270 pmap_release(pmap);
1271 pool_put(&pmap_pmap_pool, pmap);
1272 }
1273 }
1274
1275
1276 /*
1277 * Release any resources held by the given physical map.
1278 * Called when a pmap initialized by pmap_pinit is being released.
1279 * Should only be called if the map contains no valid mappings.
1280 */
1281
1282 void
1283 pmap_release(pmap)
1284 pmap_t pmap;
1285 {
1286 struct vm_page *page;
1287 pt_entry_t *pte;
1288 int loop;
1289
1290 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1291
1292 #if 0
1293 if (pmap->pm_count != 1) /* XXX: needs sorting */
1294 panic("pmap_release count %d", pmap->pm_count);
1295 #endif
1296
1297 /* Remove the zero page mapping */
1298 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1299 pmap_update();
1300
1301 /*
1302 * Free any page tables still mapped
1303 * This is only temporary until pmap_enter can count the number
1304 * of mappings made in a page table. Then pmap_remove() can
1305 * reduce the count and free the pagetable when the count
1306 * reaches zero.
1307 */
1308 for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) {
1309 pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4);
1310 if (*pte != 0) {
1311 PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte));
1312 page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
1313 if (page == NULL)
1314 panic("pmap_release: bad address for phys page");
1315 uvm_pagefree(page);
1316 }
1317 }
1318 /* Free the page dir */
1319 pmap_freepagedir(pmap);
1320 }
1321
1322
1323 /*
1324 * void pmap_reference(pmap_t pmap)
1325 *
1326 * Add a reference to the specified pmap.
1327 */
1328
1329 void
1330 pmap_reference(pmap)
1331 pmap_t pmap;
1332 {
1333 if (pmap == NULL)
1334 return;
1335
1336 simple_lock(&pmap->pm_lock);
1337 pmap->pm_count++;
1338 simple_unlock(&pmap->pm_lock);
1339 }
1340
1341 /*
1342 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1343 *
1344 * Return the start and end addresses of the kernel's virtual space.
1345 * These values are set up in pmap_bootstrap and are updated as pages
1346 * are allocated.
1347 */
1348
1349 void
1350 pmap_virtual_space(start, end)
1351 vaddr_t *start;
1352 vaddr_t *end;
1353 {
1354 *start = virtual_start;
1355 *end = virtual_end;
1356 }
1357
1358
1359 /*
1360 * Activate the address space for the specified process. If the process
1361 * is the current process, load the new MMU context.
1362 */
1363 void
1364 pmap_activate(p)
1365 struct proc *p;
1366 {
1367 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1368 struct pcb *pcb = &p->p_addr->u_pcb;
1369
1370 (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
1371 (paddr_t *)&pcb->pcb_pagedir);
1372
1373 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1374 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1375
1376 if (p == curproc) {
1377 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1378 setttb((u_int)pcb->pcb_pagedir);
1379 }
1380 #if 0
1381 pmap->pm_pdchanged = FALSE;
1382 #endif
1383 }
1384
1385
1386 /*
1387 * Deactivate the address space of the specified process.
1388 */
1389 void
1390 pmap_deactivate(p)
1391 struct proc *p;
1392 {
1393 }
1394
1395
1396 /*
1397 * pmap_clean_page()
1398 *
1399 * This is a local function used to work out the best strategy to clean
1400 * a single page referenced by its entry in the PV table. It's used by
1401 * pmap_copy_page, pmap_zero_page and maybe some others later on.
1402 *
1403 * Its policy is effectively:
1404 * o If there are no mappings, we don't bother doing anything with the cache.
1405 * o If there is one mapping, we clean just that page.
1406 * o If there are multiple mappings, we clean the entire cache.
1407 *
1408 * So that some functions can be further optimised, it returns 0 if it didn't
1409 * clean the entire cache, or 1 if it did.
1410 *
1411 * XXX One bug in this routine is that if the pv_entry has a single page
1412 * mapped at 0x00000000 a whole cache clean will be performed rather than
1413 * just that one page. This should not occur in everyday use, and if it
1414 * does it merely results in a less efficient clean for the page.
1415 */
1416 static int
1417 pmap_clean_page(pv)
1418 struct pv_entry *pv;
1419 {
1420 int s;
1421 int cache_needs_cleaning = 0;
1422 vaddr_t page_to_clean = 0;
1423
1424 /* Go to splvm() so we get exclusive lock for a mo */
1425 s = splvm();
1426 if (pv->pv_pmap) {
1427 cache_needs_cleaning = 1;
1428 if (!pv->pv_next)
1429 page_to_clean = pv->pv_va;
1430 }
1431 splx(s);
1432
1433 /* Do cache ops outside the splvm. */
1434 if (page_to_clean)
1435 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1436 else if (cache_needs_cleaning) {
1437 cpu_cache_purgeID();
1438 return (1);
1439 }
1440 return (0);
1441 }
1442
1443 /*
1444 * pmap_find_pv()
1445 *
1446 * This is a local function that finds a PV entry for a given physical page.
1447 * This is a common op, and this function removes loads of ifdefs in the code.
1448 */
1449 static __inline struct pv_entry *
1450 pmap_find_pv(phys)
1451 paddr_t phys;
1452 {
1453 int bank, off;
1454 struct pv_entry *pv;
1455
1456 #ifdef DIAGNOSTIC
1457 if (!pmap_initialized)
1458 panic("pmap_find_pv: !pmap_initialized");
1459 #endif
1460
1461 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1462 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1463 pv = &vm_physmem[bank].pmseg.pvent[off];
1464 return (pv);
1465 }
1466
1467 /*
1468 * pmap_zero_page()
1469 *
1470 * Zero a given physical page by mapping it at a page hook point.
1471 * In doing the zero page op, the page we zero is mapped cacheable, as on
1472 * the StrongARM accesses to non-cached pages are non-burst, making writing
1473 * _any_ bulk data very slow.
1474 */
1475 void
1476 pmap_zero_page(phys)
1477 paddr_t phys;
1478 {
1479 struct pv_entry *pv;
1480
1481 /* Get an entry for this page, and clean it. */
1482 pv = pmap_find_pv(phys);
1483 pmap_clean_page(pv);
1484
1485 /*
1486 * Hook in the page, zero it, and purge the cache for that
1487 * zeroed page. Invalidate the TLB as needed.
1488 */
1489 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1490 cpu_tlb_flushD_SE(page_hook0.va);
1491 bzero_page(page_hook0.va);
1492 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1493 }
1494
1495 /*
1496 * pmap_copy_page()
1497 *
1498 * Copy one physical page into another, by mapping the pages into
1499 * hook points. The same comment regarding cacheability as in
1500 * pmap_zero_page also applies here.
1501 */
1502 void
1503 pmap_copy_page(src, dest)
1504 paddr_t src;
1505 paddr_t dest;
1506 {
1507 struct pv_entry *src_pv, *dest_pv;
1508
1509 /* Get PV entries for the pages, and clean them if needed. */
1510 src_pv = pmap_find_pv(src);
1511 dest_pv = pmap_find_pv(dest);
1512 if (!pmap_clean_page(src_pv))
1513 pmap_clean_page(dest_pv);
1514
1515 /*
1516 * Map the pages into the page hook points, copy them, and purge
1517 * the cache for the appropriate page. Invalidate the TLB
1518 * as required.
1519 */
1520 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1521 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1522 cpu_tlb_flushD_SE(page_hook0.va);
1523 cpu_tlb_flushD_SE(page_hook1.va);
1524 bcopy_page(page_hook0.va, page_hook1.va);
1525 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1526 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1527 }
1528
1529 /*
1530 * paddr_t pmap_next_phys_page(paddr_t addr)
1531 *
1532 * Return the physical address of the next page of memory after the
1533 * given address, or 0 if there are no more physical pages.
1534 */
1535
1536 paddr_t
1537 pmap_next_phys_page(addr)
1538 paddr_t addr;
1539
1540 {
1541 int loop;
1542
1543 if (addr < bootconfig.dram[0].address)
1544 return(bootconfig.dram[0].address);
1545
1546 loop = 0;
1547
1548 while (bootconfig.dram[loop].address != 0
1549 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1550 ++loop;
1551
1552 if (bootconfig.dram[loop].address == 0)
1553 return(0);
1554
1555 addr += NBPG;
1556
1557 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1558 if (bootconfig.dram[loop + 1].address == 0)
1559 return(0);
1560 addr = bootconfig.dram[loop + 1].address;
1561 }
1562
1563 return(addr);
1564 }
1565
1566 #if 0
1567 void
1568 pmap_pte_addref(pmap, va)
1569 pmap_t pmap;
1570 vaddr_t va;
1571 {
1572 pd_entry_t *pde;
1573 paddr_t pa;
1574 struct vm_page *m;
1575
1576 if (pmap == pmap_kernel())
1577 return;
1578
1579 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1580 pa = pmap_pte_pa(pde);
1581 m = PHYS_TO_VM_PAGE(pa);
1582 ++m->wire_count;
1583 #ifdef MYCROFT_HACK
1584 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1585 pmap, va, pde, pa, m, m->wire_count);
1586 #endif
1587 }
1588
1589 void
1590 pmap_pte_delref(pmap, va)
1591 pmap_t pmap;
1592 vaddr_t va;
1593 {
1594 pd_entry_t *pde;
1595 paddr_t pa;
1596 struct vm_page *m;
1597
1598 if (pmap == pmap_kernel())
1599 return;
1600
1601 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1602 pa = pmap_pte_pa(pde);
1603 m = PHYS_TO_VM_PAGE(pa);
1604 --m->wire_count;
1605 #ifdef MYCROFT_HACK
1606 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1607 pmap, va, pde, pa, m, m->wire_count);
1608 #endif
1609 if (m->wire_count == 0) {
1610 #ifdef MYCROFT_HACK
1611 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1612 pmap, va, pde, pa, m);
1613 #endif
1614 pmap_unmap_in_l1(pmap, va);
1615 uvm_pagefree(m);
1616 --pmap->pm_stats.resident_count;
1617 }
1618 }
1619 #else
1620 #define pmap_pte_addref(pmap, va)
1621 #define pmap_pte_delref(pmap, va)
1622 #endif
1623
1624 /*
1625 * Since we have a virtually indexed cache, we may need to inhibit caching if
1626 * there is more than one mapping and at least one of them is writable.
1627 * Since we purge the cache on every context switch, we only need to check for
1628 * other mappings within the same pmap, or kernel_pmap.
1629 * This function is also called when a page is unmapped, to possibly reenable
1630 * caching on any remaining mappings.
1631 *
1632 * Note that the pmap must have its ptes mapped in, and passed with ptes.
1633 */
1634 void
1635 pmap_vac_me_harder(struct pmap *pmap, struct pv_entry *pv, pt_entry_t *ptes,
1636 boolean_t clear_cache)
1637 {
1638 struct pv_entry *npv;
1639 pt_entry_t *pte;
1640 int entries = 0;
1641 int writeable = 0;
1642 int cacheable_entries = 0;
1643
1644 if (pv->pv_pmap == NULL)
1645 return;
1646 KASSERT(ptes != NULL);
1647
1648 /*
1649 * Count mappings and writable mappings in this pmap.
1650 * Keep a pointer to the first one.
1651 */
1652 for (npv = pv; npv; npv = npv->pv_next) {
1653 /* Count mappings in the same pmap */
1654 if (pmap == npv->pv_pmap) {
1655 if (entries++ == 0)
1656 pv = npv;
1657 /* Cacheable mappings */
1658 if ((npv->pv_flags & PT_NC) == 0)
1659 cacheable_entries++;
1660 /* Writeable mappings */
1661 if (npv->pv_flags & PT_Wr)
1662 ++writeable;
1663 }
1664 }
1665
1666 PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
1667 "writeable %d cacheable %d %s\n", pmap, entries, writeable,
1668 cacheable_entries, clear_cache ? "clean" : "no clean"));
1669
1670 /*
1671 * Enable or disable caching as necessary.
1672 * We do a quick check of the first PTE to avoid walking the list if
1673 * we're already in the right state.
1674 */
1675 if (entries > 1 && writeable) {
1676 if (cacheable_entries == 0)
1677 return;
1678 if (pv->pv_flags & PT_NC) {
1679 #ifdef DIAGNOSTIC
1680 /* We have cacheable entries, but the first one
1681 isn't among them. Something is wrong. */
1682 if (cacheable_entries)
1683 panic("pmap_vac_me_harder: "
1684 "cacheable inconsistent");
1685 #endif
1686 return;
1687 }
1688 pte = &ptes[arm_byte_to_page(pv->pv_va)];
1689 *pte &= ~(PT_C | PT_B);
1690 pv->pv_flags |= PT_NC;
1691 if (clear_cache && cacheable_entries < 4) {
1692 cpu_cache_purgeID_rng(pv->pv_va, NBPG);
1693 cpu_tlb_flushID_SE(pv->pv_va);
1694 }
1695 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1696 if (pmap == npv->pv_pmap &&
1697 (npv->pv_flags & PT_NC) == 0) {
1698 ptes[arm_byte_to_page(npv->pv_va)] &=
1699 ~(PT_C | PT_B);
1700 npv->pv_flags |= PT_NC;
1701 if (clear_cache && cacheable_entries < 4) {
1702 cpu_cache_purgeID_rng(npv->pv_va,
1703 NBPG);
1704 cpu_tlb_flushID_SE(npv->pv_va);
1705 }
1706 }
1707 }
1708 if (clear_cache && cacheable_entries >= 4) {
1709 cpu_cache_purgeID();
1710 cpu_tlb_flushID();
1711 }
1712 } else if (entries > 0) {
1713 if ((pv->pv_flags & PT_NC) == 0)
1714 return;
1715 pte = &ptes[arm_byte_to_page(pv->pv_va)];
1716 *pte |= (PT_C | PT_B);
1717 pv->pv_flags &= ~PT_NC;
1718 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1719 if (pmap == npv->pv_pmap &&
1720 (npv->pv_flags & PT_NC)) {
1721 ptes[arm_byte_to_page(npv->pv_va)] |=
1722 (PT_C | PT_B);
1723 npv->pv_flags &= ~PT_NC;
1724 }
1725 }
1726 }
1727 }
1728
1729 /*
1730 * pmap_remove()
1731 *
1732 * pmap_remove is responsible for nuking a number of mappings for a range
1733 * of virtual address space in the current pmap. To do this efficiently
1734 * is interesting, because in a number of cases a wide virtual address
1735 * range may be supplied that contains few actual mappings. So, the
1736 * optimisations are:
1737 * 1. Try and skip over hunks of address space for which an L1 entry
1738 * does not exist.
1739 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1740 * maybe do just a partial cache clean. This path of execution is
1741 * complicated by the fact that the cache must be flushed _before_
1742 * the PTE is nuked, being a VAC :-)
1743 * 3. Maybe later fast-case a single page, but I don't think this is
1744 * going to make _that_ much difference overall.
1745 */
1746
1747 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
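/*
 * Up to PMAP_REMOVE_CLEAN_LIST_SIZE pages are recorded and cache-cleaned
 * individually; once the list overflows, pmap_remove() falls back to a
 * full cache purge and TLB flush and stops tracking individual pages.
 */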
1748
1749 void
1750 pmap_remove(pmap, sva, eva)
1751 pmap_t pmap;
1752 vaddr_t sva;
1753 vaddr_t eva;
1754 {
1755 int cleanlist_idx = 0;
1756 struct pagelist {
1757 vaddr_t va;
1758 pt_entry_t *pte;
1759 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1760 pt_entry_t *pte = 0, *ptes;
1761 paddr_t pa;
1762 int pmap_active;
1763 struct pv_entry *pv;
1764
1765 /* Exit quick if there is no pmap */
1766 if (!pmap)
1767 return;
1768
1769 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1770
1771 sva &= PG_FRAME;
1772 eva &= PG_FRAME;
1773
1774 ptes = pmap_map_ptes(pmap);
1775 /* Get a page table pointer */
1776 while (sva < eva) {
1777 if (pmap_pde_v(pmap_pde(pmap, sva)))
1778 break;
1779 sva = (sva & PD_MASK) + NBPD;
1780 }
1781
1782 pte = &ptes[arm_byte_to_page(sva)];
1783 /* Note if the pmap is active thus require cache and tlb cleans */
1784 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1785 || (pmap == kernel_pmap))
1786 pmap_active = 1;
1787 else
1788 pmap_active = 0;
1789
1790 /* Now loop along */
1791 while (sva < eva) {
1792 /* Check if we can move to the next PDE (l1 chunk) */
1793 if (!(sva & PT_MASK))
1794 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1795 sva += NBPD;
1796 pte += arm_byte_to_page(NBPD);
1797 continue;
1798 }
1799
1800 /* We've found a valid PTE, so this page of PTEs has to go. */
1801 if (pmap_pte_v(pte)) {
1802 int bank, off;
1803
1804 /* Update statistics */
1805 --pmap->pm_stats.resident_count;
1806
1807 /*
1808 * Add this page to our cache remove list, if we can.
1809 * If, however the cache remove list is totally full,
1810 * then do a complete cache invalidation taking note
1811 * to backtrack the PTE table beforehand, and ignore
1812 * the lists in future because there's no longer any
1813 * point in bothering with them (we've paid the
1814 * penalty, so will carry on unhindered). Otherwise,
1815 * when we fall out, we just clean the list.
1816 */
1817 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1818 pa = pmap_pte_pa(pte);
1819
1820 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1821 /* Add to the clean list. */
1822 cleanlist[cleanlist_idx].pte = pte;
1823 cleanlist[cleanlist_idx].va = sva;
1824 cleanlist_idx++;
1825 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1826 int cnt;
1827
1828 /* Nuke everything if needed. */
1829 if (pmap_active) {
1830 cpu_cache_purgeID();
1831 cpu_tlb_flushID();
1832 }
1833
1834 /*
1835 * Roll back the previous PTE list,
1836 * and zero out the current PTE.
1837 */
1838 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1839 *cleanlist[cnt].pte = 0;
1840 pmap_pte_delref(pmap, cleanlist[cnt].va);
1841 }
1842 *pte = 0;
1843 pmap_pte_delref(pmap, sva);
1844 cleanlist_idx++;
1845 } else {
1846 /*
1847 * We've already nuked the cache and
1848 * TLB, so just carry on regardless,
1849 * and we won't need to do it again
1850 */
1851 *pte = 0;
1852 pmap_pte_delref(pmap, sva);
1853 }
1854
1855 /*
1856 * Update flags. In a number of circumstances,
1857 * we could cluster a lot of these and do a
1858 * number of sequential pages in one go.
1859 */
1860 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1861 pv = &vm_physmem[bank].pmseg.pvent[off];
1862 pmap_remove_pv(pmap, sva, pv);
1863 pmap_vac_me_harder(pmap, pv, ptes, FALSE);
1864 }
1865 }
1866 sva += NBPG;
1867 pte++;
1868 }
1869
1870 pmap_unmap_ptes(pmap);
1871 /*
1872 * Now, if we've fallen through down to here, chances are that there
1873 * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
1874 */
1875 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1876 u_int cnt;
1877
1878 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1879 if (pmap_active) {
1880 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1881 *cleanlist[cnt].pte = 0;
1882 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1883 } else
1884 *cleanlist[cnt].pte = 0;
1885 pmap_pte_delref(pmap, cleanlist[cnt].va);
1886 }
1887 }
1888 }
1889
1890 /*
1891 * Routine: pmap_remove_all
1892 * Function:
1893 * Removes this physical page from
1894 * all physical maps in which it resides.
1895 * Reflects back modify bits to the pager.
1896 */
1897
1898 void
1899 pmap_remove_all(pa)
1900 paddr_t pa;
1901 {
1902 struct pv_entry *ph, *pv, *npv;
1903 pmap_t pmap;
1904 pt_entry_t *pte, *ptes;
1905 int s;
1906
1907 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1908
1909 pv = ph = pmap_find_pv(pa);
1910 pmap_clean_page(pv);
1911
1912 s = splvm();
1913
1914 if (ph->pv_pmap == NULL) {
1915 PDEBUG(0, printf("free page\n"));
1916 splx(s);
1917 return;
1918 }
1919
1922 while (pv) {
1923 pmap = pv->pv_pmap;
1924 ptes = pmap_map_ptes(pmap);
1925 pte = &ptes[arm_byte_to_page(pv->pv_va)];
1926
1927 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1928 pv->pv_va, pv->pv_flags));
1929 #ifdef DEBUG
1930 if (!pmap_pde_v(pmap_pde(pmap, pv->pv_va)) || !pmap_pte_v(pte)
1931 || pmap_pte_pa(pte) != pa)
1932 panic("pmap_remove_all: bad mapping");
1933 #endif /* DEBUG */
1934
1935 /*
1936 * Update statistics
1937 */
1938 --pmap->pm_stats.resident_count;
1939
1940 /* Wired bit */
1941 if (pv->pv_flags & PT_W)
1942 --pmap->pm_stats.wired_count;
1943
1944 /*
1945 * Invalidate the PTEs.
1946 * XXX: should cluster them up and invalidate as many
1947 * as possible at once.
1948 */
1949
1950 #ifdef needednotdone
1951 reduce wiring count on page table pages as references drop
1952 #endif
1953
1954 *pte = 0;
1955 pmap_pte_delref(pmap, pv->pv_va);
1956
1957 npv = pv->pv_next;
1958 if (pv == ph)
1959 ph->pv_pmap = NULL;
1960 else
1961 pmap_free_pv(pv);
1962 pv = npv;
1963 pmap_unmap_ptes(pmap);
1964 }
1965
1966 splx(s);
1967
1968 PDEBUG(0, printf("done\n"));
1969 cpu_tlb_flushID();
1970 }
1971
1972
1973 /*
1974 * Set the physical protection on the specified range of this map as requested.
1975 */
1976
1977 void
1978 pmap_protect(pmap, sva, eva, prot)
1979 pmap_t pmap;
1980 vaddr_t sva;
1981 vaddr_t eva;
1982 vm_prot_t prot;
1983 {
1984 pt_entry_t *pte = NULL, *ptes;
1985 int armprot;
1986 int flush = 0;
1987 paddr_t pa;
1988 int bank, off;
1989 struct pv_entry *pv;
1990
1991 /*
1992 * Make sure pmap is valid. -dct
1993 */
1994 if (pmap == NULL)
1995 return;
1996 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
1997 pmap, sva, eva, prot));
1998
1999 if (~prot & VM_PROT_READ) {
2000 /* Just remove the mappings. */
2001 pmap_remove(pmap, sva, eva);
2002 return;
2003 }
2004 if (prot & VM_PROT_WRITE) {
2005 /*
2006 * If this is a read->write transition, just ignore it and let
2007 * uvm_fault() take care of it later.
2008 */
2009 return;
2010 }
2011
2012 sva &= PG_FRAME;
2013 eva &= PG_FRAME;
2014
2015 ptes = pmap_map_ptes(pmap);
2016 /*
2017 * We need to acquire a pointer to a page table page before entering
2018 * the following loop.
2019 */
2020 while (sva < eva) {
2021 if (pmap_pde_v(pmap_pde(pmap, sva)))
2022 break;
2023 sva = (sva & PD_MASK) + NBPD;
2024 }
2025
2026 pte = &ptes[arm_byte_to_page(sva)];
2027
2028 while (sva < eva) {
2029 /* only check once in a while */
2030 if ((sva & PT_MASK) == 0) {
2031 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
2032 /* We can race ahead here, to the next pde. */
2033 sva += NBPD;
2034 pte += arm_byte_to_page(NBPD);
2035 continue;
2036 }
2037 }
2038
2039 if (!pmap_pte_v(pte))
2040 goto next;
2041
2042 flush = 1;
2043
2044 armprot = 0;
2045 if (sva < VM_MAXUSER_ADDRESS)
2046 armprot |= PT_AP(AP_U);
2047 else if (sva < VM_MAX_ADDRESS)
2048 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
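		/*
		 * Bits 4-11 of a small-page descriptor hold the four
		 * subpage AP fields; keep everything else and splice
		 * in the new access permission.
		 */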
2049 *pte = (*pte & 0xfffff00f) | armprot;
2050
2051 pa = pmap_pte_pa(pte);
2052
2053 /* Get the physical page index */
2054
2055 /* Clear write flag */
2056 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2057 pv = &vm_physmem[bank].pmseg.pvent[off];
2058 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
2059 pmap_vac_me_harder(pmap, pv, ptes, FALSE);
2060 }
2061
2062 next:
2063 sva += NBPG;
2064 pte++;
2065 }
2066 pmap_unmap_ptes(pmap);
2067 if (flush)
2068 cpu_tlb_flushID();
2069 }
2070
2071 /*
2072 * void pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2073 * int flags)
2074 *
2075 * Insert the given physical page (p) at
2076 * the specified virtual address (v) in the
2077 * target physical map with the protection requested.
2078 *
2079 * If specified, the page will be wired down, meaning
2080 * that the related pte can not be reclaimed.
2081 *
2082 * NB: This is the only routine which MAY NOT lazy-evaluate
2083 * or lose information. That is, this routine must actually
2084 * insert this page into the given map NOW.
2085 */
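/*
 * Illustrative sketch of a typical call (the names and values are
 * hypothetical, not taken from any caller in this file): a fault
 * handler establishing a writable, unwired user mapping might do
 *
 *	pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_WRITE);
 *
 * while a wired kernel mapping would also pass PMAP_WIRED in `flags'.
 * The VM_PROT_* bits in `flags' are the access type used below to
 * preset the referenced/modified attributes.
 */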
2086
2087 int
2088 pmap_enter(pmap, va, pa, prot, flags)
2089 pmap_t pmap;
2090 vaddr_t va;
2091 paddr_t pa;
2092 vm_prot_t prot;
2093 int flags;
2094 {
2095 pt_entry_t *pte, *ptes;
2096 u_int npte;
2097 int bank, off;
2098 struct pv_entry *pv = NULL;
2099 paddr_t opa;
2100 int nflags;
2101 boolean_t wired = (flags & PMAP_WIRED) != 0;
2102
2103 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2104 va, pa, pmap, prot, wired));
2105
2106 #ifdef DIAGNOSTIC
2107 /* Valid address ? */
2108 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
2109 panic("pmap_enter: too big");
2110 if (pmap != pmap_kernel() && va != 0) {
2111 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2112 panic("pmap_enter: kernel page in user map");
2113 } else {
2114 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2115 panic("pmap_enter: user page in kernel map");
2116 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2117 panic("pmap_enter: entering PT page");
2118 }
2119 #endif
2120
2121 /*
2122 * Get a pointer to the pte for this virtual address. If the
2123 * pte pointer is NULL then we are missing the L2 page table
2124 * so we need to create one.
2125 */
2126 pte = pmap_pte(pmap, va);
2127 if (!pte) {
2128 paddr_t l2pa;
2129 struct vm_page *m;
2130
2131 /* Allocate a page table */
2132 for (;;) {
2133 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2134 if (m != NULL)
2135 break;
2136
2137 /*
2138 * No page available. If we're the kernel
2139 * pmap, we die, since we might not have
2140 * a valid thread context. For user pmaps,
2141 * we assume that we _do_ have a valid thread
2142 * context, so we wait here for the pagedaemon
2143 * to free up some pages.
2144 *
2145 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2146 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2147 * XXX SO THIS IS PROBABLY SAFE. In any case,
2148 * XXX other pmap modules claim it is safe to
2149 * XXX sleep here if it's a user pmap.
2150 */
2151 if (pmap == pmap_kernel())
2152 panic("pmap_enter: no free pages");
2153 else
2154 uvm_wait("pmap_enter");
2155 }
2156
2157 /* Wire this page table into the L1. */
2158 l2pa = VM_PAGE_TO_PHYS(m);
2159 pmap_zero_page(l2pa);
2160 pmap_map_in_l1(pmap, va, l2pa);
2161 ++pmap->pm_stats.resident_count;
2162
2163 pte = pmap_pte(pmap, va);
2164 #ifdef DIAGNOSTIC
2165 if (!pte)
2166 panic("pmap_enter: no pte");
2167 #endif
2168 }
2169
2170 nflags = 0;
2171 if (prot & VM_PROT_WRITE)
2172 nflags |= PT_Wr;
2173 if (wired)
2174 nflags |= PT_W;
2175
2176 /* More debugging info */
2177 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2178 *pte));
2179
2180 /* Is the pte valid ? If so then this page is already mapped */
2181 if (pmap_pte_v(pte)) {
2182 /* Get the physical address of the current page mapped */
2183 opa = pmap_pte_pa(pte);
2184
2185 #ifdef MYCROFT_HACK
2186 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2187 #endif
2188
2189 /* Are we mapping the same page ? */
2190 if (opa == pa) {
2191 /* All we must be doing is changing the protection */
2192 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2193 va, pa));
2194
2195 /* Has the wiring changed ? */
2196 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2197 pv = &vm_physmem[bank].pmseg.pvent[off];
2198 (void) pmap_modify_pv(pmap, va, pv,
2199 PT_Wr | PT_W, nflags);
2200 }
2201 } else {
2202 /* We are replacing the page with a new one. */
2203 cpu_cache_purgeID_rng(va, NBPG);
2204
2205 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2206 va, pa, opa));
2207
2208 /*
2209 * If it is part of our managed memory then we
2210 * must remove it from the PV list
2211 */
2212 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2213 pv = &vm_physmem[bank].pmseg.pvent[off];
2214 pmap_remove_pv(pmap, va, pv);
2215 }
2216
2217 goto enter;
2218 }
2219 } else {
2220 opa = 0;
2221 pmap_pte_addref(pmap, va);
2222
2223 /* pte is not valid so we must be hooking in a new page */
2224 ++pmap->pm_stats.resident_count;
2225
2226 enter:
2227 /*
2228 * Enter on the PV list if part of our managed memory
2229 */
2230 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2231 pv = &vm_physmem[bank].pmseg.pvent[off];
2232 pmap_enter_pv(pmap, va, pv, nflags);
2233 }
2234 }
2235
2236 #ifdef MYCROFT_HACK
2237 if (mycroft_hack)
2238 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2239 #endif
2240
2241 /* Construct the pte, giving the correct access. */
2242 npte = (pa & PG_FRAME);
2243
2244 /* VA 0 is magic. */
2245 if (pmap != pmap_kernel() && va != 0)
2246 npte |= PT_AP(AP_U);
2247
2248 if (bank != -1) {
2249 #ifdef DIAGNOSTIC
2250 if ((flags & VM_PROT_ALL) & ~prot)
2251 panic("pmap_enter: access_type exceeds prot");
2252 #endif
2253 npte |= PT_C | PT_B;
2254 if (flags & VM_PROT_WRITE) {
2255 npte |= L2_SPAGE | PT_AP(AP_W);
2256 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2257 } else if (flags & VM_PROT_ALL) {
2258 npte |= L2_SPAGE;
2259 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2260 } else
2261 npte |= L2_INVAL;
2262 } else {
2263 if (prot & VM_PROT_WRITE)
2264 npte |= L2_SPAGE | PT_AP(AP_W);
2265 else if (prot & VM_PROT_ALL)
2266 npte |= L2_SPAGE;
2267 else
2268 npte |= L2_INVAL;
2269 }
2270
2271 #ifdef MYCROFT_HACK
2272 if (mycroft_hack)
2273 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2274 #endif
2275
2276 *pte = npte;
2277
2278 	if (bank != -1) {
2280 boolean_t pmap_active = FALSE;
2281 		/* XXX this will change once the whole of pmap_enter
2282 		 * uses pmap_map_ptes()
2283 		 */
2284 ptes = pmap_map_ptes(pmap);
2285 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
2286 || (pmap == kernel_pmap))
2287 pmap_active = TRUE;
2288 pmap_vac_me_harder(pmap, pv, ptes, pmap_active);
2289 pmap_unmap_ptes(pmap);
2290 }
2291
2292 /* Better flush the TLB ... */
2293 cpu_tlb_flushID_SE(va);
2294
2295 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2296
2297 return 0;
2298 }
2299
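/*
 * pmap_kenter_pa: enter an unmanaged, wired mapping into the kernel
 * pmap.  No pv entry is created for the page, so the mapping is
 * expected to be removed with pmap_kremove() rather than pmap_remove().
 */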
2300 void
2301 pmap_kenter_pa(va, pa, prot)
2302 vaddr_t va;
2303 paddr_t pa;
2304 vm_prot_t prot;
2305 {
2306 pt_entry_t *pte;
2307
2308 #ifdef DIAGNOSTIC
2309 int bank, off;
2310
2311 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2312 struct pv_entry *pv;
2313
2314 pv = &vm_physmem[bank].pmseg.pvent[off];
2315 if (pv->pv_pmap != NULL)
2316 panic("pmap_kenter_pa: %08lx multiply mapped\n", pa);
2317 }
2318 #endif
2319
2320 if (!pmap_pde_v(pmap_pde(pmap_kernel(), va))) {
2321 /*
2322 		 * For the kernel pmap it would be better to ensure
2323 		 * that its page tables are always present, and to
2324 		 * grow the kernel as required.
2325 */
2326 		paddr_t l2pa;
2327 struct vm_page *m;
2328
2329 /* Allocate a page table */
2330 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2331 if (m == NULL) {
2332 /*
2333 * No page available. We're the kernel
2334 * pmap, so die.
2335 */
2336 panic("pmap_kenter_pa: no free pages");
2337 }
2338
2339 /* Wire this page table into the L1. */
2340 l2pa = VM_PAGE_TO_PHYS(m);
2341 pmap_zero_page(l2pa);
2342 pmap_map_in_l1(pmap_kernel(), va, l2pa);
2343 ++(pmap_kernel())->pm_stats.resident_count;
2344 }
2345 pte = vtopte(va);
2346
2347 	if (pmap_pte_v(pte)) {
2349 cpu_tlb_flushID_SE(va);
2350 cpu_cache_purgeID_rng(va, PAGE_SIZE);
2351 }
2352 *pte = L2_PTE(pa, AP_KRW);
2353 /* pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED); */
2354 }
2355
2356 void
2357 pmap_kremove(va, len)
2358 vaddr_t va;
2359 vsize_t len;
2360 {
2361 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2362 pt_entry_t *pte;
2363 #ifdef DIAGNOSTIC
2364 int bank, off;
2365 paddr_t pa;
2366
2367 if (!pmap_pde_v(pmap_pde(pmap_kernel(), va)))
2368 panic("pmap_kremove: no pde\n");
2369 #endif
2370 pte = vtopte(va);
2371
2372 #ifdef DIAGNOSTIC
2373 pa = pmap_pte_pa(pte);
2374 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2375 struct pv_entry *pv;
2376
2377 pv = &vm_physmem[bank].pmseg.pvent[off];
2378 if (pv->pv_pmap != NULL)
2379 panic("pmap_kremove: %08lx multiply mapped\n",
2380 pa);
2381 }
2382 #endif
2383
2384 /* We assume that we will only be called with small
2385 regions of memory. */
2386 cpu_cache_purgeID_rng(va, PAGE_SIZE);
2387 *pte = 0;
2388 cpu_tlb_flushID_SE(va);
2389 /* pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); */
2390 }
2391 }
2392
2393 /*
2394 * pmap_page_protect:
2395 *
2396 * Lower the permission for all mappings to a given page.
2397 */
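/*
 * For example (hypothetical calls), pmap_page_protect(pg, VM_PROT_READ)
 * write-protects every mapping of pg via pmap_copy_on_write(), while
 * pmap_page_protect(pg, VM_PROT_NONE) falls through to the default case
 * and removes the mappings altogether.
 */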
2398
2399 void
2400 pmap_page_protect(pg, prot)
2401 struct vm_page *pg;
2402 vm_prot_t prot;
2403 {
2404 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2405
2406 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2407
2408 switch(prot) {
2409 case VM_PROT_READ:
2410 case VM_PROT_READ|VM_PROT_EXECUTE:
2411 pmap_copy_on_write(pa);
2412 break;
2413
2414 case VM_PROT_ALL:
2415 break;
2416
2417 default:
2418 pmap_remove_all(pa);
2419 break;
2420 }
2421 }
2422
2423
2424 /*
2425 * Routine: pmap_unwire
2426 * Function: Clear the wired attribute for a map/virtual-address
2427 * pair.
2428 * In/out conditions:
2429 * The mapping must already exist in the pmap.
2430 */
2431
2432 void
2433 pmap_unwire(pmap, va)
2434 pmap_t pmap;
2435 vaddr_t va;
2436 {
2437 pt_entry_t *pte;
2438 paddr_t pa;
2439 int bank, off;
2440 struct pv_entry *pv;
2441
2442 /*
2443 * Make sure pmap is valid. -dct
2444 */
2445 if (pmap == NULL)
2446 return;
2447
2448 /* Get the pte */
2449 pte = pmap_pte(pmap, va);
2450 if (!pte)
2451 return;
2452
2453 /* Extract the physical address of the page */
2454 pa = pmap_pte_pa(pte);
2455
2456 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2457 return;
2458 pv = &vm_physmem[bank].pmseg.pvent[off];
2459 /* Update the wired bit in the pv entry for this page. */
2460 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2461 }
2462
2463 /*
2464 * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
2465 *
2466 * Return the pointer to a page table entry corresponding to the supplied
2467 * virtual address.
2468 *
2469 * The page directory is first checked to make sure that a page table
2470 * for the address in question exists and if it does a pointer to the
2471 * entry is returned.
2472 *
2473  * The way this works is that the kernel page tables are mapped
2474 * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
2475 * This allows page tables to be located quickly.
2476 */
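/*
 * Worked example (assuming the usual 4KB pages, i.e. PGSHIFT == 12):
 * for va = 0x00123456 the offset into the 4MB PTE window is
 * (va >> (PGSHIFT - 2)) & ~3 == 0x48c, which is entry 0x123 of the
 * linear PTE array -- exactly va's virtual page number.
 */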
2477 pt_entry_t *
2478 pmap_pte(pmap, va)
2479 pmap_t pmap;
2480 vaddr_t va;
2481 {
2482 pt_entry_t *ptp;
2483 pt_entry_t *result;
2484
2485 /* The pmap must be valid */
2486 if (!pmap)
2487 return(NULL);
2488
2489 /* Return the address of the pte */
2490 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2491 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2492
2493 /* Do we have a valid pde ? If not we don't have a page table */
2494 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2495 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2496 pmap_pde(pmap, va)));
2497 return(NULL);
2498 }
2499
2500 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2501 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2502 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2503 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2504
2505 /*
2506 * If the pmap is the kernel pmap or the pmap is the active one
2507 * then we can just return a pointer to entry relative to
2508 * PROCESS_PAGE_TBLS_BASE.
2509 * Otherwise we need to map the page tables to an alternative
2510 * address and reference them there.
2511 */
2512 if (pmap == kernel_pmap || pmap->pm_pptpt
2513 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2514 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2515 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2516 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2517 } else {
2518 struct proc *p = curproc;
2519
2520 /* If we don't have a valid curproc use proc0 */
2521 /* Perhaps we should just use kernel_pmap instead */
2522 if (p == NULL)
2523 p = &proc0;
2524 #ifdef DIAGNOSTIC
2525 /*
2526 		 * The pmap should always be valid for the process, so
2527 		 * drop into the debugger if it is not.
2528 */
2529 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2530 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2531 va, p, p->p_vmspace);
2532 console_debugger();
2533 }
2534 /*
2535 * The pmap for the current process should be mapped. If it
2536 * is not then we have a problem.
2537 */
2538 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2539 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2540 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2541 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2542 printf("pmap pagetable = P%08lx current = P%08x ",
2543 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2544 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2545 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2546 PG_FRAME));
2547 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2548 panic("pmap_pte: current and pmap mismatch\n");
2549 }
2550 #endif
2551
2552 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2553 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2554 pmap->pm_pptpt);
2555 cpu_tlb_flushD();
2556 }
2557 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2558 ((va >> (PGSHIFT-2)) & ~3)));
2559 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2560 return(result);
2561 }
2562
2563 /*
2564 * Routine: pmap_extract
2565 * Function:
2566 * Extract the physical page address associated
2567 * with the given map/virtual_address pair.
2568 */
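/*
 * Typical use (illustrative sketch; the caller and panic message are
 * hypothetical):
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("unmapped va");
 *
 * On success *pap holds the physical address, including the offset
 * within the (small or 64KB large) page.
 */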
2569 boolean_t
2570 pmap_extract(pmap, va, pap)
2571 pmap_t pmap;
2572 vaddr_t va;
2573 paddr_t *pap;
2574 {
2575 pt_entry_t *pte, *ptes;
2576 paddr_t pa;
2577
2578 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2579
2580 /*
2581 * Get the pte for this virtual address.
2582 */
2583 ptes = pmap_map_ptes(pmap);
2584 pte = &ptes[arm_byte_to_page(va)];
2585
2586 /*
2587 	 * If there is no pte then there is no page table, etc.
2588 	 * Is the pte valid? If not then no page is actually mapped here.
2589 */
2590 if (!pmap_pde_v(pmap_pde(pmap, va)) || !pmap_pte_v(pte)){
2591 pmap_unmap_ptes(pmap);
2592 return (FALSE);
2593 }
2594
2595 /* Return the physical address depending on the PTE type */
2596 /* XXX What about L1 section mappings ? */
2597 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2598 /* Extract the physical address from the pte */
2599 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2600
2601 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2602 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2603
2604 if (pap != NULL)
2605 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2606 } else {
2607 /* Extract the physical address from the pte */
2608 pa = pmap_pte_pa(pte);
2609
2610 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2611 (pa | (va & ~PG_FRAME))));
2612
2613 if (pap != NULL)
2614 *pap = pa | (va & ~PG_FRAME);
2615 }
2616 pmap_unmap_ptes(pmap);
2617 return (TRUE);
2618 }
2619
2620
2621 /*
2622 * Copy the range specified by src_addr/len from the source map to the
2623 * range dst_addr/len in the destination map.
2624 *
2625 * This routine is only advisory and need not do anything.
2626 */
2627
2628 void
2629 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2630 pmap_t dst_pmap;
2631 pmap_t src_pmap;
2632 vaddr_t dst_addr;
2633 vsize_t len;
2634 vaddr_t src_addr;
2635 {
2636 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2637 dst_pmap, src_pmap, dst_addr, len, src_addr));
2638 }
2639
2640 #if defined(PMAP_DEBUG)
2641 void
2642 pmap_dump_pvlist(phys, m)
2643 	paddr_t phys;
2644 char *m;
2645 {
2646 struct pv_entry *pv;
2647 int bank, off;
2648
2649 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2650 printf("INVALID PA\n");
2651 return;
2652 }
2653 pv = &vm_physmem[bank].pmseg.pvent[off];
2654 printf("%s %08lx:", m, phys);
2655 if (pv->pv_pmap == NULL) {
2656 printf(" no mappings\n");
2657 return;
2658 }
2659
2660 for (; pv; pv = pv->pv_next)
2661 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2662 pv->pv_va, pv->pv_flags);
2663
2664 printf("\n");
2665 }
2666
2667 #endif /* PMAP_DEBUG */
2668
2669 boolean_t
2670 pmap_testbit(pa, setbits)
2671 paddr_t pa;
2672 int setbits;
2673 {
2674 int bank, off;
2675
2676 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2677
2678 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2679 return(FALSE);
2680
2681 /*
2682 * Check saved info only
2683 */
2684 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2685 PDEBUG(0, printf("pmap_attributes = %02x\n",
2686 vm_physmem[bank].pmseg.attrs[off]));
2687 return(TRUE);
2688 }
2689
2690 return(FALSE);
2691 }
2692
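/*
 * pmap_map_ptes: map a pmap's page tables into the current address
 * space and return the base of the resulting linear PTE array.  For the
 * kernel pmap or the currently active pmap this is simply
 * PROCESS_PAGE_TBLS_BASE; otherwise the tables are temporarily hooked
 * into the L1 at ALT_PAGE_TBLS_BASE.  Callers pair this with
 * pmap_unmap_ptes().
 */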
2693 static pt_entry_t *
2694 pmap_map_ptes(struct pmap *pmap)
2695 {
2696 struct proc *p;
2697
2698 /* the kernel's pmap is always accessible */
2699 if (pmap == pmap_kernel()) {
2700 		return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2701 }
2702
2703 if (curproc &&
2704 curproc->p_vmspace->vm_map.pmap == pmap)
2705 return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2706
2707 p = curproc;
2708
2709 if (p == NULL)
2710 p = &proc0;
2711
2712 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2713 pmap->pm_pptpt);
2714 cpu_tlb_flushD();
2715 return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2716 }
2717
2718 /*
2719 * Modify pte bits for all ptes corresponding to the given physical address.
2720 * We use `maskbits' rather than `clearbits' because we're always passing
2721 * constants and the latter would require an extra inversion at run-time.
2722 */
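/*
 * The callers below always pass a single constant mask, e.g.
 *
 *	pmap_clearbit(pa, PT_M);	(pmap_clear_modify)
 *	pmap_clearbit(pa, PT_H);	(pmap_clear_reference)
 *	pmap_clearbit(pa, PT_Wr);	(pmap_copy_on_write)
 */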
2723
2724 void
2725 pmap_clearbit(pa, maskbits)
2726 paddr_t pa;
2727 int maskbits;
2728 {
2729 struct pv_entry *pv;
2730 pt_entry_t *pte;
2731 vaddr_t va;
2732 int bank, off;
2733 int s;
2734
2735 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2736 pa, maskbits));
2737 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2738 return;
2739 pv = &vm_physmem[bank].pmseg.pvent[off];
2740 s = splvm();
2741
2742 /*
2743 * Clear saved attributes (modify, reference)
2744 */
2745 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2746
2747 if (pv->pv_pmap == NULL) {
2748 splx(s);
2749 return;
2750 }
2751
2752 /*
2753 	 * Loop over all current mappings, setting/clearing as appropriate.
2754 */
2755 for (; pv; pv = pv->pv_next) {
2756 va = pv->pv_va;
2757
2758 /*
2759 * XXX don't write protect pager mappings
2760 */
2761 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2762 			printf("pmap_clearbit: found pager VA on pv_list\n");
2763 continue;
2764 }
2765
2766 pv->pv_flags &= ~maskbits;
2767 pte = pmap_pte(pv->pv_pmap, va);
2768 if (maskbits & (PT_Wr|PT_M))
2769 *pte = *pte & ~PT_AP(AP_W);
2770 if (maskbits & PT_H)
2771 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2772 }
2773 cpu_tlb_flushID();
2774
2775 splx(s);
2776 }
2777
2778
2779 boolean_t
2780 pmap_clear_modify(pg)
2781 struct vm_page *pg;
2782 {
2783 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2784 boolean_t rv;
2785
2786 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2787 rv = pmap_testbit(pa, PT_M);
2788 pmap_clearbit(pa, PT_M);
2789 return rv;
2790 }
2791
2792
2793 boolean_t
2794 pmap_clear_reference(pg)
2795 struct vm_page *pg;
2796 {
2797 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2798 boolean_t rv;
2799
2800 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2801 rv = pmap_testbit(pa, PT_H);
2802 pmap_clearbit(pa, PT_H);
2803 return rv;
2804 }
2805
2806
2807 void
2808 pmap_copy_on_write(pa)
2809 paddr_t pa;
2810 {
2811 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2812 pmap_clearbit(pa, PT_Wr);
2813 }
2814
2815
2816 boolean_t
2817 pmap_is_modified(pg)
2818 struct vm_page *pg;
2819 {
2820 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2821 boolean_t result;
2822
2823 result = pmap_testbit(pa, PT_M);
2824 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2825 return (result);
2826 }
2827
2828
2829 boolean_t
2830 pmap_is_referenced(pg)
2831 struct vm_page *pg;
2832 {
2833 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2834 boolean_t result;
2835
2836 result = pmap_testbit(pa, PT_H);
2837 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2838 return (result);
2839 }
2840
2841
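/*
 * pmap_modified_emulation: the ARM MMU has no hardware modified bit,
 * so pmap_enter() only sets AP_W for a managed page once a write
 * access has actually been seen.  A write to a page that is writable
 * in its pv flags but not yet in its PTE faults into here; we latch
 * PT_H/PT_M in the attribute array, upgrade the PTE to a writable
 * small page and report the fault as handled.
 */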
2842 int
2843 pmap_modified_emulation(pmap, va)
2844 pmap_t pmap;
2845 vaddr_t va;
2846 {
2847 pt_entry_t *pte;
2848 paddr_t pa;
2849 int bank, off;
2850 struct pv_entry *pv;
2851 u_int flags;
2852
2853 PDEBUG(2, printf("pmap_modified_emulation\n"));
2854
2855 /* Get the pte */
2856 pte = pmap_pte(pmap, va);
2857 if (!pte) {
2858 PDEBUG(2, printf("no pte\n"));
2859 return(0);
2860 }
2861
2862 PDEBUG(1, printf("*pte=%08x\n", *pte));
2863
2864 /* Check for a zero pte */
2865 if (*pte == 0)
2866 return(0);
2867
2868 /* This can happen if user code tries to access kernel memory. */
2869 if ((*pte & PT_AP(AP_W)) != 0)
2870 return (0);
2871
2872 /* Extract the physical address of the page */
2873 pa = pmap_pte_pa(pte);
2874 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2875 return(0);
2876
2877 /* Get the current flags for this page. */
2878 pv = &vm_physmem[bank].pmseg.pvent[off];
2879 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2880 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2881
2882 /*
2883 	 * Do the flags say this page is writable? If not then it is a
2884 	 * genuine write fault. If yes then the write fault is our fault,
2885 	 * as we did not reflect the write access in the PTE. Now that we
2886 	 * know a write has occurred, we can correct this and also set
2887 	 * the modified bit.
2888 */
2889 if (~flags & PT_Wr)
2890 return(0);
2891
2892 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2893 va, pte, *pte));
2894 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2895 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2896 PDEBUG(0, printf("->(%08x)\n", *pte));
2897
2898 /* Return, indicating the problem has been dealt with */
2899 cpu_tlb_flushID_SE(va);
2900 return(1);
2901 }
2902
2903
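/*
 * pmap_handled_emulation: the referenced ("handled") bit is emulated
 * the same way: pmap_enter() uses an L2_INVAL descriptor until the
 * page is first accessed, and the resulting fault ends up here, where
 * we record PT_H and switch the descriptor to a real small page.
 */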
2904 int
2905 pmap_handled_emulation(pmap, va)
2906 pmap_t pmap;
2907 vaddr_t va;
2908 {
2909 pt_entry_t *pte;
2910 paddr_t pa;
2911 int bank, off;
2912
2913 PDEBUG(2, printf("pmap_handled_emulation\n"));
2914
2915 /* Get the pte */
2916 pte = pmap_pte(pmap, va);
2917 if (!pte) {
2918 PDEBUG(2, printf("no pte\n"));
2919 return(0);
2920 }
2921
2922 PDEBUG(1, printf("*pte=%08x\n", *pte));
2923
2924 /* Check for a zero pte */
2925 if (*pte == 0)
2926 return(0);
2927
2928 /* This can happen if user code tries to access kernel memory. */
2929 if ((*pte & L2_MASK) != L2_INVAL)
2930 return (0);
2931
2932 /* Extract the physical address of the page */
2933 pa = pmap_pte_pa(pte);
2934 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2935 return(0);
2936
2937 /*
2938 	 * Ok, we just enable the pte and mark the attributes as handled.
2939 */
2940 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2941 va, pte, *pte));
2942 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2943 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2944 PDEBUG(0, printf("->(%08x)\n", *pte));
2945
2946 /* Return, indicating the problem has been dealt with */
2947 cpu_tlb_flushID_SE(va);
2948 return(1);
2949 }
2950
2951 /*
2952 * pmap_collect: free resources held by a pmap
2953 *
2954 * => optional function.
2955 * => called when a process is swapped out to free memory.
2956 */
2957
2958 void
2959 pmap_collect(pmap)
2960 pmap_t pmap;
2961 {
2962 }
2963
2964 /*
2965 * Routine: pmap_procwr
2966 *
2967 * Function:
2968 * Synchronize caches corresponding to [addr, addr+len) in p.
2969 *
2970 */
2971 void
2972 pmap_procwr(p, va, len)
2973 struct proc *p;
2974 vaddr_t va;
2975 int len;
2976 {
2977 /* We only need to do anything if it is the current process. */
2978 if (p == curproc)
2979 cpu_cache_syncI_rng(va, len);
2980 }
2981
2982 /* End of pmap.c */
2983