1 /*	$NetBSD: pmap.c,v 1.9 2001/05/26 21:27:04 chs Exp $	*/
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1994-1998 Mark Brinicombe.
41 * Copyright (c) 1994 Brini.
42 * All rights reserved.
43 *
44 * This code is derived from software written for Brini by Mark Brinicombe
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Mark Brinicombe.
57 * 4. The name of the author may not be used to endorse or promote products
58 * derived from this software without specific prior written permission.
59 *
60 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
61 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
62 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
63 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
64 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
65 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
66 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
67 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
68 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
69 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
70 * RiscBSD kernel project
71 *
72 * pmap.c
73 *
74 * Machine dependent vm stuff
75 *
76 * Created : 20/09/94
77 */
78
79 /*
80 * Performance improvements, UVM changes, overhauls and part-rewrites
81 * were contributed by Neil A. Carson <neil (at) causality.com>.
82 */
83
84 /*
85 * The dram block info is currently referenced from the bootconfig.
86 * This should be placed in a separate structure.
87 */
88
89 /*
90 * Special compilation symbols
91 * PMAP_DEBUG - Build in pmap_debug_level code
92 */
93
94 /* Include header files */
95
96 #include "opt_pmap_debug.h"
97 #include "opt_ddb.h"
98
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/kernel.h>
102 #include <sys/systm.h>
103 #include <sys/proc.h>
104 #include <sys/malloc.h>
105 #include <sys/user.h>
106
107 #include <uvm/uvm.h>
108
109 #include <machine/bootconfig.h>
110 #include <machine/bus.h>
111 #include <machine/pmap.h>
112 #include <machine/pcb.h>
113 #include <machine/param.h>
114 #include <machine/katelib.h>
115
116 #ifdef PMAP_DEBUG
117 #define PDEBUG(_lev_,_stat_) \
118 if (pmap_debug_level >= (_lev_)) \
119 ((_stat_))
120 int pmap_debug_level = -2;
121 #else /* PMAP_DEBUG */
122 #define PDEBUG(_lev_,_stat_) /* Nothing */
123 #endif /* PMAP_DEBUG */
124
125 struct pmap kernel_pmap_store;
126 pmap_t kernel_pmap;
127
128 pagehook_t page_hook0;
129 pagehook_t page_hook1;
130 char *memhook;
131 pt_entry_t msgbufpte;
132 extern caddr_t msgbufaddr;
133
134 #ifdef DIAGNOSTIC
135 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
136 #endif
137
138 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
139
140 int pv_nfree = 0;
141
142 vsize_t npages;
143
144 extern paddr_t physical_start;
145 extern paddr_t physical_freestart;
146 extern paddr_t physical_end;
147 extern paddr_t physical_freeend;
148 extern unsigned int free_pages;
149 extern int max_processes;
150
151 vaddr_t virtual_start;
152 vaddr_t virtual_end;
153
154 vaddr_t avail_start;
155 vaddr_t avail_end;
156
157 extern pv_addr_t systempage;
158
159 #define ALLOC_PAGE_HOOK(x, s) \
160 x.va = virtual_start; \
161 x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
162 virtual_start += s;
163
164 /* Variables used by the L1 page table queue code */
165 SIMPLEQ_HEAD(l1pt_queue, l1pt);
166 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
167 int l1pt_static_queue_count; /* items in the static l1 queue */
168 int l1pt_static_create_count; /* static l1 items created */
169 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
170 int l1pt_queue_count; /* items in the l1 queue */
171 int l1pt_create_count; /* stat - L1's create count */
172 int l1pt_reuse_count; /* stat - L1's reused count */
173
174 /* Local function prototypes (not used outside this file) */
175 pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
176 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
177 paddr_t pa, unsigned int flags));
178 void pmap_copy_on_write __P((paddr_t pa));
179 void pmap_pinit __P((pmap_t));
180 void pmap_freepagedir __P((pmap_t));
181 void pmap_release __P((pmap_t));
182
183 /* Other function prototypes */
184 extern void bzero_page __P((vaddr_t));
185 extern void bcopy_page __P((vaddr_t, vaddr_t));
186
187 struct l1pt *pmap_alloc_l1pt __P((void));
188 static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
189 vaddr_t l2pa));
190
191 #ifdef MYCROFT_HACK
192 int mycroft_hack = 0;
193 #endif
194
195 /* Function to set the debug level of the pmap code */
196
197 #ifdef PMAP_DEBUG
198 void
199 pmap_debug(level)
200 int level;
201 {
202 pmap_debug_level = level;
203 printf("pmap_debug: level=%d\n", pmap_debug_level);
204 }
205 #endif /* PMAP_DEBUG */
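/*
 * pmap_debug() has no callers within the pmap itself; it is presumably
 * intended to be invoked from the kernel debugger (e.g. something like
 * "call pmap_debug(5)" in DDB) or from machine-dependent startup code
 * to raise or lower the PDEBUG verbosity at run time.  (Illustrative
 * note only; the exact invocation depends on the debugger in use.)
 */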
206
207 #include "isadma.h"
208
209 #if NISADMA > 0
210 /*
211 * Used to protect memory for ISA DMA bounce buffers. If, when loading
212 * pages into the system, memory intersects with any of these ranges,
213 * the intersecting memory will be loaded into a lower-priority free list.
214 */
215 bus_dma_segment_t *pmap_isa_dma_ranges;
216 int pmap_isa_dma_nranges;
217
218 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
219 paddr_t *, psize_t *));
220
221 /*
222 * Check if a memory range intersects with an ISA DMA range, and
223 * return the page-rounded intersection if it does. The intersection
224 * will be placed on a lower-priority free list.
225 */
226 boolean_t
227 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
228 paddr_t pa;
229 psize_t size;
230 paddr_t *pap;
231 psize_t *sizep;
232 {
233 bus_dma_segment_t *ds;
234 int i;
235
236 if (pmap_isa_dma_ranges == NULL)
237 return (FALSE);
238
239 for (i = 0, ds = pmap_isa_dma_ranges;
240 i < pmap_isa_dma_nranges; i++, ds++) {
241 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
242 /*
243 * Beginning of region intersects with this range.
244 */
245 *pap = trunc_page(pa);
246 *sizep = round_page(min(pa + size,
247 ds->ds_addr + ds->ds_len) - pa);
248 return (TRUE);
249 }
250 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
251 /*
252 * End of region intersects with this range.
253 */
254 *pap = trunc_page(ds->ds_addr);
255 *sizep = round_page(min((pa + size) - ds->ds_addr,
256 ds->ds_len));
257 return (TRUE);
258 }
259 }
260
261 /*
262 * No intersection found.
263 */
264 return (FALSE);
265 }
266 #endif /* NISADMA > 0 */
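/*
 * Worked example (illustrative figures only): assume a single ISA DMA
 * range of [0x00100000, 0x00200000) and a region being loaded with
 * pa = 0x001f8000, size = 0x20000.  The beginning of the region
 * intersects, so pmap_isa_dma_range_intersect() returns TRUE with
 *
 *	*pap   = trunc_page(0x001f8000) = 0x001f8000
 *	*sizep = round_page(min(0x00218000, 0x00200000) - 0x001f8000)
 *	       = 0x8000
 *
 * pmap_bootstrap() then places those 0x8000 bytes on the ISA DMA free
 * list and loads the remaining [0x00200000, 0x00218000) onto the
 * default free list.
 */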
267
268 /*
269 * Functions for manipulating pv_entry structures. These are used to keep a
270 * record of the mappings of virtual addresses and the associated physical
271 * pages.
272 */
273
274 /*
275 * Allocate a new pv_entry structure from the freelist. If the list is
276 * empty, allocate a new page and fill the freelist.
277 */
278 struct pv_entry *
279 pmap_alloc_pv()
280 {
281 struct pv_page *pvp;
282 struct pv_entry *pv;
283 int i;
284
285 /*
286 * Do we have any free pv_entry structures left ?
287 * If not, allocate a page of them.
288 */
289
290 if (pv_nfree == 0) {
291 /* NOTE: can't lock kernel_map here */
292 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
293 if (pvp == 0)
294 			panic("pmap_alloc_pv: malloc() failed");
295 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
296 for (i = NPVPPG - 2; i; i--, pv++)
297 pv->pv_next = pv + 1;
298 pv->pv_next = 0;
299 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
300 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
301 pv = &pvp->pvp_pv[0];
302 } else {
303 --pv_nfree;
304 pvp = pv_page_freelist.tqh_first;
305 if (--pvp->pvp_pgi.pgi_nfree == 0) {
306 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
307 }
308 pv = pvp->pvp_pgi.pgi_freelist;
309 #ifdef DIAGNOSTIC
310 if (pv == 0)
311 panic("pmap_alloc_pv: pgi_nfree inconsistent");
312 #endif /* DIAGNOSTIC */
313 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
314 }
315 return pv;
316 }
317
318 /*
319 * Release a pv_entry structure putting it back on the freelist.
320 */
321
322 void
323 pmap_free_pv(pv)
324 struct pv_entry *pv;
325 {
326 struct pv_page *pvp;
327
328 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
329 switch (++pvp->pvp_pgi.pgi_nfree) {
330 case 1:
331 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
332 default:
333 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
334 pvp->pvp_pgi.pgi_freelist = pv;
335 ++pv_nfree;
336 break;
337 case NPVPPG:
338 pv_nfree -= NPVPPG - 1;
339 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
340 FREE((vaddr_t)pvp, M_VMPVENT);
341 break;
342 }
343 }
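/*
 * Note that the trunc_page() trick used above to recover the pv_page
 * header from a pv_entry pointer relies on each pv_page being a single
 * page-sized, page-aligned allocation; e.g. (illustrative addresses
 * only) a pv_entry at 0xf1234560 lives in the pv_page whose header is
 * at 0xf1234000.
 */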
344
345 #if 0
346 void
347 pmap_collect_pv()
348 {
349 struct pv_page_list pv_page_collectlist;
350 struct pv_page *pvp, *npvp;
351 struct pv_entry *ph, *ppv, *pv, *npv;
352 int s;
353
354 TAILQ_INIT(&pv_page_collectlist);
355
356 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
357 if (pv_nfree < NPVPPG)
358 break;
359 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
360 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
361 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
362 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
363 pvp_pgi.pgi_list);
364 pv_nfree -= NPVPPG;
365 pvp->pvp_pgi.pgi_nfree = -1;
366 }
367 }
368
369 if (pv_page_collectlist.tqh_first == 0)
370 return;
371
372 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
373 if (ph->pv_pmap == 0)
374 continue;
375 s = splvm();
376 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
377 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
378 if (pvp->pvp_pgi.pgi_nfree == -1) {
379 pvp = pv_page_freelist.tqh_first;
380 if (--pvp->pvp_pgi.pgi_nfree == 0) {
381 TAILQ_REMOVE(&pv_page_freelist,
382 pvp, pvp_pgi.pgi_list);
383 }
384 npv = pvp->pvp_pgi.pgi_freelist;
385 #ifdef DIAGNOSTIC
386 if (npv == 0)
387 panic("pmap_collect_pv: pgi_nfree inconsistent");
388 #endif /* DIAGNOSTIC */
389 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
390 *npv = *pv;
391 ppv->pv_next = npv;
392 ppv = npv;
393 } else
394 ppv = pv;
395 }
396 splx(s);
397 }
398
399 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
400 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
401 FREE((vaddr_t)pvp, M_VMPVENT);
402 }
403 }
404 #endif
405
406 /*
407 * Enter a new physical-virtual mapping into the pv table
408 */
409
410 /*__inline*/ void
411 pmap_enter_pv(pmap, va, pv, flags)
412 pmap_t pmap;
413 vaddr_t va;
414 struct pv_entry *pv;
415 u_int flags;
416 {
417 struct pv_entry *npv;
418 u_int s;
419
420 #ifdef DIAGNOSTIC
421 if (!pmap_initialized)
422 panic("pmap_enter_pv: !pmap_initialized");
423 #endif
424
425 s = splvm();
426
427 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
428 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
429
430 if (pv->pv_pmap == NULL) {
431 /*
432 * No entries yet, use header as the first entry
433 */
434 pv->pv_va = va;
435 pv->pv_pmap = pmap;
436 pv->pv_next = NULL;
437 pv->pv_flags = flags;
438 } else {
439 /*
440 * There is at least one other VA mapping this page.
441 * Place this entry after the header.
442 */
443 #ifdef PMAP_DEBUG
444 for (npv = pv; npv; npv = npv->pv_next)
445 if (pmap == npv->pv_pmap && va == npv->pv_va)
446 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
447 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
448 #endif
449 npv = pmap_alloc_pv();
450 npv->pv_va = va;
451 npv->pv_pmap = pmap;
452 npv->pv_flags = flags;
453 npv->pv_next = pv->pv_next;
454 pv->pv_next = npv;
455 }
456
457 if (flags & PT_W)
458 ++pmap->pm_stats.wired_count;
459
460 splx(s);
461 }
462
463
464 /*
465 * Remove a physical-virtual mapping from the pv table
466 */
467
468 /*__inline*/ void
469 pmap_remove_pv(pmap, va, pv)
470 pmap_t pmap;
471 vaddr_t va;
472 struct pv_entry *pv;
473 {
474 struct pv_entry *npv;
475 u_int s;
476 u_int flags = 0;
477
478 #ifdef DIAGNOSTIC
479 if (!pmap_initialized)
480 panic("pmap_remove_pv: !pmap_initialized");
481 #endif
482
483 s = splvm();
484
485 /*
486 * If it is the first entry on the list, it is actually
487 * in the header and we must copy the following entry up
488 * to the header. Otherwise we must search the list for
489 * the entry. In either case we free the now unused entry.
490 */
491
492 if (pmap == pv->pv_pmap && va == pv->pv_va) {
493 npv = pv->pv_next;
494 if (npv) {
495 *pv = *npv;
496 flags = npv->pv_flags;
497 pmap_free_pv(npv);
498 } else {
499 flags = pv->pv_flags;
500 pv->pv_pmap = NULL;
501 }
502 } else {
503 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
504 if (pmap == npv->pv_pmap && va == npv->pv_va)
505 break;
506 }
507 if (npv) {
508 pv->pv_next = npv->pv_next;
509 flags = npv->pv_flags;
510 pmap_free_pv(npv);
511 } else
512 panic("pmap_remove_pv: lost entry");
513 }
514
515 if (flags & PT_W)
516 --pmap->pm_stats.wired_count;
517
518 splx(s);
519 }
520
521 /*
522 * Modify a physical-virtual mapping in the pv table
523 */
524
525 /*__inline */ u_int
526 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
527 pmap_t pmap;
528 vaddr_t va;
529 struct pv_entry *pv;
530 u_int bic_mask;
531 u_int eor_mask;
532 {
533 struct pv_entry *npv;
534 u_int s;
535 u_int flags, oflags;
536
537 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
538 pmap, va, pv, bic_mask, eor_mask));
539
540 #ifdef DIAGNOSTIC
541 if (!pmap_initialized)
542 panic("pmap_modify_pv: !pmap_initialized");
543 #endif
544
545 s = splvm();
546
547 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
548 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
549
550 /*
551 * There is at least one VA mapping this page.
552 */
553
554 for (npv = pv; npv; npv = npv->pv_next) {
555 if (pmap == npv->pv_pmap && va == npv->pv_va) {
556 oflags = npv->pv_flags;
557 npv->pv_flags = flags =
558 ((oflags & ~bic_mask) ^ eor_mask);
559 if ((flags ^ oflags) & PT_W) {
560 if (flags & PT_W)
561 ++pmap->pm_stats.wired_count;
562 else
563 --pmap->pm_stats.wired_count;
564 }
565 PDEBUG(0, printf("done flags=%08x\n", flags));
566 splx(s);
567 return (oflags);
568 }
569 }
570
571 PDEBUG(0, printf("done.\n"));
572 splx(s);
573 return (0);
574 }
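#if 0
/*
 * Illustrative sketch only (never compiled): how the pv helpers above
 * fit together when a managed page is mapped, has its wiring changed
 * and is finally unmapped.  "pm", "va" and "pv" stand in for a pmap,
 * a virtual address and the page's pv head respectively.
 */
static void
pmap_pv_example(pm, va, pv)
	pmap_t pm;
	vaddr_t va;
	struct pv_entry *pv;
{
	/* Record a new wired, writable mapping of the page at va. */
	pmap_enter_pv(pm, va, pv, PT_W | PT_Wr);

	/* Later, clear just the wired bit, leaving the other flags. */
	(void) pmap_modify_pv(pm, va, pv, PT_W, 0);

	/* Finally, forget the mapping altogether. */
	pmap_remove_pv(pm, va, pv);
}
#endif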
575
576
577 /*
578 * Map the specified level 2 pagetable into the level 1 page table for
579 * the given pmap to cover a chunk of virtual address space starting from the
580 * address specified.
581 */
582 static /*__inline*/ void
583 pmap_map_in_l1(pmap, va, l2pa)
584 pmap_t pmap;
585 vaddr_t va, l2pa;
586 {
587 vaddr_t ptva;
588
589 /* Calculate the index into the L1 page table. */
590 ptva = (va >> PDSHIFT) & ~3;
591
592 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
593 pmap->pm_pdir, L1_PTE(l2pa), ptva));
594
595 /* Map page table into the L1. */
596 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
597 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
598 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
599 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
600
601 PDEBUG(0, printf("pt self reference %lx in %lx\n",
602 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
603
604 /* Map the page table into the page table area. */
605 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
606
607 /* XXX should be a purge */
608 /* cpu_tlb_flushD();*/
609 }
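/*
 * Worked example (assuming the usual arm32 values: PDSHIFT = 20, 4KB
 * pages, 1KB coarse L2 tables each mapping 1MB): for va = 0x00123456,
 * ptva = (va >> PDSHIFT) & ~3 = 1 & ~3 = 0, so L1 slots 0-3 are
 * pointed at l2pa + 0x000/0x400/0x800/0xc00.  The four 1KB pieces of
 * the 4KB L2 page together map the whole 4MB naturally-aligned region
 * that contains va.
 */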
610
611 #if 0
612 static /*__inline*/ void
613 pmap_unmap_in_l1(pmap, va)
614 pmap_t pmap;
615 vaddr_t va;
616 {
617 vaddr_t ptva;
618
619 /* Calculate the index into the L1 page table. */
620 ptva = (va >> PDSHIFT) & ~3;
621
622 /* Unmap page table from the L1. */
623 pmap->pm_pdir[ptva + 0] = 0;
624 pmap->pm_pdir[ptva + 1] = 0;
625 pmap->pm_pdir[ptva + 2] = 0;
626 pmap->pm_pdir[ptva + 3] = 0;
627
628 /* Unmap the page table from the page table area. */
629 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
630
631 /* XXX should be a purge */
632 /* cpu_tlb_flushD();*/
633 }
634 #endif
635
636
637 /*
638 * Used to map a range of physical addresses into kernel
639 * virtual address space.
640 *
641 * For now, the VM system is already up, so we only need to map the
642 * specified memory.
643 */
644 vaddr_t
645 pmap_map(va, spa, epa, prot)
646 vaddr_t va, spa, epa;
647 int prot;
648 {
649 while (spa < epa) {
650 pmap_enter(pmap_kernel(), va, spa, prot, 0);
651 va += NBPG;
652 spa += NBPG;
653 }
654 pmap_update();
655 return(va);
656 }
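/*
 * Illustrative sketch only: pmap_map() can be used to wire a physically
 * contiguous region into the kernel map, e.g. mapping "len" bytes of
 * frame buffer at physical address "fb_pa" to kernel virtual address
 * "fb_va" (hypothetical names):
 *
 *	vaddr_t next = pmap_map(fb_va, fb_pa, fb_pa + len,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * The value returned is the first virtual address beyond the new
 * mappings.
 */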
657
658
659 /*
660 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
661 *
662 * bootstrap the pmap system. This is called from initarm and allows
663 * the pmap system to initialise any structures it requires.
664 *
665 * Currently this sets up the statically allocated kernel_pmap and
666 * also allocates virtual addresses for certain page hooks; these
667 * hooks are used by pmap_zero_page() and pmap_copy_page() to zero
668 * and copy physical pages of memory.
669 * It also sets the start and end of the kernel virtual address space.
670 */
671 extern paddr_t physical_freestart;
672 extern paddr_t physical_freeend;
673
674 struct pv_entry *boot_pvent;
675 char *boot_attrs;
676
677 void
678 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
679 pd_entry_t *kernel_l1pt;
680 pv_addr_t kernel_ptpt;
681 {
682 int loop;
683 paddr_t start, end;
684 #if NISADMA > 0
685 paddr_t istart;
686 psize_t isize;
687 #endif
688 vsize_t size;
689
690 kernel_pmap = &kernel_pmap_store;
691
692 kernel_pmap->pm_pdir = kernel_l1pt;
693 kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
694 kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
695 simple_lock_init(&kernel_pmap->pm_lock);
696 kernel_pmap->pm_count = 1;
697
698 /*
699 * Initialize PAGE_SIZE-dependent variables.
700 */
701 uvm_setpagesize();
702
703 npages = 0;
704 loop = 0;
705 while (loop < bootconfig.dramblocks) {
706 start = (paddr_t)bootconfig.dram[loop].address;
707 end = start + (bootconfig.dram[loop].pages * NBPG);
708 if (start < physical_freestart)
709 start = physical_freestart;
710 if (end > physical_freeend)
711 end = physical_freeend;
712 #if 0
713 printf("%d: %lx -> %lx\n", loop, start, end - 1);
714 #endif
715 #if NISADMA > 0
716 if (pmap_isa_dma_range_intersect(start, end - start,
717 &istart, &isize)) {
718 /*
719 * Place the pages that intersect with the
720 * ISA DMA range onto the ISA DMA free list.
721 */
722 #if 0
723 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
724 istart + isize - 1);
725 #endif
726 uvm_page_physload(atop(istart),
727 atop(istart + isize), atop(istart),
728 atop(istart + isize), VM_FREELIST_ISADMA);
729 npages += atop(istart + isize) - atop(istart);
730
731 /*
732 * Load the pieces that come before
733 * the intersection into the default
734 * free list.
735 */
736 if (start < istart) {
737 #if 0
738 printf(" BEFORE 0x%lx -> 0x%lx\n",
739 start, istart - 1);
740 #endif
741 uvm_page_physload(atop(start),
742 atop(istart), atop(start),
743 atop(istart), VM_FREELIST_DEFAULT);
744 npages += atop(istart) - atop(start);
745 }
746
747 /*
748 * Load the pieces that come after
749 * the intersection into the default
750 * free list.
751 */
752 if ((istart + isize) < end) {
753 #if 0
754 printf(" AFTER 0x%lx -> 0x%lx\n",
755 (istart + isize), end - 1);
756 #endif
757 uvm_page_physload(atop(istart + isize),
758 atop(end), atop(istart + isize),
759 atop(end), VM_FREELIST_DEFAULT);
760 npages += atop(end) - atop(istart + isize);
761 }
762 } else {
763 uvm_page_physload(atop(start), atop(end),
764 atop(start), atop(end), VM_FREELIST_DEFAULT);
765 npages += atop(end) - atop(start);
766 }
767 #else /* NISADMA > 0 */
768 uvm_page_physload(atop(start), atop(end),
769 atop(start), atop(end), VM_FREELIST_DEFAULT);
770 npages += atop(end) - atop(start);
771 #endif /* NISADMA > 0 */
772 ++loop;
773 }
774
775 #ifdef MYCROFT_HACK
776 printf("npages = %ld\n", npages);
777 #endif
778
779 virtual_start = KERNEL_VM_BASE;
780 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
781
782 ALLOC_PAGE_HOOK(page_hook0, NBPG);
783 ALLOC_PAGE_HOOK(page_hook1, NBPG);
784
785 /*
786 * The mem special device needs a virtual hook but we don't
787 * need a pte
788 */
789 memhook = (char *)virtual_start;
790 virtual_start += NBPG;
791
792 msgbufaddr = (caddr_t)virtual_start;
793 msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
794 virtual_start += round_page(MSGBUFSIZE);
795
796 size = npages * sizeof(struct pv_entry);
797 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
798 bzero(boot_pvent, size);
799 size = npages * sizeof(char);
800 boot_attrs = (char *)uvm_pageboot_alloc(size);
801 bzero(boot_attrs, size);
802
803 cpu_cache_cleanD();
804 }
805
806 /*
807 * void pmap_init(void)
808 *
809 * Initialize the pmap module.
810 * Called by vm_init() in vm/vm_init.c in order to initialise
811 * any structures that the pmap system needs to map virtual memory.
812 */
813
814 extern int physmem;
815
816 void
817 pmap_init()
818 {
819 int lcv;
820
821 #ifdef MYCROFT_HACK
822 printf("physmem = %d\n", physmem);
823 #endif
824
825 /*
826 * Set the available memory vars - these do not map to real memory
827 * addresses, and cannot, as the physical memory is fragmented.
828 * They are used by ps for %mem calculations.
829 * One could argue whether this should be the entire memory or just
830 * the memory that is usable in a user process.
831 */
832 avail_start = 0;
833 avail_end = physmem * NBPG;
834
835 /* Set up pmap info for physsegs. */
836 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
837 vm_physmem[lcv].pmseg.pvent = boot_pvent;
838 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
839 vm_physmem[lcv].pmseg.attrs = boot_attrs;
840 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
841 }
842 #ifdef MYCROFT_HACK
843 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
844 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
845 lcv,
846 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
847 vm_physmem[lcv].start, vm_physmem[lcv].end);
848 }
849 #endif
850 TAILQ_INIT(&pv_page_freelist);
851
852 #ifdef DIAGNOSTIC
853 /* Now it is safe to enable pv_entry recording. */
854 pmap_initialized = TRUE;
855 #endif
856
857 /* Initialise our L1 page table queues and counters */
858 SIMPLEQ_INIT(&l1pt_static_queue);
859 l1pt_static_queue_count = 0;
860 l1pt_static_create_count = 0;
861 SIMPLEQ_INIT(&l1pt_queue);
862 l1pt_queue_count = 0;
863 l1pt_create_count = 0;
864 l1pt_reuse_count = 0;
865 }
866
867 /*
868 * pmap_postinit()
869 *
870 * This routine is called after the vm and kmem subsystems have been
871 * initialised. This allows the pmap code to perform any initialisation
872 * that can only be done once the memory allocation is in place.
873 */
874
875 void
876 pmap_postinit()
877 {
878 int loop;
879 struct l1pt *pt;
880
881 #ifdef PMAP_STATIC_L1S
882 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
883 #else /* PMAP_STATIC_L1S */
884 for (loop = 0; loop < max_processes; ++loop) {
885 #endif /* PMAP_STATIC_L1S */
886 /* Allocate a L1 page table */
887 pt = pmap_alloc_l1pt();
888 if (!pt)
889 panic("Cannot allocate static L1 page tables\n");
890
891 /* Clean it */
892 bzero((void *)pt->pt_va, PD_SIZE);
893 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
894 /* Add the page table to the queue */
895 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
896 ++l1pt_static_queue_count;
897 ++l1pt_static_create_count;
898 }
899 }
900
901
902 /*
903 * Create and return a physical map.
904 *
905 * No size argument is taken; the map returned is always a real
906 * physical map that may be referenced by the hardware.
910 */
911
912 pmap_t
913 pmap_create()
914 {
915 pmap_t pmap;
916
917 /* Allocate memory for pmap structure and zero it */
918 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
919 bzero(pmap, sizeof(*pmap));
920
921 /* Now init the machine part of the pmap */
922 pmap_pinit(pmap);
923 return(pmap);
924 }
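#if 0
/*
 * Illustrative sketch only (never compiled): the life cycle of a pmap
 * as the VM system drives it.  Each pmap_reference() must be balanced
 * by a pmap_destroy(); the final pmap_destroy() releases the pmap.
 */
static void
pmap_lifecycle_example()
{
	pmap_t pm;

	pm = pmap_create();
	pmap_reference(pm);	/* an extra user takes a reference */
	pmap_destroy(pm);	/* drops the extra reference ... */
	pmap_destroy(pm);	/* ... and this releases the pmap */
}
#endif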
925
926 /*
927 * pmap_alloc_l1pt()
928 *
929 * This routine allocates physical and virtual memory for an L1 page table
930 * and wires it in.
931 * An l1pt structure is returned to describe the allocated page table.
932 *
933 * This routine is allowed to fail if the required memory cannot be allocated.
934 * In this case NULL is returned.
935 */
936
937 struct l1pt *
938 pmap_alloc_l1pt(void)
939 {
940 paddr_t pa;
941 vaddr_t va;
942 struct l1pt *pt;
943 int error;
944 struct vm_page *m;
945 pt_entry_t *pte;
946
947 /* Allocate virtual address space for the L1 page table */
948 va = uvm_km_valloc(kernel_map, PD_SIZE);
949 if (va == 0) {
950 #ifdef DIAGNOSTIC
951 printf("pmap: Cannot allocate pageable memory for L1\n");
952 #endif /* DIAGNOSTIC */
953 return(NULL);
954 }
955
956 /* Allocate memory for the l1pt structure */
957 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
958
959 /*
960 * Allocate pages from the VM system.
961 */
962 TAILQ_INIT(&pt->pt_plist);
963 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
964 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
965 if (error) {
966 #ifdef DIAGNOSTIC
967 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
968 error);
969 #endif /* DIAGNOSTIC */
970 /* Release the resources we already have claimed */
971 free(pt, M_VMPMAP);
972 uvm_km_free(kernel_map, va, PD_SIZE);
973 return(NULL);
974 }
975
976 /* Map our physical pages into our virtual space */
977 pt->pt_va = va;
978 m = pt->pt_plist.tqh_first;
979 while (m && va < (pt->pt_va + PD_SIZE)) {
980 pa = VM_PAGE_TO_PHYS(m);
981
982 pmap_enter(pmap_kernel(), va, pa,
983 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
984
985 /* Revoke cacheability and bufferability */
986 /* XXX should be done better than this */
987 pte = pmap_pte(pmap_kernel(), va);
988 *pte = *pte & ~(PT_C | PT_B);
989
990 va += NBPG;
991 m = m->pageq.tqe_next;
992 }
993 pmap_update();
994
995 #ifdef DIAGNOSTIC
996 if (m)
997 panic("pmap_alloc_l1pt: pglist not empty\n");
998 #endif /* DIAGNOSTIC */
999
1000 pt->pt_flags = 0;
1001 return(pt);
1002 }
1003
1004 /*
1005 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1006 */
1007 void
1008 pmap_free_l1pt(pt)
1009 struct l1pt *pt;
1010 {
1011 	/* Unmap the physical memory from the virtual space */
1012 pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
1013 pmap_update();
1014
1015 /* Return the physical memory */
1016 uvm_pglistfree(&pt->pt_plist);
1017
1018 /* Free the virtual space */
1019 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1020
1021 /* Free the l1pt structure */
1022 free(pt, M_VMPMAP);
1023 }
1024
1025 /*
1026 * Allocate a page directory.
1027 * This routine will either take a page directory from the pool of L1
1028 * page tables currently held by the kernel or allocate a new one via
1029 * pmap_alloc_l1pt().
1030 * It will then initialise the l1 page table for use.
1031 */
1032 int
1033 pmap_allocpagedir(pmap)
1034 struct pmap *pmap;
1035 {
1036 paddr_t pa;
1037 struct l1pt *pt;
1038 pt_entry_t *pte;
1039
1040 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1041
1042 /* Do we have any spare L1's lying around ? */
1043 if (l1pt_static_queue_count) {
1044 --l1pt_static_queue_count;
1045 pt = l1pt_static_queue.sqh_first;
1046 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1047 } else if (l1pt_queue_count) {
1048 --l1pt_queue_count;
1049 pt = l1pt_queue.sqh_first;
1050 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1051 ++l1pt_reuse_count;
1052 } else {
1053 pt = pmap_alloc_l1pt();
1054 if (!pt)
1055 return(ENOMEM);
1056 ++l1pt_create_count;
1057 }
1058
1059 /* Store the pointer to the l1 descriptor in the pmap. */
1060 pmap->pm_l1pt = pt;
1061
1062 /* Get the physical address of the start of the l1 */
1063 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1064
1065 /* Store the virtual address of the l1 in the pmap. */
1066 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1067
1068 /* Clean the L1 if it is dirty */
1069 if (!(pt->pt_flags & PTFLAG_CLEAN))
1070 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1071
1072 /* Do we already have the kernel mappings ? */
1073 if (!(pt->pt_flags & PTFLAG_KPT)) {
1074 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1075
1076 bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1077 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1078 KERNEL_PD_SIZE);
1079 pt->pt_flags |= PTFLAG_KPT;
1080 }
1081
1082 /* Allocate a page table to map all the page tables for this pmap */
1083
1084 #ifdef DIAGNOSTIC
1085 if (pmap->pm_vptpt) {
1086 /* XXX What if we have one already ? */
1087 panic("pmap_allocpagedir: have pt already\n");
1088 }
1089 #endif /* DIAGNOSTIC */
1090 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1091 if (pmap->pm_vptpt == 0) {
1092 pmap_freepagedir(pmap);
1093 return(ENOMEM);
1094 }
1095
1096 (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
1097 pmap->pm_pptpt &= PG_FRAME;
1098 /* Revoke cacheability and bufferability */
1099 /* XXX should be done better than this */
1100 pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
1101 *pte = *pte & ~(PT_C | PT_B);
1102
1103 /* Wire in this page table */
1104 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1105
1106 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1107
1108 /*
1109 * Map the kernel page tables for 0xf0000000 +
1110 * into the page table used to map the
1111 * pmap's page tables
1112 */
1113 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1114 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1115 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1116 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1117 (KERNEL_PD_SIZE >> 2));
1118
1119 pmap->pm_count = 1;
1120 simple_lock_init(&pmap->pm_lock);
1121
1122 return(0);
1123 }
1124
1125
1126 /*
1127 * Initialize a preallocated and zeroed pmap structure,
1128 * such as one in a vmspace structure.
1129 */
1130
1131 static int pmap_pagedir_ident; /* tsleep() ident */
1132
1133 void
1134 pmap_pinit(pmap)
1135 struct pmap *pmap;
1136 {
1137 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1138
1139 /* Keep looping until we succeed in allocating a page directory */
1140 while (pmap_allocpagedir(pmap) != 0) {
1141 /*
1142 * Ok we failed to allocate a suitable block of memory for an
1143 * L1 page table. This means that either:
1144 * 1. 16KB of virtual address space could not be allocated
1145 * 2. 16KB of physically contiguous memory on a 16KB boundary
1146 * could not be allocated.
1147 *
1148 * Since we cannot fail we will sleep for a while and try
1149 * again.  Although we will be woken when another page table
1150 * is freed, other memory may be released or swapped out in the
1151 * meantime, which could let us succeed, so we keep retrying at
1152 * regular intervals just in case.
1153 */
1154
1155 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1156 "l1ptwait", 1000) == EWOULDBLOCK)
1157 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1158 }
1159
1160 /* Map zero page for the pmap. This will also map the L2 for it */
1161 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1162 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1163 pmap_update();
1164 }
1165
1166
1167 void
1168 pmap_freepagedir(pmap)
1169 pmap_t pmap;
1170 {
1171 /* Free the memory used for the page table mapping */
1172 if (pmap->pm_vptpt != 0)
1173 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1174
1175 /* junk the L1 page table */
1176 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1177 /* Add the page table to the queue */
1178 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1179 ++l1pt_static_queue_count;
1180 /* Wake up any sleeping processes waiting for a l1 page table */
1181 wakeup((caddr_t)&pmap_pagedir_ident);
1182 } else if (l1pt_queue_count < 8) {
1183 /* Add the page table to the queue */
1184 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1185 ++l1pt_queue_count;
1186 /* Wake up any sleeping processes waiting for a l1 page table */
1187 wakeup((caddr_t)&pmap_pagedir_ident);
1188 } else
1189 pmap_free_l1pt(pmap->pm_l1pt);
1190 }
1191
1192
1193 /*
1194 * Retire the given physical map from service.
1195 * Should only be called if the map contains no valid mappings.
1196 */
1197
1198 void
1199 pmap_destroy(pmap)
1200 pmap_t pmap;
1201 {
1202 int count;
1203
1204 if (pmap == NULL)
1205 return;
1206
1207 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1208 simple_lock(&pmap->pm_lock);
1209 count = --pmap->pm_count;
1210 simple_unlock(&pmap->pm_lock);
1211 if (count == 0) {
1212 pmap_release(pmap);
1213 free((caddr_t)pmap, M_VMPMAP);
1214 }
1215 }
1216
1217
1218 /*
1219 * Release any resources held by the given physical map.
1220 * Called when a pmap initialized by pmap_pinit is being released.
1221 * Should only be called if the map contains no valid mappings.
1222 */
1223
1224 void
1225 pmap_release(pmap)
1226 pmap_t pmap;
1227 {
1228 struct vm_page *page;
1229 pt_entry_t *pte;
1230 int loop;
1231
1232 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1233
1234 #if 0
1235 if (pmap->pm_count != 1) /* XXX: needs sorting */
1236 panic("pmap_release count %d", pmap->pm_count);
1237 #endif
1238
1239 /* Remove the zero page mapping */
1240 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1241 pmap_update();
1242
1243 /*
1244 * Free any page tables still mapped
1245 * This is only temporary until pmap_enter can count the number
1246 * of mappings made in a page table. Then pmap_remove() can
1247 * reduce the count and free the pagetable when the count
1248 * reaches zero.
1249 */
1250 for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) {
1251 pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4);
1252 if (*pte != 0) {
1253 PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte));
1254 page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
1255 if (page == NULL)
1256 panic("pmap_release: bad address for phys page");
1257 uvm_pagefree(page);
1258 }
1259 }
1260 /* Free the page dir */
1261 pmap_freepagedir(pmap);
1262 }
1263
1264
1265 /*
1266 * void pmap_reference(pmap_t pmap)
1267 *
1268 * Add a reference to the specified pmap.
1269 */
1270
1271 void
1272 pmap_reference(pmap)
1273 pmap_t pmap;
1274 {
1275 if (pmap == NULL)
1276 return;
1277
1278 simple_lock(&pmap->pm_lock);
1279 pmap->pm_count++;
1280 simple_unlock(&pmap->pm_lock);
1281 }
1282
1283 /*
1284 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1285 *
1286 * Return the start and end addresses of the kernel's virtual space.
1287 * These values are set up in pmap_bootstrap and are updated as pages
1288 * are allocated.
1289 */
1290
1291 void
1292 pmap_virtual_space(start, end)
1293 vaddr_t *start;
1294 vaddr_t *end;
1295 {
1296 *start = virtual_start;
1297 *end = virtual_end;
1298 }
1299
1300
1301 /*
1302 * Activate the address space for the specified process. If the process
1303 * is the current process, load the new MMU context.
1304 */
1305 void
1306 pmap_activate(p)
1307 struct proc *p;
1308 {
1309 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1310 struct pcb *pcb = &p->p_addr->u_pcb;
1311
1312 (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
1313 (paddr_t *)&pcb->pcb_pagedir);
1314
1315 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1316 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1317
1318 if (p == curproc) {
1319 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1320 setttb((u_int)pcb->pcb_pagedir);
1321 }
1322 #if 0
1323 pmap->pm_pdchanged = FALSE;
1324 #endif
1325 }
1326
1327
1328 /*
1329 * Deactivate the address space of the specified process.
1330 */
1331 void
1332 pmap_deactivate(p)
1333 struct proc *p;
1334 {
1335 }
1336
1337
1338 /*
1339 * pmap_clean_page()
1340 *
1341 * This is a local function used to work out the best strategy to clean
1342 * a single page referenced by its entry in the PV table. It's used by
1343 * pmap_copy_page, pmap_zero_page and maybe some others later on.
1344 *
1345 * Its policy is effectively:
1346 * o If there are no mappings, we don't bother doing anything with the cache.
1347 * o If there is one mapping, we clean just that page.
1348 * o If there are multiple mappings, we clean the entire cache.
1349 *
1350 * So that some functions can be further optimised, it returns 0 if it didn't
1351 * clean the entire cache, or 1 if it did.
1352 *
1353 * XXX One bug in this routine is that if the pv_entry has a single page
1354 * mapped at 0x00000000 a whole cache clean will be performed rather than
1355 * just that one page.  This should not occur in everyday use, and if it
1356 * does, the only cost is a less efficient clean for that page.
1357 */
1358 static int
1359 pmap_clean_page(pv)
1360 struct pv_entry *pv;
1361 {
1362 int s;
1363 int cache_needs_cleaning = 0;
1364 vaddr_t page_to_clean = 0;
1365
1366 /* Go to splvm() so we get exclusive lock for a mo */
1367 s = splvm();
1368 if (pv->pv_pmap) {
1369 cache_needs_cleaning = 1;
1370 if (!pv->pv_next)
1371 page_to_clean = pv->pv_va;
1372 }
1373 splx(s);
1374
1375 /* Do cache ops outside the splvm. */
1376 if (page_to_clean)
1377 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1378 else if (cache_needs_cleaning) {
1379 cpu_cache_purgeID();
1380 return (1);
1381 }
1382 return (0);
1383 }
1384
1385 /*
1386 * pmap_find_pv()
1387 *
1388 * This is a local function that finds a PV entry for a given physical page.
1389 * This is a common op, and this function removes loads of ifdefs in the code.
1390 */
1391 static __inline struct pv_entry *
1392 pmap_find_pv(phys)
1393 paddr_t phys;
1394 {
1395 int bank, off;
1396 struct pv_entry *pv;
1397
1398 #ifdef DIAGNOSTIC
1399 if (!pmap_initialized)
1400 panic("pmap_find_pv: !pmap_initialized");
1401 #endif
1402
1403 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1404 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1405 pv = &vm_physmem[bank].pmseg.pvent[off];
1406 return (pv);
1407 }
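#if 0
/*
 * Illustrative sketch only (never compiled): walking every mapping of
 * a managed physical page.  A pv head with a NULL pv_pmap means the
 * page currently has no mappings at all.
 */
static void
pmap_find_pv_example(pa)
	paddr_t pa;
{
	struct pv_entry *pv;

	pv = pmap_find_pv(pa);
	if (pv->pv_pmap == NULL)
		return;		/* no mappings */
	for (; pv != NULL; pv = pv->pv_next)
		printf("pa %08lx mapped at va %08lx in pmap %p\n",
		    pa, pv->pv_va, pv->pv_pmap);
}
#endif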
1408
1409 /*
1410 * pmap_zero_page()
1411 *
1412 * Zero a given physical page by mapping it at a page hook point.
1413 * In doing the zero page op, the page we zero is mapped cacheable, since
1414 * on the StrongARM accesses to non-cached pages are non-burst, making
1415 * writes of _any_ bulk data very slow.
1416 */
1417 void
1418 pmap_zero_page(phys)
1419 paddr_t phys;
1420 {
1421 struct pv_entry *pv;
1422
1423 	/* Get an entry for this page, and clean it. */
1424 pv = pmap_find_pv(phys);
1425 pmap_clean_page(pv);
1426
1427 /*
1428 * Hook in the page, zero it, and purge the cache for that
1429 * zeroed page. Invalidate the TLB as needed.
1430 */
1431 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1432 cpu_tlb_flushD_SE(page_hook0.va);
1433 bzero_page(page_hook0.va);
1434 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1435 }
1436
1437 /*
1438 * pmap_copy_page()
1439 *
1440 * Copy one physical page into another, by mapping the pages into
1441 * hook points. The same comment regarding cachability as in
1442 * pmap_zero_page also applies here.
1443 */
1444 void
1445 pmap_copy_page(src, dest)
1446 paddr_t src;
1447 paddr_t dest;
1448 {
1449 struct pv_entry *src_pv, *dest_pv;
1450
1451 /* Get PV entries for the pages, and clean them if needed. */
1452 src_pv = pmap_find_pv(src);
1453 dest_pv = pmap_find_pv(dest);
1454 if (!pmap_clean_page(src_pv))
1455 pmap_clean_page(dest_pv);
1456
1457 /*
1458 * Map the pages into the page hook points, copy them, and purge
1459 * the cache for the appropriate page. Invalidate the TLB
1460 * as required.
1461 */
1462 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1463 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1464 cpu_tlb_flushD_SE(page_hook0.va);
1465 cpu_tlb_flushD_SE(page_hook1.va);
1466 bcopy_page(page_hook0.va, page_hook1.va);
1467 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1468 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1469 }
1470
1471 /*
1472 * paddr_t pmap_next_phys_page(paddr_t addr)
1473 *
1474 * Return the physical address of the page that follows 'addr' in the
1475 * DRAM blocks described by the bootconfig, or 0 if there are no more.
1476 */
1477
1478 paddr_t
1479 pmap_next_phys_page(addr)
1480 paddr_t addr;
1481
1482 {
1483 int loop;
1484
1485 if (addr < bootconfig.dram[0].address)
1486 return(bootconfig.dram[0].address);
1487
1488 loop = 0;
1489
1490 while (bootconfig.dram[loop].address != 0
1491 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1492 ++loop;
1493
1494 if (bootconfig.dram[loop].address == 0)
1495 return(0);
1496
1497 addr += NBPG;
1498
1499 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1500 if (bootconfig.dram[loop + 1].address == 0)
1501 return(0);
1502 addr = bootconfig.dram[loop + 1].address;
1503 }
1504
1505 return(addr);
1506 }
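#if 0
/*
 * Illustrative sketch only (never compiled): visiting every physical
 * page of DRAM in ascending order.  This assumes DRAM does not start
 * at physical address 0, since a return value of 0 means "no more
 * pages".
 */
static void
pmap_walk_phys_pages()
{
	paddr_t pa;

	for (pa = pmap_next_phys_page(0); pa != 0;
	    pa = pmap_next_phys_page(pa)) {
		/* ... do something with the page at pa ... */
	}
}
#endif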
1507
1508 #if 0
1509 void
1510 pmap_pte_addref(pmap, va)
1511 pmap_t pmap;
1512 vaddr_t va;
1513 {
1514 pd_entry_t *pde;
1515 paddr_t pa;
1516 struct vm_page *m;
1517
1518 if (pmap == pmap_kernel())
1519 return;
1520
1521 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1522 pa = pmap_pte_pa(pde);
1523 m = PHYS_TO_VM_PAGE(pa);
1524 ++m->wire_count;
1525 #ifdef MYCROFT_HACK
1526 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1527 pmap, va, pde, pa, m, m->wire_count);
1528 #endif
1529 }
1530
1531 void
1532 pmap_pte_delref(pmap, va)
1533 pmap_t pmap;
1534 vaddr_t va;
1535 {
1536 pd_entry_t *pde;
1537 paddr_t pa;
1538 struct vm_page *m;
1539
1540 if (pmap == pmap_kernel())
1541 return;
1542
1543 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1544 pa = pmap_pte_pa(pde);
1545 m = PHYS_TO_VM_PAGE(pa);
1546 --m->wire_count;
1547 #ifdef MYCROFT_HACK
1548 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1549 pmap, va, pde, pa, m, m->wire_count);
1550 #endif
1551 if (m->wire_count == 0) {
1552 #ifdef MYCROFT_HACK
1553 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1554 pmap, va, pde, pa, m);
1555 #endif
1556 pmap_unmap_in_l1(pmap, va);
1557 uvm_pagefree(m);
1558 --pmap->pm_stats.resident_count;
1559 }
1560 }
1561 #else
1562 #define pmap_pte_addref(pmap, va)
1563 #define pmap_pte_delref(pmap, va)
1564 #endif
1565
1566 /*
1567 * Since we have a virtually indexed cache, we may need to inhibit caching if
1568 * there is more than one mapping and at least one of them is writable.
1569 * Since we purge the cache on every context switch, we only need to check for
1570 * other mappings within the same pmap, or kernel_pmap.
1571 * This function is also called when a page is unmapped, to possibly reenable
1572 * caching on any remaining mappings.
1573 */
1574 void
1575 pmap_vac_me_harder(pmap, pv)
1576 pmap_t pmap;
1577 struct pv_entry *pv;
1578 {
1579 struct pv_entry *npv;
1580 pt_entry_t *pte;
1581 int entries = 0;
1582 int writeable = 0;
1583
1584 if (pv->pv_pmap == NULL)
1585 return;
1586
1587 /*
1588 * Count mappings and writable mappings in this pmap.
1589 * Keep a pointer to the first one.
1590 */
1591 for (npv = pv; npv; npv = npv->pv_next) {
1592 /* Count mappings in the same pmap */
1593 if (pmap == npv->pv_pmap) {
1594 if (entries++ == 0)
1595 pv = npv;
1596 /* Writeable mappings */
1597 if (npv->pv_flags & PT_Wr)
1598 ++writeable;
1599 }
1600 }
1601
1602 /*
1603 * Enable or disable caching as necessary.
1604 * We do a quick check of the first PTE to avoid walking the list if
1605 * we're already in the right state.
1606 */
1607 if (entries > 1 && writeable) {
1608 pte = pmap_pte(pmap, pv->pv_va);
1609 if (~*pte & (PT_C | PT_B))
1610 return;
1611 *pte = *pte & ~(PT_C | PT_B);
1612 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1613 if (pmap == npv->pv_pmap) {
1614 pte = pmap_pte(pmap, npv->pv_va);
1615 *pte = *pte & ~(PT_C | PT_B);
1616 }
1617 }
1618 } else if (entries > 0) {
1619 pte = pmap_pte(pmap, pv->pv_va);
1620 if (*pte & (PT_C | PT_B))
1621 return;
1622 *pte = *pte | (PT_C | PT_B);
1623 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1624 if (pmap == npv->pv_pmap) {
1625 pte = pmap_pte(pmap, npv->pv_va);
1626 *pte = *pte | (PT_C | PT_B);
1627 }
1628 }
1629 }
1630 }
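/*
 * Example of the policy above (illustrative): if a page is mapped
 * read/write at va1 and read-only at va2 within the same pmap, both
 * PTEs have PT_C and PT_B cleared (entries == 2, writeable == 1).
 * Once one of the two mappings is removed, pmap_vac_me_harder() runs
 * again, finds a single remaining mapping and turns PT_C and PT_B
 * back on for it.
 */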
1631
1632 /*
1633 * pmap_remove()
1634 *
1635 * pmap_remove is responsible for nuking a number of mappings for a range
1636 * of virtual address space in the current pmap. To do this efficiently
1637 * is interesting, because in a number of cases a wide virtual address
1638 * range may be supplied that contains few actual mappings. So, the
1639 * optimisations are:
1640 * 1. Try and skip over hunks of address space for which an L1 entry
1641 * does not exist.
1642 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1643 * maybe do just a partial cache clean. This path of execution is
1644 * complicated by the fact that the cache must be flushed _before_
1645 * the PTE is nuked, being a VAC :-)
1646 * 3. Maybe later fast-case a single page, but I don't think this is
1647 * going to make _that_ much difference overall.
1648 */
1649
1650 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
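/*
 * Example of the strategy (illustrative, for an active pmap): removing
 * a 16MB range that contains only two live mappings purges just those
 * two pages from the cache and flushes their TLB entries individually.
 * As soon as a fourth live mapping is found, the whole cache and TLB
 * are purged once and any further PTEs in the range are simply zapped
 * with no per-page cache work.
 */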
1651
1652 void
1653 pmap_remove(pmap, sva, eva)
1654 pmap_t pmap;
1655 vaddr_t sva;
1656 vaddr_t eva;
1657 {
1658 int cleanlist_idx = 0;
1659 struct pagelist {
1660 vaddr_t va;
1661 pt_entry_t *pte;
1662 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1663 pt_entry_t *pte = 0;
1664 paddr_t pa;
1665 int pmap_active;
1666 struct pv_entry *pv;
1667
1668 /* Exit quick if there is no pmap */
1669 if (!pmap)
1670 return;
1671
1672 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1673
1674 sva &= PG_FRAME;
1675 eva &= PG_FRAME;
1676
1677 /* Get a page table pointer */
1678 while (sva < eva) {
1679 pte = pmap_pte(pmap, sva);
1680 if (pte)
1681 break;
1682 sva = (sva & PD_MASK) + NBPD;
1683 }
1684
1685 /* Note if the pmap is active thus require cache and tlb cleans */
1686 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1687 || (pmap == kernel_pmap))
1688 pmap_active = 1;
1689 else
1690 pmap_active = 0;
1691
1692 /* Now loop along */
1693 while (sva < eva) {
1694 /* Check if we can move to the next PDE (l1 chunk) */
1695 if (!(sva & PT_MASK))
1696 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1697 sva += NBPD;
1698 pte += arm_byte_to_page(NBPD);
1699 continue;
1700 }
1701
1702 /* We've found a valid PTE, so this page of PTEs has to go. */
1703 if (pmap_pte_v(pte)) {
1704 int bank, off;
1705
1706 /* Update statistics */
1707 --pmap->pm_stats.resident_count;
1708
1709 /*
1710 * Add this page to our cache remove list, if we can.
1711 * If, however the cache remove list is totally full,
1712 * then do a complete cache invalidation taking note
1713 * to backtrack the PTE table beforehand, and ignore
1714 * the lists in future because there's no longer any
1715 * point in bothering with them (we've paid the
1716 * penalty, so will carry on unhindered). Otherwise,
1717 * when we fall out, we just clean the list.
1718 */
1719 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1720 pa = pmap_pte_pa(pte);
1721
1722 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1723 /* Add to the clean list. */
1724 cleanlist[cleanlist_idx].pte = pte;
1725 cleanlist[cleanlist_idx].va = sva;
1726 cleanlist_idx++;
1727 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1728 int cnt;
1729
1730 /* Nuke everything if needed. */
1731 if (pmap_active) {
1732 cpu_cache_purgeID();
1733 cpu_tlb_flushID();
1734 }
1735
1736 /*
1737 * Roll back the previous PTE list,
1738 * and zero out the current PTE.
1739 */
1740 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1741 *cleanlist[cnt].pte = 0;
1742 pmap_pte_delref(pmap, cleanlist[cnt].va);
1743 }
1744 *pte = 0;
1745 pmap_pte_delref(pmap, sva);
1746 cleanlist_idx++;
1747 } else {
1748 /*
1749 * We've already nuked the cache and
1750 * TLB, so just carry on regardless,
1751 * and we won't need to do it again
1752 */
1753 *pte = 0;
1754 pmap_pte_delref(pmap, sva);
1755 }
1756
1757 /*
1758 * Update flags. In a number of circumstances,
1759 * we could cluster a lot of these and do a
1760 * number of sequential pages in one go.
1761 */
1762 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1763 pv = &vm_physmem[bank].pmseg.pvent[off];
1764 pmap_remove_pv(pmap, sva, pv);
1765 pmap_vac_me_harder(pmap, pv);
1766 }
1767 }
1768 sva += NBPG;
1769 pte++;
1770 }
1771
1772 /*
1773 * Now, if we've fallen through down to here, chances are that there
1774 * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
1775 */
1776 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1777 u_int cnt;
1778
1779 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1780 if (pmap_active) {
1781 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1782 *cleanlist[cnt].pte = 0;
1783 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1784 } else
1785 *cleanlist[cnt].pte = 0;
1786 pmap_pte_delref(pmap, cleanlist[cnt].va);
1787 }
1788 }
1789 }
1790
1791 /*
1792 * Routine: pmap_remove_all
1793 * Function:
1794 * Removes this physical page from
1795 * all physical maps in which it resides.
1796 * Reflects back modify bits to the pager.
1797 */
1798
1799 void
1800 pmap_remove_all(pa)
1801 paddr_t pa;
1802 {
1803 struct pv_entry *ph, *pv, *npv;
1804 pmap_t pmap;
1805 pt_entry_t *pte;
1806 int s;
1807
1808 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1809
1810 pv = ph = pmap_find_pv(pa);
1811 pmap_clean_page(pv);
1812
1813 s = splvm();
1814
1815 if (ph->pv_pmap == NULL) {
1816 PDEBUG(0, printf("free page\n"));
1817 splx(s);
1818 return;
1819 }
1820
1821 while (pv) {
1822 pmap = pv->pv_pmap;
1823 pte = pmap_pte(pmap, pv->pv_va);
1824
1825 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1826 pv->pv_va, pv->pv_flags));
1827 #ifdef DEBUG
1828 if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
1829 panic("pmap_remove_all: bad mapping");
1830 #endif /* DEBUG */
1831
1832 /*
1833 * Update statistics
1834 */
1835 --pmap->pm_stats.resident_count;
1836
1837 /* Wired bit */
1838 if (pv->pv_flags & PT_W)
1839 --pmap->pm_stats.wired_count;
1840
1841 /*
1842 * Invalidate the PTEs.
1843 * XXX: should cluster them up and invalidate as many
1844 * as possible at once.
1845 */
1846
1847 #ifdef needednotdone
1848 reduce wiring count on page table pages as references drop
1849 #endif
1850
1851 *pte = 0;
1852 pmap_pte_delref(pmap, pv->pv_va);
1853
1854 npv = pv->pv_next;
1855 if (pv == ph)
1856 ph->pv_pmap = NULL;
1857 else
1858 pmap_free_pv(pv);
1859 pv = npv;
1860 }
1861
1862 splx(s);
1863
1864 PDEBUG(0, printf("done\n"));
1865 cpu_tlb_flushID();
1866 }
1867
1868
1869 /*
1870 * Set the physical protection on the specified range of this map as requested.
1871 */
1872
1873 void
1874 pmap_protect(pmap, sva, eva, prot)
1875 pmap_t pmap;
1876 vaddr_t sva;
1877 vaddr_t eva;
1878 vm_prot_t prot;
1879 {
1880 pt_entry_t *pte = NULL;
1881 int armprot;
1882 int flush = 0;
1883 paddr_t pa;
1884 int bank, off;
1885 struct pv_entry *pv;
1886
1887 /*
1888 * Make sure pmap is valid. -dct
1889 */
1890 if (pmap == NULL)
1891 return;
1892 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
1893 pmap, sva, eva, prot));
1894
1895 if (~prot & VM_PROT_READ) {
1896 /* Just remove the mappings. */
1897 pmap_remove(pmap, sva, eva);
1898 return;
1899 }
1900 if (prot & VM_PROT_WRITE) {
1901 /*
1902 * If this is a read->write transition, just ignore it and let
1903 * uvm_fault() take care of it later.
1904 */
1905 return;
1906 }
1907
1908 sva &= PG_FRAME;
1909 eva &= PG_FRAME;
1910
1911 /*
1912 * We need to acquire a pointer to a page table page before entering
1913 * the following loop.
1914 */
1915 while (sva < eva) {
1916 pte = pmap_pte(pmap, sva);
1917 if (pte)
1918 break;
1919 sva = (sva & PD_MASK) + NBPD;
1920 }
1921
1922 while (sva < eva) {
1923 /* only check once in a while */
1924 if ((sva & PT_MASK) == 0) {
1925 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1926 /* We can race ahead here, to the next pde. */
1927 sva += NBPD;
1928 pte += arm_byte_to_page(NBPD);
1929 continue;
1930 }
1931 }
1932
1933 if (!pmap_pte_v(pte))
1934 goto next;
1935
1936 flush = 1;
1937
1938 armprot = 0;
1939 if (sva < VM_MAXUSER_ADDRESS)
1940 armprot |= PT_AP(AP_U);
1941 else if (sva < VM_MAX_ADDRESS)
1942 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
1943 *pte = (*pte & 0xfffff00f) | armprot;
1944
1945 pa = pmap_pte_pa(pte);
1946
1947 /* Get the physical page index */
1948
1949 /* Clear write flag */
1950 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1951 pv = &vm_physmem[bank].pmseg.pvent[off];
1952 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
1953 pmap_vac_me_harder(pmap, pv);
1954 }
1955
1956 next:
1957 sva += NBPG;
1958 pte++;
1959 }
1960
1961 if (flush)
1962 cpu_tlb_flushID();
1963 }
1964
1965 /*
1966 * void pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1967 * int flags)
1968 *
1969 * Insert the given physical page (p) at
1970 * the specified virtual address (v) in the
1971 * target physical map with the protection requested.
1972 *
1973 * If specified, the page will be wired down, meaning
1974 * that the related pte can not be reclaimed.
1975 *
1976 * NB: This is the only routine which MAY NOT lazy-evaluate
1977 * or lose information. That is, this routine must actually
1978 * insert this page into the given map NOW.
1979 */
1980
1981 int
1982 pmap_enter(pmap, va, pa, prot, flags)
1983 pmap_t pmap;
1984 vaddr_t va;
1985 paddr_t pa;
1986 vm_prot_t prot;
1987 int flags;
1988 {
1989 pt_entry_t *pte;
1990 u_int npte;
1991 int bank, off;
1992 struct pv_entry *pv = NULL;
1993 paddr_t opa;
1994 int nflags;
1995 boolean_t wired = (flags & PMAP_WIRED) != 0;
1996
1997 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
1998 va, pa, pmap, prot, wired));
1999
2000 #ifdef DIAGNOSTIC
2001 /* Valid address ? */
2002 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
2003 panic("pmap_enter: too big");
2004 if (pmap != pmap_kernel() && va != 0) {
2005 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2006 panic("pmap_enter: kernel page in user map");
2007 } else {
2008 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2009 panic("pmap_enter: user page in kernel map");
2010 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2011 panic("pmap_enter: entering PT page");
2012 }
2013 #endif
2014
2015 /*
2016 * Get a pointer to the pte for this virtual address. If the
2017 * pte pointer is NULL then we are missing the L2 page table
2018 * so we need to create one.
2019 */
2020 pte = pmap_pte(pmap, va);
2021 if (!pte) {
2022 paddr_t l2pa;
2023 struct vm_page *m;
2024
2025 /* Allocate a page table */
2026 for (;;) {
2027 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2028 if (m != NULL)
2029 break;
2030
2031 /*
2032 * No page available. If we're the kernel
2033 * pmap, we die, since we might not have
2034 * a valid thread context. For user pmaps,
2035 * we assume that we _do_ have a valid thread
2036 * context, so we wait here for the pagedaemon
2037 * to free up some pages.
2038 *
2039 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2040 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2041 * XXX SO THIS IS PROBABLY SAFE. In any case,
2042 * XXX other pmap modules claim it is safe to
2043 * XXX sleep here if it's a user pmap.
2044 */
2045 if (pmap == pmap_kernel())
2046 panic("pmap_enter: no free pages");
2047 else
2048 uvm_wait("pmap_enter");
2049 }
2050
2051 /* Wire this page table into the L1. */
2052 l2pa = VM_PAGE_TO_PHYS(m);
2053 pmap_zero_page(l2pa);
2054 pmap_map_in_l1(pmap, va, l2pa);
2055 ++pmap->pm_stats.resident_count;
2056
2057 pte = pmap_pte(pmap, va);
2058 #ifdef DIAGNOSTIC
2059 if (!pte)
2060 panic("pmap_enter: no pte");
2061 #endif
2062 }
2063
2064 nflags = 0;
2065 if (prot & VM_PROT_WRITE)
2066 nflags |= PT_Wr;
2067 if (wired)
2068 nflags |= PT_W;
2069
2070 /* More debugging info */
2071 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2072 *pte));
2073
2074 /* Is the pte valid ? If so then this page is already mapped */
2075 if (pmap_pte_v(pte)) {
2076 /* Get the physical address of the current page mapped */
2077 opa = pmap_pte_pa(pte);
2078
2079 #ifdef MYCROFT_HACK
2080 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2081 #endif
2082
2083 /* Are we mapping the same page ? */
2084 if (opa == pa) {
2085 /* All we must be doing is changing the protection */
2086 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2087 va, pa));
2088
2089 /* Has the wiring changed ? */
2090 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2091 pv = &vm_physmem[bank].pmseg.pvent[off];
2092 (void) pmap_modify_pv(pmap, va, pv,
2093 PT_Wr | PT_W, nflags);
2094 }
2095 } else {
2096 /* We are replacing the page with a new one. */
2097 cpu_cache_purgeID_rng(va, NBPG);
2098
2099 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2100 va, pa, opa));
2101
2102 /*
2103 * If it is part of our managed memory then we
2104 * must remove it from the PV list
2105 */
2106 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2107 pv = &vm_physmem[bank].pmseg.pvent[off];
2108 pmap_remove_pv(pmap, va, pv);
2109 }
2110
2111 goto enter;
2112 }
2113 } else {
2114 opa = 0;
2115 pmap_pte_addref(pmap, va);
2116
2117 /* pte is not valid so we must be hooking in a new page */
2118 ++pmap->pm_stats.resident_count;
2119
2120 enter:
2121 /*
2122 * Enter on the PV list if part of our managed memory
2123 */
2124 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2125 pv = &vm_physmem[bank].pmseg.pvent[off];
2126 pmap_enter_pv(pmap, va, pv, nflags);
2127 }
2128 }
2129
2130 #ifdef MYCROFT_HACK
2131 if (mycroft_hack)
2132 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2133 #endif
2134
2135 /* Construct the pte, giving the correct access. */
2136 npte = (pa & PG_FRAME);
2137
2138 /* VA 0 is magic. */
2139 if (pmap != pmap_kernel() && va != 0)
2140 npte |= PT_AP(AP_U);
2141
2142 if (bank != -1) {
2143 #ifdef DIAGNOSTIC
2144 if ((flags & VM_PROT_ALL) & ~prot)
2145 panic("pmap_enter: access_type exceeds prot");
2146 #endif
2147 npte |= PT_C | PT_B;
2148 if (flags & VM_PROT_WRITE) {
2149 npte |= L2_SPAGE | PT_AP(AP_W);
2150 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2151 } else if (flags & VM_PROT_ALL) {
2152 npte |= L2_SPAGE;
2153 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2154 } else
2155 npte |= L2_INVAL;
2156 } else {
2157 if (prot & VM_PROT_WRITE)
2158 npte |= L2_SPAGE | PT_AP(AP_W);
2159 else if (prot & VM_PROT_ALL)
2160 npte |= L2_SPAGE;
2161 else
2162 npte |= L2_INVAL;
2163 }
2164
2165 #ifdef MYCROFT_HACK
2166 if (mycroft_hack)
2167 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2168 #endif
2169
2170 *pte = npte;
2171
2172 if (bank != -1)
2173 pmap_vac_me_harder(pmap, pv);
2174
2175 /* Better flush the TLB ... */
2176 cpu_tlb_flushID_SE(va);
2177
2178 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2179
2180 return 0;
2181 }
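
/*
 * Example (editor's illustrative sketch, not part of the original code):
 * a typical call that maps one page read/write and wired into the kernel
 * pmap.  As the code above shows, the access_type bits and PMAP_WIRED are
 * both carried in the "flags" argument; "va" and "pa" are placeholders.
 *
 *	(void) pmap_enter(pmap_kernel(), va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
 */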
2182
2183 void
2184 pmap_kenter_pa(va, pa, prot)
2185 vaddr_t va;
2186 paddr_t pa;
2187 vm_prot_t prot;
2188 {
2189 pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2190 }
2191
2192 void
2193 pmap_kremove(va, len)
2194 vaddr_t va;
2195 vsize_t len;
2196 {
2197 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2198 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2199 }
2200 }
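
/*
 * Example (editor's illustrative sketch): the two helpers above are meant
 * to be used as a pair for wired kernel mappings; "kva" and "pa" are
 * placeholder names.
 *
 *	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
 *	... access the page through kva ...
 *	pmap_kremove(kva, PAGE_SIZE);
 */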
2201
2202 /*
2203 * pmap_page_protect:
2204 *
2205 * Lower the permission for all mappings to a given page.
2206 */
2207
2208 void
2209 pmap_page_protect(pg, prot)
2210 struct vm_page *pg;
2211 vm_prot_t prot;
2212 {
2213 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2214
2215 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2216
2217 switch(prot) {
2218 case VM_PROT_READ:
2219 case VM_PROT_READ|VM_PROT_EXECUTE:
2220 pmap_copy_on_write(pa);
2221 break;
2222
2223 case VM_PROT_ALL:
2224 break;
2225
2226 default:
2227 pmap_remove_all(pa);
2228 break;
2229 }
2230 }
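
/*
 * Example (editor's note): how the switch above is typically driven.
 * Lowering a page to read-only write-protects every mapping of it, while
 * lowering it to VM_PROT_NONE removes all of its mappings; "pg" is a
 * placeholder.
 *
 *	pmap_page_protect(pg, VM_PROT_READ);	-> pmap_copy_on_write(pa)
 *	pmap_page_protect(pg, VM_PROT_NONE);	-> pmap_remove_all(pa)
 */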
2231
2232
2233 /*
2234 * Routine: pmap_unwire
2235 * Function: Clear the wired attribute for a map/virtual-address
2236 * pair.
2237 * In/out conditions:
2238 * The mapping must already exist in the pmap.
2239 */
2240
2241 void
2242 pmap_unwire(pmap, va)
2243 pmap_t pmap;
2244 vaddr_t va;
2245 {
2246 pt_entry_t *pte;
2247 paddr_t pa;
2248 int bank, off;
2249 struct pv_entry *pv;
2250
2251 /*
2252 * Make sure pmap is valid. -dct
2253 */
2254 if (pmap == NULL)
2255 return;
2256
2257 /* Get the pte */
2258 pte = pmap_pte(pmap, va);
2259 if (!pte)
2260 return;
2261
2262 /* Extract the physical address of the page */
2263 pa = pmap_pte_pa(pte);
2264
2265 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2266 return;
2267 pv = &vm_physmem[bank].pmseg.pvent[off];
2268 /* Update the wired bit in the pv entry for this page. */
2269 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2270 }
2271
2272 /*
2273 * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
2274 *
2275 * Return the pointer to a page table entry corresponding to the supplied
2276 * virtual address.
2277 *
2278 * The page directory is first checked to make sure that a page table
2279 * for the address in question exists and if it does a pointer to the
2280 * entry is returned.
2281 *
2282  * The way this works is that the current pmap's page tables are mapped
2283  * at PROCESS_PAGE_TBLS_BASE, and another pmap's are temporarily mapped at
2284  * ALT_PAGE_TBLS_BASE..ALT_PAGE_TBLS_BASE+4MB, so PTEs can be located quickly.
2285 */
2286 pt_entry_t *
2287 pmap_pte(pmap, va)
2288 pmap_t pmap;
2289 vaddr_t va;
2290 {
2291 pt_entry_t *ptp;
2292 pt_entry_t *result;
2293
2294 /* The pmap must be valid */
2295 if (!pmap)
2296 return(NULL);
2297
2298 /* Return the address of the pte */
2299 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2300 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2301
2302 /* Do we have a valid pde ? If not we don't have a page table */
2303 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2304 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2305 pmap_pde(pmap, va)));
2306 return(NULL);
2307 }
2308
2309 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2310 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2311 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2312 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2313
2314 /*
2315 	 * If the pmap is the kernel pmap or the pmap is the active one,
2316 	 * then we can just return a pointer to the entry relative to
2317 	 * PROCESS_PAGE_TBLS_BASE.
2318 * Otherwise we need to map the page tables to an alternative
2319 * address and reference them there.
2320 */
2321 if (pmap == kernel_pmap || pmap->pm_pptpt
2322 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2323 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2324 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2325 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2326 } else {
2327 struct proc *p = curproc;
2328
2329 /* If we don't have a valid curproc use proc0 */
2330 /* Perhaps we should just use kernel_pmap instead */
2331 if (p == NULL)
2332 p = &proc0;
2333 #ifdef DIAGNOSTIC
2334 /*
2335 		 * The pmap should always be valid for the process, so
2336 		 * drop into the debugger if it is not.
2337 */
2338 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2339 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2340 va, p, p->p_vmspace);
2341 console_debugger();
2342 }
2343 /*
2344 * The pmap for the current process should be mapped. If it
2345 * is not then we have a problem.
2346 */
2347 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2348 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2349 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2350 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2351 printf("pmap pagetable = P%08lx current = P%08x ",
2352 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2353 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2354 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2355 PG_FRAME));
2356 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2357 panic("pmap_pte: current and pmap mismatch\n");
2358 }
2359 #endif
2360
2361 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2362 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2363 pmap->pm_pptpt);
2364 cpu_tlb_flushD();
2365 }
2366 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2367 ((va >> (PGSHIFT-2)) & ~3)));
2368 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2369 return(result);
2370 }
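
/*
 * Worked example (editor's note, assuming PGSHIFT == 12, i.e. 4KB pages):
 * the expression (va >> (PGSHIFT-2)) & ~3 above converts a virtual address
 * into the byte offset of its 4-byte PTE inside the 4MB linear page table
 * window.  For va = 0x00012000, va >> 10 == 0x48, which is PTE index 0x12
 * (va / NBPG) multiplied by sizeof(pt_entry_t).
 */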
2371
2372 /*
2373 * Routine: pmap_extract
2374 * Function:
2375 * Extract the physical page address associated
2376 * with the given map/virtual_address pair.
2377 */
2378 boolean_t
2379 pmap_extract(pmap, va, pap)
2380 pmap_t pmap;
2381 vaddr_t va;
2382 paddr_t *pap;
2383 {
2384 pt_entry_t *pte;
2385 paddr_t pa;
2386
2387 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2388
2389 /*
2390 * Get the pte for this virtual address. If there is no pte
2391 * then there is no page table etc.
2392 */
2393
2394 pte = pmap_pte(pmap, va);
2395 if (!pte)
2396 return(FALSE);
2397
2398 	/* Is the pte valid ? If not then no page is actually mapped here */
2399 if (!pmap_pte_v(pte))
2400 return(FALSE);
2401
2402 /* Return the physical address depending on the PTE type */
2403 /* XXX What about L1 section mappings ? */
2404 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2405 /* Extract the physical address from the pte */
2406 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2407
2408 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2409 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2410
2411 if (pap != NULL)
2412 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2413 return (TRUE);
2414 } else {
2415 /* Extract the physical address from the pte */
2416 pa = pmap_pte_pa(pte);
2417
2418 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2419 (pa | (va & ~PG_FRAME))));
2420
2421 if (pap != NULL)
2422 *pap = pa | (va & ~PG_FRAME);
2423 return (TRUE);
2424 }
2425 }
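
/*
 * Example (editor's illustrative sketch): translating a virtual address
 * into a physical one with the routine above; "pmap" and "va" are
 * placeholders.
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap, va, &pa) == FALSE)
 *		printf("no page mapped at va %08lx\n", va);
 */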
2426
2427
2428 /*
2429 * Copy the range specified by src_addr/len from the source map to the
2430 * range dst_addr/len in the destination map.
2431 *
2432 * This routine is only advisory and need not do anything.
2433 */
2434
2435 void
2436 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2437 pmap_t dst_pmap;
2438 pmap_t src_pmap;
2439 vaddr_t dst_addr;
2440 vsize_t len;
2441 vaddr_t src_addr;
2442 {
2443 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2444 dst_pmap, src_pmap, dst_addr, len, src_addr));
2445 }
2446
2447 #if defined(PMAP_DEBUG)
2448 void
2449 pmap_dump_pvlist(phys, m)
2450 vaddr_t phys;
2451 char *m;
2452 {
2453 struct pv_entry *pv;
2454 int bank, off;
2455
2456 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2457 printf("INVALID PA\n");
2458 return;
2459 }
2460 pv = &vm_physmem[bank].pmseg.pvent[off];
2461 printf("%s %08lx:", m, phys);
2462 if (pv->pv_pmap == NULL) {
2463 printf(" no mappings\n");
2464 return;
2465 }
2466
2467 for (; pv; pv = pv->pv_next)
2468 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2469 pv->pv_va, pv->pv_flags);
2470
2471 printf("\n");
2472 }
2473
2474 #endif /* PMAP_DEBUG */
2475
2476 boolean_t
2477 pmap_testbit(pa, setbits)
2478 paddr_t pa;
2479 int setbits;
2480 {
2481 int bank, off;
2482
2483 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2484
2485 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2486 return(FALSE);
2487
2488 /*
2489 * Check saved info only
2490 */
2491 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2492 PDEBUG(0, printf("pmap_attributes = %02x\n",
2493 vm_physmem[bank].pmseg.attrs[off]));
2494 return(TRUE);
2495 }
2496
2497 return(FALSE);
2498 }
2499
2500
2501 /*
2502 * Modify pte bits for all ptes corresponding to the given physical address.
2503 * We use `maskbits' rather than `clearbits' because we're always passing
2504 * constants and the latter would require an extra inversion at run-time.
2505 */
2506
2507 void
2508 pmap_clearbit(pa, maskbits)
2509 paddr_t pa;
2510 int maskbits;
2511 {
2512 struct pv_entry *pv;
2513 pt_entry_t *pte;
2514 vaddr_t va;
2515 int bank, off;
2516 int s;
2517
2518 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2519 pa, maskbits));
2520 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2521 return;
2522 pv = &vm_physmem[bank].pmseg.pvent[off];
2523 s = splvm();
2524
2525 /*
2526 * Clear saved attributes (modify, reference)
2527 */
2528 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2529
2530 if (pv->pv_pmap == NULL) {
2531 splx(s);
2532 return;
2533 }
2534
2535 /*
2536 	 * Loop over all current mappings, setting/clearing as appropriate
2537 */
2538 for (; pv; pv = pv->pv_next) {
2539 va = pv->pv_va;
2540
2541 /*
2542 * XXX don't write protect pager mappings
2543 */
2544 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2545 printf("pmap_clearbit: bogon alpha\n");
2546 continue;
2547 }
2548
2549 pv->pv_flags &= ~maskbits;
2550 pte = pmap_pte(pv->pv_pmap, va);
2551 if (maskbits & (PT_Wr|PT_M))
2552 *pte = *pte & ~PT_AP(AP_W);
2553 if (maskbits & PT_H)
2554 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2555 }
2556 cpu_tlb_flushID();
2557
2558 splx(s);
2559 }
2560
2561
2562 boolean_t
2563 pmap_clear_modify(pg)
2564 struct vm_page *pg;
2565 {
2566 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2567 boolean_t rv;
2568
2569 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2570 rv = pmap_testbit(pa, PT_M);
2571 pmap_clearbit(pa, PT_M);
2572 return rv;
2573 }
2574
2575
2576 boolean_t
2577 pmap_clear_reference(pg)
2578 struct vm_page *pg;
2579 {
2580 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2581 boolean_t rv;
2582
2583 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2584 rv = pmap_testbit(pa, PT_H);
2585 pmap_clearbit(pa, PT_H);
2586 return rv;
2587 }
2588
2589
2590 void
2591 pmap_copy_on_write(pa)
2592 paddr_t pa;
2593 {
2594 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2595 pmap_clearbit(pa, PT_Wr);
2596 }
2597
2598
2599 boolean_t
2600 pmap_is_modified(pg)
2601 struct vm_page *pg;
2602 {
2603 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2604 boolean_t result;
2605
2606 result = pmap_testbit(pa, PT_M);
2607 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2608 return (result);
2609 }
2610
2611
2612 boolean_t
2613 pmap_is_referenced(pg)
2614 struct vm_page *pg;
2615 {
2616 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2617 boolean_t result;
2618
2619 result = pmap_testbit(pa, PT_H);
2620 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2621 return (result);
2622 }
2623
2624
2625 int
2626 pmap_modified_emulation(pmap, va)
2627 pmap_t pmap;
2628 vaddr_t va;
2629 {
2630 pt_entry_t *pte;
2631 paddr_t pa;
2632 int bank, off;
2633 struct pv_entry *pv;
2634 u_int flags;
2635
2636 PDEBUG(2, printf("pmap_modified_emulation\n"));
2637
2638 /* Get the pte */
2639 pte = pmap_pte(pmap, va);
2640 if (!pte) {
2641 PDEBUG(2, printf("no pte\n"));
2642 return(0);
2643 }
2644
2645 PDEBUG(1, printf("*pte=%08x\n", *pte));
2646
2647 /* Check for a zero pte */
2648 if (*pte == 0)
2649 return(0);
2650
2651 /* This can happen if user code tries to access kernel memory. */
2652 if ((*pte & PT_AP(AP_W)) != 0)
2653 return (0);
2654
2655 /* Extract the physical address of the page */
2656 pa = pmap_pte_pa(pte);
2657 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2658 return(0);
2659
2660 /* Get the current flags for this page. */
2661 pv = &vm_physmem[bank].pmseg.pvent[off];
2662 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2663 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2664
2665 /*
2666 * Do the flags say this page is writable ? If not then it is a
2667 * genuine write fault. If yes then the write fault is our fault
2668 	 * as we did not reflect the write access in the PTE. Now that we
2669 	 * know a write has occurred, we can correct this and also set the
2670 	 * modified bit.
2671 */
2672 if (~flags & PT_Wr)
2673 return(0);
2674
2675 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2676 va, pte, *pte));
2677 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2678 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2679 PDEBUG(0, printf("->(%08x)\n", *pte));
2680
2681 /* Return, indicating the problem has been dealt with */
2682 cpu_tlb_flushID_SE(va);
2683 return(1);
2684 }
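
/*
 * Editor's note -- a hypothetical sketch of how a data abort handler is
 * expected to use the routine above before falling back to the full
 * fault path:
 *
 *	if (pmap_modified_emulation(pmap, va))
 *		return;		(the write access has been emulated)
 *	... otherwise hand the fault to uvm_fault() as usual ...
 */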
2685
2686
2687 int
2688 pmap_handled_emulation(pmap, va)
2689 pmap_t pmap;
2690 vaddr_t va;
2691 {
2692 pt_entry_t *pte;
2693 paddr_t pa;
2694 int bank, off;
2695
2696 PDEBUG(2, printf("pmap_handled_emulation\n"));
2697
2698 /* Get the pte */
2699 pte = pmap_pte(pmap, va);
2700 if (!pte) {
2701 PDEBUG(2, printf("no pte\n"));
2702 return(0);
2703 }
2704
2705 PDEBUG(1, printf("*pte=%08x\n", *pte));
2706
2707 /* Check for a zero pte */
2708 if (*pte == 0)
2709 return(0);
2710
2711 /* This can happen if user code tries to access kernel memory. */
2712 if ((*pte & L2_MASK) != L2_INVAL)
2713 return (0);
2714
2715 /* Extract the physical address of the page */
2716 pa = pmap_pte_pa(pte);
2717 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2718 return(0);
2719
2720 /*
2721 	 * OK, we just enable the pte and mark the attributes as handled (referenced)
2722 */
2723 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2724 va, pte, *pte));
2725 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2726 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2727 PDEBUG(0, printf("->(%08x)\n", *pte));
2728
2729 /* Return, indicating the problem has been dealt with */
2730 cpu_tlb_flushID_SE(va);
2731 return(1);
2732 }
2733
2734 /*
2735 * pmap_collect: free resources held by a pmap
2736 *
2737 * => optional function.
2738 * => called when a process is swapped out to free memory.
2739 */
2740
2741 void
2742 pmap_collect(pmap)
2743 pmap_t pmap;
2744 {
2745 }
2746
2747 /*
2748 * Routine: pmap_procwr
2749 *
2750 * Function:
2751  *	Synchronize caches corresponding to [va, va+len) in p.
2752 *
2753 */
2754 void
2755 pmap_procwr(p, va, len)
2756 struct proc *p;
2757 vaddr_t va;
2758 int len;
2759 {
2760 /* We only need to do anything if it is the current process. */
2761 if (p == curproc)
2762 cpu_cache_syncI_rng(va, len);
2763 }
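
/*
 * Example (editor's illustrative sketch): a caller that has just written
 * instructions into a process's address space (for instance a debugger
 * planting a breakpoint of "size" bytes; both names are hypothetical)
 * would follow up with
 *
 *	pmap_procwr(p, va, size);
 *
 * so that the instruction cache becomes coherent with the new text.
 */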
2764
2765 /* End of pmap.c */
2766