      1 /*	$NetBSD: pmap.c,v 1.7 2001/04/24 04:30:53 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1994-1998 Mark Brinicombe.
41 * Copyright (c) 1994 Brini.
42 * All rights reserved.
43 *
44 * This code is derived from software written for Brini by Mark Brinicombe
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Mark Brinicombe.
57 * 4. The name of the author may not be used to endorse or promote products
58 * derived from this software without specific prior written permission.
59 *
60 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
61 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
62 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
63 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
64 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
65 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
66 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
67 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
68 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     69  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
         *
70 * RiscBSD kernel project
71 *
72 * pmap.c
73 *
     74  * Machine dependent vm stuff
75 *
76 * Created : 20/09/94
77 */
78
79 /*
80 * Performance improvements, UVM changes, overhauls and part-rewrites
81 * were contributed by Neil A. Carson <neil (at) causality.com>.
82 */
83
84 /*
85 * The dram block info is currently referenced from the bootconfig.
86 * This should be placed in a separate structure.
87 */
88
89 /*
90 * Special compilation symbols
91 * PMAP_DEBUG - Build in pmap_debug_level code
92 */
93
94 /* Include header files */
95
96 #include "opt_pmap_debug.h"
97 #include "opt_ddb.h"
98
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/kernel.h>
102 #include <sys/systm.h>
103 #include <sys/proc.h>
104 #include <sys/malloc.h>
105 #include <sys/user.h>
106
107 #include <uvm/uvm.h>
108
109 #include <machine/bootconfig.h>
110 #include <machine/bus.h>
111 #include <machine/pmap.h>
112 #include <machine/pcb.h>
113 #include <machine/param.h>
114 #include <machine/katelib.h>
115
116 #ifdef PMAP_DEBUG
117 #define PDEBUG(_lev_,_stat_) \
118 if (pmap_debug_level >= (_lev_)) \
119 ((_stat_))
120 int pmap_debug_level = -2;
121 #else /* PMAP_DEBUG */
122 #define PDEBUG(_lev_,_stat_) /* Nothing */
123 #endif /* PMAP_DEBUG */
124
125 struct pmap kernel_pmap_store;
126 pmap_t kernel_pmap;
127
128 pagehook_t page_hook0;
129 pagehook_t page_hook1;
130 char *memhook;
131 pt_entry_t msgbufpte;
132 extern caddr_t msgbufaddr;
133
134 #ifdef DIAGNOSTIC
135 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
136 #endif
137
138 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
139
140 int pv_nfree = 0;
141
142 vsize_t npages;
143
144 extern paddr_t physical_start;
145 extern paddr_t physical_freestart;
146 extern paddr_t physical_end;
147 extern paddr_t physical_freeend;
148 extern unsigned int free_pages;
149 extern int max_processes;
150
151 vaddr_t virtual_start;
152 vaddr_t virtual_end;
153
154 vaddr_t avail_start;
155 vaddr_t avail_end;
156
157 extern pv_addr_t systempage;
158
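/*
 * ALLOC_PAGE_HOOK() reserves `s' bytes of kernel virtual address space
 * starting at virtual_start and remembers a pointer to the PTE for its
 * first page; the resulting hooks are used by pmap_zero_page() and
 * pmap_copy_page() to temporarily map the physical pages they operate on.
 */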
159 #define ALLOC_PAGE_HOOK(x, s) \
160 x.va = virtual_start; \
161 x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
162 virtual_start += s;
163
164 /* Variables used by the L1 page table queue code */
165 SIMPLEQ_HEAD(l1pt_queue, l1pt);
166 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
167 int l1pt_static_queue_count; /* items in the static l1 queue */
168 int l1pt_static_create_count; /* static l1 items created */
169 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
170 int l1pt_queue_count; /* items in the l1 queue */
171 int l1pt_create_count; /* stat - L1's create count */
172 int l1pt_reuse_count; /* stat - L1's reused count */
173
174 /* Local function prototypes (not used outside this file) */
175 pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
176 int pmap_page_index __P((paddr_t pa));
177 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
178 paddr_t pa, unsigned int flags));
179 void pmap_copy_on_write __P((paddr_t pa));
180 void pmap_pinit __P((pmap_t));
181 void pmap_freepagedir __P((pmap_t));
182 void pmap_release __P((pmap_t));
183
184 /* Other function prototypes */
185 extern void bzero_page __P((vaddr_t));
186 extern void bcopy_page __P((vaddr_t, vaddr_t));
187
188 struct l1pt *pmap_alloc_l1pt __P((void));
189 static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
190 vaddr_t l2pa));
191
192 #ifdef MYCROFT_HACK
193 int mycroft_hack = 0;
194 #endif
195
196 /* Function to set the debug level of the pmap code */
197
198 #ifdef PMAP_DEBUG
199 void
200 pmap_debug(level)
201 int level;
202 {
203 pmap_debug_level = level;
204 printf("pmap_debug: level=%d\n", pmap_debug_level);
205 }
206 #endif /* PMAP_DEBUG */
207
208 #include "isadma.h"
209
210 #if NISADMA > 0
211 /*
212 * Used to protect memory for ISA DMA bounce buffers. If, when loading
213 * pages into the system, memory intersects with any of these ranges,
214 * the intersecting memory will be loaded into a lower-priority free list.
215 */
216 bus_dma_segment_t *pmap_isa_dma_ranges;
217 int pmap_isa_dma_nranges;
218
219 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
220 paddr_t *, psize_t *));
221
222 /*
223 * Check if a memory range intersects with an ISA DMA range, and
224 * return the page-rounded intersection if it does. The intersection
225 * will be placed on a lower-priority free list.
226 */
227 boolean_t
228 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
229 paddr_t pa;
230 psize_t size;
231 paddr_t *pap;
232 psize_t *sizep;
233 {
234 bus_dma_segment_t *ds;
235 int i;
236
237 if (pmap_isa_dma_ranges == NULL)
238 return (FALSE);
239
240 for (i = 0, ds = pmap_isa_dma_ranges;
241 i < pmap_isa_dma_nranges; i++, ds++) {
242 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
243 /*
244 * Beginning of region intersects with this range.
245 */
246 *pap = trunc_page(pa);
247 *sizep = round_page(min(pa + size,
248 ds->ds_addr + ds->ds_len) - pa);
249 return (TRUE);
250 }
251 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
252 /*
253 * End of region intersects with this range.
254 */
255 *pap = trunc_page(ds->ds_addr);
256 *sizep = round_page(min((pa + size) - ds->ds_addr,
257 ds->ds_len));
258 return (TRUE);
259 }
260 }
261
262 /*
263 * No intersection found.
264 */
265 return (FALSE);
266 }
267 #endif /* NISADMA > 0 */
268
269 /*
    270  * Functions for manipulating pv_entry structures. These are used to keep a
271 * record of the mappings of virtual addresses and the associated physical
272 * pages.
273 */
274
275 /*
276 * Allocate a new pv_entry structure from the freelist. If the list is
277 * empty allocate a new page and fill the freelist.
278 */
279 struct pv_entry *
280 pmap_alloc_pv()
281 {
282 struct pv_page *pvp;
283 struct pv_entry *pv;
284 int i;
285
286 /*
287 * Do we have any free pv_entry structures left ?
288 * If not allocate a page of them
289 */
290
291 if (pv_nfree == 0) {
292 /* NOTE: can't lock kernel_map here */
293 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
294 if (pvp == 0)
295 panic("pmap_alloc_pv: kmem_alloc() failed");
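		/*
		 * Carve the fresh page into pv_entry slots: slot 0 is
		 * returned to the caller below, while slots 1..NPVPPG-1
		 * are chained together to form the page's freelist.
		 */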
296 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
297 for (i = NPVPPG - 2; i; i--, pv++)
298 pv->pv_next = pv + 1;
299 pv->pv_next = 0;
300 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
301 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
302 pv = &pvp->pvp_pv[0];
303 } else {
304 --pv_nfree;
305 pvp = pv_page_freelist.tqh_first;
306 if (--pvp->pvp_pgi.pgi_nfree == 0) {
307 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
308 }
309 pv = pvp->pvp_pgi.pgi_freelist;
310 #ifdef DIAGNOSTIC
311 if (pv == 0)
312 panic("pmap_alloc_pv: pgi_nfree inconsistent");
313 #endif /* DIAGNOSTIC */
314 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
315 }
316 return pv;
317 }
318
319 /*
320 * Release a pv_entry structure putting it back on the freelist.
321 */
322
323 void
324 pmap_free_pv(pv)
325 struct pv_entry *pv;
326 {
327 struct pv_page *pvp;
328
329 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
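	/*
	 * pv_entry structures are allocated NPVPPG to a page, so the owning
	 * pv_page is found by truncating the entry's address to a page
	 * boundary.  In the switch below, case 1 means the page has just
	 * regained a free slot (put it back on the freelist and fall
	 * through to chain the entry), case NPVPPG means the page is now
	 * entirely free (release it), and the default case simply chains
	 * the entry onto the page's freelist.
	 */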
330 switch (++pvp->pvp_pgi.pgi_nfree) {
331 case 1:
332 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
333 default:
334 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
335 pvp->pvp_pgi.pgi_freelist = pv;
336 ++pv_nfree;
337 break;
338 case NPVPPG:
339 pv_nfree -= NPVPPG - 1;
340 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
341 FREE((vaddr_t)pvp, M_VMPVENT);
342 break;
343 }
344 }
345
346 #if 0
347 void
348 pmap_collect_pv()
349 {
350 struct pv_page_list pv_page_collectlist;
351 struct pv_page *pvp, *npvp;
352 struct pv_entry *ph, *ppv, *pv, *npv;
353 int s;
354
355 TAILQ_INIT(&pv_page_collectlist);
356
357 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
358 if (pv_nfree < NPVPPG)
359 break;
360 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
361 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
362 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
363 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
364 pvp_pgi.pgi_list);
365 pv_nfree -= NPVPPG;
366 pvp->pvp_pgi.pgi_nfree = -1;
367 }
368 }
369
370 if (pv_page_collectlist.tqh_first == 0)
371 return;
372
373 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
374 if (ph->pv_pmap == 0)
375 continue;
376 s = splvm();
377 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
378 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
379 if (pvp->pvp_pgi.pgi_nfree == -1) {
380 pvp = pv_page_freelist.tqh_first;
381 if (--pvp->pvp_pgi.pgi_nfree == 0) {
382 TAILQ_REMOVE(&pv_page_freelist,
383 pvp, pvp_pgi.pgi_list);
384 }
385 npv = pvp->pvp_pgi.pgi_freelist;
386 #ifdef DIAGNOSTIC
387 if (npv == 0)
388 panic("pmap_collect_pv: pgi_nfree inconsistent");
389 #endif /* DIAGNOSTIC */
390 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
391 *npv = *pv;
392 ppv->pv_next = npv;
393 ppv = npv;
394 } else
395 ppv = pv;
396 }
397 splx(s);
398 }
399
400 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
401 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
402 FREE((vaddr_t)pvp, M_VMPVENT);
403 }
404 }
405 #endif
406
407 /*
408 * Enter a new physical-virtual mapping into the pv table
409 */
410
411 /*__inline*/ void
412 pmap_enter_pv(pmap, va, pv, flags)
413 pmap_t pmap;
414 vaddr_t va;
415 struct pv_entry *pv;
416 u_int flags;
417 {
418 struct pv_entry *npv;
419 u_int s;
420
421 #ifdef DIAGNOSTIC
422 if (!pmap_initialized)
423 panic("pmap_enter_pv: !pmap_initialized");
424 #endif
425
426 s = splvm();
427
428 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
429 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
430
431 if (pv->pv_pmap == NULL) {
432 /*
433 * No entries yet, use header as the first entry
434 */
435 pv->pv_va = va;
436 pv->pv_pmap = pmap;
437 pv->pv_next = NULL;
438 pv->pv_flags = flags;
439 } else {
440 /*
441 * There is at least one other VA mapping this page.
442 * Place this entry after the header.
443 */
444 #ifdef PMAP_DEBUG
445 for (npv = pv; npv; npv = npv->pv_next)
446 if (pmap == npv->pv_pmap && va == npv->pv_va)
447 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
448 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
449 #endif
450 npv = pmap_alloc_pv();
451 npv->pv_va = va;
452 npv->pv_pmap = pmap;
453 npv->pv_flags = flags;
454 npv->pv_next = pv->pv_next;
455 pv->pv_next = npv;
456 }
457
458 if (flags & PT_W)
459 ++pmap->pm_stats.wired_count;
460
461 splx(s);
462 }
463
464
465 /*
466 * Remove a physical-virtual mapping from the pv table
467 */
468
469 /*__inline*/ void
470 pmap_remove_pv(pmap, va, pv)
471 pmap_t pmap;
472 vaddr_t va;
473 struct pv_entry *pv;
474 {
475 struct pv_entry *npv;
476 u_int s;
477 u_int flags = 0;
478
479 #ifdef DIAGNOSTIC
480 if (!pmap_initialized)
481 panic("pmap_remove_pv: !pmap_initialized");
482 #endif
483
484 s = splvm();
485
486 /*
487 * If it is the first entry on the list, it is actually
488 * in the header and we must copy the following entry up
489 * to the header. Otherwise we must search the list for
490 * the entry. In either case we free the now unused entry.
491 */
492
493 if (pmap == pv->pv_pmap && va == pv->pv_va) {
494 npv = pv->pv_next;
495 if (npv) {
496 *pv = *npv;
497 flags = npv->pv_flags;
498 pmap_free_pv(npv);
499 } else {
500 flags = pv->pv_flags;
501 pv->pv_pmap = NULL;
502 }
503 } else {
504 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
505 if (pmap == npv->pv_pmap && va == npv->pv_va)
506 break;
507 }
508 if (npv) {
509 pv->pv_next = npv->pv_next;
510 flags = npv->pv_flags;
511 pmap_free_pv(npv);
512 } else
513 panic("pmap_remove_pv: lost entry");
514 }
515
516 if (flags & PT_W)
517 --pmap->pm_stats.wired_count;
518
519 splx(s);
520 }
521
522 /*
523 * Modify a physical-virtual mapping in the pv table
524 */
525
526 /*__inline */ u_int
527 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
528 pmap_t pmap;
529 vaddr_t va;
530 struct pv_entry *pv;
531 u_int bic_mask;
532 u_int eor_mask;
533 {
534 struct pv_entry *npv;
535 u_int s;
536 u_int flags, oflags;
537
538 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
539 pmap, va, pv, bic_mask, eor_mask));
540
541 #ifdef DIAGNOSTIC
542 if (!pmap_initialized)
543 panic("pmap_modify_pv: !pmap_initialized");
544 #endif
545
546 s = splvm();
547
548 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
549 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
550
551 /*
552 * There is at least one VA mapping this page.
553 */
554
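	/*
	 * The new flags are computed as (flags & ~bic_mask) ^ eor_mask:
	 * bic_mask clears bits (as with the ARM BIC instruction) and
	 * eor_mask then toggles bits, so a caller can clear, set or flip
	 * individual PT_* flags in a single call.
	 */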
555 for (npv = pv; npv; npv = npv->pv_next) {
556 if (pmap == npv->pv_pmap && va == npv->pv_va) {
557 oflags = npv->pv_flags;
558 npv->pv_flags = flags =
559 ((oflags & ~bic_mask) ^ eor_mask);
560 if ((flags ^ oflags) & PT_W) {
561 if (flags & PT_W)
562 ++pmap->pm_stats.wired_count;
563 else
564 --pmap->pm_stats.wired_count;
565 }
566 PDEBUG(0, printf("done flags=%08x\n", flags));
567 splx(s);
568 return (oflags);
569 }
570 }
571
572 PDEBUG(0, printf("done.\n"));
573 splx(s);
574 return (0);
575 }
576
577
578 /*
579 * Map the specified level 2 pagetable into the level 1 page table for
580 * the given pmap to cover a chunk of virtual address space starting from the
581 * address specified.
582 */
583 static /*__inline*/ void
584 pmap_map_in_l1(pmap, va, l2pa)
585 pmap_t pmap;
586 vaddr_t va, l2pa;
587 {
588 vaddr_t ptva;
589
590 /* Calculate the index into the L1 page table. */
591 ptva = (va >> PDSHIFT) & ~3;
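	/*
	 * Each L1 entry points at a 1KB coarse L2 table covering 1MB of
	 * address space, and a single 4KB page holds four such tables;
	 * the "& ~3" above rounds down to the group of four L1 entries
	 * (a 4MB chunk) served by one page of L2 page table.
	 */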
592
593 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
594 pmap->pm_pdir, L1_PTE(l2pa), ptva));
595
596 /* Map page table into the L1. */
597 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
598 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
599 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
600 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
601
602 PDEBUG(0, printf("pt self reference %lx in %lx\n",
603 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
604
605 /* Map the page table into the page table area. */
606 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
607
608 /* XXX should be a purge */
609 /* cpu_tlb_flushD();*/
610 }
611
612 #if 0
613 static /*__inline*/ void
614 pmap_unmap_in_l1(pmap, va)
615 pmap_t pmap;
616 vaddr_t va;
617 {
618 vaddr_t ptva;
619
620 /* Calculate the index into the L1 page table. */
621 ptva = (va >> PDSHIFT) & ~3;
622
623 /* Unmap page table from the L1. */
624 pmap->pm_pdir[ptva + 0] = 0;
625 pmap->pm_pdir[ptva + 1] = 0;
626 pmap->pm_pdir[ptva + 2] = 0;
627 pmap->pm_pdir[ptva + 3] = 0;
628
629 /* Unmap the page table from the page table area. */
630 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
631
632 /* XXX should be a purge */
633 /* cpu_tlb_flushD();*/
634 }
635 #endif
636
637
638 /*
639 * Used to map a range of physical addresses into kernel
640 * virtual address space.
641 *
642 * For now, VM is already on, we only need to map the
643 * specified memory.
644 */
645 vaddr_t
646 pmap_map(va, spa, epa, prot)
647 vaddr_t va, spa, epa;
648 int prot;
649 {
650 while (spa < epa) {
651 pmap_enter(pmap_kernel(), va, spa, prot, 0);
652 va += NBPG;
653 spa += NBPG;
654 }
655 pmap_update();
656 return(va);
657 }
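/*
 * Example usage (hypothetical addresses): map one page of device
 * registers at physical address `dev_pa' into the kernel:
 *
 *	va = pmap_map(va, dev_pa, dev_pa + NBPG,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */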
658
659
660 /*
661 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
662 *
663 * bootstrap the pmap system. This is called from initarm and allows
    664  * the pmap system to initialise any structures it requires.
    665  *
    666  * Currently this sets up the kernel_pmap that is statically allocated
    667  * and also allocates virtual addresses for certain page hooks.
    668  * Two such page hooks are currently allocated; they are used to
    669  * zero and copy physical pages of memory.
670 * It also initialises the start and end address of the kernel data space.
671 */
672 extern paddr_t physical_freestart;
673 extern paddr_t physical_freeend;
674
675 struct pv_entry *boot_pvent;
676 char *boot_attrs;
677
678 void
679 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
680 pd_entry_t *kernel_l1pt;
681 pv_addr_t kernel_ptpt;
682 {
683 int loop;
684 paddr_t start, end;
685 #if NISADMA > 0
686 paddr_t istart;
687 psize_t isize;
688 #endif
689 vsize_t size;
690
691 kernel_pmap = &kernel_pmap_store;
692
693 kernel_pmap->pm_pdir = kernel_l1pt;
694 kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
695 kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
696 simple_lock_init(&kernel_pmap->pm_lock);
697 kernel_pmap->pm_count = 1;
698
699 /*
700 * Initialize PAGE_SIZE-dependent variables.
701 */
702 uvm_setpagesize();
703
704 npages = 0;
705 loop = 0;
706 while (loop < bootconfig.dramblocks) {
707 start = (paddr_t)bootconfig.dram[loop].address;
708 end = start + (bootconfig.dram[loop].pages * NBPG);
709 if (start < physical_freestart)
710 start = physical_freestart;
711 if (end > physical_freeend)
712 end = physical_freeend;
713 #if 0
714 printf("%d: %lx -> %lx\n", loop, start, end - 1);
715 #endif
716 #if NISADMA > 0
717 if (pmap_isa_dma_range_intersect(start, end - start,
718 &istart, &isize)) {
719 /*
720 * Place the pages that intersect with the
721 * ISA DMA range onto the ISA DMA free list.
722 */
723 #if 0
724 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
725 istart + isize - 1);
726 #endif
727 uvm_page_physload(atop(istart),
728 atop(istart + isize), atop(istart),
729 atop(istart + isize), VM_FREELIST_ISADMA);
730 npages += atop(istart + isize) - atop(istart);
731
732 /*
733 * Load the pieces that come before
734 * the intersection into the default
735 * free list.
736 */
737 if (start < istart) {
738 #if 0
739 printf(" BEFORE 0x%lx -> 0x%lx\n",
740 start, istart - 1);
741 #endif
742 uvm_page_physload(atop(start),
743 atop(istart), atop(start),
744 atop(istart), VM_FREELIST_DEFAULT);
745 npages += atop(istart) - atop(start);
746 }
747
748 /*
749 * Load the pieces that come after
750 * the intersection into the default
751 * free list.
752 */
753 if ((istart + isize) < end) {
754 #if 0
755 printf(" AFTER 0x%lx -> 0x%lx\n",
756 (istart + isize), end - 1);
757 #endif
758 uvm_page_physload(atop(istart + isize),
759 atop(end), atop(istart + isize),
760 atop(end), VM_FREELIST_DEFAULT);
761 npages += atop(end) - atop(istart + isize);
762 }
763 } else {
764 uvm_page_physload(atop(start), atop(end),
765 atop(start), atop(end), VM_FREELIST_DEFAULT);
766 npages += atop(end) - atop(start);
767 }
768 #else /* NISADMA > 0 */
769 uvm_page_physload(atop(start), atop(end),
770 atop(start), atop(end), VM_FREELIST_DEFAULT);
771 npages += atop(end) - atop(start);
772 #endif /* NISADMA > 0 */
773 ++loop;
774 }
775
776 #ifdef MYCROFT_HACK
777 printf("npages = %ld\n", npages);
778 #endif
779
780 virtual_start = KERNEL_VM_BASE;
781 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
782
783 ALLOC_PAGE_HOOK(page_hook0, NBPG);
784 ALLOC_PAGE_HOOK(page_hook1, NBPG);
785
786 /*
787 * The mem special device needs a virtual hook but we don't
788 * need a pte
789 */
790 memhook = (char *)virtual_start;
791 virtual_start += NBPG;
792
793 msgbufaddr = (caddr_t)virtual_start;
794 msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
795 virtual_start += round_page(MSGBUFSIZE);
796
797 size = npages * sizeof(struct pv_entry);
798 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
799 bzero(boot_pvent, size);
800 size = npages * sizeof(char);
801 boot_attrs = (char *)uvm_pageboot_alloc(size);
802 bzero(boot_attrs, size);
803
804 cpu_cache_cleanD();
805 }
806
807 /*
808 * void pmap_init(void)
809 *
810 * Initialize the pmap module.
811 * Called by vm_init() in vm/vm_init.c in order to initialise
812 * any structures that the pmap system needs to map virtual memory.
813 */
814
815 extern int physmem;
816
817 void
818 pmap_init()
819 {
820 int lcv;
821
822 #ifdef MYCROFT_HACK
823 printf("physmem = %d\n", physmem);
824 #endif
825
826 /*
827 * Set the available memory vars - These do not map to real memory
828 * addresses and cannot as the physical memory is fragmented.
829 * They are used by ps for %mem calculations.
830 * One could argue whether this should be the entire memory or just
831 * the memory that is useable in a user process.
832 */
833 avail_start = 0;
834 avail_end = physmem * NBPG;
835
836 /* Set up pmap info for physsegs. */
837 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
838 vm_physmem[lcv].pmseg.pvent = boot_pvent;
839 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
840 vm_physmem[lcv].pmseg.attrs = boot_attrs;
841 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
842 }
843 #ifdef MYCROFT_HACK
844 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
845 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
846 lcv,
847 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
848 vm_physmem[lcv].start, vm_physmem[lcv].end);
849 }
850 #endif
851 TAILQ_INIT(&pv_page_freelist);
852
853 #ifdef DIAGNOSTIC
854 /* Now it is safe to enable pv_entry recording. */
855 pmap_initialized = TRUE;
856 #endif
857
858 /* Initialise our L1 page table queues and counters */
859 SIMPLEQ_INIT(&l1pt_static_queue);
860 l1pt_static_queue_count = 0;
861 l1pt_static_create_count = 0;
862 SIMPLEQ_INIT(&l1pt_queue);
863 l1pt_queue_count = 0;
864 l1pt_create_count = 0;
865 l1pt_reuse_count = 0;
866 }
867
868 /*
869 * pmap_postinit()
870 *
871 * This routine is called after the vm and kmem subsystems have been
872 * initialised. This allows the pmap code to perform any initialisation
    873  * that can only be done once the memory allocation is in place.
874 */
875
876 void
877 pmap_postinit()
878 {
879 int loop;
880 struct l1pt *pt;
881
882 #ifdef PMAP_STATIC_L1S
883 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
884 #else /* PMAP_STATIC_L1S */
885 for (loop = 0; loop < max_processes; ++loop) {
886 #endif /* PMAP_STATIC_L1S */
887 /* Allocate a L1 page table */
888 pt = pmap_alloc_l1pt();
889 if (!pt)
890 panic("Cannot allocate static L1 page tables\n");
891
892 /* Clean it */
893 bzero((void *)pt->pt_va, PD_SIZE);
894 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
895 /* Add the page table to the queue */
896 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
897 ++l1pt_static_queue_count;
898 ++l1pt_static_create_count;
899 }
900 }
901
902
903 /*
904 * Create and return a physical map.
905 *
    906  * The map is an actual physical map, and may be referenced by the
    907  * hardware.  (The historical size argument that selected a
    908  * software-only map no longer exists.)
911 */
912
913 pmap_t
914 pmap_create()
915 {
916 pmap_t pmap;
917
918 /* Allocate memory for pmap structure and zero it */
919 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
920 bzero(pmap, sizeof(*pmap));
921
922 /* Now init the machine part of the pmap */
923 pmap_pinit(pmap);
924 return(pmap);
925 }
926
927 /*
928 * pmap_alloc_l1pt()
929 *
    930  * This routine allocates physical and virtual memory for an L1 page table
    931  * and wires it.
    932  * An l1pt structure is returned to describe the allocated page table.
933 *
934 * This routine is allowed to fail if the required memory cannot be allocated.
935 * In this case NULL is returned.
936 */
937
938 struct l1pt *
939 pmap_alloc_l1pt(void)
940 {
941 paddr_t pa;
942 vaddr_t va;
943 struct l1pt *pt;
944 int error;
945 vm_page_t m;
946 pt_entry_t *pte;
947
948 /* Allocate virtual address space for the L1 page table */
949 va = uvm_km_valloc(kernel_map, PD_SIZE);
950 if (va == 0) {
951 #ifdef DIAGNOSTIC
952 printf("pmap: Cannot allocate pageable memory for L1\n");
953 #endif /* DIAGNOSTIC */
954 return(NULL);
955 }
956
957 /* Allocate memory for the l1pt structure */
958 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
959
960 /*
961 * Allocate pages from the VM system.
962 */
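	/*
	 * The ARM L1 translation table must be physically contiguous and
	 * aligned to its own size (PD_SIZE), hence the use of
	 * uvm_pglistalloc() with a PD_SIZE alignment constraint rather
	 * than plain page allocation.
	 */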
963 TAILQ_INIT(&pt->pt_plist);
964 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
965 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
966 if (error) {
967 #ifdef DIAGNOSTIC
968 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
969 error);
970 #endif /* DIAGNOSTIC */
971 /* Release the resources we already have claimed */
972 free(pt, M_VMPMAP);
973 uvm_km_free(kernel_map, va, PD_SIZE);
974 return(NULL);
975 }
976
977 /* Map our physical pages into our virtual space */
978 pt->pt_va = va;
979 m = pt->pt_plist.tqh_first;
980 while (m && va < (pt->pt_va + PD_SIZE)) {
981 pa = VM_PAGE_TO_PHYS(m);
982
983 pmap_enter(pmap_kernel(), va, pa,
984 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
985
986 /* Revoke cacheability and bufferability */
987 /* XXX should be done better than this */
988 pte = pmap_pte(pmap_kernel(), va);
989 *pte = *pte & ~(PT_C | PT_B);
990
991 va += NBPG;
992 m = m->pageq.tqe_next;
993 }
994 pmap_update();
995
996 #ifdef DIAGNOSTIC
997 if (m)
998 panic("pmap_alloc_l1pt: pglist not empty\n");
999 #endif /* DIAGNOSTIC */
1000
1001 pt->pt_flags = 0;
1002 return(pt);
1003 }
1004
1005 /*
1006 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1007 */
1008 void
1009 pmap_free_l1pt(pt)
1010 struct l1pt *pt;
1011 {
   1012 	/* Separate the physical memory from the virtual space */
1013 pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
1014 pmap_update();
1015
1016 /* Return the physical memory */
1017 uvm_pglistfree(&pt->pt_plist);
1018
1019 /* Free the virtual space */
1020 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1021
1022 /* Free the l1pt structure */
1023 free(pt, M_VMPMAP);
1024 }
1025
1026 /*
1027 * Allocate a page directory.
1028 * This routine will either allocate a new page directory from the pool
1029 * of L1 page tables currently held by the kernel or it will allocate
1030 * a new one via pmap_alloc_l1pt().
1031 * It will then initialise the l1 page table for use.
1032 */
1033 int
1034 pmap_allocpagedir(pmap)
1035 struct pmap *pmap;
1036 {
1037 paddr_t pa;
1038 struct l1pt *pt;
1039 pt_entry_t *pte;
1040
1041 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1042
1043 /* Do we have any spare L1's lying around ? */
1044 if (l1pt_static_queue_count) {
1045 --l1pt_static_queue_count;
1046 pt = l1pt_static_queue.sqh_first;
1047 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1048 } else if (l1pt_queue_count) {
1049 --l1pt_queue_count;
1050 pt = l1pt_queue.sqh_first;
1051 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1052 ++l1pt_reuse_count;
1053 } else {
1054 pt = pmap_alloc_l1pt();
1055 if (!pt)
1056 return(ENOMEM);
1057 ++l1pt_create_count;
1058 }
1059
1060 /* Store the pointer to the l1 descriptor in the pmap. */
1061 pmap->pm_l1pt = pt;
1062
1063 /* Get the physical address of the start of the l1 */
1064 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1065
1066 /* Store the virtual address of the l1 in the pmap. */
1067 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1068
1069 /* Clean the L1 if it is dirty */
1070 if (!(pt->pt_flags & PTFLAG_CLEAN))
1071 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1072
1073 /* Do we already have the kernel mappings ? */
1074 if (!(pt->pt_flags & PTFLAG_KPT)) {
1075 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1076
1077 bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1078 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1079 KERNEL_PD_SIZE);
1080 pt->pt_flags |= PTFLAG_KPT;
1081 }
1082
1083 /* Allocate a page table to map all the page tables for this pmap */
1084
1085 #ifdef DIAGNOSTIC
1086 if (pmap->pm_vptpt) {
1087 /* XXX What if we have one already ? */
1088 panic("pmap_allocpagedir: have pt already\n");
1089 }
1090 #endif /* DIAGNOSTIC */
1091 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1092 if (pmap->pm_vptpt == 0) {
1093 pmap_freepagedir(pmap);
1094 return(ENOMEM);
1095 }
1096
1097 (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
1098 pmap->pm_pptpt &= PG_FRAME;
1099 /* Revoke cacheability and bufferability */
1100 /* XXX should be done better than this */
1101 pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
1102 *pte = *pte & ~(PT_C | PT_B);
1103
1104 /* Wire in this page table */
1105 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1106
1107 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1108
1109 /*
1110 * Map the kernel page tables for 0xf0000000 +
1111 * into the page table used to map the
1112 * pmap's page tables
1113 */
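	/*
	 * Offset arithmetic: the L1 uses 4 bytes per MB of address space
	 * (16KB in total), while the page-table page uses 4 bytes per 4MB
	 * (one PTE maps the 4KB page holding four L2 tables), so L1 byte
	 * offsets are divided by four (">> 2") to get the corresponding
	 * offsets in pm_vptpt.
	 */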
1114 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1115 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1116 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1117 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1118 (KERNEL_PD_SIZE >> 2));
1119
1120 pmap->pm_count = 1;
1121 simple_lock_init(&pmap->pm_lock);
1122
1123 return(0);
1124 }
1125
1126
1127 /*
1128 * Initialize a preallocated and zeroed pmap structure,
1129 * such as one in a vmspace structure.
1130 */
1131
1132 static int pmap_pagedir_ident; /* tsleep() ident */
1133
1134 void
1135 pmap_pinit(pmap)
1136 struct pmap *pmap;
1137 {
1138 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1139
1140 /* Keep looping until we succeed in allocating a page directory */
1141 while (pmap_allocpagedir(pmap) != 0) {
1142 /*
1143 * Ok we failed to allocate a suitable block of memory for an
1144 * L1 page table. This means that either:
1145 * 1. 16KB of virtual address space could not be allocated
1146 * 2. 16KB of physically contiguous memory on a 16KB boundary
1147 * could not be allocated.
1148 *
1149 * Since we cannot fail we will sleep for a while and try
   1150 	 * again. Although we will be woken when another page table
   1151 	 * is freed, other memory may be released or swapped in the
   1152 	 * meantime and let us succeed, so we keep trying regularly
   1153 	 * just in case.
1154 */
1155
1156 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1157 "l1ptwait", 1000) == EWOULDBLOCK)
1158 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1159 }
1160
1161 /* Map zero page for the pmap. This will also map the L2 for it */
1162 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1163 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1164 pmap_update();
1165 }
1166
1167
1168 void
1169 pmap_freepagedir(pmap)
1170 pmap_t pmap;
1171 {
1172 /* Free the memory used for the page table mapping */
1173 if (pmap->pm_vptpt != 0)
1174 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1175
1176 /* junk the L1 page table */
1177 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1178 /* Add the page table to the queue */
1179 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1180 ++l1pt_static_queue_count;
1181 /* Wake up any sleeping processes waiting for a l1 page table */
1182 wakeup((caddr_t)&pmap_pagedir_ident);
1183 } else if (l1pt_queue_count < 8) {
1184 /* Add the page table to the queue */
1185 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1186 ++l1pt_queue_count;
1187 /* Wake up any sleeping processes waiting for a l1 page table */
1188 wakeup((caddr_t)&pmap_pagedir_ident);
1189 } else
1190 pmap_free_l1pt(pmap->pm_l1pt);
1191 }
1192
1193
1194 /*
1195 * Retire the given physical map from service.
1196 * Should only be called if the map contains no valid mappings.
1197 */
1198
1199 void
1200 pmap_destroy(pmap)
1201 pmap_t pmap;
1202 {
1203 int count;
1204
1205 if (pmap == NULL)
1206 return;
1207
1208 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1209 simple_lock(&pmap->pm_lock);
1210 count = --pmap->pm_count;
1211 simple_unlock(&pmap->pm_lock);
1212 if (count == 0) {
1213 pmap_release(pmap);
1214 free((caddr_t)pmap, M_VMPMAP);
1215 }
1216 }
1217
1218
1219 /*
1220 * Release any resources held by the given physical map.
1221 * Called when a pmap initialized by pmap_pinit is being released.
1222 * Should only be called if the map contains no valid mappings.
1223 */
1224
1225 void
1226 pmap_release(pmap)
1227 pmap_t pmap;
1228 {
1229 struct vm_page *page;
1230 pt_entry_t *pte;
1231 int loop;
1232
1233 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1234
1235 #if 0
1236 if (pmap->pm_count != 1) /* XXX: needs sorting */
1237 panic("pmap_release count %d", pmap->pm_count);
1238 #endif
1239
1240 /* Remove the zero page mapping */
1241 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1242 pmap_update();
1243
1244 /*
1245 * Free any page tables still mapped
   1246 	 * This is only temporary until pmap_enter can count the number
1247 * of mappings made in a page table. Then pmap_remove() can
1248 * reduce the count and free the pagetable when the count
1249 * reaches zero.
1250 */
1251 for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) {
1252 pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4);
1253 if (*pte != 0) {
1254 PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte));
1255 page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
1256 if (page == NULL)
1257 panic("pmap_release: bad address for phys page");
1258 uvm_pagefree(page);
1259 }
1260 }
1261 /* Free the page dir */
1262 pmap_freepagedir(pmap);
1263 }
1264
1265
1266 /*
1267 * void pmap_reference(pmap_t pmap)
1268 *
1269 * Add a reference to the specified pmap.
1270 */
1271
1272 void
1273 pmap_reference(pmap)
1274 pmap_t pmap;
1275 {
1276 if (pmap == NULL)
1277 return;
1278
1279 simple_lock(&pmap->pm_lock);
1280 pmap->pm_count++;
1281 simple_unlock(&pmap->pm_lock);
1282 }
1283
1284 /*
1285 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1286 *
1287 * Return the start and end addresses of the kernel's virtual space.
   1288  * These values are set up in pmap_bootstrap and are updated as pages
1289 * are allocated.
1290 */
1291
1292 void
1293 pmap_virtual_space(start, end)
1294 vaddr_t *start;
1295 vaddr_t *end;
1296 {
1297 *start = virtual_start;
1298 *end = virtual_end;
1299 }
1300
1301
1302 /*
1303 * Activate the address space for the specified process. If the process
1304 * is the current process, load the new MMU context.
1305 */
1306 void
1307 pmap_activate(p)
1308 struct proc *p;
1309 {
1310 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1311 struct pcb *pcb = &p->p_addr->u_pcb;
1312
1313 (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
1314 (paddr_t *)&pcb->pcb_pagedir);
1315
1316 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1317 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1318
1319 if (p == curproc) {
1320 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1321 setttb((u_int)pcb->pcb_pagedir);
1322 }
1323 #if 0
1324 pmap->pm_pdchanged = FALSE;
1325 #endif
1326 }
1327
1328
1329 /*
1330 * Deactivate the address space of the specified process.
1331 */
1332 void
1333 pmap_deactivate(p)
1334 struct proc *p;
1335 {
1336 }
1337
1338
1339 /*
1340 * pmap_clean_page()
1341 *
1342 * This is a local function used to work out the best strategy to clean
1343 * a single page referenced by its entry in the PV table. It's used by
   1344  * pmap_copy_page, pmap_zero_page and maybe some others later on.
1345 *
1346 * Its policy is effectively:
1347 * o If there are no mappings, we don't bother doing anything with the cache.
1348 * o If there is one mapping, we clean just that page.
1349 * o If there are multiple mappings, we clean the entire cache.
1350 *
1351 * So that some functions can be further optimised, it returns 0 if it didn't
1352 * clean the entire cache, or 1 if it did.
1353 *
1354 * XXX One bug in this routine is that if the pv_entry has a single page
1355 * mapped at 0x00000000 a whole cache clean will be performed rather than
   1356  * just the 1 page. This should not occur in everyday use, and if it
   1357  * does it merely results in a less efficient clean for that page.
1358 */
1359 static int
1360 pmap_clean_page(pv)
1361 struct pv_entry *pv;
1362 {
1363 int s;
1364 int cache_needs_cleaning = 0;
1365 vaddr_t page_to_clean = 0;
1366
1367 /* Go to splvm() so we get exclusive lock for a mo */
1368 s = splvm();
1369 if (pv->pv_pmap) {
1370 cache_needs_cleaning = 1;
1371 if (!pv->pv_next)
1372 page_to_clean = pv->pv_va;
1373 }
1374 splx(s);
1375
1376 /* Do cache ops outside the splvm. */
1377 if (page_to_clean)
1378 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1379 else if (cache_needs_cleaning) {
1380 cpu_cache_purgeID();
1381 return (1);
1382 }
1383 return (0);
1384 }
1385
1386 /*
1387 * pmap_find_pv()
1388 *
1389 * This is a local function that finds a PV entry for a given physical page.
1390 * This is a common op, and this function removes loads of ifdefs in the code.
1391 */
1392 static __inline struct pv_entry *
1393 pmap_find_pv(phys)
1394 paddr_t phys;
1395 {
1396 int bank, off;
1397 struct pv_entry *pv;
1398
1399 #ifdef DIAGNOSTIC
1400 if (!pmap_initialized)
1401 panic("pmap_find_pv: !pmap_initialized");
1402 #endif
1403
1404 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1405 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1406 pv = &vm_physmem[bank].pmseg.pvent[off];
1407 return (pv);
1408 }
1409
1410 /*
1411 * pmap_zero_page()
1412 *
1413 * Zero a given physical page by mapping it at a page hook point.
   1414  * In doing the zero page op, the page we zero is mapped cacheable, as
   1415  * on StrongARM accesses to non-cached pages are non-burst, making the
   1416  * writing of _any_ bulk data very slow.
1417 */
1418 void
1419 pmap_zero_page(phys)
1420 paddr_t phys;
1421 {
1422 struct pv_entry *pv;
1423
   1424 	/* Get an entry for this page, and clean it. */
1425 pv = pmap_find_pv(phys);
1426 pmap_clean_page(pv);
1427
1428 /*
1429 * Hook in the page, zero it, and purge the cache for that
1430 * zeroed page. Invalidate the TLB as needed.
1431 */
1432 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1433 cpu_tlb_flushD_SE(page_hook0.va);
1434 bzero_page(page_hook0.va);
1435 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1436 }
1437
1438 /*
1439 * pmap_copy_page()
1440 *
1441 * Copy one physical page into another, by mapping the pages into
   1442  * hook points.  The same comment regarding cacheability as in
1443 * pmap_zero_page also applies here.
1444 */
1445 void
1446 pmap_copy_page(src, dest)
1447 paddr_t src;
1448 paddr_t dest;
1449 {
1450 struct pv_entry *src_pv, *dest_pv;
1451
1452 /* Get PV entries for the pages, and clean them if needed. */
1453 src_pv = pmap_find_pv(src);
1454 dest_pv = pmap_find_pv(dest);
1455 if (!pmap_clean_page(src_pv))
1456 pmap_clean_page(dest_pv);
1457
1458 /*
1459 * Map the pages into the page hook points, copy them, and purge
1460 * the cache for the appropriate page. Invalidate the TLB
1461 * as required.
1462 */
1463 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1464 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1465 cpu_tlb_flushD_SE(page_hook0.va);
1466 cpu_tlb_flushD_SE(page_hook1.va);
1467 bcopy_page(page_hook0.va, page_hook1.va);
1468 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1469 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1470 }
1471
1472 /*
   1473  * paddr_t pmap_next_phys_page(paddr_t addr)
   1474  *
   1475  * Return the physical address of the page after `addr', or 0 if
   1476  * there are no further physical pages.
1477 */
1478
1479 paddr_t
1480 pmap_next_phys_page(addr)
1481 paddr_t addr;
1482
1483 {
1484 int loop;
1485
1486 if (addr < bootconfig.dram[0].address)
1487 return(bootconfig.dram[0].address);
1488
1489 loop = 0;
1490
1491 while (bootconfig.dram[loop].address != 0
1492 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1493 ++loop;
1494
1495 if (bootconfig.dram[loop].address == 0)
1496 return(0);
1497
1498 addr += NBPG;
1499
1500 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1501 if (bootconfig.dram[loop + 1].address == 0)
1502 return(0);
1503 addr = bootconfig.dram[loop + 1].address;
1504 }
1505
1506 return(addr);
1507 }
1508
1509 #if 0
1510 void
1511 pmap_pte_addref(pmap, va)
1512 pmap_t pmap;
1513 vaddr_t va;
1514 {
1515 pd_entry_t *pde;
1516 paddr_t pa;
1517 struct vm_page *m;
1518
1519 if (pmap == pmap_kernel())
1520 return;
1521
1522 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1523 pa = pmap_pte_pa(pde);
1524 m = PHYS_TO_VM_PAGE(pa);
1525 ++m->wire_count;
1526 #ifdef MYCROFT_HACK
1527 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1528 pmap, va, pde, pa, m, m->wire_count);
1529 #endif
1530 }
1531
1532 void
1533 pmap_pte_delref(pmap, va)
1534 pmap_t pmap;
1535 vaddr_t va;
1536 {
1537 pd_entry_t *pde;
1538 paddr_t pa;
1539 struct vm_page *m;
1540
1541 if (pmap == pmap_kernel())
1542 return;
1543
1544 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1545 pa = pmap_pte_pa(pde);
1546 m = PHYS_TO_VM_PAGE(pa);
1547 --m->wire_count;
1548 #ifdef MYCROFT_HACK
1549 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1550 pmap, va, pde, pa, m, m->wire_count);
1551 #endif
1552 if (m->wire_count == 0) {
1553 #ifdef MYCROFT_HACK
1554 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1555 pmap, va, pde, pa, m);
1556 #endif
1557 pmap_unmap_in_l1(pmap, va);
1558 uvm_pagefree(m);
1559 --pmap->pm_stats.resident_count;
1560 }
1561 }
1562 #else
1563 #define pmap_pte_addref(pmap, va)
1564 #define pmap_pte_delref(pmap, va)
1565 #endif
1566
1567 /*
1568 * Since we have a virtually indexed cache, we may need to inhibit caching if
1569 * there is more than one mapping and at least one of them is writable.
1570 * Since we purge the cache on every context switch, we only need to check for
1571 * other mappings within the same pmap, or kernel_pmap.
1572 * This function is also called when a page is unmapped, to possibly reenable
1573 * caching on any remaining mappings.
1574 */
1575 void
1576 pmap_vac_me_harder(pmap, pv)
1577 pmap_t pmap;
1578 struct pv_entry *pv;
1579 {
1580 struct pv_entry *npv;
1581 pt_entry_t *pte;
1582 int entries = 0;
1583 int writeable = 0;
1584
1585 if (pv->pv_pmap == NULL)
1586 return;
1587
1588 /*
1589 * Count mappings and writable mappings in this pmap.
1590 * Keep a pointer to the first one.
1591 */
1592 for (npv = pv; npv; npv = npv->pv_next) {
1593 /* Count mappings in the same pmap */
1594 if (pmap == npv->pv_pmap) {
1595 if (entries++ == 0)
1596 pv = npv;
1597 /* Writeable mappings */
1598 if (npv->pv_flags & PT_Wr)
1599 ++writeable;
1600 }
1601 }
1602
1603 /*
1604 * Enable or disable caching as necessary.
1605 * We do a quick check of the first PTE to avoid walking the list if
1606 * we're already in the right state.
1607 */
1608 if (entries > 1 && writeable) {
1609 pte = pmap_pte(pmap, pv->pv_va);
1610 if (~*pte & (PT_C | PT_B))
1611 return;
1612 *pte = *pte & ~(PT_C | PT_B);
1613 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1614 if (pmap == npv->pv_pmap) {
1615 pte = pmap_pte(pmap, npv->pv_va);
1616 *pte = *pte & ~(PT_C | PT_B);
1617 }
1618 }
1619 } else if (entries > 0) {
1620 pte = pmap_pte(pmap, pv->pv_va);
1621 if (*pte & (PT_C | PT_B))
1622 return;
1623 *pte = *pte | (PT_C | PT_B);
1624 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1625 if (pmap == npv->pv_pmap) {
1626 pte = pmap_pte(pmap, npv->pv_va);
1627 *pte = *pte | (PT_C | PT_B);
1628 }
1629 }
1630 }
1631 }
1632
1633 /*
1634 * pmap_remove()
1635 *
1636 * pmap_remove is responsible for nuking a number of mappings for a range
1637 * of virtual address space in the current pmap. To do this efficiently
1638 * is interesting, because in a number of cases a wide virtual address
1639 * range may be supplied that contains few actual mappings. So, the
1640 * optimisations are:
1641 * 1. Try and skip over hunks of address space for which an L1 entry
1642 * does not exist.
1643 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1644 * maybe do just a partial cache clean. This path of execution is
1645 * complicated by the fact that the cache must be flushed _before_
1646 * the PTE is nuked, being a VAC :-)
1647 * 3. Maybe later fast-case a single page, but I don't think this is
1648 * going to make _that_ much difference overall.
1649 */
1650
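/*
 * Beyond this many pages it is assumed that a full cache/TLB purge is
 * cheaper than cleaning and flushing each page individually; the value
 * is a tuning choice rather than anything architectural.
 */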
1651 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
1652
1653 void
1654 pmap_remove(pmap, sva, eva)
1655 pmap_t pmap;
1656 vaddr_t sva;
1657 vaddr_t eva;
1658 {
1659 int cleanlist_idx = 0;
1660 struct pagelist {
1661 vaddr_t va;
1662 pt_entry_t *pte;
1663 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1664 pt_entry_t *pte = 0;
1665 paddr_t pa;
1666 int pmap_active;
1667 struct pv_entry *pv;
1668
1669 /* Exit quick if there is no pmap */
1670 if (!pmap)
1671 return;
1672
1673 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1674
1675 sva &= PG_FRAME;
1676 eva &= PG_FRAME;
1677
1678 /* Get a page table pointer */
1679 while (sva < eva) {
1680 pte = pmap_pte(pmap, sva);
1681 if (pte)
1682 break;
1683 sva = (sva & PD_MASK) + NBPD;
1684 }
1685
1686 /* Note if the pmap is active thus require cache and tlb cleans */
1687 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1688 || (pmap == kernel_pmap))
1689 pmap_active = 1;
1690 else
1691 pmap_active = 0;
1692
1693 /* Now loop along */
1694 while (sva < eva) {
1695 /* Check if we can move to the next PDE (l1 chunk) */
1696 if (!(sva & PT_MASK))
1697 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1698 sva += NBPD;
1699 pte += arm_byte_to_page(NBPD);
1700 continue;
1701 }
1702
1703 /* We've found a valid PTE, so this page of PTEs has to go. */
1704 if (pmap_pte_v(pte)) {
1705 int bank, off;
1706
1707 /* Update statistics */
1708 --pmap->pm_stats.resident_count;
1709
1710 /*
1711 * Add this page to our cache remove list, if we can.
1712 * If, however the cache remove list is totally full,
1713 * then do a complete cache invalidation taking note
1714 * to backtrack the PTE table beforehand, and ignore
1715 * the lists in future because there's no longer any
1716 * point in bothering with them (we've paid the
1717 * penalty, so will carry on unhindered). Otherwise,
1718 * when we fall out, we just clean the list.
1719 */
1720 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1721 pa = pmap_pte_pa(pte);
1722
1723 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1724 /* Add to the clean list. */
1725 cleanlist[cleanlist_idx].pte = pte;
1726 cleanlist[cleanlist_idx].va = sva;
1727 cleanlist_idx++;
1728 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1729 int cnt;
1730
1731 /* Nuke everything if needed. */
1732 if (pmap_active) {
1733 cpu_cache_purgeID();
1734 cpu_tlb_flushID();
1735 }
1736
1737 /*
1738 * Roll back the previous PTE list,
1739 * and zero out the current PTE.
1740 */
1741 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1742 *cleanlist[cnt].pte = 0;
1743 pmap_pte_delref(pmap, cleanlist[cnt].va);
1744 }
1745 *pte = 0;
1746 pmap_pte_delref(pmap, sva);
1747 cleanlist_idx++;
1748 } else {
1749 /*
1750 * We've already nuked the cache and
1751 * TLB, so just carry on regardless,
1752 * and we won't need to do it again
1753 */
1754 *pte = 0;
1755 pmap_pte_delref(pmap, sva);
1756 }
1757
1758 /*
1759 * Update flags. In a number of circumstances,
1760 * we could cluster a lot of these and do a
1761 * number of sequential pages in one go.
1762 */
1763 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1764 pv = &vm_physmem[bank].pmseg.pvent[off];
1765 pmap_remove_pv(pmap, sva, pv);
1766 pmap_vac_me_harder(pmap, pv);
1767 }
1768 }
1769 sva += NBPG;
1770 pte++;
1771 }
1772
1773 /*
1774 * Now, if we've fallen through down to here, chances are that there
1775 * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
1776 */
1777 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1778 u_int cnt;
1779
1780 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1781 if (pmap_active) {
1782 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1783 *cleanlist[cnt].pte = 0;
1784 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1785 } else
1786 *cleanlist[cnt].pte = 0;
1787 pmap_pte_delref(pmap, cleanlist[cnt].va);
1788 }
1789 }
1790 }
1791
1792 /*
1793 * Routine: pmap_remove_all
1794 * Function:
1795 * Removes this physical page from
1796 * all physical maps in which it resides.
1797 * Reflects back modify bits to the pager.
1798 */
1799
1800 void
1801 pmap_remove_all(pa)
1802 paddr_t pa;
1803 {
1804 struct pv_entry *ph, *pv, *npv;
1805 pmap_t pmap;
1806 pt_entry_t *pte;
1807 int s;
1808
1809 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1810
1811 pv = ph = pmap_find_pv(pa);
1812 pmap_clean_page(pv);
1813
1814 s = splvm();
1815
1816 if (ph->pv_pmap == NULL) {
1817 PDEBUG(0, printf("free page\n"));
1818 splx(s);
1819 return;
1820 }
1821
1822 while (pv) {
1823 pmap = pv->pv_pmap;
1824 pte = pmap_pte(pmap, pv->pv_va);
1825
1826 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1827 pv->pv_va, pv->pv_flags));
1828 #ifdef DEBUG
1829 if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
1830 panic("pmap_remove_all: bad mapping");
1831 #endif /* DEBUG */
1832
1833 /*
1834 * Update statistics
1835 */
1836 --pmap->pm_stats.resident_count;
1837
1838 /* Wired bit */
1839 if (pv->pv_flags & PT_W)
1840 --pmap->pm_stats.wired_count;
1841
1842 /*
1843 * Invalidate the PTEs.
1844 * XXX: should cluster them up and invalidate as many
1845 * as possible at once.
1846 */
1847
1848 #ifdef needednotdone
1849 reduce wiring count on page table pages as references drop
1850 #endif
1851
1852 *pte = 0;
1853 pmap_pte_delref(pmap, pv->pv_va);
1854
1855 npv = pv->pv_next;
1856 if (pv == ph)
1857 ph->pv_pmap = NULL;
1858 else
1859 pmap_free_pv(pv);
1860 pv = npv;
1861 }
1862
1863 splx(s);
1864
1865 PDEBUG(0, printf("done\n"));
1866 cpu_tlb_flushID();
1867 }
1868
1869
1870 /*
1871 * Set the physical protection on the specified range of this map as requested.
1872 */
1873
1874 void
1875 pmap_protect(pmap, sva, eva, prot)
1876 pmap_t pmap;
1877 vaddr_t sva;
1878 vaddr_t eva;
1879 vm_prot_t prot;
1880 {
1881 pt_entry_t *pte = NULL;
1882 int armprot;
1883 int flush = 0;
1884 paddr_t pa;
1885 int bank, off;
1886 struct pv_entry *pv;
1887
1888 /*
1889 * Make sure pmap is valid. -dct
1890 */
1891 if (pmap == NULL)
1892 return;
1893 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
1894 pmap, sva, eva, prot));
1895
1896 if (~prot & VM_PROT_READ) {
1897 /* Just remove the mappings. */
1898 pmap_remove(pmap, sva, eva);
1899 return;
1900 }
1901 if (prot & VM_PROT_WRITE) {
1902 /*
1903 * If this is a read->write transition, just ignore it and let
1904 * uvm_fault() take care of it later.
1905 */
1906 return;
1907 }
1908
1909 sva &= PG_FRAME;
1910 eva &= PG_FRAME;
1911
1912 /*
1913 * We need to acquire a pointer to a page table page before entering
1914 * the following loop.
1915 */
1916 while (sva < eva) {
1917 pte = pmap_pte(pmap, sva);
1918 if (pte)
1919 break;
1920 sva = (sva & PD_MASK) + NBPD;
1921 }
1922
1923 while (sva < eva) {
1924 /* only check once in a while */
1925 if ((sva & PT_MASK) == 0) {
1926 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1927 /* We can race ahead here, to the next pde. */
1928 sva += NBPD;
1929 pte += arm_byte_to_page(NBPD);
1930 continue;
1931 }
1932 }
1933
1934 if (!pmap_pte_v(pte))
1935 goto next;
1936
1937 flush = 1;
1938
1939 armprot = 0;
1940 if (sva < VM_MAXUSER_ADDRESS)
1941 armprot |= PT_AP(AP_U);
1942 else if (sva < VM_MAX_ADDRESS)
1943 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
1944 *pte = (*pte & 0xfffff00f) | armprot;
1945
1946 pa = pmap_pte_pa(pte);
1947
1948 /* Get the physical page index */
1949
1950 /* Clear write flag */
1951 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1952 pv = &vm_physmem[bank].pmseg.pvent[off];
1953 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
1954 pmap_vac_me_harder(pmap, pv);
1955 }
1956
1957 next:
1958 sva += NBPG;
1959 pte++;
1960 }
1961
1962 if (flush)
1963 cpu_tlb_flushID();
1964 }
1965
1966 /*
1967 * void pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1968 * int flags)
1969 *
1970 * Insert the given physical page (p) at
1971 * the specified virtual address (v) in the
1972 * target physical map with the protection requested.
1973 *
1974 * If specified, the page will be wired down, meaning
1975 * that the related pte can not be reclaimed.
1976 *
1977 * NB: This is the only routine which MAY NOT lazy-evaluate
1978 * or lose information. That is, this routine must actually
1979 * insert this page into the given map NOW.
1980 */
1981
1982 int
1983 pmap_enter(pmap, va, pa, prot, flags)
1984 pmap_t pmap;
1985 vaddr_t va;
1986 paddr_t pa;
1987 vm_prot_t prot;
1988 int flags;
1989 {
1990 pt_entry_t *pte;
1991 u_int npte;
1992 int bank, off;
1993 struct pv_entry *pv = NULL;
1994 paddr_t opa;
1995 int nflags;
1996 boolean_t wired = (flags & PMAP_WIRED) != 0;
1997
1998 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
1999 va, pa, pmap, prot, wired));
2000
2001 #ifdef DIAGNOSTIC
2002 /* Valid address ? */
2003 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
2004 panic("pmap_enter: too big");
2005 if (pmap != pmap_kernel() && va != 0) {
2006 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2007 panic("pmap_enter: kernel page in user map");
2008 } else {
2009 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2010 panic("pmap_enter: user page in kernel map");
2011 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2012 panic("pmap_enter: entering PT page");
2013 }
2014 #endif
2015
2016 /*
2017 * Get a pointer to the pte for this virtual address. If the
2018 * pte pointer is NULL then we are missing the L2 page table
2019 * so we need to create one.
2020 */
2021 pte = pmap_pte(pmap, va);
2022 if (!pte) {
2023 paddr_t l2pa;
2024 struct vm_page *m;
2025
2026 /* Allocate a page table */
2027 for (;;) {
2028 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2029 if (m != NULL)
2030 break;
2031
2032 /*
2033 * No page available. If we're the kernel
2034 * pmap, we die, since we might not have
2035 * a valid thread context. For user pmaps,
2036 * we assume that we _do_ have a valid thread
2037 * context, so we wait here for the pagedaemon
2038 * to free up some pages.
2039 *
2040 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2041 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2042 * XXX SO THIS IS PROBABLY SAFE. In any case,
2043 * XXX other pmap modules claim it is safe to
2044 * XXX sleep here if it's a user pmap.
2045 */
2046 if (pmap == pmap_kernel())
2047 panic("pmap_enter: no free pages");
2048 else
2049 uvm_wait("pmap_enter");
2050 }
2051
2052 /* Wire this page table into the L1. */
2053 l2pa = VM_PAGE_TO_PHYS(m);
2054 pmap_zero_page(l2pa);
2055 pmap_map_in_l1(pmap, va, l2pa);
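		/* The new L2 page table page itself counts as resident. */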
2056 ++pmap->pm_stats.resident_count;
2057
2058 pte = pmap_pte(pmap, va);
2059 #ifdef DIAGNOSTIC
2060 if (!pte)
2061 panic("pmap_enter: no pte");
2062 #endif
2063 }
2064
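	/*
	 * nflags records the writable and wired state that will be kept in
	 * the page's pv entry.
	 */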
2065 nflags = 0;
2066 if (prot & VM_PROT_WRITE)
2067 nflags |= PT_Wr;
2068 if (wired)
2069 nflags |= PT_W;
2070
2071 /* More debugging info */
2072 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2073 *pte));
2074
2075 /* Is the pte valid ? If so then this page is already mapped */
2076 if (pmap_pte_v(pte)) {
2077 /* Get the physical address of the current page mapped */
2078 opa = pmap_pte_pa(pte);
2079
2080 #ifdef MYCROFT_HACK
2081 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2082 #endif
2083
2084 /* Are we mapping the same page ? */
2085 if (opa == pa) {
2086 /* All we must be doing is changing the protection */
2087 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2088 va, pa));
2089
2090 /* Has the wiring changed ? */
2091 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2092 pv = &vm_physmem[bank].pmseg.pvent[off];
2093 (void) pmap_modify_pv(pmap, va, pv,
2094 PT_Wr | PT_W, nflags);
2095 }
2096 } else {
2097 /* We are replacing the page with a new one. */
2098 cpu_cache_purgeID_rng(va, NBPG);
2099
2100 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2101 va, pa, opa));
2102
2103 /*
2104 * If it is part of our managed memory then we
2105 * must remove it from the PV list
2106 */
2107 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2108 pv = &vm_physmem[bank].pmseg.pvent[off];
2109 pmap_remove_pv(pmap, va, pv);
2110 }
2111
2112 goto enter;
2113 }
2114 } else {
2115 opa = 0;
2116 pmap_pte_addref(pmap, va);
2117
2118 /* pte is not valid so we must be hooking in a new page */
2119 ++pmap->pm_stats.resident_count;
2120
2121 enter:
2122 /*
2123 * Enter on the PV list if part of our managed memory
2124 */
2125 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2126 pv = &vm_physmem[bank].pmseg.pvent[off];
2127 pmap_enter_pv(pmap, va, pv, nflags);
2128 }
2129 }
2130
2131 #ifdef MYCROFT_HACK
2132 if (mycroft_hack)
2133 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2134 #endif
2135
2136 /* Construct the pte, giving the correct access. */
2137 npte = (pa & PG_FRAME);
2138
2139 /* VA 0 is magic. */
2140 if (pmap != pmap_kernel() && va != 0)
2141 npte |= PT_AP(AP_U);
2142
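	/*
	 * Managed pages are entered invalid (L2_INVAL) unless an access type
	 * was supplied, so the first reference or modification faults and is
	 * recorded by the referenced/modified emulation code below.
	 */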
2143 if (bank != -1) {
2144 #ifdef DIAGNOSTIC
2145 if ((flags & VM_PROT_ALL) & ~prot)
2146 panic("pmap_enter: access_type exceeds prot");
2147 #endif
2148 npte |= PT_C | PT_B;
2149 if (flags & VM_PROT_WRITE) {
2150 npte |= L2_SPAGE | PT_AP(AP_W);
2151 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2152 } else if (flags & VM_PROT_ALL) {
2153 npte |= L2_SPAGE;
2154 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2155 } else
2156 npte |= L2_INVAL;
2157 } else {
2158 if (prot & VM_PROT_WRITE)
2159 npte |= L2_SPAGE | PT_AP(AP_W);
2160 else if (prot & VM_PROT_ALL)
2161 npte |= L2_SPAGE;
2162 else
2163 npte |= L2_INVAL;
2164 }
2165
2166 #ifdef MYCROFT_HACK
2167 if (mycroft_hack)
2168 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2169 #endif
2170
2171 *pte = npte;
2172
2173 if (bank != -1)
2174 pmap_vac_me_harder(pmap, pv);
2175
2176 /* Better flush the TLB ... */
2177 cpu_tlb_flushID_SE(va);
2178
2179 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2180
2181 return 0;
2182 }
2183
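/*
 * pmap_kenter_pa: enter a wired kernel mapping; this implementation simply
 * defers to pmap_enter() on the kernel pmap.
 */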
2184 void
2185 pmap_kenter_pa(va, pa, prot)
2186 vaddr_t va;
2187 paddr_t pa;
2188 vm_prot_t prot;
2189 {
2190 pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2191 }
2192
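/*
 * pmap_kremove: remove mappings entered with pmap_kenter_pa(), one page at
 * a time via pmap_remove() on the kernel pmap.
 */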
2193 void
2194 pmap_kremove(va, len)
2195 vaddr_t va;
2196 vsize_t len;
2197 {
2198 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2199 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2200 }
2201 }
2202
2203 /*
2204 * pmap_page_protect:
2205 *
2206 * Lower the permission for all mappings to a given page.
2207 */
2208
2209 void
2210 pmap_page_protect(pg, prot)
2211 struct vm_page *pg;
2212 vm_prot_t prot;
2213 {
2214 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2215
2216 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2217
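	/*
	 * A read-only protection downgrades every mapping of the page to
	 * copy-on-write; VM_PROT_ALL needs no change; anything else (e.g.
	 * VM_PROT_NONE) removes all mappings of the page.
	 */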
2218 switch(prot) {
2219 case VM_PROT_READ:
2220 case VM_PROT_READ|VM_PROT_EXECUTE:
2221 pmap_copy_on_write(pa);
2222 break;
2223
2224 case VM_PROT_ALL:
2225 break;
2226
2227 default:
2228 pmap_remove_all(pa);
2229 break;
2230 }
2231 }
2232
2233
2234 /*
2235 * Routine: pmap_unwire
2236 * Function: Clear the wired attribute for a map/virtual-address
2237 * pair.
2238 * In/out conditions:
2239 * The mapping must already exist in the pmap.
2240 */
2241
2242 void
2243 pmap_unwire(pmap, va)
2244 pmap_t pmap;
2245 vaddr_t va;
2246 {
2247 pt_entry_t *pte;
2248 paddr_t pa;
2249 int bank, off;
2250 struct pv_entry *pv;
2251
2252 /*
2253 * Make sure pmap is valid. -dct
2254 */
2255 if (pmap == NULL)
2256 return;
2257
2258 /* Get the pte */
2259 pte = pmap_pte(pmap, va);
2260 if (!pte)
2261 return;
2262
2263 /* Extract the physical address of the page */
2264 pa = pmap_pte_pa(pte);
2265
2266 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2267 return;
2268 pv = &vm_physmem[bank].pmseg.pvent[off];
2269 /* Update the wired bit in the pv entry for this page. */
2270 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2271 }
2272
2273 /*
2274 * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
2275 *
2276 * Return the pointer to a page table entry corresponding to the supplied
2277 * virtual address.
2278 *
2279 * The page directory is first checked to make sure that a page table
2280 * for the address in question exists and if it does a pointer to the
2281 * entry is returned.
2282 *
2283  * The way this works is that the kernel page tables are mapped
2284 * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
2285 * This allows page tables to be located quickly.
2286 */
2287 pt_entry_t *
2288 pmap_pte(pmap, va)
2289 pmap_t pmap;
2290 vaddr_t va;
2291 {
2292 pt_entry_t *ptp;
2293 pt_entry_t *result;
2294
2295 /* The pmap must be valid */
2296 if (!pmap)
2297 return(NULL);
2298
2299 /* Return the address of the pte */
2300 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2301 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2302
2303 /* Do we have a valid pde ? If not we don't have a page table */
2304 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2305 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2306 pmap_pde(pmap, va)));
2307 return(NULL);
2308 }
2309
2310 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2311 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2312 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2313 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2314
2315 /*
2316 * If the pmap is the kernel pmap or the pmap is the active one
2317 * then we can just return a pointer to entry relative to
2318 * PROCESS_PAGE_TBLS_BASE.
2319 * Otherwise we need to map the page tables to an alternative
2320 * address and reference them there.
2321 */
2322 if (pmap == kernel_pmap || pmap->pm_pptpt
2323 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2324 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2325 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2326 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2327 } else {
2328 struct proc *p = curproc;
2329
2330 /* If we don't have a valid curproc use proc0 */
2331 /* Perhaps we should just use kernel_pmap instead */
2332 if (p == NULL)
2333 p = &proc0;
2334 #ifdef DIAGNOSTIC
2335 /*
2336 * The pmap should always be valid for the process so
2337 * panic if it is not.
2338 */
2339 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2340 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2341 va, p, p->p_vmspace);
2342 console_debugger();
2343 }
2344 /*
2345 * The pmap for the current process should be mapped. If it
2346 * is not then we have a problem.
2347 */
2348 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2349 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2350 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2351 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2352 printf("pmap pagetable = P%08lx current = P%08x ",
2353 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2354 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2355 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2356 PG_FRAME));
2357 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2358 			panic("pmap_pte: current and pmap mismatch");
2359 }
2360 #endif
2361
2362 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2363 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2364 pmap->pm_pptpt);
2365 cpu_tlb_flushD();
2366 }
2367 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2368 ((va >> (PGSHIFT-2)) & ~3)));
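	/*
	 * Each page is described by a 4-byte pte, so the pte for va lies
	 * (va >> PGSHIFT) * 4 bytes into the table window; the expression
	 * below computes that offset, masked to a pte boundary.
	 */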
2369 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2370 return(result);
2371 }
2372
2373 /*
2374 * Routine: pmap_extract
2375 * Function:
2376 * Extract the physical page address associated
2377 * with the given map/virtual_address pair.
2378 */
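/*
 * Usage sketch (hypothetical caller): translate a mapped kernel virtual
 * address into a physical address:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("va not mapped");
 */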
2379 boolean_t
2380 pmap_extract(pmap, va, pap)
2381 pmap_t pmap;
2382 vaddr_t va;
2383 paddr_t *pap;
2384 {
2385 pt_entry_t *pte;
2386 paddr_t pa;
2387
2388 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2389
2390 /*
2391 * Get the pte for this virtual address. If there is no pte
2392 * then there is no page table etc.
2393 */
2394
2395 pte = pmap_pte(pmap, va);
2396 if (!pte)
2397 return(FALSE);
2398
2399 	/* Is the pte valid? If not then no page is actually mapped here */
2400 if (!pmap_pte_v(pte))
2401 return(FALSE);
2402
2403 /* Return the physical address depending on the PTE type */
2404 /* XXX What about L1 section mappings ? */
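	/*
	 * For a large page, mask the pte down to the large-page frame and
	 * add back va's offset within the large page.
	 */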
2405 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2406 /* Extract the physical address from the pte */
2407 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2408
2409 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2410 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2411
2412 if (pap != NULL)
2413 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2414 return (TRUE);
2415 } else {
2416 /* Extract the physical address from the pte */
2417 pa = pmap_pte_pa(pte);
2418
2419 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2420 (pa | (va & ~PG_FRAME))));
2421
2422 if (pap != NULL)
2423 *pap = pa | (va & ~PG_FRAME);
2424 return (TRUE);
2425 }
2426 }
2427
2428
2429 /*
2430 * Copy the range specified by src_addr/len from the source map to the
2431 * range dst_addr/len in the destination map.
2432 *
2433 * This routine is only advisory and need not do anything.
2434 */
2435
2436 void
2437 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2438 pmap_t dst_pmap;
2439 pmap_t src_pmap;
2440 vaddr_t dst_addr;
2441 vsize_t len;
2442 vaddr_t src_addr;
2443 {
2444 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2445 dst_pmap, src_pmap, dst_addr, len, src_addr));
2446 }
2447
2448 #if defined(PMAP_DEBUG)
2449 void
2450 pmap_dump_pvlist(phys, m)
2451 vaddr_t phys;
2452 char *m;
2453 {
2454 struct pv_entry *pv;
2455 int bank, off;
2456
2457 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2458 printf("INVALID PA\n");
2459 return;
2460 }
2461 pv = &vm_physmem[bank].pmseg.pvent[off];
2462 printf("%s %08lx:", m, phys);
2463 if (pv->pv_pmap == NULL) {
2464 printf(" no mappings\n");
2465 return;
2466 }
2467
2468 for (; pv; pv = pv->pv_next)
2469 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2470 pv->pv_va, pv->pv_flags);
2471
2472 printf("\n");
2473 }
2474
2475 #endif /* PMAP_DEBUG */
2476
2477 boolean_t
2478 pmap_testbit(pa, setbits)
2479 paddr_t pa;
2480 int setbits;
2481 {
2482 int bank, off;
2483
2484 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2485
2486 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2487 return(FALSE);
2488
2489 /*
2490 * Check saved info only
2491 */
2492 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2493 PDEBUG(0, printf("pmap_attributes = %02x\n",
2494 vm_physmem[bank].pmseg.attrs[off]));
2495 return(TRUE);
2496 }
2497
2498 return(FALSE);
2499 }
2500
2501
2502 /*
2503 * Modify pte bits for all ptes corresponding to the given physical address.
2504 * We use `maskbits' rather than `clearbits' because we're always passing
2505 * constants and the latter would require an extra inversion at run-time.
2506 */
2507
2508 void
2509 pmap_clearbit(pa, maskbits)
2510 paddr_t pa;
2511 int maskbits;
2512 {
2513 struct pv_entry *pv;
2514 pt_entry_t *pte;
2515 vaddr_t va;
2516 int bank, off;
2517 int s;
2518
2519 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2520 pa, maskbits));
2521 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2522 return;
2523 pv = &vm_physmem[bank].pmseg.pvent[off];
2524 s = splvm();
2525
2526 /*
2527 * Clear saved attributes (modify, reference)
2528 */
2529 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2530
2531 if (pv->pv_pmap == NULL) {
2532 splx(s);
2533 return;
2534 }
2535
2536 	/*
2537 	 * Loop over all current mappings, setting/clearing as appropriate.
2538 	 */
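	/*
	 * Write permission is revoked by clearing AP_W (the next write
	 * re-faults through pmap_modified_emulation()); the referenced state
	 * is cleared by invalidating the pte (the next access re-faults
	 * through pmap_handled_emulation()).
	 */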
2539 for (; pv; pv = pv->pv_next) {
2540 va = pv->pv_va;
2541
2542 /*
2543 * XXX don't write protect pager mappings
2544 */
2545 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2546 printf("pmap_clearbit: bogon alpha\n");
2547 continue;
2548 }
2549
2550 pv->pv_flags &= ~maskbits;
2551 pte = pmap_pte(pv->pv_pmap, va);
2552 if (maskbits & (PT_Wr|PT_M))
2553 *pte = *pte & ~PT_AP(AP_W);
2554 if (maskbits & PT_H)
2555 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2556 }
2557 cpu_tlb_flushID();
2558
2559 splx(s);
2560 }
2561
2562
2563 boolean_t
2564 pmap_clear_modify(pg)
2565 struct vm_page *pg;
2566 {
2567 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2568 boolean_t rv;
2569
2570 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2571 rv = pmap_testbit(pa, PT_M);
2572 pmap_clearbit(pa, PT_M);
2573 return rv;
2574 }
2575
2576
2577 boolean_t
2578 pmap_clear_reference(pg)
2579 struct vm_page *pg;
2580 {
2581 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2582 boolean_t rv;
2583
2584 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2585 rv = pmap_testbit(pa, PT_H);
2586 pmap_clearbit(pa, PT_H);
2587 return rv;
2588 }
2589
2590
2591 void
2592 pmap_copy_on_write(pa)
2593 paddr_t pa;
2594 {
2595 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2596 pmap_clearbit(pa, PT_Wr);
2597 }
2598
2599
2600 boolean_t
2601 pmap_is_modified(pg)
2602 struct vm_page *pg;
2603 {
2604 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2605 boolean_t result;
2606
2607 result = pmap_testbit(pa, PT_M);
2608 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2609 return (result);
2610 }
2611
2612
2613 boolean_t
2614 pmap_is_referenced(pg)
2615 struct vm_page *pg;
2616 {
2617 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2618 boolean_t result;
2619
2620 result = pmap_testbit(pa, PT_H);
2621 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2622 return (result);
2623 }
2624
2625
2626 int
2627 pmap_modified_emulation(pmap, va)
2628 pmap_t pmap;
2629 vaddr_t va;
2630 {
2631 pt_entry_t *pte;
2632 paddr_t pa;
2633 int bank, off;
2634 struct pv_entry *pv;
2635 u_int flags;
2636
2637 PDEBUG(2, printf("pmap_modified_emulation\n"));
2638
2639 /* Get the pte */
2640 pte = pmap_pte(pmap, va);
2641 if (!pte) {
2642 PDEBUG(2, printf("no pte\n"));
2643 return(0);
2644 }
2645
2646 PDEBUG(1, printf("*pte=%08x\n", *pte));
2647
2648 /* Check for a zero pte */
2649 if (*pte == 0)
2650 return(0);
2651
2652 	/* Already writable: not a mod-emulation fault (can happen when user code touches kernel memory). */
2653 if ((*pte & PT_AP(AP_W)) != 0)
2654 return (0);
2655
2656 /* Extract the physical address of the page */
2657 pa = pmap_pte_pa(pte);
2658 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2659 return(0);
2660
2661 /* Get the current flags for this page. */
2662 pv = &vm_physmem[bank].pmseg.pvent[off];
2663 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2664 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2665
2666 	/*
2667 	 * Do the flags say this page is writable? If not, this is a
2668 	 * genuine write fault. If yes, the fault is ours: we never
2669 	 * reflected the write permission in the PTE. Now that we know a
2670 	 * write has occurred, we can correct this and also set the
2671 	 * modified bit.
2672 	 */
2673 if (~flags & PT_Wr)
2674 return(0);
2675
2676 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2677 va, pte, *pte));
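	/* Record the reference/modification and grant write permission. */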
2678 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2679 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2680 PDEBUG(0, printf("->(%08x)\n", *pte));
2681
2682 /* Return, indicating the problem has been dealt with */
2683 cpu_tlb_flushID_SE(va);
2684 return(1);
2685 }
2686
2687
2688 int
2689 pmap_handled_emulation(pmap, va)
2690 pmap_t pmap;
2691 vaddr_t va;
2692 {
2693 pt_entry_t *pte;
2694 paddr_t pa;
2695 int bank, off;
2696
2697 PDEBUG(2, printf("pmap_handled_emulation\n"));
2698
2699 /* Get the pte */
2700 pte = pmap_pte(pmap, va);
2701 if (!pte) {
2702 PDEBUG(2, printf("no pte\n"));
2703 return(0);
2704 }
2705
2706 PDEBUG(1, printf("*pte=%08x\n", *pte));
2707
2708 /* Check for a zero pte */
2709 if (*pte == 0)
2710 return(0);
2711
2712 	/* Already valid: not a ref-emulation fault (can happen when user code touches kernel memory). */
2713 if ((*pte & L2_MASK) != L2_INVAL)
2714 return (0);
2715
2716 /* Extract the physical address of the page */
2717 pa = pmap_pte_pa(pte);
2718 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2719 return(0);
2720
2721 	/*
2722 	 * OK, just make the pte valid and mark the saved attributes as referenced.
2723 	 */
2724 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2725 va, pte, *pte));
2726 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2727 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2728 PDEBUG(0, printf("->(%08x)\n", *pte));
2729
2730 /* Return, indicating the problem has been dealt with */
2731 cpu_tlb_flushID_SE(va);
2732 return(1);
2733 }
2734
2735 /*
2736 * pmap_collect: free resources held by a pmap
2737 *
2738 * => optional function.
2739 * => called when a process is swapped out to free memory.
2740 */
2741
2742 void
2743 pmap_collect(pmap)
2744 pmap_t pmap;
2745 {
2746 }
2747
2748 /*
2749 * Routine: pmap_procwr
2750 *
2751 * Function:
2752 * Synchronize caches corresponding to [addr, addr+len) in p.
2753 *
2754 */
2755 void
2756 pmap_procwr(p, va, len)
2757 struct proc *p;
2758 vaddr_t va;
2759 int len;
2760 {
2761 /* We only need to do anything if it is the current process. */
2762 if (p == curproc)
2763 cpu_cache_syncI_rng(va, len);
2764 }
2765
2766 /* End of pmap.c */
2767