1 /*	$NetBSD: pmap.c,v 1.2 2001/03/04 07:30:19 matt Exp $	*/
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1994-1998 Mark Brinicombe.
41 * Copyright (c) 1994 Brini.
42 * All rights reserved.
43 *
44 * This code is derived from software written for Brini by Mark Brinicombe
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Mark Brinicombe.
57 * 4. The name of the author may not be used to endorse or promote products
58 * derived from this software without specific prior written permission.
59 *
60 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
61 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
62 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
63 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
64 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
65 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
66 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
67 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
68 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
69 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
70 * RiscBSD kernel project
71 *
72 * pmap.c
73 *
74 * Machine dependent VM code
75 *
76 * Created : 20/09/94
77 */
78
79 /*
80 * Performance improvements, UVM changes, overhauls and part-rewrites
81 * were contributed by Neil A. Carson <neil (at) causality.com>.
82 */
83
84 /*
85 * The dram block info is currently referenced from the bootconfig.
86 * This should be placed in a separate structure.
87 */
88
89 /*
90 * Special compilation symbols
91 * PMAP_DEBUG - Build in pmap_debug_level code
92 */
93
94 /* Include header files */
95
96 #include "opt_pmap_debug.h"
97 #include "opt_ddb.h"
98
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/kernel.h>
102 #include <sys/systm.h>
103 #include <sys/proc.h>
104 #include <sys/malloc.h>
105 #include <sys/user.h>
106
107 #include <uvm/uvm.h>
108
109 #include <machine/bootconfig.h>
110 #include <machine/bus.h>
111 #include <machine/pmap.h>
112 #include <machine/pcb.h>
113 #include <machine/param.h>
114 #include <machine/katelib.h>
115
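/*
 * PDEBUG(level, statement) executes 'statement' only when the run-time
 * pmap_debug_level is at least 'level'; statements guarded with higher
 * levels are progressively more verbose.  The default level of -2 keeps
 * all of them quiet until pmap_debug() below is used to raise it.
 */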
116 #ifdef PMAP_DEBUG
117 #define PDEBUG(_lev_,_stat_) \
118 if (pmap_debug_level >= (_lev_)) \
119 ((_stat_))
120 int pmap_debug_level = -2;
121 #else /* PMAP_DEBUG */
122 #define PDEBUG(_lev_,_stat_) /* Nothing */
123 #endif /* PMAP_DEBUG */
124
125 struct pmap kernel_pmap_store;
126 pmap_t kernel_pmap;
127
128 pagehook_t page_hook0;
129 pagehook_t page_hook1;
130 char *memhook;
131 pt_entry_t msgbufpte;
132 extern caddr_t msgbufaddr;
133
134 #ifdef DIAGNOSTIC
135 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
136 #endif
137
138 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
139
140 int pv_nfree = 0;
141
142 vsize_t npages;
143
144 extern paddr_t physical_start;
145 extern paddr_t physical_freestart;
146 extern paddr_t physical_end;
147 extern paddr_t physical_freeend;
148 extern unsigned int free_pages;
149 extern int max_processes;
150
151 vaddr_t virtual_start;
152 vaddr_t virtual_end;
153
154 vaddr_t avail_start;
155 vaddr_t avail_end;
156
157 extern pv_addr_t systempage;
158
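/*
 * ALLOC_PAGE_HOOK carves 's' bytes out of the kernel virtual address
 * space for a page hook: it records the current virtual_start and the
 * corresponding kernel PTE in 'x', then advances virtual_start.  It is
 * only used during pmap_bootstrap(), before the VM system is up.
 */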
159 #define ALLOC_PAGE_HOOK(x, s) \
160 x.va = virtual_start; \
161 x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
162 virtual_start += s;
163
164 /* Variables used by the L1 page table queue code */
165 SIMPLEQ_HEAD(l1pt_queue, l1pt);
166 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
167 int l1pt_static_queue_count; /* items in the static l1 queue */
168 int l1pt_static_create_count; /* static l1 items created */
169 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
170 int l1pt_queue_count; /* items in the l1 queue */
171 int l1pt_create_count; /* stat - L1's create count */
172 int l1pt_reuse_count; /* stat - L1's reused count */
173
174 /* Local function prototypes (not used outside this file) */
175 pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
176 int pmap_page_index __P((paddr_t pa));
177 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
178 paddr_t pa, unsigned int flags));
179 void pmap_copy_on_write __P((paddr_t pa));
180 void pmap_pinit __P((pmap_t));
181 void pmap_release __P((pmap_t));
182
183 /* Other function prototypes */
184 extern void bzero_page __P((vaddr_t));
185 extern void bcopy_page __P((vaddr_t, vaddr_t));
186
187 struct l1pt *pmap_alloc_l1pt __P((void));
188 static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
189 vaddr_t l2pa));
190
191 #ifdef MYCROFT_HACK
192 int mycroft_hack = 0;
193 #endif
194
195 /* Function to set the debug level of the pmap code */
196
197 #ifdef PMAP_DEBUG
198 void
199 pmap_debug(level)
200 int level;
201 {
202 pmap_debug_level = level;
203 printf("pmap_debug: level=%d\n", pmap_debug_level);
204 }
205 #endif /* PMAP_DEBUG */
206
207 #include "isadma.h"
208
209 #if NISADMA > 0
210 /*
211 * Used to protect memory for ISA DMA bounce buffers. If, when loading
212 * pages into the system, memory intersects with any of these ranges,
213 * the intersecting memory will be loaded into a lower-priority free list.
214 */
215 bus_dma_segment_t *pmap_isa_dma_ranges;
216 int pmap_isa_dma_nranges;
217
218 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
219 paddr_t *, psize_t *));
220
221 /*
222 * Check if a memory range intersects with an ISA DMA range, and
223 * return the page-rounded intersection if it does. The intersection
224 * will be placed on a lower-priority free list.
225 */
226 boolean_t
227 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
228 paddr_t pa;
229 psize_t size;
230 paddr_t *pap;
231 psize_t *sizep;
232 {
233 bus_dma_segment_t *ds;
234 int i;
235
236 if (pmap_isa_dma_ranges == NULL)
237 return (FALSE);
238
239 for (i = 0, ds = pmap_isa_dma_ranges;
240 i < pmap_isa_dma_nranges; i++, ds++) {
241 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
242 /*
243 * Beginning of region intersects with this range.
244 */
245 *pap = trunc_page(pa);
246 *sizep = round_page(min(pa + size,
247 ds->ds_addr + ds->ds_len) - pa);
248 return (TRUE);
249 }
250 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
251 /*
252 * End of region intersects with this range.
253 */
254 *pap = trunc_page(ds->ds_addr);
255 *sizep = round_page(min((pa + size) - ds->ds_addr,
256 ds->ds_len));
257 return (TRUE);
258 }
259 }
260
261 /*
262 * No intersection found.
263 */
264 return (FALSE);
265 }
266 #endif /* NISADMA > 0 */
267
268 /*
269 * Functions for manipulating pv_entry structures. These are used to keep a
270 * record of the mappings of virtual addresses and the associated physical
271 * pages.
272 */
273
274 /*
275 * Allocate a new pv_entry structure from the freelist. If the list is
276 * empty allocate a new page and fill the freelist.
277 */
278 struct pv_entry *
279 pmap_alloc_pv()
280 {
281 struct pv_page *pvp;
282 struct pv_entry *pv;
283 int i;
284
285 /*
286 * Do we have any free pv_entry structures left ?
287 * If not allocate a page of them
288 */
289
290 if (pv_nfree == 0) {
291 /* NOTE: can't lock kernel_map here */
292 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
293 if (pvp == 0)
294 panic("pmap_alloc_pv: kmem_alloc() failed");
295 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
296 for (i = NPVPPG - 2; i; i--, pv++)
297 pv->pv_next = pv + 1;
298 pv->pv_next = 0;
299 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
300 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
301 pv = &pvp->pvp_pv[0];
302 } else {
303 --pv_nfree;
304 pvp = pv_page_freelist.tqh_first;
305 if (--pvp->pvp_pgi.pgi_nfree == 0) {
306 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
307 }
308 pv = pvp->pvp_pgi.pgi_freelist;
309 #ifdef DIAGNOSTIC
310 if (pv == 0)
311 panic("pmap_alloc_pv: pgi_nfree inconsistent");
312 #endif /* DIAGNOSTIC */
313 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
314 }
315 return pv;
316 }
317
318 /*
319 * Release a pv_entry structure putting it back on the freelist.
320 */
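/*
 * The switch below keys off the post-incremented pgi_nfree count: a
 * result of 1 means the page was previously full and must be put back
 * on the free list (falling through to the default case to chain the
 * entry), while a result of NPVPPG means every entry in the page is now
 * free and the whole page is returned to the system.
 */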
321
322 void
323 pmap_free_pv(pv)
324 struct pv_entry *pv;
325 {
326 struct pv_page *pvp;
327
328 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
329 switch (++pvp->pvp_pgi.pgi_nfree) {
330 case 1:
331 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
332 default:
333 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
334 pvp->pvp_pgi.pgi_freelist = pv;
335 ++pv_nfree;
336 break;
337 case NPVPPG:
338 pv_nfree -= NPVPPG - 1;
339 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
340 FREE((vaddr_t)pvp, M_VMPVENT);
341 break;
342 }
343 }
344
345 #if 0
346 void
347 pmap_collect_pv()
348 {
349 struct pv_page_list pv_page_collectlist;
350 struct pv_page *pvp, *npvp;
351 struct pv_entry *ph, *ppv, *pv, *npv;
352 int s;
353
354 TAILQ_INIT(&pv_page_collectlist);
355
356 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
357 if (pv_nfree < NPVPPG)
358 break;
359 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
360 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
361 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
362 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
363 pvp_pgi.pgi_list);
364 pv_nfree -= NPVPPG;
365 pvp->pvp_pgi.pgi_nfree = -1;
366 }
367 }
368
369 if (pv_page_collectlist.tqh_first == 0)
370 return;
371
372 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
373 if (ph->pv_pmap == 0)
374 continue;
375 s = splvm();
376 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
377 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
378 if (pvp->pvp_pgi.pgi_nfree == -1) {
379 pvp = pv_page_freelist.tqh_first;
380 if (--pvp->pvp_pgi.pgi_nfree == 0) {
381 TAILQ_REMOVE(&pv_page_freelist,
382 pvp, pvp_pgi.pgi_list);
383 }
384 npv = pvp->pvp_pgi.pgi_freelist;
385 #ifdef DIAGNOSTIC
386 if (npv == 0)
387 panic("pmap_collect_pv: pgi_nfree inconsistent");
388 #endif /* DIAGNOSTIC */
389 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
390 *npv = *pv;
391 ppv->pv_next = npv;
392 ppv = npv;
393 } else
394 ppv = pv;
395 }
396 splx(s);
397 }
398
399 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
400 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
401 FREE((vaddr_t)pvp, M_VMPVENT);
402 }
403 }
404 #endif
405
406 /*
407 * Enter a new physical-virtual mapping into the pv table
408 */
409
410 /*__inline*/ void
411 pmap_enter_pv(pmap, va, pv, flags)
412 pmap_t pmap;
413 vaddr_t va;
414 struct pv_entry *pv;
415 u_int flags;
416 {
417 struct pv_entry *npv;
418 u_int s;
419
420 #ifdef DIAGNOSTIC
421 if (!pmap_initialized)
422 panic("pmap_enter_pv: !pmap_initialized");
423 #endif
424
425 s = splvm();
426
427 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
428 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
429
430 if (pv->pv_pmap == NULL) {
431 /*
432 * No entries yet, use header as the first entry
433 */
434 pv->pv_va = va;
435 pv->pv_pmap = pmap;
436 pv->pv_next = NULL;
437 pv->pv_flags = flags;
438 } else {
439 /*
440 * There is at least one other VA mapping this page.
441 * Place this entry after the header.
442 */
443 #ifdef PMAP_DEBUG
444 for (npv = pv; npv; npv = npv->pv_next)
445 if (pmap == npv->pv_pmap && va == npv->pv_va)
446 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
447 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
448 #endif
449 npv = pmap_alloc_pv();
450 npv->pv_va = va;
451 npv->pv_pmap = pmap;
452 npv->pv_flags = flags;
453 npv->pv_next = pv->pv_next;
454 pv->pv_next = npv;
455 }
456
457 if (flags & PT_W)
458 ++pmap->pm_stats.wired_count;
459
460 splx(s);
461 }
462
463
464 /*
465 * Remove a physical-virtual mapping from the pv table
466 */
467
468 /*__inline*/ void
469 pmap_remove_pv(pmap, va, pv)
470 pmap_t pmap;
471 vaddr_t va;
472 struct pv_entry *pv;
473 {
474 struct pv_entry *npv;
475 u_int s;
476 u_int flags = 0;
477
478 #ifdef DIAGNOSTIC
479 if (!pmap_initialized)
480 panic("pmap_remove_pv: !pmap_initialized");
481 #endif
482
483 s = splvm();
484
485 /*
486 * If it is the first entry on the list, it is actually
487 * in the header and we must copy the following entry up
488 * to the header. Otherwise we must search the list for
489 * the entry. In either case we free the now unused entry.
490 */
491
492 if (pmap == pv->pv_pmap && va == pv->pv_va) {
493 npv = pv->pv_next;
494 if (npv) {
495 *pv = *npv;
496 flags = npv->pv_flags;
497 pmap_free_pv(npv);
498 } else {
499 flags = pv->pv_flags;
500 pv->pv_pmap = NULL;
501 }
502 } else {
503 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
504 if (pmap == npv->pv_pmap && va == npv->pv_va)
505 break;
506 }
507 if (npv) {
508 pv->pv_next = npv->pv_next;
509 flags = npv->pv_flags;
510 pmap_free_pv(npv);
511 } else
512 panic("pmap_remove_pv: lost entry");
513 }
514
515 if (flags & PT_W)
516 --pmap->pm_stats.wired_count;
517
518 splx(s);
519 }
520
521 /*
522 * Modify a physical-virtual mapping in the pv table
523 */
524
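/*
 * The new flag value is computed as (old_flags & ~bic_mask) ^ eor_mask,
 * so callers clear bits by naming them in bic_mask and set or toggle
 * bits via eor_mask; for example,
 * pmap_modify_pv(pmap, va, pv, PT_Wr, 0) clears the writable flag and
 * returns the previous flags.
 */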
525 /*__inline */ u_int
526 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
527 pmap_t pmap;
528 vaddr_t va;
529 struct pv_entry *pv;
530 u_int bic_mask;
531 u_int eor_mask;
532 {
533 struct pv_entry *npv;
534 u_int s;
535 u_int flags, oflags;
536
537 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
538 pmap, va, pv, bic_mask, eor_mask));
539
540 #ifdef DIAGNOSTIC
541 if (!pmap_initialized)
542 panic("pmap_modify_pv: !pmap_initialized");
543 #endif
544
545 s = splvm();
546
547 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
548 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
549
550 /*
551 * There is at least one VA mapping this page.
552 */
553
554 for (npv = pv; npv; npv = npv->pv_next) {
555 if (pmap == npv->pv_pmap && va == npv->pv_va) {
556 oflags = npv->pv_flags;
557 npv->pv_flags = flags =
558 ((oflags & ~bic_mask) ^ eor_mask);
559 if ((flags ^ oflags) & PT_W) {
560 if (flags & PT_W)
561 ++pmap->pm_stats.wired_count;
562 else
563 --pmap->pm_stats.wired_count;
564 }
565 PDEBUG(0, printf("done flags=%08x\n", flags));
566 splx(s);
567 return (oflags);
568 }
569 }
570
571 PDEBUG(0, printf("done.\n"));
572 splx(s);
573 return (0);
574 }
575
576
577 /*
578 * Map the specified level 2 pagetable into the level 1 page table for
579 * the given pmap to cover a chunk of virtual address space starting from the
580 * address specified.
581 */
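/*
 * Each L1 entry maps 1MB of VA and each L2 table here is 1KB (0x400
 * bytes), so the four consecutive L1 entries filled below all come from
 * one 4KB page of PTEs; the index is therefore rounded down to a
 * multiple of four.  As a worked example (assuming PDSHIFT is 20, i.e.
 * 1MB sections): va 0x12345678 gives ptva = (0x12345678 >> 20) & ~3 =
 * 0x120, and entries 0x120-0x123 cover VAs 0x12000000 - 0x123fffff.
 */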
582 static /*__inline*/ void
583 pmap_map_in_l1(pmap, va, l2pa)
584 pmap_t pmap;
585 vaddr_t va, l2pa;
586 {
587 vaddr_t ptva;
588
589 /* Calculate the index into the L1 page table. */
590 ptva = (va >> PDSHIFT) & ~3;
591
592 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
593 pmap->pm_pdir, L1_PTE(l2pa), ptva));
594
595 /* Map page table into the L1. */
596 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
597 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
598 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
599 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
600
601 PDEBUG(0, printf("pt self reference %lx in %lx\n",
602 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
603
604 /* Map the page table into the page table area. */
605 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
606
607 /* XXX should be a purge */
608 /* cpu_tlb_flushD();*/
609 }
610
611 #if 0
612 static /*__inline*/ void
613 pmap_unmap_in_l1(pmap, va)
614 pmap_t pmap;
615 vaddr_t va;
616 {
617 vaddr_t ptva;
618
619 /* Calculate the index into the L1 page table. */
620 ptva = (va >> PDSHIFT) & ~3;
621
622 /* Unmap page table from the L1. */
623 pmap->pm_pdir[ptva + 0] = 0;
624 pmap->pm_pdir[ptva + 1] = 0;
625 pmap->pm_pdir[ptva + 2] = 0;
626 pmap->pm_pdir[ptva + 3] = 0;
627
628 /* Unmap the page table from the page table area. */
629 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
630
631 /* XXX should be a purge */
632 /* cpu_tlb_flushD();*/
633 }
634 #endif
635
636
637 /*
638 * Used to map a range of physical addresses into kernel
639 * virtual address space.
640 *
641 * For now, VM is already on; we only need to map the
642 * specified memory.
643 */
644 vaddr_t
645 pmap_map(va, spa, epa, prot)
646 vaddr_t va, spa, epa;
647 int prot;
648 {
649 while (spa < epa) {
650 pmap_enter(pmap_kernel(), va, spa, prot, 0);
651 va += NBPG;
652 spa += NBPG;
653 }
654 return(va);
655 }
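
/*
 * Illustrative sketch only (not part of the original file): mapping a
 * hypothetical 1MB device region into kernel VA space with pmap_map().
 * The function name, physical range and the source of 'va' are made up
 * for the example.
 */
#if 0
static vaddr_t
pmap_map_example(vaddr_t va)
{
	/* 0xd0000000 - 0xd00fffff is a made-up physical range. */
	return pmap_map(va, 0xd0000000, 0xd0100000,
	    VM_PROT_READ | VM_PROT_WRITE);
}
#endif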
656
657
658 /*
659 * void pmap_bootstrap(pd_entry_t *kernel_l1pt)
660 *
661 * bootstrap the pmap system. This is called from initarm and allows
662 * the pmap system to initialise any structures it requires.
663 *
664 * Currently this sets up the kernel_pmap that is statically allocated
665 * and also allocates virtual addresses for certain page hooks.
666 * Currently only one page hook is allocated; it is used
667 * to zero physical pages of memory.
668 * It also initialises the start and end address of the kernel data space.
669 */
670 extern paddr_t physical_freestart;
671 extern paddr_t physical_freeend;
672
673 struct pv_entry *boot_pvent;
674 char *boot_attrs;
675
676 void
677 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
678 pd_entry_t *kernel_l1pt;
679 pv_addr_t kernel_ptpt;
680 {
681 int loop;
682 paddr_t start, end;
683 #if NISADMA > 0
684 paddr_t istart;
685 psize_t isize;
686 #endif
687 vsize_t size;
688
689 kernel_pmap = &kernel_pmap_store;
690
691 kernel_pmap->pm_pdir = kernel_l1pt;
692 kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
693 kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
694 simple_lock_init(&kernel_pmap->pm_lock);
695 kernel_pmap->pm_count = 1;
696
697 /*
698 * Initialize PAGE_SIZE-dependent variables.
699 */
700 uvm_setpagesize();
701
702 npages = 0;
703 loop = 0;
704 while (loop < bootconfig.dramblocks) {
705 start = (paddr_t)bootconfig.dram[loop].address;
706 end = start + (bootconfig.dram[loop].pages * NBPG);
707 if (start < physical_freestart)
708 start = physical_freestart;
709 if (end > physical_freeend)
710 end = physical_freeend;
711 #if 0
712 printf("%d: %lx -> %lx\n", loop, start, end - 1);
713 #endif
714 #if NISADMA > 0
715 if (pmap_isa_dma_range_intersect(start, end - start,
716 &istart, &isize)) {
717 /*
718 * Place the pages that intersect with the
719 * ISA DMA range onto the ISA DMA free list.
720 */
721 #if 0
722 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
723 istart + isize - 1);
724 #endif
725 uvm_page_physload(atop(istart),
726 atop(istart + isize), atop(istart),
727 atop(istart + isize), VM_FREELIST_ISADMA);
728 npages += atop(istart + isize) - atop(istart);
729
730 /*
731 * Load the pieces that come before
732 * the intersection into the default
733 * free list.
734 */
735 if (start < istart) {
736 #if 0
737 printf(" BEFORE 0x%lx -> 0x%lx\n",
738 start, istart - 1);
739 #endif
740 uvm_page_physload(atop(start),
741 atop(istart), atop(start),
742 atop(istart), VM_FREELIST_DEFAULT);
743 npages += atop(istart) - atop(start);
744 }
745
746 /*
747 * Load the pieces that come after
748 * the intersection into the default
749 * free list.
750 */
751 if ((istart + isize) < end) {
752 #if 0
753 printf(" AFTER 0x%lx -> 0x%lx\n",
754 (istart + isize), end - 1);
755 #endif
756 uvm_page_physload(atop(istart + isize),
757 atop(end), atop(istart + isize),
758 atop(end), VM_FREELIST_DEFAULT);
759 npages += atop(end) - atop(istart + isize);
760 }
761 } else {
762 uvm_page_physload(atop(start), atop(end),
763 atop(start), atop(end), VM_FREELIST_DEFAULT);
764 npages += atop(end) - atop(start);
765 }
766 #else /* NISADMA > 0 */
767 uvm_page_physload(atop(start), atop(end),
768 atop(start), atop(end), VM_FREELIST_DEFAULT);
769 npages += atop(end) - atop(start);
770 #endif /* NISADMA > 0 */
771 ++loop;
772 }
773
774 #ifdef MYCROFT_HACK
775 printf("npages = %ld\n", npages);
776 #endif
777
778 virtual_start = KERNEL_VM_BASE;
779 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
780
781 ALLOC_PAGE_HOOK(page_hook0, NBPG);
782 ALLOC_PAGE_HOOK(page_hook1, NBPG);
783
784 /*
785 * The mem special device needs a virtual hook but we don't
786 * need a pte
787 */
788 memhook = (char *)virtual_start;
789 virtual_start += NBPG;
790
791 msgbufaddr = (caddr_t)virtual_start;
792 msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
793 virtual_start += round_page(MSGBUFSIZE);
794
795 size = npages * sizeof(struct pv_entry);
796 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
797 bzero(boot_pvent, size);
798 size = npages * sizeof(char);
799 boot_attrs = (char *)uvm_pageboot_alloc(size);
800 bzero(boot_attrs, size);
801
802 cpu_cache_cleanD();
803 }
804
805 /*
806 * void pmap_init(void)
807 *
808 * Initialize the pmap module.
809 * Called by vm_init() in vm/vm_init.c in order to initialise
810 * any structures that the pmap system needs to map virtual memory.
811 */
812
813 extern int physmem;
814
815 void
816 pmap_init()
817 {
818 int lcv;
819
820 #ifdef MYCROFT_HACK
821 printf("physmem = %d\n", physmem);
822 #endif
823
824 /*
825 * Set the available memory vars - These do not map to real memory
826 * addresses and cannot, as the physical memory is fragmented.
827 * They are used by ps for %mem calculations.
828 * One could argue whether this should be the entire memory or just
829 * the memory that is useable in a user process.
830 */
831 avail_start = 0;
832 avail_end = physmem * NBPG;
833
834 /* Set up pmap info for physsegs. */
835 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
836 vm_physmem[lcv].pmseg.pvent = boot_pvent;
837 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
838 vm_physmem[lcv].pmseg.attrs = boot_attrs;
839 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
840 }
841 #ifdef MYCROFT_HACK
842 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
843 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
844 lcv,
845 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
846 vm_physmem[lcv].start, vm_physmem[lcv].end);
847 }
848 #endif
849 TAILQ_INIT(&pv_page_freelist);
850
851 #ifdef DIAGNOSTIC
852 /* Now it is safe to enable pv_entry recording. */
853 pmap_initialized = TRUE;
854 #endif
855
856 /* Initialise our L1 page table queues and counters */
857 SIMPLEQ_INIT(&l1pt_static_queue);
858 l1pt_static_queue_count = 0;
859 l1pt_static_create_count = 0;
860 SIMPLEQ_INIT(&l1pt_queue);
861 l1pt_queue_count = 0;
862 l1pt_create_count = 0;
863 l1pt_reuse_count = 0;
864 }
865
866 /*
867 * pmap_postinit()
868 *
869 * This routine is called after the vm and kmem subsystems have been
870 * initialised. This allows the pmap code to perform any initialisation
871 * that can only be done once the memory allocation is in place.
872 */
873
874 void
875 pmap_postinit()
876 {
877 int loop;
878 struct l1pt *pt;
879
880 #ifdef PMAP_STATIC_L1S
881 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
882 #else /* PMAP_STATIC_L1S */
883 for (loop = 0; loop < max_processes; ++loop) {
884 #endif /* PMAP_STATIC_L1S */
885 /* Allocate a L1 page table */
886 pt = pmap_alloc_l1pt();
887 if (!pt)
888 panic("Cannot allocate static L1 page tables\n");
889
890 /* Clean it */
891 bzero((void *)pt->pt_va, PD_SIZE);
892 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
893 /* Add the page table to the queue */
894 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
895 ++l1pt_static_queue_count;
896 ++l1pt_static_create_count;
897 }
898 }
899
900
901 /*
902 * Create and return a physical map.
903 *
904 * If the size specified for the map is zero, the map is an actual physical
905 * map, and may be referenced by the hardware.
906 *
907 * If the size specified is non-zero, the map will be used in software only,
908 * and is bounded by that size.
909 */
910
911 pmap_t
912 pmap_create()
913 {
914 pmap_t pmap;
915
916 /* Allocate memory for pmap structure and zero it */
917 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
918 bzero(pmap, sizeof(*pmap));
919
920 /* Now init the machine part of the pmap */
921 pmap_pinit(pmap);
922 return(pmap);
923 }
924
925 /*
926 * pmap_alloc_l1pt()
927 *
928 * This routine allocates physical and virtual memory for an L1 page table
929 * and wires it.
930 * An l1pt structure is returned to describe the allocated page table.
931 *
932 * This routine is allowed to fail if the required memory cannot be allocated.
933 * In this case NULL is returned.
934 */
935
936 struct l1pt *
937 pmap_alloc_l1pt(void)
938 {
939 paddr_t pa;
940 vaddr_t va;
941 struct l1pt *pt;
942 int error;
943 vm_page_t m;
944 pt_entry_t *pte;
945
946 /* Allocate virtual address space for the L1 page table */
947 va = uvm_km_valloc(kernel_map, PD_SIZE);
948 if (va == 0) {
949 #ifdef DIAGNOSTIC
950 printf("pmap: Cannot allocate pageable memory for L1\n");
951 #endif /* DIAGNOSTIC */
952 return(NULL);
953 }
954
955 /* Allocate memory for the l1pt structure */
956 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
957
958 /*
959 * Allocate pages from the VM system.
960 */
961 TAILQ_INIT(&pt->pt_plist);
962 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
963 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
964 if (error) {
965 #ifdef DIAGNOSTIC
966 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
967 error);
968 #endif /* DIAGNOSTIC */
969 /* Release the resources we already have claimed */
970 free(pt, M_VMPMAP);
971 uvm_km_free(kernel_map, va, PD_SIZE);
972 return(NULL);
973 }
974
975 /* Map our physical pages into our virtual space */
976 pt->pt_va = va;
977 m = pt->pt_plist.tqh_first;
978 while (m && va < (pt->pt_va + PD_SIZE)) {
979 pa = VM_PAGE_TO_PHYS(m);
980
981 pmap_enter(pmap_kernel(), va, pa,
982 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
983
984 /* Revoke cacheability and bufferability */
985 /* XXX should be done better than this */
986 pte = pmap_pte(pmap_kernel(), va);
987 *pte = *pte & ~(PT_C | PT_B);
988
989 va += NBPG;
990 m = m->pageq.tqe_next;
991 }
992
993 #ifdef DIAGNOSTIC
994 if (m)
995 panic("pmap_alloc_l1pt: pglist not empty\n");
996 #endif /* DIAGNOSTIC */
997
998 pt->pt_flags = 0;
999 return(pt);
1000 }
1001
1002 /*
1003 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1004 */
1005 void
1006 pmap_free_l1pt(pt)
1007 struct l1pt *pt;
1008 {
1009 	/* Unmap the physical memory from the virtual space */
1010 pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
1011
1012 /* Return the physical memory */
1013 uvm_pglistfree(&pt->pt_plist);
1014
1015 /* Free the virtual space */
1016 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1017
1018 /* Free the l1pt structure */
1019 free(pt, M_VMPMAP);
1020 }
1021
1022 /*
1023 * Allocate a page directory.
1024 * This routine will either allocate a new page directory from the pool
1025 * of L1 page tables currently held by the kernel or it will allocate
1026 * a new one via pmap_alloc_l1pt().
1027 * It will then initialise the l1 page table for use.
1028 */
1029 int
1030 pmap_allocpagedir(pmap)
1031 struct pmap *pmap;
1032 {
1033 paddr_t pa;
1034 struct l1pt *pt;
1035 pt_entry_t *pte;
1036
1037 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1038
1039 /* Do we have any spare L1's lying around ? */
1040 if (l1pt_static_queue_count) {
1041 --l1pt_static_queue_count;
1042 pt = l1pt_static_queue.sqh_first;
1043 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1044 } else if (l1pt_queue_count) {
1045 --l1pt_queue_count;
1046 pt = l1pt_queue.sqh_first;
1047 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1048 ++l1pt_reuse_count;
1049 } else {
1050 pt = pmap_alloc_l1pt();
1051 if (!pt)
1052 return(ENOMEM);
1053 ++l1pt_create_count;
1054 }
1055
1056 /* Store the pointer to the l1 descriptor in the pmap. */
1057 pmap->pm_l1pt = pt;
1058
1059 /* Get the physical address of the start of the l1 */
1060 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1061
1062 /* Store the virtual address of the l1 in the pmap. */
1063 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1064
1065 /* Clean the L1 if it is dirty */
1066 if (!(pt->pt_flags & PTFLAG_CLEAN))
1067 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1068
1069 /* Do we already have the kernel mappings ? */
1070 if (!(pt->pt_flags & PTFLAG_KPT)) {
1071 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1072
1073 bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1074 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1075 KERNEL_PD_SIZE);
1076 pt->pt_flags |= PTFLAG_KPT;
1077 }
1078
1079 /* Allocate a page table to map all the page tables for this pmap */
1080
1081 #ifdef DIAGNOSTIC
1082 if (pmap->pm_vptpt) {
1083 /* XXX What if we have one already ? */
1084 panic("pmap_allocpagedir: have pt already\n");
1085 }
1086 #endif /* DIAGNOSTIC */
1087 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1088 (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
1089 pmap->pm_pptpt &= PG_FRAME;
1090 /* Revoke cacheability and bufferability */
1091 /* XXX should be done better than this */
1092 pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
1093 *pte = *pte & ~(PT_C | PT_B);
1094
1095 /* Wire in this page table */
1096 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1097
1098 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1099
1100 /*
1101 * Map the kernel page tables for 0xf0000000 +
1102 * into the page table used to map the
1103 * pmap's page tables
1104 */
1105 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1106 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1107 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1108 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1109 (KERNEL_PD_SIZE >> 2));
1110
1111 pmap->pm_count = 1;
1112 simple_lock_init(&pmap->pm_lock);
1113
1114 return(0);
1115 }
1116
1117
1118 /*
1119 * Initialize a preallocated and zeroed pmap structure,
1120 * such as one in a vmspace structure.
1121 */
1122
1123 static int pmap_pagedir_ident; /* tsleep() ident */
1124
1125 void
1126 pmap_pinit(pmap)
1127 struct pmap *pmap;
1128 {
1129 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1130
1131 /* Keep looping until we succeed in allocating a page directory */
1132 while (pmap_allocpagedir(pmap) != 0) {
1133 /*
1134 * Ok we failed to allocate a suitable block of memory for an
1135 * L1 page table. This means that either:
1136 * 1. 16KB of virtual address space could not be allocated
1137 * 2. 16KB of physically contiguous memory on a 16KB boundary
1138 * could not be allocated.
1139 *
1140 * Since we cannot fail we will sleep for a while and try
1141 * again. Although we will be woken when another page table
1142 * is freed, other memory releases and swapping may occur
1143 * in the meantime that would let us succeed, so we keep
1144 * retrying regularly just in case.
1145 */
1146
1147 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1148 "l1ptwait", 1000) == EWOULDBLOCK)
1149 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1150 }
1151
1152 /* Map zero page for the pmap. This will also map the L2 for it */
1153 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1154 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1155 }
1156
1157
1158 void
1159 pmap_freepagedir(pmap)
1160 pmap_t pmap;
1161 {
1162 /* Free the memory used for the page table mapping */
1163 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1164
1165 /* junk the L1 page table */
1166 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1167 /* Add the page table to the queue */
1168 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1169 ++l1pt_static_queue_count;
1170 /* Wake up any sleeping processes waiting for a l1 page table */
1171 wakeup((caddr_t)&pmap_pagedir_ident);
1172 } else if (l1pt_queue_count < 8) {
1173 /* Add the page table to the queue */
1174 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1175 ++l1pt_queue_count;
1176 /* Wake up any sleeping processes waiting for a l1 page table */
1177 wakeup((caddr_t)&pmap_pagedir_ident);
1178 } else
1179 pmap_free_l1pt(pmap->pm_l1pt);
1180 }
1181
1182
1183 /*
1184 * Retire the given physical map from service.
1185 * Should only be called if the map contains no valid mappings.
1186 */
1187
1188 void
1189 pmap_destroy(pmap)
1190 pmap_t pmap;
1191 {
1192 int count;
1193
1194 if (pmap == NULL)
1195 return;
1196
1197 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1198 simple_lock(&pmap->pm_lock);
1199 count = --pmap->pm_count;
1200 simple_unlock(&pmap->pm_lock);
1201 if (count == 0) {
1202 pmap_release(pmap);
1203 free((caddr_t)pmap, M_VMPMAP);
1204 }
1205 }
1206
1207
1208 /*
1209 * Release any resources held by the given physical map.
1210 * Called when a pmap initialized by pmap_pinit is being released.
1211 * Should only be called if the map contains no valid mappings.
1212 */
1213
1214 void
1215 pmap_release(pmap)
1216 pmap_t pmap;
1217 {
1218 struct vm_page *page;
1219 pt_entry_t *pte;
1220 int loop;
1221
1222 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1223
1224 #if 0
1225 if (pmap->pm_count != 1) /* XXX: needs sorting */
1226 panic("pmap_release count %d", pmap->pm_count);
1227 #endif
1228
1229 /* Remove the zero page mapping */
1230 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1231
1232 /*
1233 * Free any page tables still mapped
1234 * This is only temporary until pmap_enter can count the number
1235 * of mappings made in a page table. Then pmap_remove() can
1236 * reduce the count and free the pagetable when the count
1237 * reaches zero.
1238 */
1239 for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) {
1240 pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4);
1241 if (*pte != 0) {
1242 PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte));
1243 page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
1244 if (page == NULL)
1245 panic("pmap_release: bad address for phys page");
1246 uvm_pagefree(page);
1247 }
1248 }
1249 /* Free the page dir */
1250 pmap_freepagedir(pmap);
1251 }
1252
1253
1254 /*
1255 * void pmap_reference(pmap_t pmap)
1256 *
1257 * Add a reference to the specified pmap.
1258 */
1259
1260 void
1261 pmap_reference(pmap)
1262 pmap_t pmap;
1263 {
1264 if (pmap == NULL)
1265 return;
1266
1267 simple_lock(&pmap->pm_lock);
1268 pmap->pm_count++;
1269 simple_unlock(&pmap->pm_lock);
1270 }
1271
1272 /*
1273 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1274 *
1275 * Return the start and end addresses of the kernel's virtual space.
1276 * These values are set up in pmap_bootstrap and are updated as pages
1277 * are allocated.
1278 */
1279
1280 void
1281 pmap_virtual_space(start, end)
1282 vaddr_t *start;
1283 vaddr_t *end;
1284 {
1285 *start = virtual_start;
1286 *end = virtual_end;
1287 }
1288
1289
1290 /*
1291 * Activate the address space for the specified process. If the process
1292 * is the current process, load the new MMU context.
1293 */
1294 void
1295 pmap_activate(p)
1296 struct proc *p;
1297 {
1298 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1299 struct pcb *pcb = &p->p_addr->u_pcb;
1300
1301 (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
1302 (paddr_t *)&pcb->pcb_pagedir);
1303
1304 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1305 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1306
1307 if (p == curproc) {
1308 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1309 setttb((u_int)pcb->pcb_pagedir);
1310 }
1311 #if 0
1312 pmap->pm_pdchanged = FALSE;
1313 #endif
1314 }
1315
1316
1317 /*
1318 * Deactivate the address space of the specified process.
1319 */
1320 void
1321 pmap_deactivate(p)
1322 struct proc *p;
1323 {
1324 }
1325
1326
1327 /*
1328 * pmap_clean_page()
1329 *
1330 * This is a local function used to work out the best strategy to clean
1331 * a single page referenced by its entry in the PV table. It's used by
1332 * pmap_copy_page, pmap_zero_page and maybe some others later on.
1333 *
1334 * Its policy is effectively:
1335 * o If there are no mappings, we don't bother doing anything with the cache.
1336 * o If there is one mapping, we clean just that page.
1337 * o If there are multiple mappings, we clean the entire cache.
1338 *
1339 * So that some functions can be further optimised, it returns 0 if it didn't
1340 * clean the entire cache, or 1 if it did.
1341 *
1342 * XXX One bug in this routine is that if the pv_entry has a single page
1343 * mapped at 0x00000000 a whole cache clean will be performed rather than
1344 * just the one page. This should not occur in everyday use, and if it does,
1345 * it merely results in a less efficient clean for the page.
1346 */
1347 static int
1348 pmap_clean_page(pv)
1349 struct pv_entry *pv;
1350 {
1351 int s;
1352 int cache_needs_cleaning = 0;
1353 vaddr_t page_to_clean = 0;
1354
1355 /* Go to splvm() so we get exclusive lock for a mo */
1356 s = splvm();
1357 if (pv->pv_pmap) {
1358 cache_needs_cleaning = 1;
1359 if (!pv->pv_next)
1360 page_to_clean = pv->pv_va;
1361 }
1362 splx(s);
1363
1364 /* Do cache ops outside the splvm. */
1365 if (page_to_clean)
1366 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1367 else if (cache_needs_cleaning) {
1368 cpu_cache_purgeID();
1369 return (1);
1370 }
1371 return (0);
1372 }
1373
1374 /*
1375 * pmap_find_pv()
1376 *
1377 * This is a local function that finds a PV entry for a given physical page.
1378 * This is a common op, and this function removes loads of ifdefs in the code.
1379 */
1380 static __inline struct pv_entry *
1381 pmap_find_pv(phys)
1382 paddr_t phys;
1383 {
1384 int bank, off;
1385 struct pv_entry *pv;
1386
1387 #ifdef DIAGNOSTIC
1388 if (!pmap_initialized)
1389 panic("pmap_find_pv: !pmap_initialized");
1390 #endif
1391
1392 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1393 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1394 pv = &vm_physmem[bank].pmseg.pvent[off];
1395 return (pv);
1396 }
1397
1398 /*
1399 * pmap_zero_page()
1400 *
1401 * Zero a given physical page by mapping it at a page hook point.
1402 * In doing the zero page op, the page we zero is mapped cacheable, since on
1403 * StrongARM accesses to non-cached pages are non-burst, making writing
1404 * _any_ bulk data very slow.
1405 */
1406 void
1407 pmap_zero_page(phys)
1408 paddr_t phys;
1409 {
1410 struct pv_entry *pv;
1411
1412 	/* Get an entry for this page, and clean it. */
1413 pv = pmap_find_pv(phys);
1414 pmap_clean_page(pv);
1415
1416 /*
1417 * Hook in the page, zero it, and purge the cache for that
1418 * zeroed page. Invalidate the TLB as needed.
1419 */
1420 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1421 cpu_tlb_flushD_SE(page_hook0.va);
1422 bzero_page(page_hook0.va);
1423 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1424 }
1425
1426 /*
1427 * pmap_copy_page()
1428 *
1429 * Copy one physical page into another, by mapping the pages into
1430 * hook points. The same comment regarding cacheability as in
1431 * pmap_zero_page also applies here.
1432 */
1433 void
1434 pmap_copy_page(src, dest)
1435 paddr_t src;
1436 paddr_t dest;
1437 {
1438 struct pv_entry *src_pv, *dest_pv;
1439
1440 /* Get PV entries for the pages, and clean them if needed. */
1441 src_pv = pmap_find_pv(src);
1442 dest_pv = pmap_find_pv(dest);
1443 if (!pmap_clean_page(src_pv))
1444 pmap_clean_page(dest_pv);
1445
1446 /*
1447 * Map the pages into the page hook points, copy them, and purge
1448 * the cache for the appropriate page. Invalidate the TLB
1449 * as required.
1450 */
1451 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1452 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1453 cpu_tlb_flushD_SE(page_hook0.va);
1454 cpu_tlb_flushD_SE(page_hook1.va);
1455 bcopy_page(page_hook0.va, page_hook1.va);
1456 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1457 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1458 }
1459
1460 /*
1461 * paddr_t pmap_next_phys_page(paddr_t addr)
1462 *
1463 * Return the physical address of the next page of memory after addr,
1464 * or 0 if there are no more physical pages.
1465 */
1466
1467 paddr_t
1468 pmap_next_phys_page(addr)
1469 	paddr_t addr;
1470
1471 {
1472 int loop;
1473
1474 if (addr < bootconfig.dram[0].address)
1475 return(bootconfig.dram[0].address);
1476
1477 loop = 0;
1478
1479 while (bootconfig.dram[loop].address != 0
1480 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1481 ++loop;
1482
1483 if (bootconfig.dram[loop].address == 0)
1484 return(0);
1485
1486 addr += NBPG;
1487
1488 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1489 if (bootconfig.dram[loop + 1].address == 0)
1490 return(0);
1491 addr = bootconfig.dram[loop + 1].address;
1492 }
1493
1494 return(addr);
1495 }
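
/*
 * Illustrative sketch only (not part of the original file): walking
 * every physical page described by the bootconfig with
 * pmap_next_phys_page().  The function and counter names are made up,
 * and the sketch assumes physical memory does not start at address 0.
 */
#if 0
static vsize_t
pmap_count_phys_pages(void)
{
	paddr_t pa;
	vsize_t count = 0;

	for (pa = bootconfig.dram[0].address; pa != 0;
	    pa = pmap_next_phys_page(pa))
		++count;
	return count;
}
#endif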
1496
1497 #if 0
1498 void
1499 pmap_pte_addref(pmap, va)
1500 pmap_t pmap;
1501 vaddr_t va;
1502 {
1503 pd_entry_t *pde;
1504 paddr_t pa;
1505 struct vm_page *m;
1506
1507 if (pmap == pmap_kernel())
1508 return;
1509
1510 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1511 pa = pmap_pte_pa(pde);
1512 m = PHYS_TO_VM_PAGE(pa);
1513 ++m->wire_count;
1514 #ifdef MYCROFT_HACK
1515 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1516 pmap, va, pde, pa, m, m->wire_count);
1517 #endif
1518 }
1519
1520 void
1521 pmap_pte_delref(pmap, va)
1522 pmap_t pmap;
1523 vaddr_t va;
1524 {
1525 pd_entry_t *pde;
1526 paddr_t pa;
1527 struct vm_page *m;
1528
1529 if (pmap == pmap_kernel())
1530 return;
1531
1532 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1533 pa = pmap_pte_pa(pde);
1534 m = PHYS_TO_VM_PAGE(pa);
1535 --m->wire_count;
1536 #ifdef MYCROFT_HACK
1537 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1538 pmap, va, pde, pa, m, m->wire_count);
1539 #endif
1540 if (m->wire_count == 0) {
1541 #ifdef MYCROFT_HACK
1542 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1543 pmap, va, pde, pa, m);
1544 #endif
1545 pmap_unmap_in_l1(pmap, va);
1546 uvm_pagefree(m);
1547 --pmap->pm_stats.resident_count;
1548 }
1549 }
1550 #else
1551 #define pmap_pte_addref(pmap, va)
1552 #define pmap_pte_delref(pmap, va)
1553 #endif
1554
1555 /*
1556 * Since we have a virtually indexed cache, we may need to inhibit caching if
1557 * there is more than one mapping and at least one of them is writable.
1558 * Since we purge the cache on every context switch, we only need to check for
1559 * other mappings within the same pmap, or kernel_pmap.
1560 * This function is also called when a page is unmapped, to possibly reenable
1561 * caching on any remaining mappings.
1562 */
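/*
 * For example: if a page is mapped at two VAs within the same pmap and
 * at least one of those mappings is writable, the PTEs for both lose
 * PT_C and PT_B; once enough mappings are removed that only one is
 * left, caching and buffering are turned back on for the survivor.
 */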
1563 void
1564 pmap_vac_me_harder(pmap, pv)
1565 pmap_t pmap;
1566 struct pv_entry *pv;
1567 {
1568 struct pv_entry *npv;
1569 pt_entry_t *pte;
1570 int entries = 0;
1571 int writeable = 0;
1572
1573 if (pv->pv_pmap == NULL)
1574 return;
1575
1576 /*
1577 * Count mappings and writable mappings in this pmap.
1578 * Keep a pointer to the first one.
1579 */
1580 for (npv = pv; npv; npv = npv->pv_next) {
1581 /* Count mappings in the same pmap */
1582 if (pmap == npv->pv_pmap) {
1583 if (entries++ == 0)
1584 pv = npv;
1585 /* Writeable mappings */
1586 if (npv->pv_flags & PT_Wr)
1587 ++writeable;
1588 }
1589 }
1590
1591 /*
1592 * Enable or disable caching as necessary.
1593 * We do a quick check of the first PTE to avoid walking the list if
1594 * we're already in the right state.
1595 */
1596 if (entries > 1 && writeable) {
1597 pte = pmap_pte(pmap, pv->pv_va);
1598 if (~*pte & (PT_C | PT_B))
1599 return;
1600 *pte = *pte & ~(PT_C | PT_B);
1601 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1602 if (pmap == npv->pv_pmap) {
1603 pte = pmap_pte(pmap, npv->pv_va);
1604 *pte = *pte & ~(PT_C | PT_B);
1605 }
1606 }
1607 } else if (entries > 0) {
1608 pte = pmap_pte(pmap, pv->pv_va);
1609 if (*pte & (PT_C | PT_B))
1610 return;
1611 *pte = *pte | (PT_C | PT_B);
1612 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1613 if (pmap == npv->pv_pmap) {
1614 pte = pmap_pte(pmap, npv->pv_va);
1615 *pte = *pte | (PT_C | PT_B);
1616 }
1617 }
1618 }
1619 }
1620
1621 /*
1622 * pmap_remove()
1623 *
1624 * pmap_remove is responsible for nuking a number of mappings for a range
1625 * of virtual address space in the current pmap. To do this efficiently
1626 * is interesting, because in a number of cases a wide virtual address
1627 * range may be supplied that contains few actual mappings. So, the
1628 * optimisations are:
1629 * 1. Try and skip over hunks of address space for which an L1 entry
1630 * does not exist.
1631 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1632 * maybe do just a partial cache clean. This path of execution is
1633 * complicated by the fact that the cache must be flushed _before_
1634 * the PTE is nuked, being a VAC :-)
1635 * 3. Maybe later fast-case a single page, but I don't think this is
1636 * going to make _that_ much difference overall.
1637 */
1638
1639 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
1640
1641 void
1642 pmap_remove(pmap, sva, eva)
1643 pmap_t pmap;
1644 vaddr_t sva;
1645 vaddr_t eva;
1646 {
1647 int cleanlist_idx = 0;
1648 struct pagelist {
1649 vaddr_t va;
1650 pt_entry_t *pte;
1651 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1652 pt_entry_t *pte = 0;
1653 paddr_t pa;
1654 int pmap_active;
1655 struct pv_entry *pv;
1656
1657 /* Exit quick if there is no pmap */
1658 if (!pmap)
1659 return;
1660
1661 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1662
1663 sva &= PG_FRAME;
1664 eva &= PG_FRAME;
1665
1666 /* Get a page table pointer */
1667 while (sva < eva) {
1668 pte = pmap_pte(pmap, sva);
1669 if (pte)
1670 break;
1671 sva = (sva & PD_MASK) + NBPD;
1672 }
1673
1674 	/* Note whether the pmap is active, as it then requires cache and TLB cleans */
1675 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1676 || (pmap == kernel_pmap))
1677 pmap_active = 1;
1678 else
1679 pmap_active = 0;
1680
1681 /* Now loop along */
1682 while (sva < eva) {
1683 /* Check if we can move to the next PDE (l1 chunk) */
1684 if (!(sva & PT_MASK))
1685 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1686 sva += NBPD;
1687 pte += arm_byte_to_page(NBPD);
1688 continue;
1689 }
1690
1691 /* We've found a valid PTE, so this page of PTEs has to go. */
1692 if (pmap_pte_v(pte)) {
1693 int bank, off;
1694
1695 /* Update statistics */
1696 --pmap->pm_stats.resident_count;
1697
1698 /*
1699 * Add this page to our cache remove list, if we can.
1700 * If, however the cache remove list is totally full,
1701 * then do a complete cache invalidation taking note
1702 * to backtrack the PTE table beforehand, and ignore
1703 * the lists in future because there's no longer any
1704 * point in bothering with them (we've paid the
1705 * penalty, so will carry on unhindered). Otherwise,
1706 * when we fall out, we just clean the list.
1707 */
1708 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1709 pa = pmap_pte_pa(pte);
1710
1711 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1712 /* Add to the clean list. */
1713 cleanlist[cleanlist_idx].pte = pte;
1714 cleanlist[cleanlist_idx].va = sva;
1715 cleanlist_idx++;
1716 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1717 int cnt;
1718
1719 /* Nuke everything if needed. */
1720 if (pmap_active) {
1721 cpu_cache_purgeID();
1722 cpu_tlb_flushID();
1723 }
1724
1725 /*
1726 * Roll back the previous PTE list,
1727 * and zero out the current PTE.
1728 */
1729 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1730 *cleanlist[cnt].pte = 0;
1731 pmap_pte_delref(pmap, cleanlist[cnt].va);
1732 }
1733 *pte = 0;
1734 pmap_pte_delref(pmap, sva);
1735 cleanlist_idx++;
1736 } else {
1737 /*
1738 * We've already nuked the cache and
1739 * TLB, so just carry on regardless,
1740 * and we won't need to do it again
1741 */
1742 *pte = 0;
1743 pmap_pte_delref(pmap, sva);
1744 }
1745
1746 /*
1747 * Update flags. In a number of circumstances,
1748 * we could cluster a lot of these and do a
1749 * number of sequential pages in one go.
1750 */
1751 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1752 pv = &vm_physmem[bank].pmseg.pvent[off];
1753 pmap_remove_pv(pmap, sva, pv);
1754 pmap_vac_me_harder(pmap, pv);
1755 }
1756 }
1757 sva += NBPG;
1758 pte++;
1759 }
1760
1761 /*
1762 * Now, if we've fallen through down to here, chances are that there
1763 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
1764 */
1765 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1766 u_int cnt;
1767
1768 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1769 if (pmap_active) {
1770 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1771 *cleanlist[cnt].pte = 0;
1772 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1773 } else
1774 *cleanlist[cnt].pte = 0;
1775 pmap_pte_delref(pmap, cleanlist[cnt].va);
1776 }
1777 }
1778 }
1779
1780 /*
1781 * Routine: pmap_remove_all
1782 * Function:
1783 * Removes this physical page from
1784 * all physical maps in which it resides.
1785 * Reflects back modify bits to the pager.
1786 */
1787
1788 void
1789 pmap_remove_all(pa)
1790 paddr_t pa;
1791 {
1792 struct pv_entry *ph, *pv, *npv;
1793 pmap_t pmap;
1794 pt_entry_t *pte;
1795 int s;
1796
1797 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1798
1799 pv = ph = pmap_find_pv(pa);
1800 pmap_clean_page(pv);
1801
1802 s = splvm();
1803
1804 if (ph->pv_pmap == NULL) {
1805 PDEBUG(0, printf("free page\n"));
1806 splx(s);
1807 return;
1808 }
1809
1810 while (pv) {
1811 pmap = pv->pv_pmap;
1812 pte = pmap_pte(pmap, pv->pv_va);
1813
1814 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1815 pv->pv_va, pv->pv_flags));
1816 #ifdef DEBUG
1817 if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
1818 panic("pmap_remove_all: bad mapping");
1819 #endif /* DEBUG */
1820
1821 /*
1822 * Update statistics
1823 */
1824 --pmap->pm_stats.resident_count;
1825
1826 /* Wired bit */
1827 if (pv->pv_flags & PT_W)
1828 --pmap->pm_stats.wired_count;
1829
1830 /*
1831 * Invalidate the PTEs.
1832 * XXX: should cluster them up and invalidate as many
1833 * as possible at once.
1834 */
1835
1836 #ifdef needednotdone
1837 reduce wiring count on page table pages as references drop
1838 #endif
1839
1840 *pte = 0;
1841 pmap_pte_delref(pmap, pv->pv_va);
1842
1843 npv = pv->pv_next;
1844 if (pv == ph)
1845 ph->pv_pmap = NULL;
1846 else
1847 pmap_free_pv(pv);
1848 pv = npv;
1849 }
1850
1851 splx(s);
1852
1853 PDEBUG(0, printf("done\n"));
1854 cpu_tlb_flushID();
1855 }
1856
1857
1858 /*
1859 * Set the physical protection on the specified range of this map as requested.
1860 */
1861
1862 void
1863 pmap_protect(pmap, sva, eva, prot)
1864 pmap_t pmap;
1865 vaddr_t sva;
1866 vaddr_t eva;
1867 vm_prot_t prot;
1868 {
1869 pt_entry_t *pte = NULL;
1870 int armprot;
1871 int flush = 0;
1872 paddr_t pa;
1873 int bank, off;
1874 struct pv_entry *pv;
1875
1876 /*
1877 * Make sure pmap is valid. -dct
1878 */
1879 if (pmap == NULL)
1880 return;
1881 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
1882 pmap, sva, eva, prot));
1883
1884 if (~prot & VM_PROT_READ) {
1885 /* Just remove the mappings. */
1886 pmap_remove(pmap, sva, eva);
1887 return;
1888 }
1889 if (prot & VM_PROT_WRITE) {
1890 /*
1891 * If this is a read->write transition, just ignore it and let
1892 * uvm_fault() take care of it later.
1893 */
1894 return;
1895 }
1896
1897 sva &= PG_FRAME;
1898 eva &= PG_FRAME;
1899
1900 /*
1901 * We need to acquire a pointer to a page table page before entering
1902 * the following loop.
1903 */
1904 while (sva < eva) {
1905 pte = pmap_pte(pmap, sva);
1906 if (pte)
1907 break;
1908 sva = (sva & PD_MASK) + NBPD;
1909 }
1910
1911 while (sva < eva) {
1912 /* only check once in a while */
1913 if ((sva & PT_MASK) == 0) {
1914 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1915 /* We can race ahead here, to the next pde. */
1916 sva += NBPD;
1917 pte += arm_byte_to_page(NBPD);
1918 continue;
1919 }
1920 }
1921
1922 if (!pmap_pte_v(pte))
1923 goto next;
1924
1925 flush = 1;
1926
1927 armprot = 0;
1928 if (sva < VM_MAXUSER_ADDRESS)
1929 armprot |= PT_AP(AP_U);
1930 else if (sva < VM_MAX_ADDRESS)
1931 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
1932 *pte = (*pte & 0xfffff00f) | armprot;
1933
1934 pa = pmap_pte_pa(pte);
1935
1936 /* Get the physical page index */
1937
1938 /* Clear write flag */
1939 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1940 pv = &vm_physmem[bank].pmseg.pvent[off];
1941 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
1942 pmap_vac_me_harder(pmap, pv);
1943 }
1944
1945 next:
1946 sva += NBPG;
1947 pte++;
1948 }
1949
1950 if (flush)
1951 cpu_tlb_flushID();
1952 }
1953
1954 /*
1955 * int pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1956 * int flags)
1957 *
1958 * Insert the given physical page (p) at
1959 * the specified virtual address (v) in the
1960 * target physical map with the protection requested.
1961 *
1962 * If specified, the page will be wired down, meaning
1963 * that the related pte can not be reclaimed.
1964 *
1965 * NB: This is the only routine which MAY NOT lazy-evaluate
1966 * or lose information. That is, this routine must actually
1967 * insert this page into the given map NOW.
1968 */
1969
1970 int
1971 pmap_enter(pmap, va, pa, prot, flags)
1972 pmap_t pmap;
1973 vaddr_t va;
1974 paddr_t pa;
1975 vm_prot_t prot;
1976 int flags;
1977 {
1978 pt_entry_t *pte;
1979 u_int npte;
1980 int bank, off;
1981 struct pv_entry *pv = NULL;
1982 paddr_t opa;
1983 int nflags;
1984 boolean_t wired = (flags & PMAP_WIRED) != 0;
1985
1986 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
1987 va, pa, pmap, prot, wired));
1988
1989 /* Valid pmap ? */
1990 if (pmap == NULL)
1991 return (KERN_SUCCESS);
1992
1993 #ifdef DIAGNOSTIC
1994 /* Valid address ? */
1995 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
1996 panic("pmap_enter: too big");
1997 if (pmap != pmap_kernel() && va != 0) {
1998 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
1999 panic("pmap_enter: kernel page in user map");
2000 } else {
2001 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2002 panic("pmap_enter: user page in kernel map");
2003 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2004 panic("pmap_enter: entering PT page");
2005 }
2006 #endif
2007
2008 /*
2009 * Get a pointer to the pte for this virtual address. If the
2010 * pte pointer is NULL then we are missing the L2 page table
2011 * so we need to create one.
2012 */
2013 pte = pmap_pte(pmap, va);
2014 if (!pte) {
2015 paddr_t l2pa;
2016 struct vm_page *m;
2017
2018 /* Allocate a page table */
2019 for (;;) {
2020 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2021 if (m != NULL)
2022 break;
2023
2024 /*
2025 * No page available. If we're the kernel
2026 * pmap, we die, since we might not have
2027 * a valid thread context. For user pmaps,
2028 * we assume that we _do_ have a valid thread
2029 * context, so we wait here for the pagedaemon
2030 * to free up some pages.
2031 *
2032 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2033 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2034 * XXX SO THIS IS PROBABLY SAFE. In any case,
2035 * XXX other pmap modules claim it is safe to
2036 * XXX sleep here if it's a user pmap.
2037 */
2038 if (pmap == pmap_kernel())
2039 panic("pmap_enter: no free pages");
2040 else
2041 uvm_wait("pmap_enter");
2042 }
2043
2044 /* Wire this page table into the L1. */
2045 l2pa = VM_PAGE_TO_PHYS(m);
2046 pmap_zero_page(l2pa);
2047 pmap_map_in_l1(pmap, va, l2pa);
2048 ++pmap->pm_stats.resident_count;
2049
2050 pte = pmap_pte(pmap, va);
2051 #ifdef DIAGNOSTIC
2052 if (!pte)
2053 panic("pmap_enter: no pte");
2054 #endif
2055 }
2056
2057 nflags = 0;
2058 if (prot & VM_PROT_WRITE)
2059 nflags |= PT_Wr;
2060 if (wired)
2061 nflags |= PT_W;
2062
2063 /* More debugging info */
2064 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2065 *pte));
2066
2067 /* Is the pte valid ? If so then this page is already mapped */
2068 if (pmap_pte_v(pte)) {
2069 /* Get the physical address of the current page mapped */
2070 opa = pmap_pte_pa(pte);
2071
2072 #ifdef MYCROFT_HACK
2073 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2074 #endif
2075
2076 /* Are we mapping the same page ? */
2077 if (opa == pa) {
2078 /* All that can have changed is the protection or the wiring */
2079 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2080 va, pa));
2081
2082 /* Has the wiring or write status changed ? */
2083 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2084 pv = &vm_physmem[bank].pmseg.pvent[off];
2085 (void) pmap_modify_pv(pmap, va, pv,
2086 PT_Wr | PT_W, nflags);
2087 }
2088 } else {
2089 /* We are replacing the page with a new one. */
2090 cpu_cache_purgeID_rng(va, NBPG);
2091
2092 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2093 va, pa, opa));
2094
2095 /*
2096 * If it is part of our managed memory then we
2097 * must remove it from the PV list
2098 */
2099 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2100 pv = &vm_physmem[bank].pmseg.pvent[off];
2101 pmap_remove_pv(pmap, va, pv);
2102 }
2103
2104 goto enter;
2105 }
2106 } else {
2107 opa = 0;
2108 pmap_pte_addref(pmap, va);
2109
2110 /* pte is not valid so we must be hooking in a new page */
2111 ++pmap->pm_stats.resident_count;
2112
2113 enter:
2114 /*
2115 * Enter on the PV list if part of our managed memory
2116 */
2117 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2118 pv = &vm_physmem[bank].pmseg.pvent[off];
2119 pmap_enter_pv(pmap, va, pv, nflags);
2120 }
2121 }
2122
2123 #ifdef MYCROFT_HACK
2124 if (mycroft_hack)
2125 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2126 #endif
2127
2128 /* Construct the pte, giving the correct access. */
2129 npte = (pa & PG_FRAME);
2130
2131 /* VA 0 is magic. */
2132 if (pmap != pmap_kernel() && va != 0)
2133 npte |= PT_AP(AP_U);
2134
2135 if (bank != -1) {
2136 #ifdef DIAGNOSTIC
2137 if ((flags & VM_PROT_ALL) & ~prot)
2138 panic("pmap_enter: access_type exceeds prot");
2139 #endif
2140 npte |= PT_C | PT_B;
2141 if (flags & VM_PROT_WRITE) {
2142 npte |= L2_SPAGE | PT_AP(AP_W);
2143 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2144 } else if (flags & VM_PROT_ALL) {
2145 npte |= L2_SPAGE;
2146 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2147 } else
2148 npte |= L2_INVAL;
2149 } else {
2150 if (prot & VM_PROT_WRITE)
2151 npte |= L2_SPAGE | PT_AP(AP_W);
2152 else if (prot & VM_PROT_ALL)
2153 npte |= L2_SPAGE;
2154 else
2155 npte |= L2_INVAL;
2156 }
2157
2158 #ifdef MYCROFT_HACK
2159 if (mycroft_hack)
2160 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2161 #endif
2162
2163 *pte = npte;
2164
2165 if (bank != -1)
2166 pmap_vac_me_harder(pmap, pv);
2167
2168 /* Better flush the TLB ... */
2169 cpu_tlb_flushID_SE(va);
2170
2171 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2172
2173 return (KERN_SUCCESS);
2174 }
2175
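/*
 * void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 *
 * Enter a wired mapping of physical page pa at virtual address va in the
 * kernel pmap; implemented as a wrapper around pmap_enter() with PMAP_WIRED.
 */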
2176 void
2177 pmap_kenter_pa(va, pa, prot)
2178 vaddr_t va;
2179 paddr_t pa;
2180 vm_prot_t prot;
2181 {
2182 pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2183 }
2184
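/*
 * void pmap_kenter_pgs(vaddr_t va, struct vm_page **pgs, int npgs)
 *
 * Enter wired, read/write kernel mappings for the npgs pages in the
 * pgs array, starting at virtual address va.
 */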
2185 void
2186 pmap_kenter_pgs(va, pgs, npgs)
2187 vaddr_t va;
2188 struct vm_page **pgs;
2189 int npgs;
2190 {
2191 int i;
2192
2193 for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
2194 pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
2195 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
2196 }
2197 }
2198
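/*
 * void pmap_kremove(vaddr_t va, vsize_t len)
 *
 * Remove the kernel mappings covering the range [va, va + len),
 * one page at a time via pmap_remove().
 */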
2199 void
2200 pmap_kremove(va, len)
2201 vaddr_t va;
2202 vsize_t len;
2203 {
2204 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2205 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2206 }
2207 }
2208
2209 /*
2210 * pmap_page_protect:
2211 *
2212 * Lower the permissions for all mappings of a given page.
2213 */
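/*
 * For example, pmap_page_protect(pg, VM_PROT_READ) write-protects every
 * existing mapping of the page via pmap_copy_on_write(), while the
 * default case (e.g. VM_PROT_NONE) removes all mappings with
 * pmap_remove_all().
 */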
2214
2215 void
2216 pmap_page_protect(pg, prot)
2217 struct vm_page *pg;
2218 vm_prot_t prot;
2219 {
2220 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2221
2222 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2223
2224 switch(prot) {
2225 case VM_PROT_READ:
2226 case VM_PROT_READ|VM_PROT_EXECUTE:
2227 pmap_copy_on_write(pa);
2228 break;
2229
2230 case VM_PROT_ALL:
2231 break;
2232
2233 default:
2234 pmap_remove_all(pa);
2235 break;
2236 }
2237 }
2238
2239
2240 /*
2241 * Routine: pmap_unwire
2242 * Function: Clear the wired attribute for a map/virtual-address
2243 * pair.
2244 * In/out conditions:
2245 * The mapping must already exist in the pmap.
2246 */
2247
2248 void
2249 pmap_unwire(pmap, va)
2250 pmap_t pmap;
2251 vaddr_t va;
2252 {
2253 pt_entry_t *pte;
2254 paddr_t pa;
2255 int bank, off;
2256 struct pv_entry *pv;
2257
2258 /*
2259 * Make sure pmap is valid. -dct
2260 */
2261 if (pmap == NULL)
2262 return;
2263
2264 /* Get the pte */
2265 pte = pmap_pte(pmap, va);
2266 if (!pte)
2267 return;
2268
2269 /* Extract the physical address of the page */
2270 pa = pmap_pte_pa(pte);
2271
2272 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2273 return;
2274 pv = &vm_physmem[bank].pmseg.pvent[off];
2275 /* Update the wired bit in the pv entry for this page. */
2276 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2277 }
2278
2279 /*
2280 * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
2281 *
2282 * Return the pointer to a page table entry corresponding to the supplied
2283 * virtual address.
2284 *
2285 * The page directory is first checked to make sure that a page table
2286 * for the address in question exists and if it does a pointer to the
2287 * entry is returned.
2288 *
2289 * The way this works is that the page tables of the pmap are mapped into
2290 * the virtual address space: at PROCESS_PAGE_TBLS_BASE for the active pmap,
2291 * or temporarily at ALT_PAGE_TBLS_BASE otherwise. This allows PTEs to be located quickly.
2292 */
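/*
 * The linear map makes the PTE lookup a simple shift and mask: the byte
 * offset of the PTE for va within the table window is
 * (va >> (PGSHIFT - 2)) & ~3. For example, assuming 4KB pages
 * (PGSHIFT == 12), va = 0x00012000 is page 0x12 and its PTE lives at
 * byte offset 0x48 from the base of the window.
 */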
2293 pt_entry_t *
2294 pmap_pte(pmap, va)
2295 pmap_t pmap;
2296 vaddr_t va;
2297 {
2298 pt_entry_t *ptp;
2299 pt_entry_t *result;
2300
2301 /* The pmap must be valid */
2302 if (!pmap)
2303 return(NULL);
2304
2305 /* Return the address of the pte */
2306 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2307 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2308
2309 /* Do we have a valid pde ? If not we don't have a page table */
2310 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2311 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2312 pmap_pde(pmap, va)));
2313 return(NULL);
2314 }
2315
2316 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2317 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2318 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2319 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2320
2321 /*
2322 * If the pmap is the kernel pmap or the pmap is the active one
2323 * then we can just return a pointer to the entry relative to
2324 * PROCESS_PAGE_TBLS_BASE.
2325 * Otherwise we need to map the page tables to an alternative
2326 * address and reference them there.
2327 */
2328 if (pmap == kernel_pmap || pmap->pm_pptpt
2329 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2330 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2331 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2332 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2333 } else {
2334 struct proc *p = curproc;
2335
2336 /* If we don't have a valid curproc use proc0 */
2337 /* Perhaps we should just use kernel_pmap instead */
2338 if (p == NULL)
2339 p = &proc0;
2340 #ifdef DIAGNOSTIC
2341 /*
2342 * The pmap should always be valid for the process, so
2343 * drop into the debugger if it is not.
2344 */
2345 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2346 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2347 va, p, p->p_vmspace);
2348 console_debugger();
2349 }
2350 /*
2351 * The pmap for the current process should be mapped. If it
2352 * is not then we have a problem.
2353 */
2354 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2355 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2356 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2357 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2358 printf("pmap pagetable = P%08lx current = P%08x ",
2359 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2360 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2361 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2362 PG_FRAME));
2363 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2364 panic("pmap_pte: current and pmap mismatch\n");
2365 }
2366 #endif
2367
2368 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2369 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2370 pmap->pm_pptpt);
2371 cpu_tlb_flushD();
2372 }
2373 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2374 ((va >> (PGSHIFT-2)) & ~3)));
2375 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2376 return(result);
2377 }
2378
2379 /*
2380 * Routine: pmap_extract
2381 * Function:
2382 * Extract the physical page address associated
2383 * with the given map/virtual_address pair.
2384 */
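/*
 * Typical use (illustrative): look up the physical address backing a
 * kernel virtual address, if any.
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		return;		(no valid mapping at va)
 */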
2385 boolean_t
2386 pmap_extract(pmap, va, pap)
2387 pmap_t pmap;
2388 vaddr_t va;
2389 paddr_t *pap;
2390 {
2391 pt_entry_t *pte;
2392 paddr_t pa;
2393
2394 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2395
2396 /*
2397 * Get the pte for this virtual address. If there is no pte
2398 * then there is no L2 page table covering this address.
2399 */
2400
2401 pte = pmap_pte(pmap, va);
2402 if (!pte)
2403 return(FALSE);
2404
2405 /* Is the pte valid ? If not then no page is actually mapped here */
2406 if (!pmap_pte_v(pte))
2407 return(FALSE);
2408
2409 /* Return the physical address depending on the PTE type */
2410 /* XXX What about L1 section mappings ? */
2411 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2412 /* Extract the physical address from the pte */
2413 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2414
2415 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2416 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2417
2418 if (pap != NULL)
2419 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2420 return (TRUE);
2421 } else {
2422 /* Extract the physical address from the pte */
2423 pa = pmap_pte_pa(pte);
2424
2425 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2426 (pa | (va & ~PG_FRAME))));
2427
2428 if (pap != NULL)
2429 *pap = pa | (va & ~PG_FRAME);
2430 return (TRUE);
2431 }
2432 }
2433
2434
2435 /*
2436 * Copy the range specified by src_addr/len from the source map to the
2437 * range dst_addr/len in the destination map.
2438 *
2439 * This routine is only advisory and need not do anything.
2440 */
2441
2442 void
2443 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2444 pmap_t dst_pmap;
2445 pmap_t src_pmap;
2446 vaddr_t dst_addr;
2447 vsize_t len;
2448 vaddr_t src_addr;
2449 {
2450 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2451 dst_pmap, src_pmap, dst_addr, len, src_addr));
2452 }
2453
2454 #if defined(PMAP_DEBUG)
2455 void
2456 pmap_dump_pvlist(phys, m)
2457 vaddr_t phys;
2458 char *m;
2459 {
2460 struct pv_entry *pv;
2461 int bank, off;
2462
2463 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2464 printf("INVALID PA\n");
2465 return;
2466 }
2467 pv = &vm_physmem[bank].pmseg.pvent[off];
2468 printf("%s %08lx:", m, phys);
2469 if (pv->pv_pmap == NULL) {
2470 printf(" no mappings\n");
2471 return;
2472 }
2473
2474 for (; pv; pv = pv->pv_next)
2475 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2476 pv->pv_va, pv->pv_flags);
2477
2478 printf("\n");
2479 }
2480
2481 #endif /* PMAP_DEBUG */
2482
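/*
 * boolean_t pmap_testbit(paddr_t pa, int setbits)
 *
 * Return TRUE if any of the bits in setbits are set in the saved
 * attributes (PT_H/PT_M) for the page at physical address pa.
 */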
2483 boolean_t
2484 pmap_testbit(pa, setbits)
2485 paddr_t pa;
2486 int setbits;
2487 {
2488 int bank, off;
2489
2490 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2491
2492 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2493 return(FALSE);
2494
2495 /*
2496 * Check saved info only
2497 */
2498 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2499 PDEBUG(0, printf("pmap_attributes = %02x\n",
2500 vm_physmem[bank].pmseg.attrs[off]));
2501 return(TRUE);
2502 }
2503
2504 return(FALSE);
2505 }
2506
2507
2508 /*
2509 * Modify pte bits for all ptes corresponding to the given physical address.
2510 * We use `maskbits' rather than `clearbits' because we're always passing
2511 * constants and the latter would require an extra inversion at run-time.
2512 */
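/*
 * For example, pmap_clear_modify() below calls pmap_clearbit(pa, PT_M);
 * besides clearing the saved attribute this revokes write permission
 * (PT_AP(AP_W)) in every mapping of the page, so the next write will
 * fault and be caught by the modified-bit emulation.
 */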
2513
2514 void
2515 pmap_clearbit(pa, maskbits)
2516 paddr_t pa;
2517 int maskbits;
2518 {
2519 struct pv_entry *pv;
2520 pt_entry_t *pte;
2521 vaddr_t va;
2522 int bank, off;
2523 int s;
2524
2525 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2526 pa, maskbits));
2527 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2528 return;
2529 pv = &vm_physmem[bank].pmseg.pvent[off];
2530 s = splvm();
2531
2532 /*
2533 * Clear saved attributes (modify, reference)
2534 */
2535 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2536
2537 if (pv->pv_pmap == NULL) {
2538 splx(s);
2539 return;
2540 }
2541
2542 /*
2543 * Loop over all current mappings setting/clearing as appropriate
2544 */
2545 for (; pv; pv = pv->pv_next) {
2546 va = pv->pv_va;
2547
2548 /*
2549 * XXX don't write protect pager mappings
2550 */
2551 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2552 printf("pmap_clearbit: bogon alpha\n");
2553 continue;
2554 }
2555
2556 pv->pv_flags &= ~maskbits;
2557 pte = pmap_pte(pv->pv_pmap, va);
2558 if (maskbits & (PT_Wr|PT_M))
2559 *pte = *pte & ~PT_AP(AP_W);
2560 if (maskbits & PT_H)
2561 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2562 }
2563 cpu_tlb_flushID();
2564
2565 splx(s);
2566 }
2567
2568
2569 boolean_t
2570 pmap_clear_modify(pg)
2571 struct vm_page *pg;
2572 {
2573 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2574 boolean_t rv;
2575
2576 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2577 rv = pmap_testbit(pa, PT_M);
2578 pmap_clearbit(pa, PT_M);
2579 return rv;
2580 }
2581
2582
2583 boolean_t
2584 pmap_clear_reference(pg)
2585 struct vm_page *pg;
2586 {
2587 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2588 boolean_t rv;
2589
2590 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2591 rv = pmap_testbit(pa, PT_H);
2592 pmap_clearbit(pa, PT_H);
2593 return rv;
2594 }
2595
2596
2597 void
2598 pmap_copy_on_write(pa)
2599 paddr_t pa;
2600 {
2601 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2602 pmap_clearbit(pa, PT_Wr);
2603 }
2604
2605
2606 boolean_t
2607 pmap_is_modified(pg)
2608 struct vm_page *pg;
2609 {
2610 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2611 boolean_t result;
2612
2613 result = pmap_testbit(pa, PT_M);
2614 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2615 return (result);
2616 }
2617
2618
2619 boolean_t
2620 pmap_is_referenced(pg)
2621 struct vm_page *pg;
2622 {
2623 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2624 boolean_t result;
2625
2626 result = pmap_testbit(pa, PT_H);
2627 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2628 return (result);
2629 }
2630
2631
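/*
 * int pmap_modified_emulation(pmap_t pmap, vaddr_t va)
 *
 * The ARM MMU does not maintain modified bits in hardware. Pages whose
 * protection allows writing are not necessarily entered with a writable
 * PTE (see pmap_enter()), so the first write to such a page faults.
 * If the pv flags say the mapping should be writable, record PT_H|PT_M
 * in the page attributes, make the PTE writable and return 1; otherwise
 * return 0 and let the caller treat it as a genuine fault.
 */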
2632 int
2633 pmap_modified_emulation(pmap, va)
2634 pmap_t pmap;
2635 vaddr_t va;
2636 {
2637 pt_entry_t *pte;
2638 paddr_t pa;
2639 int bank, off;
2640 struct pv_entry *pv;
2641 u_int flags;
2642
2643 PDEBUG(2, printf("pmap_modified_emulation\n"));
2644
2645 /* Get the pte */
2646 pte = pmap_pte(pmap, va);
2647 if (!pte) {
2648 PDEBUG(2, printf("no pte\n"));
2649 return(0);
2650 }
2651
2652 PDEBUG(1, printf("*pte=%08x\n", *pte));
2653
2654 /* Check for a zero pte */
2655 if (*pte == 0)
2656 return(0);
2657
2658 /* This can happen if user code tries to access kernel memory. */
2659 if ((*pte & PT_AP(AP_W)) != 0)
2660 return (0);
2661
2662 /* Extract the physical address of the page */
2663 pa = pmap_pte_pa(pte);
2664 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2665 return(0);
2666
2667 /* Get the current flags for this page. */
2668 pv = &vm_physmem[bank].pmseg.pvent[off];
2669 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2670 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2671
2672 /*
2673 * Do the flags say this page is writable ? If not then it is a
2674 * genuine write fault. If so then the write fault is our fault,
2675 * as we did not reflect the write permission in the PTE. Now that
2676 * we know a write has occurred we can correct this and also set
2677 * the modified bit.
2678 */
2679 if (~flags & PT_Wr)
2680 return(0);
2681
2682 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2683 va, pte, *pte));
2684 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2685 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2686 PDEBUG(0, printf("->(%08x)\n", *pte));
2687
2688 /* Return, indicating the problem has been dealt with */
2689 cpu_tlb_flushID_SE(va);
2690 return(1);
2691 }
2692
2693
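/*
 * int pmap_handled_emulation(pmap_t pmap, vaddr_t va)
 *
 * Referenced-bit emulation: pages may be entered with the PTE type set
 * to L2_INVAL so that the first access faults (see pmap_enter()). On
 * such a fault, record PT_H in the page attributes, switch the PTE to a
 * valid small page and return 1; return 0 if this was not such a mapping.
 */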
2694 int
2695 pmap_handled_emulation(pmap, va)
2696 pmap_t pmap;
2697 vaddr_t va;
2698 {
2699 pt_entry_t *pte;
2700 paddr_t pa;
2701 int bank, off;
2702
2703 PDEBUG(2, printf("pmap_handled_emulation\n"));
2704
2705 /* Get the pte */
2706 pte = pmap_pte(pmap, va);
2707 if (!pte) {
2708 PDEBUG(2, printf("no pte\n"));
2709 return(0);
2710 }
2711
2712 PDEBUG(1, printf("*pte=%08x\n", *pte));
2713
2714 /* Check for a zero pte */
2715 if (*pte == 0)
2716 return(0);
2717
2718 /* This can happen if user code tries to access kernel memory. */
2719 if ((*pte & L2_MASK) != L2_INVAL)
2720 return (0);
2721
2722 /* Extract the physical address of the page */
2723 pa = pmap_pte_pa(pte);
2724 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2725 return(0);
2726
2727 /*
2728 * OK, we just enable the pte and mark the attributes as handled.
2729 */
2730 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2731 va, pte, *pte));
2732 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2733 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2734 PDEBUG(0, printf("->(%08x)\n", *pte));
2735
2736 /* Return, indicating the problem has been dealt with */
2737 cpu_tlb_flushID_SE(va);
2738 return(1);
2739 }
2740
2741 /*
2742 * pmap_collect: free resources held by a pmap
2743 *
2744 * => optional function.
2745 * => called when a process is swapped out to free memory.
2746 */
2747
2748 void
2749 pmap_collect(pmap)
2750 pmap_t pmap;
2751 {
2752 }
2753
2754 /*
2755 * Routine: pmap_procwr
2756 *
2757 * Function:
2758 * Synchronize caches corresponding to [va, va+len) in process p.
2759 *
2760 */
2761 void
2762 pmap_procwr(p, va, len)
2763 struct proc *p;
2764 vaddr_t va;
2765 u_long len;
2766 {
2767 /* We only need to do anything if it is the current process. */
2768 if (p == curproc)
2769 cpu_cache_syncI_rng(va, len);
2770 }
2771
2772 /* End of pmap.c */
2773