/*	$NetBSD: pmap.c,v 1.1 2001/03/04 05:08:09 matt Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil (at) causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG - Build in pmap_debug_level code
 */

94 /* Include header files */
95
96 #include "opt_pmap_debug.h"
97 #include "opt_ddb.h"
98
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/kernel.h>
102 #include <sys/systm.h>
103 #include <sys/proc.h>
104 #include <sys/malloc.h>
105 #include <sys/user.h>
106
107 #include <uvm/uvm.h>
108
109 #include <machine/bootconfig.h>
110 #include <machine/bus.h>
111 #include <machine/pmap.h>
112 #include <machine/pcb.h>
113 #include <machine/param.h>
114 #include <machine/katelib.h>
115
116 #ifdef PMAP_DEBUG
117 #define PDEBUG(_lev_,_stat_) \
118 if (pmap_debug_level >= (_lev_)) \
119 ((_stat_))
120 int pmap_debug_level = -2;
121 #else /* PMAP_DEBUG */
122 #define PDEBUG(_lev_,_stat_) /* Nothing */
123 #endif /* PMAP_DEBUG */
124
125 struct pmap kernel_pmap_store;
126 pmap_t kernel_pmap;
127
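/*
 * Page hooks: two reserved kernel virtual addresses (and pointers to
 * their PTEs) used to temporarily map physical pages for zeroing
 * (pmap_zero_page) and copying (pmap_copy_page).
 */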
128 pagehook_t page_hook0;
129 pagehook_t page_hook1;
130 char *memhook;
131 pt_entry_t msgbufpte;
132 extern caddr_t msgbufaddr;
133
134 #ifdef DIAGNOSTIC
135 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
136 #endif
137
138 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
139
140 int pv_nfree = 0;
141
142 vm_size_t npages;
143
144 extern vaddr_t physical_start;
145 extern vaddr_t physical_freestart;
146 extern vaddr_t physical_end;
147 extern vaddr_t physical_freeend;
148 extern unsigned int free_pages;
149 extern int max_processes;
150
151 vaddr_t virtual_start;
152 vaddr_t virtual_end;
153
154 vaddr_t avail_start;
155 vaddr_t avail_end;
156
157 extern pv_addr_t systempage;
158
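/*
 * Carve a page hook out of the kernel virtual space being handed out
 * during bootstrap: record the virtual address and the kernel PTE that
 * maps it, then advance virtual_start by s bytes.
 */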
159 #define ALLOC_PAGE_HOOK(x, s) \
160 x.va = virtual_start; \
161 x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
162 virtual_start += s;
163
164 /* Variables used by the L1 page table queue code */
165 SIMPLEQ_HEAD(l1pt_queue, l1pt);
166 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
167 int l1pt_static_queue_count; /* items in the static l1 queue */
168 int l1pt_static_create_count; /* static l1 items created */
169 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
170 int l1pt_queue_count; /* items in the l1 queue */
171 int l1pt_create_count; /* stat - L1's create count */
172 int l1pt_reuse_count; /* stat - L1's reused count */
173
174 /* Local function prototypes (not used outside this file) */
175 pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
176 int pmap_page_index __P((vaddr_t pa));
177 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
178 vaddr_t pa, unsigned int flags));
179 void pmap_copy_on_write __P((vaddr_t pa));
180 void pmap_pinit __P((pmap_t));
181 void pmap_release __P((pmap_t));
182
183 /* Other function prototypes */
184 extern void bzero_page __P((vaddr_t));
185 extern void bcopy_page __P((vaddr_t, vaddr_t));
186
187 struct l1pt *pmap_alloc_l1pt __P((void));
188 static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
189 vaddr_t l2pa));
190
191 #ifdef MYCROFT_HACK
192 int mycroft_hack = 0;
193 #endif
194
195 /* Function to set the debug level of the pmap code */
196
197 #ifdef PMAP_DEBUG
198 void
199 pmap_debug(level)
200 int level;
201 {
202 pmap_debug_level = level;
203 printf("pmap_debug: level=%d\n", pmap_debug_level);
204 }
205 #endif /* PMAP_DEBUG */
206
207 #include "isadma.h"
208
209 #if NISADMA > 0
210 /*
211 * Used to protect memory for ISA DMA bounce buffers. If, when loading
212 * pages into the system, memory intersects with any of these ranges,
213 * the intersecting memory will be loaded into a lower-priority free list.
214 */
215 bus_dma_segment_t *pmap_isa_dma_ranges;
216 int pmap_isa_dma_nranges;
217
218 boolean_t pmap_isa_dma_range_intersect __P((vaddr_t, vm_size_t,
219 vaddr_t *, vm_size_t *));
220
221 /*
222 * Check if a memory range intersects with an ISA DMA range, and
223 * return the page-rounded intersection if it does. The intersection
224 * will be placed on a lower-priority free list.
225 */
226 boolean_t
227 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
228 vaddr_t pa;
229 vm_size_t size;
230 vaddr_t *pap;
231 vm_size_t *sizep;
232 {
233 bus_dma_segment_t *ds;
234 int i;
235
236 if (pmap_isa_dma_ranges == NULL)
237 return (FALSE);
238
239 for (i = 0, ds = pmap_isa_dma_ranges;
240 i < pmap_isa_dma_nranges; i++, ds++) {
241 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
242 /*
243 * Beginning of region intersects with this range.
244 */
245 *pap = trunc_page(pa);
246 *sizep = round_page(min(pa + size,
247 ds->ds_addr + ds->ds_len) - pa);
248 return (TRUE);
249 }
250 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
251 /*
252 * End of region intersects with this range.
253 */
254 *pap = trunc_page(ds->ds_addr);
255 *sizep = round_page(min((pa + size) - ds->ds_addr,
256 ds->ds_len));
257 return (TRUE);
258 }
259 }
260
261 /*
262 * No intersection found.
263 */
264 return (FALSE);
265 }
266 #endif /* NISADMA > 0 */
267
/*
 * Functions for manipulating pv_entry structures.  These are used to keep a
 * record of the mappings of virtual addresses and the associated physical
 * pages.
 */
273
274 /*
275 * Allocate a new pv_entry structure from the freelist. If the list is
276 * empty allocate a new page and fill the freelist.
277 */
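/*
 * pv_entry structures are allocated NBPG bytes at a time.  Each pv_page
 * starts with a small header (pvp_pgi) followed by NPVPPG pv_entry slots;
 * when a fresh page is allocated the first slot is handed straight back
 * to the caller and the remaining slots are strung onto the page's
 * private freelist.
 */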
278 struct pv_entry *
279 pmap_alloc_pv()
280 {
281 struct pv_page *pvp;
282 struct pv_entry *pv;
283 int i;
284
285 /*
286 * Do we have any free pv_entry structures left ?
287 * If not allocate a page of them
288 */
289
290 if (pv_nfree == 0) {
291 /* NOTE: can't lock kernel_map here */
292 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
293 if (pvp == 0)
294 panic("pmap_alloc_pv: kmem_alloc() failed");
295 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
296 for (i = NPVPPG - 2; i; i--, pv++)
297 pv->pv_next = pv + 1;
298 pv->pv_next = 0;
299 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
300 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
301 pv = &pvp->pvp_pv[0];
302 } else {
303 --pv_nfree;
304 pvp = pv_page_freelist.tqh_first;
305 if (--pvp->pvp_pgi.pgi_nfree == 0) {
306 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
307 }
308 pv = pvp->pvp_pgi.pgi_freelist;
309 #ifdef DIAGNOSTIC
310 if (pv == 0)
311 panic("pmap_alloc_pv: pgi_nfree inconsistent");
312 #endif /* DIAGNOSTIC */
313 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
314 }
315 return pv;
316 }
317
318 /*
319 * Release a pv_entry structure putting it back on the freelist.
320 */
321
322 void
323 pmap_free_pv(pv)
324 struct pv_entry *pv;
325 {
326 struct pv_page *pvp;
327
328 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
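	/*
	 * The page's new free count tells us what to do:
	 *   1       - the page was full, so put it back on the freelist
	 *             (and fall through to chain the entry);
	 *   NPVPPG  - every entry in the page is now free, release the page;
	 *   default - just chain the entry onto the page's own freelist.
	 */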
329 switch (++pvp->pvp_pgi.pgi_nfree) {
330 case 1:
331 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
332 default:
333 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
334 pvp->pvp_pgi.pgi_freelist = pv;
335 ++pv_nfree;
336 break;
337 case NPVPPG:
338 pv_nfree -= NPVPPG - 1;
339 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
340 FREE((vaddr_t)pvp, M_VMPVENT);
341 break;
342 }
343 }
344
345 #if 0
346 void
347 pmap_collect_pv()
348 {
349 struct pv_page_list pv_page_collectlist;
350 struct pv_page *pvp, *npvp;
351 struct pv_entry *ph, *ppv, *pv, *npv;
352 int s;
353
354 TAILQ_INIT(&pv_page_collectlist);
355
356 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
357 if (pv_nfree < NPVPPG)
358 break;
359 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
360 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
361 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
362 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
363 pvp_pgi.pgi_list);
364 pv_nfree -= NPVPPG;
365 pvp->pvp_pgi.pgi_nfree = -1;
366 }
367 }
368
369 if (pv_page_collectlist.tqh_first == 0)
370 return;
371
372 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
373 if (ph->pv_pmap == 0)
374 continue;
375 s = splvm();
376 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
377 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
378 if (pvp->pvp_pgi.pgi_nfree == -1) {
379 pvp = pv_page_freelist.tqh_first;
380 if (--pvp->pvp_pgi.pgi_nfree == 0) {
381 TAILQ_REMOVE(&pv_page_freelist,
382 pvp, pvp_pgi.pgi_list);
383 }
384 npv = pvp->pvp_pgi.pgi_freelist;
385 #ifdef DIAGNOSTIC
386 if (npv == 0)
387 panic("pmap_collect_pv: pgi_nfree inconsistent");
388 #endif /* DIAGNOSTIC */
389 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
390 *npv = *pv;
391 ppv->pv_next = npv;
392 ppv = npv;
393 } else
394 ppv = pv;
395 }
396 splx(s);
397 }
398
399 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
400 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
401 FREE((vaddr_t)pvp, M_VMPVENT);
402 }
403 }
404 #endif
405
406 /*
407 * Enter a new physical-virtual mapping into the pv table
408 */
409
410 /*__inline*/ void
411 pmap_enter_pv(pmap, va, pv, flags)
412 pmap_t pmap;
413 vaddr_t va;
414 struct pv_entry *pv;
415 u_int flags;
416 {
417 struct pv_entry *npv;
418 u_int s;
419
420 #ifdef DIAGNOSTIC
421 if (!pmap_initialized)
422 panic("pmap_enter_pv: !pmap_initialized");
423 #endif
424
425 s = splvm();
426
427 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
428 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
429
430 if (pv->pv_pmap == NULL) {
431 /*
432 * No entries yet, use header as the first entry
433 */
434 pv->pv_va = va;
435 pv->pv_pmap = pmap;
436 pv->pv_next = NULL;
437 pv->pv_flags = flags;
438 } else {
439 /*
440 * There is at least one other VA mapping this page.
441 * Place this entry after the header.
442 */
443 #ifdef PMAP_DEBUG
444 for (npv = pv; npv; npv = npv->pv_next)
445 if (pmap == npv->pv_pmap && va == npv->pv_va)
446 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
447 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
448 #endif
449 npv = pmap_alloc_pv();
450 npv->pv_va = va;
451 npv->pv_pmap = pmap;
452 npv->pv_flags = flags;
453 npv->pv_next = pv->pv_next;
454 pv->pv_next = npv;
455 }
456
457 if (flags & PT_W)
458 ++pmap->pm_stats.wired_count;
459
460 splx(s);
461 }
462
463
464 /*
465 * Remove a physical-virtual mapping from the pv table
466 */
467
468 /*__inline*/ void
469 pmap_remove_pv(pmap, va, pv)
470 pmap_t pmap;
471 vaddr_t va;
472 struct pv_entry *pv;
473 {
474 struct pv_entry *npv;
475 u_int s;
476 u_int flags = 0;
477
478 #ifdef DIAGNOSTIC
479 if (!pmap_initialized)
480 panic("pmap_remove_pv: !pmap_initialized");
481 #endif
482
483 s = splvm();
484
485 /*
486 * If it is the first entry on the list, it is actually
487 * in the header and we must copy the following entry up
488 * to the header. Otherwise we must search the list for
489 * the entry. In either case we free the now unused entry.
490 */
491
492 if (pmap == pv->pv_pmap && va == pv->pv_va) {
493 npv = pv->pv_next;
494 if (npv) {
495 *pv = *npv;
496 flags = npv->pv_flags;
497 pmap_free_pv(npv);
498 } else {
499 flags = pv->pv_flags;
500 pv->pv_pmap = NULL;
501 }
502 } else {
503 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
504 if (pmap == npv->pv_pmap && va == npv->pv_va)
505 break;
506 }
507 if (npv) {
508 pv->pv_next = npv->pv_next;
509 flags = npv->pv_flags;
510 pmap_free_pv(npv);
511 } else
512 panic("pmap_remove_pv: lost entry");
513 }
514
515 if (flags & PT_W)
516 --pmap->pm_stats.wired_count;
517
518 splx(s);
519 }
520
521 /*
522 * Modify a physical-virtual mapping in the pv table
523 */
524
525 /*__inline */ u_int
526 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
527 pmap_t pmap;
528 vaddr_t va;
529 struct pv_entry *pv;
530 u_int bic_mask;
531 u_int eor_mask;
532 {
533 struct pv_entry *npv;
534 u_int s;
535 u_int flags, oflags;
536
537 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
538 pmap, va, pv, bic_mask, eor_mask));
539
540 #ifdef DIAGNOSTIC
541 if (!pmap_initialized)
542 panic("pmap_modify_pv: !pmap_initialized");
543 #endif
544
545 s = splvm();
546
547 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
548 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
549
550 /*
551 * There is at least one VA mapping this page.
552 */
553
554 for (npv = pv; npv; npv = npv->pv_next) {
555 if (pmap == npv->pv_pmap && va == npv->pv_va) {
556 oflags = npv->pv_flags;
557 npv->pv_flags = flags =
558 ((oflags & ~bic_mask) ^ eor_mask);
559 if ((flags ^ oflags) & PT_W) {
560 if (flags & PT_W)
561 ++pmap->pm_stats.wired_count;
562 else
563 --pmap->pm_stats.wired_count;
564 }
565 PDEBUG(0, printf("done flags=%08x\n", flags));
566 splx(s);
567 return (oflags);
568 }
569 }
570
571 PDEBUG(0, printf("done.\n"));
572 splx(s);
573 return (0);
574 }
575
576
577 /*
578 * Map the specified level 2 pagetable into the level 1 page table for
579 * the given pmap to cover a chunk of virtual address space starting from the
580 * address specified.
581 */
582 static /*__inline*/ void
583 pmap_map_in_l1(pmap, va, l2pa)
584 pmap_t pmap;
585 vaddr_t va, l2pa;
586 {
587 vaddr_t ptva;
588
589 /* Calculate the index into the L1 page table. */
590 ptva = (va >> PDSHIFT) & ~3;
591
592 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
593 pmap->pm_pdir, L1_PTE(l2pa), ptva));
594
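	/*
	 * Each L1 descriptor points to a 1KB coarse L2 table covering 1MB
	 * of virtual space, so the single 4KB page of L2 entries is wired
	 * in as four consecutive L1 descriptors.
	 */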
595 /* Map page table into the L1. */
596 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
597 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
598 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
599 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
600
601 PDEBUG(0, printf("pt self reference %lx in %lx\n",
602 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
603
604 /* Map the page table into the page table area. */
605 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
606
607 /* XXX should be a purge */
608 /* cpu_tlb_flushD();*/
609 }
610
611 #if 0
612 static /*__inline*/ void
613 pmap_unmap_in_l1(pmap, va)
614 pmap_t pmap;
615 vaddr_t va;
616 {
617 vaddr_t ptva;
618
619 /* Calculate the index into the L1 page table. */
620 ptva = (va >> PDSHIFT) & ~3;
621
622 /* Unmap page table from the L1. */
623 pmap->pm_pdir[ptva + 0] = 0;
624 pmap->pm_pdir[ptva + 1] = 0;
625 pmap->pm_pdir[ptva + 2] = 0;
626 pmap->pm_pdir[ptva + 3] = 0;
627
628 /* Unmap the page table from the page table area. */
629 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
630
631 /* XXX should be a purge */
632 /* cpu_tlb_flushD();*/
633 }
634 #endif
635
636
637 /*
638 * Used to map a range of physical addresses into kernel
639 * virtual address space.
640 *
641 * For now, VM is already on, we only need to map the
642 * specified memory.
643 */
644 vaddr_t
645 pmap_map(va, spa, epa, prot)
646 vaddr_t va, spa, epa;
647 int prot;
648 {
649 while (spa < epa) {
650 pmap_enter(pmap_kernel(), va, spa, prot, 0);
651 va += NBPG;
652 spa += NBPG;
653 }
654 return(va);
655 }
656
657
/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * Bootstrap the pmap system.  This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated,
 * and also allocates virtual addresses for certain page hooks.
 * The page hooks are used to zero and copy physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */
670 extern vaddr_t physical_freestart;
671 extern vaddr_t physical_freeend;
672
673 struct pv_entry *boot_pvent;
674 char *boot_attrs;
675
676 void
677 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
678 pd_entry_t *kernel_l1pt;
679 pv_addr_t kernel_ptpt;
680 {
681 int loop;
682 vaddr_t start, end;
683 #if NISADMA > 0
684 vaddr_t istart;
685 vm_size_t isize;
686 #endif
687 vsize_t size;
688
689 kernel_pmap = &kernel_pmap_store;
690
691 kernel_pmap->pm_pdir = kernel_l1pt;
692 kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
693 kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
694 simple_lock_init(&kernel_pmap->pm_lock);
695 kernel_pmap->pm_count = 1;
696
697 /*
698 * Initialize PAGE_SIZE-dependent variables.
699 */
700 uvm_setpagesize();
701
702 npages = 0;
703 loop = 0;
704 while (loop < bootconfig.dramblocks) {
705 start = (vaddr_t)bootconfig.dram[loop].address;
706 end = start + (bootconfig.dram[loop].pages * NBPG);
707 if (start < physical_freestart)
708 start = physical_freestart;
709 if (end > physical_freeend)
710 end = physical_freeend;
711 #if 0
712 printf("%d: %lx -> %lx\n", loop, start, end - 1);
713 #endif
714 #if NISADMA > 0
715 if (pmap_isa_dma_range_intersect(start, end - start,
716 &istart, &isize)) {
717 /*
718 * Place the pages that intersect with the
719 * ISA DMA range onto the ISA DMA free list.
720 */
721 #if 0
722 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
723 istart + isize - 1);
724 #endif
725 uvm_page_physload(atop(istart),
726 atop(istart + isize), atop(istart),
727 atop(istart + isize), VM_FREELIST_ISADMA);
728 npages += atop(istart + isize) - atop(istart);
729
730 /*
731 * Load the pieces that come before
732 * the intersection into the default
733 * free list.
734 */
735 if (start < istart) {
736 #if 0
737 printf(" BEFORE 0x%lx -> 0x%lx\n",
738 start, istart - 1);
739 #endif
740 uvm_page_physload(atop(start),
741 atop(istart), atop(start),
742 atop(istart), VM_FREELIST_DEFAULT);
743 npages += atop(istart) - atop(start);
744 }
745
746 /*
747 * Load the pieces that come after
748 * the intersection into the default
749 * free list.
750 */
751 if ((istart + isize) < end) {
752 #if 0
753 printf(" AFTER 0x%lx -> 0x%lx\n",
754 (istart + isize), end - 1);
755 #endif
756 uvm_page_physload(atop(istart + isize),
757 atop(end), atop(istart + isize),
758 atop(end), VM_FREELIST_DEFAULT);
759 npages += atop(end) - atop(istart + isize);
760 }
761 } else {
762 uvm_page_physload(atop(start), atop(end),
763 atop(start), atop(end), VM_FREELIST_DEFAULT);
764 npages += atop(end) - atop(start);
765 }
766 #else /* NISADMA > 0 */
767 uvm_page_physload(atop(start), atop(end),
768 atop(start), atop(end), VM_FREELIST_DEFAULT);
769 npages += atop(end) - atop(start);
770 #endif /* NISADMA > 0 */
771 ++loop;
772 }
773
774 #ifdef MYCROFT_HACK
775 printf("npages = %ld\n", npages);
776 #endif
777
778 virtual_start = KERNEL_VM_BASE;
779 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
780
781 ALLOC_PAGE_HOOK(page_hook0, NBPG);
782 ALLOC_PAGE_HOOK(page_hook1, NBPG);
783
784 /*
785 * The mem special device needs a virtual hook but we don't
786 * need a pte
787 */
788 memhook = (char *)virtual_start;
789 virtual_start += NBPG;
790
791 msgbufaddr = (caddr_t)virtual_start;
792 msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
793 virtual_start += round_page(MSGBUFSIZE);
794
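	/*
	 * Steal boot-time memory for one pv_entry and one attribute byte
	 * per managed physical page; pmap_init() later parcels these out
	 * amongst the physical segments.
	 */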
795 size = npages * sizeof(struct pv_entry);
796 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
797 bzero(boot_pvent, size);
798 size = npages * sizeof(char);
799 boot_attrs = (char *)uvm_pageboot_alloc(size);
800 bzero(boot_attrs, size);
801
802 cpu_cache_cleanD();
803 }
804
805 /*
806 * void pmap_init(void)
807 *
808 * Initialize the pmap module.
809 * Called by vm_init() in vm/vm_init.c in order to initialise
810 * any structures that the pmap system needs to map virtual memory.
811 */
812
813 extern int physmem;
814
815 void
816 pmap_init()
817 {
818 int lcv;
819
820 #ifdef MYCROFT_HACK
821 printf("physmem = %d\n", physmem);
822 #endif
823
824 /*
825 * Set the available memory vars - These do not map to real memory
826 * addresses and cannot as the physical memory is fragmented.
827 * They are used by ps for %mem calculations.
828 * One could argue whether this should be the entire memory or just
829 * the memory that is useable in a user process.
830 */
831 avail_start = 0;
832 avail_end = physmem * NBPG;
833
834 /* Set up pmap info for physsegs. */
835 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
836 vm_physmem[lcv].pmseg.pvent = boot_pvent;
837 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
838 vm_physmem[lcv].pmseg.attrs = boot_attrs;
839 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
840 }
841 #ifdef MYCROFT_HACK
842 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
843 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
844 lcv,
845 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
846 vm_physmem[lcv].start, vm_physmem[lcv].end);
847 }
848 #endif
849 TAILQ_INIT(&pv_page_freelist);
850
851 #ifdef DIAGNOSTIC
852 /* Now it is safe to enable pv_entry recording. */
853 pmap_initialized = TRUE;
854 #endif
855
856 /* Initialise our L1 page table queues and counters */
857 SIMPLEQ_INIT(&l1pt_static_queue);
858 l1pt_static_queue_count = 0;
859 l1pt_static_create_count = 0;
860 SIMPLEQ_INIT(&l1pt_queue);
861 l1pt_queue_count = 0;
862 l1pt_create_count = 0;
863 l1pt_reuse_count = 0;
864 }
865
/*
 * pmap_postinit()
 *
 * This routine is called after the vm and kmem subsystems have been
 * initialised.  This allows the pmap code to perform any initialisation
 * that can only be done once the memory allocation is in place.
 */
873
874 void
875 pmap_postinit()
876 {
877 int loop;
878 struct l1pt *pt;
879
880 #ifdef PMAP_STATIC_L1S
881 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
882 #else /* PMAP_STATIC_L1S */
883 for (loop = 0; loop < max_processes; ++loop) {
884 #endif /* PMAP_STATIC_L1S */
885 /* Allocate a L1 page table */
886 pt = pmap_alloc_l1pt();
887 if (!pt)
888 panic("Cannot allocate static L1 page tables\n");
889
890 /* Clean it */
891 bzero((void *)pt->pt_va, PD_SIZE);
892 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
893 /* Add the page table to the queue */
894 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
895 ++l1pt_static_queue_count;
896 ++l1pt_static_create_count;
897 }
898 }
899
900
/*
 * pmap_create()
 *
 * Create and return a physical map that may be referenced by the
 * hardware.
 */
910
911 pmap_t
912 pmap_create()
913 {
914 pmap_t pmap;
915
916 /* Allocate memory for pmap structure and zero it */
917 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
918 bzero(pmap, sizeof(*pmap));
919
920 /* Now init the machine part of the pmap */
921 pmap_pinit(pmap);
922 return(pmap);
923 }
924
925 /*
926 * pmap_alloc_l1pt()
927 *
 * This routine allocates physical and virtual memory for an L1 page table
 * and wires it.
 * An l1pt structure is returned to describe the allocated page table.
931 *
932 * This routine is allowed to fail if the required memory cannot be allocated.
933 * In this case NULL is returned.
934 */
935
936 struct l1pt *
937 pmap_alloc_l1pt(void)
938 {
939 vaddr_t va, pa;
940 struct l1pt *pt;
941 int error;
942 vm_page_t m;
943 pt_entry_t *pte;
944
945 /* Allocate virtual address space for the L1 page table */
946 va = uvm_km_valloc(kernel_map, PD_SIZE);
947 if (va == 0) {
948 #ifdef DIAGNOSTIC
949 printf("pmap: Cannot allocate pageable memory for L1\n");
950 #endif /* DIAGNOSTIC */
951 return(NULL);
952 }
953
954 /* Allocate memory for the l1pt structure */
955 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
956
957 /*
958 * Allocate pages from the VM system.
959 */
960 TAILQ_INIT(&pt->pt_plist);
961 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
962 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
963 if (error) {
964 #ifdef DIAGNOSTIC
965 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
966 error);
967 #endif /* DIAGNOSTIC */
968 /* Release the resources we already have claimed */
969 free(pt, M_VMPMAP);
970 uvm_km_free(kernel_map, va, PD_SIZE);
971 return(NULL);
972 }
973
974 /* Map our physical pages into our virtual space */
975 pt->pt_va = va;
976 m = pt->pt_plist.tqh_first;
977 while (m && va < (pt->pt_va + PD_SIZE)) {
978 pa = VM_PAGE_TO_PHYS(m);
979
980 pmap_enter(pmap_kernel(), va, pa,
981 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
982
983 /* Revoke cacheability and bufferability */
984 /* XXX should be done better than this */
985 pte = pmap_pte(pmap_kernel(), va);
986 *pte = *pte & ~(PT_C | PT_B);
987
988 va += NBPG;
989 m = m->pageq.tqe_next;
990 }
991
992 #ifdef DIAGNOSTIC
993 if (m)
994 panic("pmap_alloc_l1pt: pglist not empty\n");
995 #endif /* DIAGNOSTIC */
996
997 pt->pt_flags = 0;
998 return(pt);
999 }
1000
1001 /*
 * Free an L1 page table previously allocated with pmap_alloc_l1pt().
1003 */
1004 void
1005 pmap_free_l1pt(pt)
1006 struct l1pt *pt;
1007 {
	/* Separate the physical memory from the virtual space */
1009 pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
1010
1011 /* Return the physical memory */
1012 uvm_pglistfree(&pt->pt_plist);
1013
1014 /* Free the virtual space */
1015 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1016
1017 /* Free the l1pt structure */
1018 free(pt, M_VMPMAP);
1019 }
1020
1021 /*
1022 * Allocate a page directory.
1023 * This routine will either allocate a new page directory from the pool
1024 * of L1 page tables currently held by the kernel or it will allocate
1025 * a new one via pmap_alloc_l1pt().
1026 * It will then initialise the l1 page table for use.
1027 */
1028 int
1029 pmap_allocpagedir(pmap)
1030 struct pmap *pmap;
1031 {
1032 vaddr_t pa;
1033 struct l1pt *pt;
1034 pt_entry_t *pte;
1035
1036 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1037
1038 /* Do we have any spare L1's lying around ? */
1039 if (l1pt_static_queue_count) {
1040 --l1pt_static_queue_count;
1041 pt = l1pt_static_queue.sqh_first;
1042 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1043 } else if (l1pt_queue_count) {
1044 --l1pt_queue_count;
1045 pt = l1pt_queue.sqh_first;
1046 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1047 ++l1pt_reuse_count;
1048 } else {
1049 pt = pmap_alloc_l1pt();
1050 if (!pt)
1051 return(ENOMEM);
1052 ++l1pt_create_count;
1053 }
1054
1055 /* Store the pointer to the l1 descriptor in the pmap. */
1056 pmap->pm_l1pt = pt;
1057
1058 /* Get the physical address of the start of the l1 */
1059 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1060
1061 /* Store the virtual address of the l1 in the pmap. */
1062 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1063
1064 /* Clean the L1 if it is dirty */
1065 if (!(pt->pt_flags & PTFLAG_CLEAN))
1066 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1067
1068 /* Do we already have the kernel mappings ? */
1069 if (!(pt->pt_flags & PTFLAG_KPT)) {
1070 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1071
1072 bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1073 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1074 KERNEL_PD_SIZE);
1075 pt->pt_flags |= PTFLAG_KPT;
1076 }
1077
1078 /* Allocate a page table to map all the page tables for this pmap */
1079
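	/*
	 * pm_vptpt is the kernel virtual address and pm_pptpt the physical
	 * address of this "page table of page tables"; it is wired at
	 * PROCESS_PAGE_TBLS_BASE below so that all of the pmap's L2 tables
	 * appear at a fixed virtual address.
	 */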
1080 #ifdef DIAGNOSTIC
1081 if (pmap->pm_vptpt) {
1082 /* XXX What if we have one already ? */
1083 panic("pmap_allocpagedir: have pt already\n");
1084 }
1085 #endif /* DIAGNOSTIC */
1086 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1087 (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
1088 pmap->pm_pptpt &= PG_FRAME;
1089 /* Revoke cacheability and bufferability */
1090 /* XXX should be done better than this */
1091 pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
1092 *pte = *pte & ~(PT_C | PT_B);
1093
1094 /* Wire in this page table */
1095 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1096
1097 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1098
1099 /*
1100 * Map the kernel page tables for 0xf0000000 +
1101 * into the page table used to map the
1102 * pmap's page tables
1103 */
1104 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1105 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1106 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1107 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1108 (KERNEL_PD_SIZE >> 2));
1109
1110 pmap->pm_count = 1;
1111 simple_lock_init(&pmap->pm_lock);
1112
1113 return(0);
1114 }
1115
1116
1117 /*
1118 * Initialize a preallocated and zeroed pmap structure,
1119 * such as one in a vmspace structure.
1120 */
1121
1122 static int pmap_pagedir_ident; /* tsleep() ident */
1123
1124 void
1125 pmap_pinit(pmap)
1126 struct pmap *pmap;
1127 {
1128 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1129
1130 /* Keep looping until we succeed in allocating a page directory */
1131 while (pmap_allocpagedir(pmap) != 0) {
1132 /*
1133 * Ok we failed to allocate a suitable block of memory for an
1134 * L1 page table. This means that either:
1135 * 1. 16KB of virtual address space could not be allocated
1136 * 2. 16KB of physically contiguous memory on a 16KB boundary
1137 * could not be allocated.
1138 *
		 * Since we cannot fail, we will sleep for a while and try
		 * again.  Although we will be woken when another page table
		 * is freed, other memory releases and swapping may occur in
		 * the meantime that allow us to succeed, so we keep retrying
		 * at regular intervals just in case.
1144 */
1145
1146 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1147 "l1ptwait", 1000) == EWOULDBLOCK)
1148 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1149 }
1150
1151 /* Map zero page for the pmap. This will also map the L2 for it */
1152 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1153 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1154 }
1155
1156
1157 void
1158 pmap_freepagedir(pmap)
1159 pmap_t pmap;
1160 {
1161 /* Free the memory used for the page table mapping */
1162 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1163
1164 /* junk the L1 page table */
1165 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1166 /* Add the page table to the queue */
1167 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1168 ++l1pt_static_queue_count;
1169 /* Wake up any sleeping processes waiting for a l1 page table */
1170 wakeup((caddr_t)&pmap_pagedir_ident);
1171 } else if (l1pt_queue_count < 8) {
1172 /* Add the page table to the queue */
1173 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1174 ++l1pt_queue_count;
1175 /* Wake up any sleeping processes waiting for a l1 page table */
1176 wakeup((caddr_t)&pmap_pagedir_ident);
1177 } else
1178 pmap_free_l1pt(pmap->pm_l1pt);
1179 }
1180
1181
1182 /*
1183 * Retire the given physical map from service.
1184 * Should only be called if the map contains no valid mappings.
1185 */
1186
1187 void
1188 pmap_destroy(pmap)
1189 pmap_t pmap;
1190 {
1191 int count;
1192
1193 if (pmap == NULL)
1194 return;
1195
1196 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1197 simple_lock(&pmap->pm_lock);
1198 count = --pmap->pm_count;
1199 simple_unlock(&pmap->pm_lock);
1200 if (count == 0) {
1201 pmap_release(pmap);
1202 free((caddr_t)pmap, M_VMPMAP);
1203 }
1204 }
1205
1206
1207 /*
1208 * Release any resources held by the given physical map.
1209 * Called when a pmap initialized by pmap_pinit is being released.
1210 * Should only be called if the map contains no valid mappings.
1211 */
1212
1213 void
1214 pmap_release(pmap)
1215 pmap_t pmap;
1216 {
1217 struct vm_page *page;
1218 pt_entry_t *pte;
1219 int loop;
1220
1221 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1222
1223 #if 0
1224 if (pmap->pm_count != 1) /* XXX: needs sorting */
1225 panic("pmap_release count %d", pmap->pm_count);
1226 #endif
1227
1228 /* Remove the zero page mapping */
1229 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1230
1231 /*
	 * Free any page tables still mapped.
	 * This is only temporary until pmap_enter can count the number
	 * of mappings made in a page table.  Then pmap_remove() can
	 * reduce the count and free the page table when the count
	 * reaches zero.
1237 */
1238 for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) {
1239 pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4);
1240 if (*pte != 0) {
1241 PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte));
1242 page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
1243 if (page == NULL)
1244 panic("pmap_release: bad address for phys page");
1245 uvm_pagefree(page);
1246 }
1247 }
1248 /* Free the page dir */
1249 pmap_freepagedir(pmap);
1250 }
1251
1252
1253 /*
1254 * void pmap_reference(pmap_t pmap)
1255 *
1256 * Add a reference to the specified pmap.
1257 */
1258
1259 void
1260 pmap_reference(pmap)
1261 pmap_t pmap;
1262 {
1263 if (pmap == NULL)
1264 return;
1265
1266 simple_lock(&pmap->pm_lock);
1267 pmap->pm_count++;
1268 simple_unlock(&pmap->pm_lock);
1269 }
1270
1271 /*
1272 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1273 *
1274 * Return the start and end addresses of the kernel's virtual space.
1275 * These values are setup in pmap_bootstrap and are updated as pages
1276 * are allocated.
1277 */
1278
1279 void
1280 pmap_virtual_space(start, end)
1281 vaddr_t *start;
1282 vaddr_t *end;
1283 {
1284 *start = virtual_start;
1285 *end = virtual_end;
1286 }
1287
1288
1289 /*
1290 * Activate the address space for the specified process. If the process
1291 * is the current process, load the new MMU context.
1292 */
1293 void
1294 pmap_activate(p)
1295 struct proc *p;
1296 {
1297 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1298 struct pcb *pcb = &p->p_addr->u_pcb;
1299
1300 (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
1301 (paddr_t *)&pcb->pcb_pagedir);
1302
1303 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1304 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1305
1306 if (p == curproc) {
1307 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1308 setttb((u_int)pcb->pcb_pagedir);
1309 }
1310 #if 0
1311 pmap->pm_pdchanged = FALSE;
1312 #endif
1313 }
1314
1315
1316 /*
1317 * Deactivate the address space of the specified process.
1318 */
1319 void
1320 pmap_deactivate(p)
1321 struct proc *p;
1322 {
1323 }
1324
1325
1326 /*
1327 * pmap_clean_page()
1328 *
1329 * This is a local function used to work out the best strategy to clean
1330 * a single page referenced by its entry in the PV table. It's used by
 * pmap_copy_page, pmap_zero_page and maybe some others later on.
1332 *
1333 * Its policy is effectively:
1334 * o If there are no mappings, we don't bother doing anything with the cache.
1335 * o If there is one mapping, we clean just that page.
1336 * o If there are multiple mappings, we clean the entire cache.
1337 *
1338 * So that some functions can be further optimised, it returns 0 if it didn't
1339 * clean the entire cache, or 1 if it did.
1340 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000, a whole cache clean will be performed rather than
 * just that one page.  This should not occur in everyday use, and if it
 * does it merely results in a less efficient clean for the page.
1345 */
1346 static int
1347 pmap_clean_page(pv)
1348 struct pv_entry *pv;
1349 {
1350 int s;
1351 int cache_needs_cleaning = 0;
1352 vaddr_t page_to_clean = 0;
1353
1354 /* Go to splvm() so we get exclusive lock for a mo */
1355 s = splvm();
1356 if (pv->pv_pmap) {
1357 cache_needs_cleaning = 1;
1358 if (!pv->pv_next)
1359 page_to_clean = pv->pv_va;
1360 }
1361 splx(s);
1362
1363 /* Do cache ops outside the splvm. */
1364 if (page_to_clean)
1365 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1366 else if (cache_needs_cleaning) {
1367 cpu_cache_purgeID();
1368 return (1);
1369 }
1370 return (0);
1371 }
1372
1373 /*
1374 * pmap_find_pv()
1375 *
1376 * This is a local function that finds a PV entry for a given physical page.
1377 * This is a common op, and this function removes loads of ifdefs in the code.
1378 */
1379 static __inline struct pv_entry *
1380 pmap_find_pv(phys)
1381 vaddr_t phys;
1382 {
1383 int bank, off;
1384 struct pv_entry *pv;
1385
1386 #ifdef DIAGNOSTIC
1387 if (!pmap_initialized)
1388 panic("pmap_find_pv: !pmap_initialized");
1389 #endif
1390
1391 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1392 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1393 pv = &vm_physmem[bank].pmseg.pvent[off];
1394 return (pv);
1395 }
1396
1397 /*
1398 * pmap_zero_page()
1399 *
1400 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cacheable, since
 * on the StrongARM accesses to non-cached pages are non-burst, making the
 * writing of _any_ bulk data very slow.
1404 */
1405 void
1406 pmap_zero_page(phys)
1407 vaddr_t phys;
1408 {
1409 struct pv_entry *pv;
1410
	/* Get an entry for this page, and clean it. */
1412 pv = pmap_find_pv(phys);
1413 pmap_clean_page(pv);
1414
1415 /*
1416 * Hook in the page, zero it, and purge the cache for that
1417 * zeroed page. Invalidate the TLB as needed.
1418 */
1419 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1420 cpu_tlb_flushD_SE(page_hook0.va);
1421 bzero_page(page_hook0.va);
1422 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1423 }
1424
1425 /*
1426 * pmap_copy_page()
1427 *
1428 * Copy one physical page into another, by mapping the pages into
 * hook points.  The same comment regarding cacheability as in
1430 * pmap_zero_page also applies here.
1431 */
1432 void
1433 pmap_copy_page(src, dest)
1434 vaddr_t src;
1435 vaddr_t dest;
1436 {
1437 struct pv_entry *src_pv, *dest_pv;
1438
1439 /* Get PV entries for the pages, and clean them if needed. */
1440 src_pv = pmap_find_pv(src);
1441 dest_pv = pmap_find_pv(dest);
1442 if (!pmap_clean_page(src_pv))
1443 pmap_clean_page(dest_pv);
1444
1445 /*
1446 * Map the pages into the page hook points, copy them, and purge
1447 * the cache for the appropriate page. Invalidate the TLB
1448 * as required.
1449 */
1450 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1451 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1452 cpu_tlb_flushD_SE(page_hook0.va);
1453 cpu_tlb_flushD_SE(page_hook1.va);
1454 bcopy_page(page_hook0.va, page_hook1.va);
1455 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1456 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1457 }
1458
/*
 * vaddr_t pmap_next_phys_page(vaddr_t addr)
 *
 * Return the address of the physical page following the given one,
 * or 0 if there are no more physical pages.
 */
1465
1466 vaddr_t
1467 pmap_next_phys_page(addr)
1468 vaddr_t addr;
1469
1470 {
1471 int loop;
1472
1473 if (addr < bootconfig.dram[0].address)
1474 return(bootconfig.dram[0].address);
1475
1476 loop = 0;
1477
1478 while (bootconfig.dram[loop].address != 0
1479 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1480 ++loop;
1481
1482 if (bootconfig.dram[loop].address == 0)
1483 return(0);
1484
1485 addr += NBPG;
1486
1487 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1488 if (bootconfig.dram[loop + 1].address == 0)
1489 return(0);
1490 addr = bootconfig.dram[loop + 1].address;
1491 }
1492
1493 return(addr);
1494 }
1495
1496 #if 0
1497 void
1498 pmap_pte_addref(pmap, va)
1499 pmap_t pmap;
1500 vaddr_t va;
1501 {
1502 pd_entry_t *pde;
1503 vaddr_t pa;
1504 struct vm_page *m;
1505
1506 if (pmap == pmap_kernel())
1507 return;
1508
1509 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1510 pa = pmap_pte_pa(pde);
1511 m = PHYS_TO_VM_PAGE(pa);
1512 ++m->wire_count;
1513 #ifdef MYCROFT_HACK
1514 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1515 pmap, va, pde, pa, m, m->wire_count);
1516 #endif
1517 }
1518
1519 void
1520 pmap_pte_delref(pmap, va)
1521 pmap_t pmap;
1522 vaddr_t va;
1523 {
1524 pd_entry_t *pde;
1525 vaddr_t pa;
1526 struct vm_page *m;
1527
1528 if (pmap == pmap_kernel())
1529 return;
1530
1531 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1532 pa = pmap_pte_pa(pde);
1533 m = PHYS_TO_VM_PAGE(pa);
1534 --m->wire_count;
1535 #ifdef MYCROFT_HACK
1536 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1537 pmap, va, pde, pa, m, m->wire_count);
1538 #endif
1539 if (m->wire_count == 0) {
1540 #ifdef MYCROFT_HACK
1541 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1542 pmap, va, pde, pa, m);
1543 #endif
1544 pmap_unmap_in_l1(pmap, va);
1545 uvm_pagefree(m);
1546 --pmap->pm_stats.resident_count;
1547 }
1548 }
1549 #else
1550 #define pmap_pte_addref(pmap, va)
1551 #define pmap_pte_delref(pmap, va)
1552 #endif
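/*
 * L2 page table reference counting is disabled for now; the stub macros
 * above compile away to nothing.
 */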
1553
1554 /*
1555 * Since we have a virtually indexed cache, we may need to inhibit caching if
1556 * there is more than one mapping and at least one of them is writable.
1557 * Since we purge the cache on every context switch, we only need to check for
1558 * other mappings within the same pmap, or kernel_pmap.
1559 * This function is also called when a page is unmapped, to possibly reenable
1560 * caching on any remaining mappings.
1561 */
1562 void
1563 pmap_vac_me_harder(pmap, pv)
1564 pmap_t pmap;
1565 struct pv_entry *pv;
1566 {
1567 struct pv_entry *npv;
1568 pt_entry_t *pte;
1569 int entries = 0;
1570 int writeable = 0;
1571
1572 if (pv->pv_pmap == NULL)
1573 return;
1574
1575 /*
1576 * Count mappings and writable mappings in this pmap.
1577 * Keep a pointer to the first one.
1578 */
1579 for (npv = pv; npv; npv = npv->pv_next) {
1580 /* Count mappings in the same pmap */
1581 if (pmap == npv->pv_pmap) {
1582 if (entries++ == 0)
1583 pv = npv;
1584 /* Writeable mappings */
1585 if (npv->pv_flags & PT_Wr)
1586 ++writeable;
1587 }
1588 }
1589
1590 /*
1591 * Enable or disable caching as necessary.
1592 * We do a quick check of the first PTE to avoid walking the list if
1593 * we're already in the right state.
1594 */
1595 if (entries > 1 && writeable) {
1596 pte = pmap_pte(pmap, pv->pv_va);
1597 if (~*pte & (PT_C | PT_B))
1598 return;
1599 *pte = *pte & ~(PT_C | PT_B);
1600 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1601 if (pmap == npv->pv_pmap) {
1602 pte = pmap_pte(pmap, npv->pv_va);
1603 *pte = *pte & ~(PT_C | PT_B);
1604 }
1605 }
1606 } else if (entries > 0) {
1607 pte = pmap_pte(pmap, pv->pv_va);
1608 if (*pte & (PT_C | PT_B))
1609 return;
1610 *pte = *pte | (PT_C | PT_B);
1611 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1612 if (pmap == npv->pv_pmap) {
1613 pte = pmap_pte(pmap, npv->pv_va);
1614 *pte = *pte | (PT_C | PT_B);
1615 }
1616 }
1617 }
1618 }
1619
1620 /*
1621 * pmap_remove()
1622 *
1623 * pmap_remove is responsible for nuking a number of mappings for a range
1624 * of virtual address space in the current pmap. To do this efficiently
1625 * is interesting, because in a number of cases a wide virtual address
1626 * range may be supplied that contains few actual mappings. So, the
1627 * optimisations are:
1628 * 1. Try and skip over hunks of address space for which an L1 entry
1629 * does not exist.
1630 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1631 * maybe do just a partial cache clean. This path of execution is
1632 * complicated by the fact that the cache must be flushed _before_
1633 * the PTE is nuked, being a VAC :-)
1634 * 3. Maybe later fast-case a single page, but I don't think this is
1635 * going to make _that_ much difference overall.
1636 */
1637
1638 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
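/*
 * Up to PMAP_REMOVE_CLEAN_LIST_SIZE pages are cleaned and flushed
 * individually; once the list overflows, the whole cache and TLB are
 * purged in one go and further per-page cleaning is skipped.
 */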
1639
1640 void
1641 pmap_remove(pmap, sva, eva)
1642 pmap_t pmap;
1643 vaddr_t sva;
1644 vaddr_t eva;
1645 {
1646 int cleanlist_idx = 0;
1647 struct pagelist {
1648 vaddr_t va;
1649 pt_entry_t *pte;
1650 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1651 pt_entry_t *pte = 0;
1652 vaddr_t pa;
1653 int pmap_active;
1654 struct pv_entry *pv;
1655
1656 /* Exit quick if there is no pmap */
1657 if (!pmap)
1658 return;
1659
1660 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1661
1662 sva &= PG_FRAME;
1663 eva &= PG_FRAME;
1664
1665 /* Get a page table pointer */
1666 while (sva < eva) {
1667 pte = pmap_pte(pmap, sva);
1668 if (pte)
1669 break;
1670 sva = (sva & PD_MASK) + NBPD;
1671 }
1672
	/* Note if the pmap is active, and thus requires cache and TLB cleans */
1674 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1675 || (pmap == kernel_pmap))
1676 pmap_active = 1;
1677 else
1678 pmap_active = 0;
1679
1680 /* Now loop along */
1681 while (sva < eva) {
1682 /* Check if we can move to the next PDE (l1 chunk) */
1683 if (!(sva & PT_MASK))
1684 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1685 sva += NBPD;
1686 pte += arm_byte_to_page(NBPD);
1687 continue;
1688 }
1689
1690 /* We've found a valid PTE, so this page of PTEs has to go. */
1691 if (pmap_pte_v(pte)) {
1692 int bank, off;
1693
1694 /* Update statistics */
1695 --pmap->pm_stats.resident_count;
1696
1697 /*
1698 * Add this page to our cache remove list, if we can.
1699 * If, however the cache remove list is totally full,
1700 * then do a complete cache invalidation taking note
1701 * to backtrack the PTE table beforehand, and ignore
1702 * the lists in future because there's no longer any
1703 * point in bothering with them (we've paid the
1704 * penalty, so will carry on unhindered). Otherwise,
1705 * when we fall out, we just clean the list.
1706 */
1707 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1708 pa = pmap_pte_pa(pte);
1709
1710 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1711 /* Add to the clean list. */
1712 cleanlist[cleanlist_idx].pte = pte;
1713 cleanlist[cleanlist_idx].va = sva;
1714 cleanlist_idx++;
1715 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1716 int cnt;
1717
1718 /* Nuke everything if needed. */
1719 if (pmap_active) {
1720 cpu_cache_purgeID();
1721 cpu_tlb_flushID();
1722 }
1723
1724 /*
1725 * Roll back the previous PTE list,
1726 * and zero out the current PTE.
1727 */
1728 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1729 *cleanlist[cnt].pte = 0;
1730 pmap_pte_delref(pmap, cleanlist[cnt].va);
1731 }
1732 *pte = 0;
1733 pmap_pte_delref(pmap, sva);
1734 cleanlist_idx++;
1735 } else {
1736 /*
1737 * We've already nuked the cache and
1738 * TLB, so just carry on regardless,
1739 * and we won't need to do it again
1740 */
1741 *pte = 0;
1742 pmap_pte_delref(pmap, sva);
1743 }
1744
1745 /*
1746 * Update flags. In a number of circumstances,
1747 * we could cluster a lot of these and do a
1748 * number of sequential pages in one go.
1749 */
1750 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1751 pv = &vm_physmem[bank].pmseg.pvent[off];
1752 pmap_remove_pv(pmap, sva, pv);
1753 pmap_vac_me_harder(pmap, pv);
1754 }
1755 }
1756 sva += NBPG;
1757 pte++;
1758 }
1759
1760 /*
1761 * Now, if we've fallen through down to here, chances are that there
	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
1763 */
1764 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1765 u_int cnt;
1766
1767 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1768 if (pmap_active) {
1769 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1770 *cleanlist[cnt].pte = 0;
1771 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1772 } else
1773 *cleanlist[cnt].pte = 0;
1774 pmap_pte_delref(pmap, cleanlist[cnt].va);
1775 }
1776 }
1777 }
1778
1779 /*
1780 * Routine: pmap_remove_all
1781 * Function:
1782 * Removes this physical page from
1783 * all physical maps in which it resides.
1784 * Reflects back modify bits to the pager.
1785 */
1786
1787 void
1788 pmap_remove_all(pa)
1789 vaddr_t pa;
1790 {
1791 struct pv_entry *ph, *pv, *npv;
1792 pmap_t pmap;
1793 pt_entry_t *pte;
1794 int s;
1795
1796 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1797
1798 pv = ph = pmap_find_pv(pa);
1799 pmap_clean_page(pv);
1800
1801 s = splvm();
1802
1803 if (ph->pv_pmap == NULL) {
1804 PDEBUG(0, printf("free page\n"));
1805 splx(s);
1806 return;
1807 }
1808
1809 while (pv) {
1810 pmap = pv->pv_pmap;
1811 pte = pmap_pte(pmap, pv->pv_va);
1812
1813 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1814 pv->pv_va, pv->pv_flags));
1815 #ifdef DEBUG
1816 if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
1817 panic("pmap_remove_all: bad mapping");
1818 #endif /* DEBUG */
1819
1820 /*
1821 * Update statistics
1822 */
1823 --pmap->pm_stats.resident_count;
1824
1825 /* Wired bit */
1826 if (pv->pv_flags & PT_W)
1827 --pmap->pm_stats.wired_count;
1828
1829 /*
1830 * Invalidate the PTEs.
1831 * XXX: should cluster them up and invalidate as many
1832 * as possible at once.
1833 */
1834
1835 #ifdef needednotdone
1836 reduce wiring count on page table pages as references drop
1837 #endif
1838
1839 *pte = 0;
1840 pmap_pte_delref(pmap, pv->pv_va);
1841
1842 npv = pv->pv_next;
1843 if (pv == ph)
1844 ph->pv_pmap = NULL;
1845 else
1846 pmap_free_pv(pv);
1847 pv = npv;
1848 }
1849
1850 splx(s);
1851
1852 PDEBUG(0, printf("done\n"));
1853 cpu_tlb_flushID();
1854 }
1855
1856
1857 /*
1858 * Set the physical protection on the specified range of this map as requested.
1859 */
1860
1861 void
1862 pmap_protect(pmap, sva, eva, prot)
1863 pmap_t pmap;
1864 vaddr_t sva;
1865 vaddr_t eva;
1866 vm_prot_t prot;
1867 {
1868 pt_entry_t *pte = NULL;
1869 int armprot;
1870 int flush = 0;
1871 vaddr_t pa;
1872 int bank, off;
1873 struct pv_entry *pv;
1874
1875 /*
1876 * Make sure pmap is valid. -dct
1877 */
1878 if (pmap == NULL)
1879 return;
1880 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
1881 pmap, sva, eva, prot));
1882
1883 if (~prot & VM_PROT_READ) {
1884 /* Just remove the mappings. */
1885 pmap_remove(pmap, sva, eva);
1886 return;
1887 }
1888 if (prot & VM_PROT_WRITE) {
1889 /*
1890 * If this is a read->write transition, just ignore it and let
1891 * uvm_fault() take care of it later.
1892 */
1893 return;
1894 }
1895
1896 sva &= PG_FRAME;
1897 eva &= PG_FRAME;
1898
1899 /*
1900 * We need to acquire a pointer to a page table page before entering
1901 * the following loop.
1902 */
1903 while (sva < eva) {
1904 pte = pmap_pte(pmap, sva);
1905 if (pte)
1906 break;
1907 sva = (sva & PD_MASK) + NBPD;
1908 }
1909
1910 while (sva < eva) {
1911 /* only check once in a while */
1912 if ((sva & PT_MASK) == 0) {
1913 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1914 /* We can race ahead here, to the next pde. */
1915 sva += NBPD;
1916 pte += arm_byte_to_page(NBPD);
1917 continue;
1918 }
1919 }
1920
1921 if (!pmap_pte_v(pte))
1922 goto next;
1923
1924 flush = 1;
1925
1926 armprot = 0;
1927 if (sva < VM_MAXUSER_ADDRESS)
1928 armprot |= PT_AP(AP_U);
1929 else if (sva < VM_MAX_ADDRESS)
1930 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
1931 *pte = (*pte & 0xfffff00f) | armprot;
1932
1933 pa = pmap_pte_pa(pte);
1934
1935 /* Get the physical page index */
1936
1937 /* Clear write flag */
1938 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1939 pv = &vm_physmem[bank].pmseg.pvent[off];
1940 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
1941 pmap_vac_me_harder(pmap, pv);
1942 }
1943
1944 next:
1945 sva += NBPG;
1946 pte++;
1947 }
1948
1949 if (flush)
1950 cpu_tlb_flushID();
1951 }
1952
1953 /*
1954 * void pmap_enter(pmap_t pmap, vaddr_t va, vaddr_t pa, vm_prot_t prot,
1955 * int flags)
1956 *
1957 * Insert the given physical page (p) at
1958 * the specified virtual address (v) in the
1959 * target physical map with the protection requested.
1960 *
1961 * If specified, the page will be wired down, meaning
1962 * that the related pte can not be reclaimed.
1963 *
1964 * NB: This is the only routine which MAY NOT lazy-evaluate
1965 * or lose information. That is, this routine must actually
1966 * insert this page into the given map NOW.
1967 */
1968
1969 int
1970 pmap_enter(pmap, va, pa, prot, flags)
1971 pmap_t pmap;
1972 vaddr_t va;
1973 vaddr_t pa;
1974 vm_prot_t prot;
1975 int flags;
1976 {
1977 pt_entry_t *pte;
1978 u_int npte;
1979 int bank, off;
1980 struct pv_entry *pv = NULL;
1981 vaddr_t opa;
1982 int nflags;
1983 boolean_t wired = (flags & PMAP_WIRED) != 0;
1984
1985 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
1986 va, pa, pmap, prot, wired));
1987
1988 /* Valid pmap ? */
1989 if (pmap == NULL)
1990 return (KERN_SUCCESS);
1991
1992 #ifdef DIAGNOSTIC
1993 /* Valid address ? */
1994 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
1995 panic("pmap_enter: too big");
1996 if (pmap != pmap_kernel() && va != 0) {
1997 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
1998 panic("pmap_enter: kernel page in user map");
1999 } else {
2000 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2001 panic("pmap_enter: user page in kernel map");
2002 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2003 panic("pmap_enter: entering PT page");
2004 }
2005 #endif
2006
2007 /*
2008 * Get a pointer to the pte for this virtual address. If the
2009 * pte pointer is NULL then we are missing the L2 page table
2010 * so we need to create one.
2011 */
2012 pte = pmap_pte(pmap, va);
2013 if (!pte) {
2014 vaddr_t l2pa;
2015 struct vm_page *m;
2016
2017 /* Allocate a page table */
2018 for (;;) {
2019 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2020 if (m != NULL)
2021 break;
2022
2023 /*
2024 * No page available. If we're the kernel
2025 * pmap, we die, since we might not have
2026 * a valid thread context. For user pmaps,
2027 * we assume that we _do_ have a valid thread
2028 * context, so we wait here for the pagedaemon
2029 * to free up some pages.
2030 *
2031 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2032 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2033 * XXX SO THIS IS PROBABLY SAFE. In any case,
2034 * XXX other pmap modules claim it is safe to
2035 * XXX sleep here if it's a user pmap.
2036 */
2037 if (pmap == pmap_kernel())
2038 panic("pmap_enter: no free pages");
2039 else
2040 uvm_wait("pmap_enter");
2041 }
2042
2043 /* Wire this page table into the L1. */
2044 l2pa = VM_PAGE_TO_PHYS(m);
2045 pmap_zero_page(l2pa);
2046 pmap_map_in_l1(pmap, va, l2pa);
2047 ++pmap->pm_stats.resident_count;
2048
2049 pte = pmap_pte(pmap, va);
2050 #ifdef DIAGNOSTIC
2051 if (!pte)
2052 panic("pmap_enter: no pte");
2053 #endif
2054 }
2055
2056 nflags = 0;
2057 if (prot & VM_PROT_WRITE)
2058 nflags |= PT_Wr;
2059 if (wired)
2060 nflags |= PT_W;
2061
2062 /* More debugging info */
2063 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2064 *pte));
2065
2066 /* Is the pte valid ? If so then this page is already mapped */
2067 if (pmap_pte_v(pte)) {
2068 /* Get the physical address of the current page mapped */
2069 opa = pmap_pte_pa(pte);
2070
2071 #ifdef MYCROFT_HACK
2072 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2073 #endif
2074
2075 /* Are we mapping the same page ? */
2076 if (opa == pa) {
2077 /* All we must be doing is changing the protection */
2078 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2079 va, pa));
2080
2081 /* Has the wiring changed ? */
2082 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2083 pv = &vm_physmem[bank].pmseg.pvent[off];
2084 (void) pmap_modify_pv(pmap, va, pv,
2085 PT_Wr | PT_W, nflags);
2086 }
2087 } else {
2088 /* We are replacing the page with a new one. */
2089 cpu_cache_purgeID_rng(va, NBPG);
2090
2091 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2092 va, pa, opa));
2093
2094 /*
2095 * If it is part of our managed memory then we
2096 * must remove it from the PV list
2097 */
2098 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2099 pv = &vm_physmem[bank].pmseg.pvent[off];
2100 pmap_remove_pv(pmap, va, pv);
2101 }
2102
2103 goto enter;
2104 }
2105 } else {
2106 opa = 0;
2107 pmap_pte_addref(pmap, va);
2108
2109 /* pte is not valid so we must be hooking in a new page */
2110 ++pmap->pm_stats.resident_count;
2111
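	/*
	 * The enter: label is reached both when hooking in a brand new
	 * mapping and, via the goto above, when an existing mapping is
	 * being replaced by a different physical page.
	 */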
2112 enter:
2113 /*
2114 * Enter on the PV list if part of our managed memory
2115 */
2116 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2117 pv = &vm_physmem[bank].pmseg.pvent[off];
2118 pmap_enter_pv(pmap, va, pv, nflags);
2119 }
2120 }
2121
2122 #ifdef MYCROFT_HACK
2123 if (mycroft_hack)
2124 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2125 #endif
2126
2127 /* Construct the pte, giving the correct access. */
2128 npte = (pa & PG_FRAME);
2129
2130 /* VA 0 is magic. */
2131 if (pmap != pmap_kernel() && va != 0)
2132 npte |= PT_AP(AP_U);
2133
2134 if (bank != -1) {
2135 #ifdef DIAGNOSTIC
2136 if ((flags & VM_PROT_ALL) & ~prot)
2137 panic("pmap_enter: access_type exceeds prot");
2138 #endif
2139 npte |= PT_C | PT_B;
2140 if (flags & VM_PROT_WRITE) {
2141 npte |= L2_SPAGE | PT_AP(AP_W);
2142 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2143 } else if (flags & VM_PROT_ALL) {
2144 npte |= L2_SPAGE;
2145 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2146 } else
2147 npte |= L2_INVAL;
2148 } else {
2149 if (prot & VM_PROT_WRITE)
2150 npte |= L2_SPAGE | PT_AP(AP_W);
2151 else if (prot & VM_PROT_ALL)
2152 npte |= L2_SPAGE;
2153 else
2154 npte |= L2_INVAL;
2155 }
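	/*
	 * For example, a managed, writable user page entered with
	 * VM_PROT_WRITE in the access type ends up with
	 *
	 *	npte = (pa & PG_FRAME) | PT_AP(AP_U) | PT_C | PT_B |
	 *	    L2_SPAGE | PT_AP(AP_W)
	 *
	 * while a managed page entered with no access type bits set is
	 * installed as L2_INVAL, so that the first reference faults and
	 * pmap_handled_emulation() can record it.
	 */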
2156
2157 #ifdef MYCROFT_HACK
2158 if (mycroft_hack)
2159 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2160 #endif
2161
2162 *pte = npte;
2163
2164 if (bank != -1)
2165 pmap_vac_me_harder(pmap, pv);
2166
2167 /* Better flush the TLB ... */
2168 cpu_tlb_flushID_SE(va);
2169
2170 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2171
2172 return (KERN_SUCCESS);
2173 }
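/*
 * Example (hypothetical caller): entering a pageable, writable user
 * mapping and passing the faulting access type in the low bits of the
 * flags argument so that the referenced/modified attributes are
 * pre-set:
 *
 *	pmap_enter(p->p_vmspace->vm_map.pmap, va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_WRITE);
 */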
2174
2175 void
2176 pmap_kenter_pa(va, pa, prot)
2177 vaddr_t va;
2178 paddr_t pa;
2179 vm_prot_t prot;
2180 {
2181 pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2182 }
2183
2184 void
2185 pmap_kenter_pgs(va, pgs, npgs)
2186 vaddr_t va;
2187 struct vm_page **pgs;
2188 int npgs;
2189 {
2190 int i;
2191
2192 for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
2193 pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
2194 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
2195 }
2196 }
2197
2198 void
2199 pmap_kremove(va, len)
2200 vaddr_t va;
2201 vsize_t len;
2202 {
2203 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2204 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2205 }
2206 }
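/*
 * Example (hypothetical) of using the routines above to create a
 * temporary wired kernel mapping of the page pg at kva and tear it
 * down again:
 *
 *	pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	pmap_kremove(kva, PAGE_SIZE);
 */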
2207
2208 /*
2209 * pmap_page_protect:
2210 *
2211 * Lower the permission for all mappings to a given page.
2212 */
2213
2214 void
2215 pmap_page_protect(pg, prot)
2216 struct vm_page *pg;
2217 vm_prot_t prot;
2218 {
2219 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2220
2221 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2222
2223 switch(prot) {
2224 case VM_PROT_READ:
2225 case VM_PROT_READ|VM_PROT_EXECUTE:
2226 pmap_copy_on_write(pa);
2227 break;
2228
2229 case VM_PROT_ALL:
2230 break;
2231
2232 default:
2233 pmap_remove_all(pa);
2234 break;
2235 }
2236 }
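/*
 * A caller such as the pagedaemon typically uses
 * pmap_page_protect(pg, VM_PROT_NONE) to strip every mapping of a page
 * before it is freed or paged out, while VM_PROT_READ downgrades all
 * mappings of the page to read-only via pmap_copy_on_write().
 */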
2237
2238
2239 /*
2240 * Routine: pmap_unwire
2241 * Function: Clear the wired attribute for a map/virtual-address
2242 * pair.
2243 * In/out conditions:
2244 * The mapping must already exist in the pmap.
2245 */
2246
2247 void
2248 pmap_unwire(pmap, va)
2249 pmap_t pmap;
2250 vaddr_t va;
2251 {
2252 pt_entry_t *pte;
2253 vaddr_t pa;
2254 int bank, off;
2255 struct pv_entry *pv;
2256
2257 /*
2258 * Make sure pmap is valid. -dct
2259 */
2260 if (pmap == NULL)
2261 return;
2262
2263 /* Get the pte */
2264 pte = pmap_pte(pmap, va);
2265 if (!pte)
2266 return;
2267
2268 /* Extract the physical address of the page */
2269 pa = pmap_pte_pa(pte);
2270
2271 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2272 return;
2273 pv = &vm_physmem[bank].pmseg.pvent[off];
2274 /* Update the wired bit in the pv entry for this page. */
2275 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2276 }
2277
2278 /*
2279 * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
2280 *
2281 * Return the pointer to a page table entry corresponding to the supplied
2282 * virtual address.
2283 *
2284 * The page directory is first checked to make sure that a page table
2285 * for the address in question exists and if it does a pointer to the
2286 * entry is returned.
2287 *
2288  * The way this works is that the kernel page tables are mapped
2289 * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
2290 * This allows page tables to be located quickly.
2291 */
2292 pt_entry_t *
2293 pmap_pte(pmap, va)
2294 pmap_t pmap;
2295 vaddr_t va;
2296 {
2297 pt_entry_t *ptp;
2298 pt_entry_t *result;
2299
2300 /* The pmap must be valid */
2301 if (!pmap)
2302 return(NULL);
2303
2304 /* Return the address of the pte */
2305 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2306 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2307
2308 /* Do we have a valid pde ? If not we don't have a page table */
2309 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2310 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2311 pmap_pde(pmap, va)));
2312 return(NULL);
2313 }
2314
2315 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2316 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2317 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2318 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2319
2320 /*
2321 * If the pmap is the kernel pmap or the pmap is the active one
2322  * then we can just return a pointer to the entry relative to
2323 * PROCESS_PAGE_TBLS_BASE.
2324 * Otherwise we need to map the page tables to an alternative
2325 * address and reference them there.
2326 */
2327 if (pmap == kernel_pmap || pmap->pm_pptpt
2328 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2329 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2330 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2331 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2332 } else {
2333 struct proc *p = curproc;
2334
2335 /* If we don't have a valid curproc use proc0 */
2336 /* Perhaps we should just use kernel_pmap instead */
2337 if (p == NULL)
2338 p = &proc0;
2339 #ifdef DIAGNOSTIC
2340 /*
2341 	 * The pmap should always be valid for the process, so drop
2342 	 * into the debugger if it is not.
2343 */
2344 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2345 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2346 va, p, p->p_vmspace);
2347 console_debugger();
2348 }
2349 /*
2350 * The pmap for the current process should be mapped. If it
2351 * is not then we have a problem.
2352 */
2353 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2354 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2355 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2356 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2357 printf("pmap pagetable = P%08lx current = P%08x ",
2358 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2359 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2360 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2361 PG_FRAME));
2362 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2363 panic("pmap_pte: current and pmap mismatch\n");
2364 }
2365 #endif
2366
2367 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2368 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2369 pmap->pm_pptpt);
2370 cpu_tlb_flushD();
2371 }
2372 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2373 ((va >> (PGSHIFT-2)) & ~3)));
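	/*
	 * Each 4 byte pte maps one page, so the byte offset of the pte
	 * for va within the page table window is
	 * (va >> PGSHIFT) * sizeof(pt_entry_t), i.e. va >> (PGSHIFT - 2),
	 * rounded down to a word boundary; e.g. va 0x00123456 gives
	 * offset 0x48c.
	 */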
2374 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2375 return(result);
2376 }
2377
2378 /*
2379 * Routine: pmap_extract
2380 * Function:
2381 * Extract the physical page address associated
2382 * with the given map/virtual_address pair.
2383 */
2384 boolean_t
2385 pmap_extract(pmap, va, pap)
2386 pmap_t pmap;
2387 vaddr_t va;
2388 paddr_t *pap;
2389 {
2390 pt_entry_t *pte;
2391 paddr_t pa;
2392
2393 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2394
2395 /*
2396 * Get the pte for this virtual address. If there is no pte
2397  * then there is no L2 page table covering this address.
2398 */
2399
2400 pte = pmap_pte(pmap, va);
2401 if (!pte)
2402 return(FALSE);
2403
2404 	/* Is the pte valid ? If not then no page is actually mapped here */
2405 if (!pmap_pte_v(pte))
2406 return(FALSE);
2407
2408 /* Return the physical address depending on the PTE type */
2409 /* XXX What about L1 section mappings ? */
2410 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2411 /* Extract the physical address from the pte */
2412 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2413
2414 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2415 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2416
2417 if (pap != NULL)
2418 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2419 return (TRUE);
2420 } else {
2421 /* Extract the physical address from the pte */
2422 pa = pmap_pte_pa(pte);
2423
2424 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2425 (pa | (va & ~PG_FRAME))));
2426
2427 if (pap != NULL)
2428 *pap = pa | (va & ~PG_FRAME);
2429 return (TRUE);
2430 }
2431 }
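/*
 * Example (hypothetical): translating a kernel virtual address into
 * the physical address backing it:
 *
 *	paddr_t pa;
 *
 *	if (!pmap_extract(pmap_kernel(), va, &pa))
 *		panic("va %08lx is not mapped", va);
 */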
2432
2433
2434 /*
2435 * Copy the range specified by src_addr/len from the source map to the
2436 * range dst_addr/len in the destination map.
2437 *
2438 * This routine is only advisory and need not do anything.
2439 */
2440
2441 void
2442 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2443 pmap_t dst_pmap;
2444 pmap_t src_pmap;
2445 vaddr_t dst_addr;
2446 vm_size_t len;
2447 vaddr_t src_addr;
2448 {
2449 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2450 dst_pmap, src_pmap, dst_addr, len, src_addr));
2451 }
2452
2453 #if defined(PMAP_DEBUG)
2454 void
2455 pmap_dump_pvlist(phys, m)
2456 vaddr_t phys;
2457 char *m;
2458 {
2459 struct pv_entry *pv;
2460 int bank, off;
2461
2462 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2463 printf("INVALID PA\n");
2464 return;
2465 }
2466 pv = &vm_physmem[bank].pmseg.pvent[off];
2467 printf("%s %08lx:", m, phys);
2468 if (pv->pv_pmap == NULL) {
2469 printf(" no mappings\n");
2470 return;
2471 }
2472
2473 for (; pv; pv = pv->pv_next)
2474 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2475 pv->pv_va, pv->pv_flags);
2476
2477 printf("\n");
2478 }
2479
2480 #endif /* PMAP_DEBUG */
2481
2482 boolean_t
2483 pmap_testbit(pa, setbits)
2484 vaddr_t pa;
2485 int setbits;
2486 {
2487 int bank, off;
2488
2489 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2490
2491 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2492 return(FALSE);
2493
2494 /*
2495 * Check saved info only
2496 */
2497 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2498 PDEBUG(0, printf("pmap_attributes = %02x\n",
2499 vm_physmem[bank].pmseg.attrs[off]));
2500 return(TRUE);
2501 }
2502
2503 return(FALSE);
2504 }
2505
2506
2507 /*
2508 * Modify pte bits for all ptes corresponding to the given physical address.
2509 * We use `maskbits' rather than `clearbits' because we're always passing
2510 * constants and the latter would require an extra inversion at run-time.
2511 */
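/*
 * Clearing PT_Wr or PT_M removes AP_W from every pte mapping the page,
 * so the next write faults into pmap_modified_emulation(); clearing
 * PT_H marks each pte L2_INVAL, so the next reference faults into
 * pmap_handled_emulation().
 */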
2512
2513 void
2514 pmap_clearbit(pa, maskbits)
2515 vaddr_t pa;
2516 int maskbits;
2517 {
2518 struct pv_entry *pv;
2519 pt_entry_t *pte;
2520 vaddr_t va;
2521 int bank, off;
2522 int s;
2523
2524 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2525 pa, maskbits));
2526 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2527 return;
2528 pv = &vm_physmem[bank].pmseg.pvent[off];
2529 s = splvm();
2530
2531 /*
2532 * Clear saved attributes (modify, reference)
2533 */
2534 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2535
2536 if (pv->pv_pmap == NULL) {
2537 splx(s);
2538 return;
2539 }
2540
2541 /*
2542 	 * Loop over all current mappings, setting/clearing as appropriate
2543 */
2544 for (; pv; pv = pv->pv_next) {
2545 va = pv->pv_va;
2546
2547 /*
2548 * XXX don't write protect pager mappings
2549 */
2550 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2551 printf("pmap_clearbit: bogon alpha\n");
2552 continue;
2553 }
2554
2555 pv->pv_flags &= ~maskbits;
2556 pte = pmap_pte(pv->pv_pmap, va);
2557 if (maskbits & (PT_Wr|PT_M))
2558 *pte = *pte & ~PT_AP(AP_W);
2559 if (maskbits & PT_H)
2560 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2561 }
2562 cpu_tlb_flushID();
2563
2564 splx(s);
2565 }
2566
2567
2568 boolean_t
2569 pmap_clear_modify(pg)
2570 struct vm_page *pg;
2571 {
2572 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2573 boolean_t rv;
2574
2575 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2576 rv = pmap_testbit(pa, PT_M);
2577 pmap_clearbit(pa, PT_M);
2578 return rv;
2579 }
2580
2581
2582 boolean_t
2583 pmap_clear_reference(pg)
2584 struct vm_page *pg;
2585 {
2586 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2587 boolean_t rv;
2588
2589 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2590 rv = pmap_testbit(pa, PT_H);
2591 pmap_clearbit(pa, PT_H);
2592 return rv;
2593 }
2594
2595
2596 void
2597 pmap_copy_on_write(pa)
2598 vaddr_t pa;
2599 {
2600 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2601 pmap_clearbit(pa, PT_Wr);
2602 }
2603
2604
2605 boolean_t
2606 pmap_is_modified(pg)
2607 struct vm_page *pg;
2608 {
2609 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2610 boolean_t result;
2611
2612 result = pmap_testbit(pa, PT_M);
2613 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2614 return (result);
2615 }
2616
2617
2618 boolean_t
2619 pmap_is_referenced(pg)
2620 struct vm_page *pg;
2621 {
2622 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2623 boolean_t result;
2624
2625 result = pmap_testbit(pa, PT_H);
2626 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2627 return (result);
2628 }
2629
2630
2631 int
2632 pmap_modified_emulation(pmap, va)
2633 pmap_t pmap;
2634 vaddr_t va;
2635 {
2636 pt_entry_t *pte;
2637 vaddr_t pa;
2638 int bank, off;
2639 struct pv_entry *pv;
2640 u_int flags;
2641
2642 PDEBUG(2, printf("pmap_modified_emulation\n"));
2643
2644 /* Get the pte */
2645 pte = pmap_pte(pmap, va);
2646 if (!pte) {
2647 PDEBUG(2, printf("no pte\n"));
2648 return(0);
2649 }
2650
2651 PDEBUG(1, printf("*pte=%08x\n", *pte));
2652
2653 /* Check for a zero pte */
2654 if (*pte == 0)
2655 return(0);
2656
2657 /* This can happen if user code tries to access kernel memory. */
2658 if ((*pte & PT_AP(AP_W)) != 0)
2659 return (0);
2660
2661 /* Extract the physical address of the page */
2662 pa = pmap_pte_pa(pte);
2663 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2664 return(0);
2665
2666 /* Get the current flags for this page. */
2667 pv = &vm_physmem[bank].pmseg.pvent[off];
2668 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2669 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2670
2671 /*
2672 * Do the flags say this page is writable ? If not then it is a
2673 * genuine write fault. If yes then the write fault is our fault
2674 	 * as we did not reflect the write access in the PTE. Now that we
2675 	 * know a write has occurred, we can correct this and also set
2676 	 * the modified bit.
2677 */
2678 if (~flags & PT_Wr)
2679 return(0);
2680
2681 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2682 va, pte, *pte));
2683 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2684 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2685 PDEBUG(0, printf("->(%08x)\n", *pte));
2686
2687 /* Return, indicating the problem has been dealt with */
2688 cpu_tlb_flushID_SE(va);
2689 return(1);
2690 }
2691
2692
2693 int
2694 pmap_handled_emulation(pmap, va)
2695 pmap_t pmap;
2696 vaddr_t va;
2697 {
2698 pt_entry_t *pte;
2699 vaddr_t pa;
2700 int bank, off;
2701
2702 PDEBUG(2, printf("pmap_handled_emulation\n"));
2703
2704 /* Get the pte */
2705 pte = pmap_pte(pmap, va);
2706 if (!pte) {
2707 PDEBUG(2, printf("no pte\n"));
2708 return(0);
2709 }
2710
2711 PDEBUG(1, printf("*pte=%08x\n", *pte));
2712
2713 /* Check for a zero pte */
2714 if (*pte == 0)
2715 return(0);
2716
2717 /* This can happen if user code tries to access kernel memory. */
2718 if ((*pte & L2_MASK) != L2_INVAL)
2719 return (0);
2720
2721 /* Extract the physical address of the page */
2722 pa = pmap_pte_pa(pte);
2723 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2724 return(0);
2725
2726 /*
2727 	 * OK, we just enable the pte and mark the attributes as handled
2728 */
2729 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2730 va, pte, *pte));
2731 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2732 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2733 PDEBUG(0, printf("->(%08x)\n", *pte));
2734
2735 /* Return, indicating the problem has been dealt with */
2736 cpu_tlb_flushID_SE(va);
2737 return(1);
2738 }
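/*
 * Both emulation routines return 1 when the fault has been fixed up
 * here and 0 when it is a genuine fault that must be passed on to
 * uvm_fault(); they are intended to be called from the abort handlers.
 */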
2739
2740 /*
2741 * pmap_collect: free resources held by a pmap
2742 *
2743 * => optional function.
2744 * => called when a process is swapped out to free memory.
2745 */
2746
2747 void
2748 pmap_collect(pmap)
2749 pmap_t pmap;
2750 {
2751 }
2752
2753 /*
2754 * Routine: pmap_procwr
2755 *
2756 * Function:
2757  *	Synchronize caches corresponding to [va, va+len) in process p.
2758 *
2759 */
2760 void
2761 pmap_procwr(p, va, len)
2762 struct proc *p;
2763 vaddr_t va;
2764 u_long len;
2765 {
2766 /* We only need to do anything if it is the current process. */
2767 if (p == curproc)
2768 cpu_cache_syncI_rng(va, len);
2769 }
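/*
 * pmap_procwr() is used, for example, after the kernel writes
 * instructions into a process's address space (breakpoints, signal
 * trampolines) so that the instruction cache sees the new code.
 */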
2770
2771 /* End of pmap.c */
2772