      1 /*	$NetBSD: pmap.c,v 1.5 2001/04/20 18:11:53 toshii Exp $	*/
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1994-1998 Mark Brinicombe.
41 * Copyright (c) 1994 Brini.
42 * All rights reserved.
43 *
44 * This code is derived from software written for Brini by Mark Brinicombe
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 * must display the following acknowledgement:
56 * This product includes software developed by Mark Brinicombe.
57 * 4. The name of the author may not be used to endorse or promote products
58 * derived from this software without specific prior written permission.
59 *
60 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
61 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
62 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
63 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
64 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
65 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
66 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
67 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
68 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     69  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
70 * RiscBSD kernel project
71 *
72 * pmap.c
73 *
     74  * Machine dependent vm stuff
75 *
76 * Created : 20/09/94
77 */
78
79 /*
80 * Performance improvements, UVM changes, overhauls and part-rewrites
81 * were contributed by Neil A. Carson <neil (at) causality.com>.
82 */
83
84 /*
85 * The dram block info is currently referenced from the bootconfig.
86 * This should be placed in a separate structure.
87 */
88
89 /*
90 * Special compilation symbols
91 * PMAP_DEBUG - Build in pmap_debug_level code
92 */
93
94 /* Include header files */
95
96 #include "opt_pmap_debug.h"
97 #include "opt_ddb.h"
98
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/kernel.h>
102 #include <sys/systm.h>
103 #include <sys/proc.h>
104 #include <sys/malloc.h>
105 #include <sys/user.h>
106
107 #include <uvm/uvm.h>
108
109 #include <machine/bootconfig.h>
110 #include <machine/bus.h>
111 #include <machine/pmap.h>
112 #include <machine/pcb.h>
113 #include <machine/param.h>
114 #include <machine/katelib.h>
115
116 #ifdef PMAP_DEBUG
117 #define PDEBUG(_lev_,_stat_) \
118 if (pmap_debug_level >= (_lev_)) \
119 ((_stat_))
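/*
 * Example: PDEBUG(5, printf("pmap_foo: va=%08lx\n", va)); expands to the
 * printf guarded by (pmap_debug_level >= 5).  The expansion is a bare if
 * statement, so PDEBUG should only ever be used as a complete statement.
 */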
120 int pmap_debug_level = -2;
121 #else /* PMAP_DEBUG */
122 #define PDEBUG(_lev_,_stat_) /* Nothing */
123 #endif /* PMAP_DEBUG */
124
125 struct pmap kernel_pmap_store;
126 pmap_t kernel_pmap;
127
128 pagehook_t page_hook0;
129 pagehook_t page_hook1;
130 char *memhook;
131 pt_entry_t msgbufpte;
132 extern caddr_t msgbufaddr;
133
134 #ifdef DIAGNOSTIC
135 boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
136 #endif
137
138 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
139
140 int pv_nfree = 0;
141
142 vsize_t npages;
143
144 extern paddr_t physical_start;
145 extern paddr_t physical_freestart;
146 extern paddr_t physical_end;
147 extern paddr_t physical_freeend;
148 extern unsigned int free_pages;
149 extern int max_processes;
150
151 vaddr_t virtual_start;
152 vaddr_t virtual_end;
153
154 vaddr_t avail_start;
155 vaddr_t avail_end;
156
157 extern pv_addr_t systempage;
158
159 #define ALLOC_PAGE_HOOK(x, s) \
160 x.va = virtual_start; \
161 x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
162 virtual_start += s;
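/*
 * ALLOC_PAGE_HOOK carves `s' bytes of KVA out of virtual_start for a page
 * hook, recording the hook's va and the kernel PTE that will map it.  It
 * is not wrapped in do { ... } while (0), so it must be used only as a
 * complete statement; see the calls in pmap_bootstrap().
 */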
163
164 /* Variables used by the L1 page table queue code */
165 SIMPLEQ_HEAD(l1pt_queue, l1pt);
166 struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
167 int l1pt_static_queue_count; /* items in the static l1 queue */
168 int l1pt_static_create_count; /* static l1 items created */
169 struct l1pt_queue l1pt_queue; /* head of our l1 queue */
170 int l1pt_queue_count; /* items in the l1 queue */
171 int l1pt_create_count; /* stat - L1's create count */
172 int l1pt_reuse_count; /* stat - L1's reused count */
173
174 /* Local function prototypes (not used outside this file) */
175 pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
176 int pmap_page_index __P((paddr_t pa));
177 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
178 paddr_t pa, unsigned int flags));
179 void pmap_copy_on_write __P((paddr_t pa));
180 void pmap_pinit __P((pmap_t));
181 void pmap_freepagedir __P((pmap_t));
182 void pmap_release __P((pmap_t));
183
184 /* Other function prototypes */
185 extern void bzero_page __P((vaddr_t));
186 extern void bcopy_page __P((vaddr_t, vaddr_t));
187
188 struct l1pt *pmap_alloc_l1pt __P((void));
189 static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
190 vaddr_t l2pa));
191
192 #ifdef MYCROFT_HACK
193 int mycroft_hack = 0;
194 #endif
195
196 /* Function to set the debug level of the pmap code */
197
198 #ifdef PMAP_DEBUG
199 void
200 pmap_debug(level)
201 int level;
202 {
203 pmap_debug_level = level;
204 printf("pmap_debug: level=%d\n", pmap_debug_level);
205 }
206 #endif /* PMAP_DEBUG */
207
208 #include "isadma.h"
209
210 #if NISADMA > 0
211 /*
212 * Used to protect memory for ISA DMA bounce buffers. If, when loading
213 * pages into the system, memory intersects with any of these ranges,
214 * the intersecting memory will be loaded into a lower-priority free list.
215 */
216 bus_dma_segment_t *pmap_isa_dma_ranges;
217 int pmap_isa_dma_nranges;
218
219 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
220 paddr_t *, psize_t *));
221
222 /*
223 * Check if a memory range intersects with an ISA DMA range, and
224 * return the page-rounded intersection if it does. The intersection
225 * will be placed on a lower-priority free list.
226 */
227 boolean_t
228 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
229 paddr_t pa;
230 psize_t size;
231 paddr_t *pap;
232 psize_t *sizep;
233 {
234 bus_dma_segment_t *ds;
235 int i;
236
237 if (pmap_isa_dma_ranges == NULL)
238 return (FALSE);
239
240 for (i = 0, ds = pmap_isa_dma_ranges;
241 i < pmap_isa_dma_nranges; i++, ds++) {
242 if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
243 /*
244 * Beginning of region intersects with this range.
245 */
246 *pap = trunc_page(pa);
247 *sizep = round_page(min(pa + size,
248 ds->ds_addr + ds->ds_len) - pa);
249 return (TRUE);
250 }
251 if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
252 /*
253 * End of region intersects with this range.
254 */
255 *pap = trunc_page(ds->ds_addr);
256 *sizep = round_page(min((pa + size) - ds->ds_addr,
257 ds->ds_len));
258 return (TRUE);
259 }
260 }
261
262 /*
263 * No intersection found.
264 */
265 return (FALSE);
266 }
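/*
 * Worked example (illustrative numbers only): with a single ISA DMA
 * segment covering [0x00800000, 0x00900000) and a query of
 * pa = 0x008ff000, size = 0x3000, the first test matches and we return
 * *pap = 0x008ff000 and *sizep = round_page(min(0x00902000, 0x00900000)
 * - 0x008ff000) = 0x1000, i.e. just the one overlapping page.
 */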
267 #endif /* NISADMA > 0 */
268
269 /*
    270  * Functions for manipulating pv_entry structures. These are used to keep a
271 * record of the mappings of virtual addresses and the associated physical
272 * pages.
273 */
274
275 /*
276 * Allocate a new pv_entry structure from the freelist. If the list is
277 * empty allocate a new page and fill the freelist.
278 */
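/*
 * A pv_page is a single NBPG-sized page overlaid with a small header
 * (pvp_pgi) followed by an array pvp_pv[NPVPPG] of pv_entry slots.  When
 * a fresh page is allocated below, slot 0 is handed straight back to the
 * caller and slots 1..NPVPPG-1 are chained onto the page's freelist.
 */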
279 struct pv_entry *
280 pmap_alloc_pv()
281 {
282 struct pv_page *pvp;
283 struct pv_entry *pv;
284 int i;
285
286 /*
287 * Do we have any free pv_entry structures left ?
288 * If not allocate a page of them
289 */
290
291 if (pv_nfree == 0) {
292 /* NOTE: can't lock kernel_map here */
293 MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
294 if (pvp == 0)
295 panic("pmap_alloc_pv: kmem_alloc() failed");
296 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
297 for (i = NPVPPG - 2; i; i--, pv++)
298 pv->pv_next = pv + 1;
299 pv->pv_next = 0;
300 pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
301 TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
302 pv = &pvp->pvp_pv[0];
303 } else {
304 --pv_nfree;
305 pvp = pv_page_freelist.tqh_first;
306 if (--pvp->pvp_pgi.pgi_nfree == 0) {
307 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
308 }
309 pv = pvp->pvp_pgi.pgi_freelist;
310 #ifdef DIAGNOSTIC
311 if (pv == 0)
312 panic("pmap_alloc_pv: pgi_nfree inconsistent");
313 #endif /* DIAGNOSTIC */
314 pvp->pvp_pgi.pgi_freelist = pv->pv_next;
315 }
316 return pv;
317 }
318
319 /*
320 * Release a pv_entry structure putting it back on the freelist.
321 */
322
323 void
324 pmap_free_pv(pv)
325 struct pv_entry *pv;
326 {
327 struct pv_page *pvp;
328
329 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
330 switch (++pvp->pvp_pgi.pgi_nfree) {
331 case 1:
332 TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
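		/* FALLTHROUGH: also link the entry onto the page's freelist */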
333 default:
334 pv->pv_next = pvp->pvp_pgi.pgi_freelist;
335 pvp->pvp_pgi.pgi_freelist = pv;
336 ++pv_nfree;
337 break;
338 case NPVPPG:
339 pv_nfree -= NPVPPG - 1;
340 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
341 FREE((vaddr_t)pvp, M_VMPVENT);
342 break;
343 }
344 }
345
346 #if 0
347 void
348 pmap_collect_pv()
349 {
350 struct pv_page_list pv_page_collectlist;
351 struct pv_page *pvp, *npvp;
352 struct pv_entry *ph, *ppv, *pv, *npv;
353 int s;
354
355 TAILQ_INIT(&pv_page_collectlist);
356
357 for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
358 if (pv_nfree < NPVPPG)
359 break;
360 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
361 if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
362 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
363 TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
364 pvp_pgi.pgi_list);
365 pv_nfree -= NPVPPG;
366 pvp->pvp_pgi.pgi_nfree = -1;
367 }
368 }
369
370 if (pv_page_collectlist.tqh_first == 0)
371 return;
372
373 for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
374 if (ph->pv_pmap == 0)
375 continue;
376 s = splvm();
377 for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
378 pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
379 if (pvp->pvp_pgi.pgi_nfree == -1) {
380 pvp = pv_page_freelist.tqh_first;
381 if (--pvp->pvp_pgi.pgi_nfree == 0) {
382 TAILQ_REMOVE(&pv_page_freelist,
383 pvp, pvp_pgi.pgi_list);
384 }
385 npv = pvp->pvp_pgi.pgi_freelist;
386 #ifdef DIAGNOSTIC
387 if (npv == 0)
388 panic("pmap_collect_pv: pgi_nfree inconsistent");
389 #endif /* DIAGNOSTIC */
390 pvp->pvp_pgi.pgi_freelist = npv->pv_next;
391 *npv = *pv;
392 ppv->pv_next = npv;
393 ppv = npv;
394 } else
395 ppv = pv;
396 }
397 splx(s);
398 }
399
400 for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
401 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
402 FREE((vaddr_t)pvp, M_VMPVENT);
403 }
404 }
405 #endif
406
407 /*
408 * Enter a new physical-virtual mapping into the pv table
409 */
410
411 /*__inline*/ void
412 pmap_enter_pv(pmap, va, pv, flags)
413 pmap_t pmap;
414 vaddr_t va;
415 struct pv_entry *pv;
416 u_int flags;
417 {
418 struct pv_entry *npv;
419 u_int s;
420
421 #ifdef DIAGNOSTIC
422 if (!pmap_initialized)
423 panic("pmap_enter_pv: !pmap_initialized");
424 #endif
425
426 s = splvm();
427
428 PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
429 pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
430
431 if (pv->pv_pmap == NULL) {
432 /*
433 * No entries yet, use header as the first entry
434 */
435 pv->pv_va = va;
436 pv->pv_pmap = pmap;
437 pv->pv_next = NULL;
438 pv->pv_flags = flags;
439 } else {
440 /*
441 * There is at least one other VA mapping this page.
442 * Place this entry after the header.
443 */
444 #ifdef PMAP_DEBUG
445 for (npv = pv; npv; npv = npv->pv_next)
446 if (pmap == npv->pv_pmap && va == npv->pv_va)
447 panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
448 pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
449 #endif
450 npv = pmap_alloc_pv();
451 npv->pv_va = va;
452 npv->pv_pmap = pmap;
453 npv->pv_flags = flags;
454 npv->pv_next = pv->pv_next;
455 pv->pv_next = npv;
456 }
457
458 if (flags & PT_W)
459 ++pmap->pm_stats.wired_count;
460
461 splx(s);
462 }
463
464
465 /*
466 * Remove a physical-virtual mapping from the pv table
467 */
468
469 /*__inline*/ void
470 pmap_remove_pv(pmap, va, pv)
471 pmap_t pmap;
472 vaddr_t va;
473 struct pv_entry *pv;
474 {
475 struct pv_entry *npv;
476 u_int s;
477 u_int flags = 0;
478
479 #ifdef DIAGNOSTIC
480 if (!pmap_initialized)
481 panic("pmap_remove_pv: !pmap_initialized");
482 #endif
483
484 s = splvm();
485
486 /*
487 * If it is the first entry on the list, it is actually
488 * in the header and we must copy the following entry up
489 * to the header. Otherwise we must search the list for
490 * the entry. In either case we free the now unused entry.
491 */
492
493 if (pmap == pv->pv_pmap && va == pv->pv_va) {
494 npv = pv->pv_next;
495 if (npv) {
496 *pv = *npv;
497 flags = npv->pv_flags;
498 pmap_free_pv(npv);
499 } else {
500 flags = pv->pv_flags;
501 pv->pv_pmap = NULL;
502 }
503 } else {
504 for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
505 if (pmap == npv->pv_pmap && va == npv->pv_va)
506 break;
507 }
508 if (npv) {
509 pv->pv_next = npv->pv_next;
510 flags = npv->pv_flags;
511 pmap_free_pv(npv);
512 } else
513 panic("pmap_remove_pv: lost entry");
514 }
515
516 if (flags & PT_W)
517 --pmap->pm_stats.wired_count;
518
519 splx(s);
520 }
521
522 /*
523 * Modify a physical-virtual mapping in the pv table
524 */
525
526 /*__inline */ u_int
527 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
528 pmap_t pmap;
529 vaddr_t va;
530 struct pv_entry *pv;
531 u_int bic_mask;
532 u_int eor_mask;
533 {
534 struct pv_entry *npv;
535 u_int s;
536 u_int flags, oflags;
537
538 PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
539 pmap, va, pv, bic_mask, eor_mask));
540
541 #ifdef DIAGNOSTIC
542 if (!pmap_initialized)
543 panic("pmap_modify_pv: !pmap_initialized");
544 #endif
545
546 s = splvm();
547
548 PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
549 pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));
550
551 /*
552 * There is at least one VA mapping this page.
553 */
554
555 for (npv = pv; npv; npv = npv->pv_next) {
556 if (pmap == npv->pv_pmap && va == npv->pv_va) {
557 oflags = npv->pv_flags;
558 npv->pv_flags = flags =
559 ((oflags & ~bic_mask) ^ eor_mask);
560 if ((flags ^ oflags) & PT_W) {
561 if (flags & PT_W)
562 ++pmap->pm_stats.wired_count;
563 else
564 --pmap->pm_stats.wired_count;
565 }
566 PDEBUG(0, printf("done flags=%08x\n", flags));
567 splx(s);
568 return (oflags);
569 }
570 }
571
572 PDEBUG(0, printf("done.\n"));
573 splx(s);
574 return (0);
575 }
576
577
578 /*
579 * Map the specified level 2 pagetable into the level 1 page table for
580 * the given pmap to cover a chunk of virtual address space starting from the
581 * address specified.
582 */
583 static /*__inline*/ void
584 pmap_map_in_l1(pmap, va, l2pa)
585 pmap_t pmap;
586 vaddr_t va, l2pa;
587 {
588 vaddr_t ptva;
589
590 /* Calculate the index into the L1 page table. */
591 ptva = (va >> PDSHIFT) & ~3;
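	/*
	 * Each ARM L1 entry covers 1MB of VA via a 1KB coarse L2 table, so
	 * the 4KB page at l2pa supplies four consecutive L1 slots (4MB of
	 * VA); masking with ~3 rounds the index down to that group of four.
	 */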
592
593 PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
594 pmap->pm_pdir, L1_PTE(l2pa), ptva));
595
596 /* Map page table into the L1. */
597 pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
598 pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
599 pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
600 pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
601
602 PDEBUG(0, printf("pt self reference %lx in %lx\n",
603 L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
604
605 /* Map the page table into the page table area. */
606 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);
607
608 /* XXX should be a purge */
609 /* cpu_tlb_flushD();*/
610 }
611
612 #if 0
613 static /*__inline*/ void
614 pmap_unmap_in_l1(pmap, va)
615 pmap_t pmap;
616 vaddr_t va;
617 {
618 vaddr_t ptva;
619
620 /* Calculate the index into the L1 page table. */
621 ptva = (va >> PDSHIFT) & ~3;
622
623 /* Unmap page table from the L1. */
624 pmap->pm_pdir[ptva + 0] = 0;
625 pmap->pm_pdir[ptva + 1] = 0;
626 pmap->pm_pdir[ptva + 2] = 0;
627 pmap->pm_pdir[ptva + 3] = 0;
628
629 /* Unmap the page table from the page table area. */
630 *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
631
632 /* XXX should be a purge */
633 /* cpu_tlb_flushD();*/
634 }
635 #endif
636
637
638 /*
639 * Used to map a range of physical addresses into kernel
640 * virtual address space.
641 *
642 * For now, VM is already on, we only need to map the
643 * specified memory.
644 */
645 vaddr_t
646 pmap_map(va, spa, epa, prot)
647 vaddr_t va, spa, epa;
648 int prot;
649 {
650 while (spa < epa) {
651 pmap_enter(pmap_kernel(), va, spa, prot, 0);
652 va += NBPG;
653 spa += NBPG;
654 }
655 return(va);
656 }
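/*
 * For example (illustrative addresses only), a caller could map one page
 * of a device at physical address 0x40000000 read/write with:
 *
 *	va = pmap_map(va, 0x40000000, 0x40000000 + NBPG,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * and would get back the first virtual address after the new mapping.
 */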
657
658
659 /*
660 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
661 *
662 * bootstrap the pmap system. This is called from initarm and allows
    663  * the pmap system to initialise any structures it requires.
    664  *
    665  * Currently this sets up the kernel_pmap that is statically allocated
    666  * and also allocates virtual addresses for certain page hooks.
    667  * Currently two page hooks are allocated; they are used to zero and
    668  * copy physical pages of memory.
    669  * It also initialises the start and end address of the kernel data space.
670 */
671 extern paddr_t physical_freestart;
672 extern paddr_t physical_freeend;
673
674 struct pv_entry *boot_pvent;
675 char *boot_attrs;
676
677 void
678 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
679 pd_entry_t *kernel_l1pt;
680 pv_addr_t kernel_ptpt;
681 {
682 int loop;
683 paddr_t start, end;
684 #if NISADMA > 0
685 paddr_t istart;
686 psize_t isize;
687 #endif
688 vsize_t size;
689
690 kernel_pmap = &kernel_pmap_store;
691
692 kernel_pmap->pm_pdir = kernel_l1pt;
693 kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
694 kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
695 simple_lock_init(&kernel_pmap->pm_lock);
696 kernel_pmap->pm_count = 1;
697
698 /*
699 * Initialize PAGE_SIZE-dependent variables.
700 */
701 uvm_setpagesize();
702
703 npages = 0;
704 loop = 0;
705 while (loop < bootconfig.dramblocks) {
706 start = (paddr_t)bootconfig.dram[loop].address;
707 end = start + (bootconfig.dram[loop].pages * NBPG);
708 if (start < physical_freestart)
709 start = physical_freestart;
710 if (end > physical_freeend)
711 end = physical_freeend;
712 #if 0
713 printf("%d: %lx -> %lx\n", loop, start, end - 1);
714 #endif
715 #if NISADMA > 0
716 if (pmap_isa_dma_range_intersect(start, end - start,
717 &istart, &isize)) {
718 /*
719 * Place the pages that intersect with the
720 * ISA DMA range onto the ISA DMA free list.
721 */
722 #if 0
723 printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
724 istart + isize - 1);
725 #endif
726 uvm_page_physload(atop(istart),
727 atop(istart + isize), atop(istart),
728 atop(istart + isize), VM_FREELIST_ISADMA);
729 npages += atop(istart + isize) - atop(istart);
730
731 /*
732 * Load the pieces that come before
733 * the intersection into the default
734 * free list.
735 */
736 if (start < istart) {
737 #if 0
738 printf(" BEFORE 0x%lx -> 0x%lx\n",
739 start, istart - 1);
740 #endif
741 uvm_page_physload(atop(start),
742 atop(istart), atop(start),
743 atop(istart), VM_FREELIST_DEFAULT);
744 npages += atop(istart) - atop(start);
745 }
746
747 /*
748 * Load the pieces that come after
749 * the intersection into the default
750 * free list.
751 */
752 if ((istart + isize) < end) {
753 #if 0
754 printf(" AFTER 0x%lx -> 0x%lx\n",
755 (istart + isize), end - 1);
756 #endif
757 uvm_page_physload(atop(istart + isize),
758 atop(end), atop(istart + isize),
759 atop(end), VM_FREELIST_DEFAULT);
760 npages += atop(end) - atop(istart + isize);
761 }
762 } else {
763 uvm_page_physload(atop(start), atop(end),
764 atop(start), atop(end), VM_FREELIST_DEFAULT);
765 npages += atop(end) - atop(start);
766 }
767 #else /* NISADMA > 0 */
768 uvm_page_physload(atop(start), atop(end),
769 atop(start), atop(end), VM_FREELIST_DEFAULT);
770 npages += atop(end) - atop(start);
771 #endif /* NISADMA > 0 */
772 ++loop;
773 }
774
775 #ifdef MYCROFT_HACK
776 printf("npages = %ld\n", npages);
777 #endif
778
779 virtual_start = KERNEL_VM_BASE;
780 virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
781
782 ALLOC_PAGE_HOOK(page_hook0, NBPG);
783 ALLOC_PAGE_HOOK(page_hook1, NBPG);
784
785 /*
786 * The mem special device needs a virtual hook but we don't
787 * need a pte
788 */
789 memhook = (char *)virtual_start;
790 virtual_start += NBPG;
791
792 msgbufaddr = (caddr_t)virtual_start;
793 msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
794 virtual_start += round_page(MSGBUFSIZE);
795
796 size = npages * sizeof(struct pv_entry);
797 boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
798 bzero(boot_pvent, size);
799 size = npages * sizeof(char);
800 boot_attrs = (char *)uvm_pageboot_alloc(size);
801 bzero(boot_attrs, size);
802
803 cpu_cache_cleanD();
804 }
805
806 /*
807 * void pmap_init(void)
808 *
809 * Initialize the pmap module.
810 * Called by vm_init() in vm/vm_init.c in order to initialise
811 * any structures that the pmap system needs to map virtual memory.
812 */
813
814 extern int physmem;
815
816 void
817 pmap_init()
818 {
819 int lcv;
820
821 #ifdef MYCROFT_HACK
822 printf("physmem = %d\n", physmem);
823 #endif
824
825 /*
    826  * Set the available memory vars - these do not map to real memory
    827  * addresses, and cannot, as the physical memory is fragmented.
    828  * They are used by ps for %mem calculations.
    829  * One could argue whether this should be the entire memory or just
    830  * the memory that is usable in a user process.
831 */
832 avail_start = 0;
833 avail_end = physmem * NBPG;
834
835 /* Set up pmap info for physsegs. */
836 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
837 vm_physmem[lcv].pmseg.pvent = boot_pvent;
838 boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
839 vm_physmem[lcv].pmseg.attrs = boot_attrs;
840 boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
841 }
842 #ifdef MYCROFT_HACK
843 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
844 printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
845 lcv,
846 vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
847 vm_physmem[lcv].start, vm_physmem[lcv].end);
848 }
849 #endif
850 TAILQ_INIT(&pv_page_freelist);
851
852 #ifdef DIAGNOSTIC
853 /* Now it is safe to enable pv_entry recording. */
854 pmap_initialized = TRUE;
855 #endif
856
857 /* Initialise our L1 page table queues and counters */
858 SIMPLEQ_INIT(&l1pt_static_queue);
859 l1pt_static_queue_count = 0;
860 l1pt_static_create_count = 0;
861 SIMPLEQ_INIT(&l1pt_queue);
862 l1pt_queue_count = 0;
863 l1pt_create_count = 0;
864 l1pt_reuse_count = 0;
865 }
866
867 /*
868 * pmap_postinit()
869 *
870 * This routine is called after the vm and kmem subsystems have been
871 * initialised. This allows the pmap code to perform any initialisation
    872  * that can only be done once the memory allocation is in place.
873 */
874
875 void
876 pmap_postinit()
877 {
878 int loop;
879 struct l1pt *pt;
880
881 #ifdef PMAP_STATIC_L1S
882 for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
883 #else /* PMAP_STATIC_L1S */
884 for (loop = 0; loop < max_processes; ++loop) {
885 #endif /* PMAP_STATIC_L1S */
886 /* Allocate a L1 page table */
887 pt = pmap_alloc_l1pt();
888 if (!pt)
889 panic("Cannot allocate static L1 page tables\n");
890
891 /* Clean it */
892 bzero((void *)pt->pt_va, PD_SIZE);
893 pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
894 /* Add the page table to the queue */
895 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
896 ++l1pt_static_queue_count;
897 ++l1pt_static_create_count;
898 }
899 }
900
901
902 /*
903 * Create and return a physical map.
904 *
905 * If the size specified for the map is zero, the map is an actual physical
906 * map, and may be referenced by the hardware.
907 *
908 * If the size specified is non-zero, the map will be used in software only,
909 * and is bounded by that size.
910 */
911
912 pmap_t
913 pmap_create()
914 {
915 pmap_t pmap;
916
917 /* Allocate memory for pmap structure and zero it */
918 pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
919 bzero(pmap, sizeof(*pmap));
920
921 /* Now init the machine part of the pmap */
922 pmap_pinit(pmap);
923 return(pmap);
924 }
925
926 /*
927 * pmap_alloc_l1pt()
928 *
    929  * This routine allocates physical and virtual memory for an L1 page table
    930  * and wires it.
    931  * An l1pt structure is returned to describe the allocated page table.
932 *
933 * This routine is allowed to fail if the required memory cannot be allocated.
934 * In this case NULL is returned.
935 */
936
937 struct l1pt *
938 pmap_alloc_l1pt(void)
939 {
940 paddr_t pa;
941 vaddr_t va;
942 struct l1pt *pt;
943 int error;
944 vm_page_t m;
945 pt_entry_t *pte;
946
947 /* Allocate virtual address space for the L1 page table */
948 va = uvm_km_valloc(kernel_map, PD_SIZE);
949 if (va == 0) {
950 #ifdef DIAGNOSTIC
951 printf("pmap: Cannot allocate pageable memory for L1\n");
952 #endif /* DIAGNOSTIC */
953 return(NULL);
954 }
955
956 /* Allocate memory for the l1pt structure */
957 pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
958
959 /*
960 * Allocate pages from the VM system.
961 */
962 TAILQ_INIT(&pt->pt_plist);
963 error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
964 PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
965 if (error) {
966 #ifdef DIAGNOSTIC
967 printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
968 error);
969 #endif /* DIAGNOSTIC */
970 /* Release the resources we already have claimed */
971 free(pt, M_VMPMAP);
972 uvm_km_free(kernel_map, va, PD_SIZE);
973 return(NULL);
974 }
975
976 /* Map our physical pages into our virtual space */
977 pt->pt_va = va;
978 m = pt->pt_plist.tqh_first;
979 while (m && va < (pt->pt_va + PD_SIZE)) {
980 pa = VM_PAGE_TO_PHYS(m);
981
982 pmap_enter(pmap_kernel(), va, pa,
983 VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
984
985 /* Revoke cacheability and bufferability */
986 /* XXX should be done better than this */
987 pte = pmap_pte(pmap_kernel(), va);
988 *pte = *pte & ~(PT_C | PT_B);
989
990 va += NBPG;
991 m = m->pageq.tqe_next;
992 }
993
994 #ifdef DIAGNOSTIC
995 if (m)
996 panic("pmap_alloc_l1pt: pglist not empty\n");
997 #endif /* DIAGNOSTIC */
998
999 pt->pt_flags = 0;
1000 return(pt);
1001 }
1002
1003 /*
1004 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1005 */
1006 void
1007 pmap_free_l1pt(pt)
1008 struct l1pt *pt;
1009 {
   1010 	/* Separate the physical memory from the virtual space */
1011 pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
1012
1013 /* Return the physical memory */
1014 uvm_pglistfree(&pt->pt_plist);
1015
1016 /* Free the virtual space */
1017 uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1018
1019 /* Free the l1pt structure */
1020 free(pt, M_VMPMAP);
1021 }
1022
1023 /*
1024 * Allocate a page directory.
1025 * This routine will either allocate a new page directory from the pool
1026 * of L1 page tables currently held by the kernel or it will allocate
1027 * a new one via pmap_alloc_l1pt().
1028 * It will then initialise the l1 page table for use.
1029 */
1030 int
1031 pmap_allocpagedir(pmap)
1032 struct pmap *pmap;
1033 {
1034 paddr_t pa;
1035 struct l1pt *pt;
1036 pt_entry_t *pte;
1037
1038 PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1039
1040 /* Do we have any spare L1's lying around ? */
1041 if (l1pt_static_queue_count) {
1042 --l1pt_static_queue_count;
1043 pt = l1pt_static_queue.sqh_first;
1044 SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1045 } else if (l1pt_queue_count) {
1046 --l1pt_queue_count;
1047 pt = l1pt_queue.sqh_first;
1048 SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1049 ++l1pt_reuse_count;
1050 } else {
1051 pt = pmap_alloc_l1pt();
1052 if (!pt)
1053 return(ENOMEM);
1054 ++l1pt_create_count;
1055 }
1056
1057 /* Store the pointer to the l1 descriptor in the pmap. */
1058 pmap->pm_l1pt = pt;
1059
1060 /* Get the physical address of the start of the l1 */
1061 pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
1062
1063 /* Store the virtual address of the l1 in the pmap. */
1064 pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1065
1066 /* Clean the L1 if it is dirty */
1067 if (!(pt->pt_flags & PTFLAG_CLEAN))
1068 bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1069
1070 /* Do we already have the kernel mappings ? */
1071 if (!(pt->pt_flags & PTFLAG_KPT)) {
1072 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1073
1074 bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1075 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1076 KERNEL_PD_SIZE);
1077 pt->pt_flags |= PTFLAG_KPT;
1078 }
1079
1080 /* Allocate a page table to map all the page tables for this pmap */
1081
1082 #ifdef DIAGNOSTIC
1083 if (pmap->pm_vptpt) {
1084 /* XXX What if we have one already ? */
1085 panic("pmap_allocpagedir: have pt already\n");
1086 }
1087 #endif /* DIAGNOSTIC */
1088 pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1089 if (pmap->pm_vptpt == 0) {
1090 pmap_freepagedir(pmap);
1091 return(ENOMEM);
1092 }
1093
1094 (void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
1095 pmap->pm_pptpt &= PG_FRAME;
1096 /* Revoke cacheability and bufferability */
1097 /* XXX should be done better than this */
1098 pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
1099 *pte = *pte & ~(PT_C | PT_B);
1100
1101 /* Wire in this page table */
1102 pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
1103
1104 pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1105
1106 /*
1107 * Map the kernel page tables for 0xf0000000 +
1108 * into the page table used to map the
1109 * pmap's page tables
1110 */
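	/*
	 * The source is the kernel portion of the currently active pmap's
	 * PT page table, reached through the self-map: within the
	 * PROCESS_PAGE_TBLS_BASE region the PTEs mapping virtual address x
	 * live at PROCESS_PAGE_TBLS_BASE + (x >> (PGSHIFT - 2)), so taking
	 * x = PROCESS_PAGE_TBLS_BASE locates the PT page table itself, and
	 * ((PD_SIZE - KERNEL_PD_SIZE) >> 2) skips to the kernel entries.
	 */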
1111 bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1112 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1113 + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1114 (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1115 (KERNEL_PD_SIZE >> 2));
1116
1117 pmap->pm_count = 1;
1118 simple_lock_init(&pmap->pm_lock);
1119
1120 return(0);
1121 }
1122
1123
1124 /*
1125 * Initialize a preallocated and zeroed pmap structure,
1126 * such as one in a vmspace structure.
1127 */
1128
1129 static int pmap_pagedir_ident; /* tsleep() ident */
1130
1131 void
1132 pmap_pinit(pmap)
1133 struct pmap *pmap;
1134 {
1135 PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1136
1137 /* Keep looping until we succeed in allocating a page directory */
1138 while (pmap_allocpagedir(pmap) != 0) {
1139 /*
1140 * Ok we failed to allocate a suitable block of memory for an
1141 * L1 page table. This means that either:
1142 * 1. 16KB of virtual address space could not be allocated
1143 * 2. 16KB of physically contiguous memory on a 16KB boundary
1144 * could not be allocated.
1145 *
   1146 		 * Since we cannot fail, we will sleep for a while and try
   1147 		 * again. Although we will be woken when another page table
   1148 		 * is freed, other memory releases and swapping may happen in
   1149 		 * the meantime that allow us to succeed, so we keep retrying
   1150 		 * at regular intervals just in case.
1151 */
1152
1153 if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
1154 "l1ptwait", 1000) == EWOULDBLOCK)
1155 printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
1156 }
1157
1158 /* Map zero page for the pmap. This will also map the L2 for it */
1159 pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1160 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1161 }
1162
1163
1164 void
1165 pmap_freepagedir(pmap)
1166 pmap_t pmap;
1167 {
1168 /* Free the memory used for the page table mapping */
1169 if (pmap->pm_vptpt != 0)
1170 uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1171
1172 /* junk the L1 page table */
1173 if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1174 /* Add the page table to the queue */
1175 SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1176 ++l1pt_static_queue_count;
1177 /* Wake up any sleeping processes waiting for a l1 page table */
1178 wakeup((caddr_t)&pmap_pagedir_ident);
1179 } else if (l1pt_queue_count < 8) {
1180 /* Add the page table to the queue */
1181 SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1182 ++l1pt_queue_count;
1183 /* Wake up any sleeping processes waiting for a l1 page table */
1184 wakeup((caddr_t)&pmap_pagedir_ident);
1185 } else
1186 pmap_free_l1pt(pmap->pm_l1pt);
1187 }
1188
1189
1190 /*
1191 * Retire the given physical map from service.
1192 * Should only be called if the map contains no valid mappings.
1193 */
1194
1195 void
1196 pmap_destroy(pmap)
1197 pmap_t pmap;
1198 {
1199 int count;
1200
1201 if (pmap == NULL)
1202 return;
1203
1204 PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1205 simple_lock(&pmap->pm_lock);
1206 count = --pmap->pm_count;
1207 simple_unlock(&pmap->pm_lock);
1208 if (count == 0) {
1209 pmap_release(pmap);
1210 free((caddr_t)pmap, M_VMPMAP);
1211 }
1212 }
1213
1214
1215 /*
1216 * Release any resources held by the given physical map.
1217 * Called when a pmap initialized by pmap_pinit is being released.
1218 * Should only be called if the map contains no valid mappings.
1219 */
1220
1221 void
1222 pmap_release(pmap)
1223 pmap_t pmap;
1224 {
1225 struct vm_page *page;
1226 pt_entry_t *pte;
1227 int loop;
1228
1229 PDEBUG(0, printf("pmap_release(%p)\n", pmap));
1230
1231 #if 0
1232 if (pmap->pm_count != 1) /* XXX: needs sorting */
1233 panic("pmap_release count %d", pmap->pm_count);
1234 #endif
1235
1236 /* Remove the zero page mapping */
1237 pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1238
1239 /*
1240 * Free any page tables still mapped
   1241 	 * This is only temporary until pmap_enter can count the number
1242 * of mappings made in a page table. Then pmap_remove() can
1243 * reduce the count and free the pagetable when the count
1244 * reaches zero.
1245 */
1246 for (loop = 0; loop < (((PD_SIZE - KERNEL_PD_SIZE) >> 4) - 1); ++loop) {
1247 pte = (pt_entry_t *)(pmap->pm_vptpt + loop * 4);
1248 if (*pte != 0) {
1249 PDEBUG(0, printf("%x: pte=%p:%08x\n", loop, pte, *pte));
1250 page = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
1251 if (page == NULL)
1252 panic("pmap_release: bad address for phys page");
1253 uvm_pagefree(page);
1254 }
1255 }
1256 /* Free the page dir */
1257 pmap_freepagedir(pmap);
1258 }
1259
1260
1261 /*
1262 * void pmap_reference(pmap_t pmap)
1263 *
1264 * Add a reference to the specified pmap.
1265 */
1266
1267 void
1268 pmap_reference(pmap)
1269 pmap_t pmap;
1270 {
1271 if (pmap == NULL)
1272 return;
1273
1274 simple_lock(&pmap->pm_lock);
1275 pmap->pm_count++;
1276 simple_unlock(&pmap->pm_lock);
1277 }
1278
1279 /*
1280 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1281 *
1282 * Return the start and end addresses of the kernel's virtual space.
   1283  * These values are set up in pmap_bootstrap and are updated as pages
1284 * are allocated.
1285 */
1286
1287 void
1288 pmap_virtual_space(start, end)
1289 vaddr_t *start;
1290 vaddr_t *end;
1291 {
1292 *start = virtual_start;
1293 *end = virtual_end;
1294 }
1295
1296
1297 /*
1298 * Activate the address space for the specified process. If the process
1299 * is the current process, load the new MMU context.
1300 */
1301 void
1302 pmap_activate(p)
1303 struct proc *p;
1304 {
1305 pmap_t pmap = p->p_vmspace->vm_map.pmap;
1306 struct pcb *pcb = &p->p_addr->u_pcb;
1307
1308 (void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
1309 (paddr_t *)&pcb->pcb_pagedir);
1310
1311 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1312 p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1313
1314 if (p == curproc) {
1315 PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1316 setttb((u_int)pcb->pcb_pagedir);
1317 }
1318 #if 0
1319 pmap->pm_pdchanged = FALSE;
1320 #endif
1321 }
1322
1323
1324 /*
1325 * Deactivate the address space of the specified process.
1326 */
1327 void
1328 pmap_deactivate(p)
1329 struct proc *p;
1330 {
1331 }
1332
1333
1334 /*
1335 * pmap_clean_page()
1336 *
1337 * This is a local function used to work out the best strategy to clean
1338 * a single page referenced by its entry in the PV table. It's used by
   1339  * pmap_copy_page, pmap_zero_page and maybe some others later on.
1340 *
1341 * Its policy is effectively:
1342 * o If there are no mappings, we don't bother doing anything with the cache.
1343 * o If there is one mapping, we clean just that page.
1344 * o If there are multiple mappings, we clean the entire cache.
1345 *
1346 * So that some functions can be further optimised, it returns 0 if it didn't
1347 * clean the entire cache, or 1 if it did.
1348 *
1349 * XXX One bug in this routine is that if the pv_entry has a single page
1350 * mapped at 0x00000000 a whole cache clean will be performed rather than
   1351  * just the 1 page. This should not occur in everyday use, and if it does
   1352  * the result is merely a less efficient clean for that page.
1353 */
1354 static int
1355 pmap_clean_page(pv)
1356 struct pv_entry *pv;
1357 {
1358 int s;
1359 int cache_needs_cleaning = 0;
1360 vaddr_t page_to_clean = 0;
1361
1362 /* Go to splvm() so we get exclusive lock for a mo */
1363 s = splvm();
1364 if (pv->pv_pmap) {
1365 cache_needs_cleaning = 1;
1366 if (!pv->pv_next)
1367 page_to_clean = pv->pv_va;
1368 }
1369 splx(s);
1370
1371 /* Do cache ops outside the splvm. */
1372 if (page_to_clean)
1373 cpu_cache_purgeID_rng(page_to_clean, NBPG);
1374 else if (cache_needs_cleaning) {
1375 cpu_cache_purgeID();
1376 return (1);
1377 }
1378 return (0);
1379 }
1380
1381 /*
1382 * pmap_find_pv()
1383 *
1384 * This is a local function that finds a PV entry for a given physical page.
1385 * This is a common op, and this function removes loads of ifdefs in the code.
1386 */
1387 static __inline struct pv_entry *
1388 pmap_find_pv(phys)
1389 paddr_t phys;
1390 {
1391 int bank, off;
1392 struct pv_entry *pv;
1393
1394 #ifdef DIAGNOSTIC
1395 if (!pmap_initialized)
1396 panic("pmap_find_pv: !pmap_initialized");
1397 #endif
1398
1399 if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
1400 panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
1401 pv = &vm_physmem[bank].pmseg.pvent[off];
1402 return (pv);
1403 }
1404
1405 /*
1406 * pmap_zero_page()
1407 *
1408 * Zero a given physical page by mapping it at a page hook point.
   1409  * In doing the zero page op, the page we zero is mapped cacheable, since
   1410  * on StrongARM accesses to non-cached pages are non-burst, making writing
   1411  * _any_ bulk data very slow.
1412 */
1413 void
1414 pmap_zero_page(phys)
1415 paddr_t phys;
1416 {
1417 struct pv_entry *pv;
1418
   1419 	/* Get an entry for this page, and clean it. */
1420 pv = pmap_find_pv(phys);
1421 pmap_clean_page(pv);
1422
1423 /*
1424 * Hook in the page, zero it, and purge the cache for that
1425 * zeroed page. Invalidate the TLB as needed.
1426 */
1427 *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1428 cpu_tlb_flushD_SE(page_hook0.va);
1429 bzero_page(page_hook0.va);
1430 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1431 }
1432
1433 /*
1434 * pmap_copy_page()
1435 *
1436 * Copy one physical page into another, by mapping the pages into
   1437  * hook points. The same comment regarding cacheability as in
1438 * pmap_zero_page also applies here.
1439 */
1440 void
1441 pmap_copy_page(src, dest)
1442 paddr_t src;
1443 paddr_t dest;
1444 {
1445 struct pv_entry *src_pv, *dest_pv;
1446
1447 /* Get PV entries for the pages, and clean them if needed. */
1448 src_pv = pmap_find_pv(src);
1449 dest_pv = pmap_find_pv(dest);
1450 if (!pmap_clean_page(src_pv))
1451 pmap_clean_page(dest_pv);
1452
1453 /*
1454 * Map the pages into the page hook points, copy them, and purge
1455 * the cache for the appropriate page. Invalidate the TLB
1456 * as required.
1457 */
1458 *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1459 *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1460 cpu_tlb_flushD_SE(page_hook0.va);
1461 cpu_tlb_flushD_SE(page_hook1.va);
1462 bcopy_page(page_hook0.va, page_hook1.va);
1463 cpu_cache_purgeD_rng(page_hook0.va, NBPG);
1464 cpu_cache_purgeD_rng(page_hook1.va, NBPG);
1465 }
1466
1467 /*
   1468  * paddr_t pmap_next_phys_page(paddr_t addr)
   1469  *
   1470  * Return the physical address of the next page of DRAM after addr,
   1471  * or 0 if there are no further physical pages.
1472 */
1473
1474 paddr_t
1475 pmap_next_phys_page(addr)
1476 paddr_t addr;
1477
1478 {
1479 int loop;
1480
1481 if (addr < bootconfig.dram[0].address)
1482 return(bootconfig.dram[0].address);
1483
1484 loop = 0;
1485
1486 while (bootconfig.dram[loop].address != 0
1487 && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
1488 ++loop;
1489
1490 if (bootconfig.dram[loop].address == 0)
1491 return(0);
1492
1493 addr += NBPG;
1494
1495 if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
1496 if (bootconfig.dram[loop + 1].address == 0)
1497 return(0);
1498 addr = bootconfig.dram[loop + 1].address;
1499 }
1500
1501 return(addr);
1502 }
1503
1504 #if 0
1505 void
1506 pmap_pte_addref(pmap, va)
1507 pmap_t pmap;
1508 vaddr_t va;
1509 {
1510 pd_entry_t *pde;
1511 paddr_t pa;
1512 struct vm_page *m;
1513
1514 if (pmap == pmap_kernel())
1515 return;
1516
1517 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1518 pa = pmap_pte_pa(pde);
1519 m = PHYS_TO_VM_PAGE(pa);
1520 ++m->wire_count;
1521 #ifdef MYCROFT_HACK
1522 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1523 pmap, va, pde, pa, m, m->wire_count);
1524 #endif
1525 }
1526
1527 void
1528 pmap_pte_delref(pmap, va)
1529 pmap_t pmap;
1530 vaddr_t va;
1531 {
1532 pd_entry_t *pde;
1533 paddr_t pa;
1534 struct vm_page *m;
1535
1536 if (pmap == pmap_kernel())
1537 return;
1538
1539 pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1540 pa = pmap_pte_pa(pde);
1541 m = PHYS_TO_VM_PAGE(pa);
1542 --m->wire_count;
1543 #ifdef MYCROFT_HACK
1544 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1545 pmap, va, pde, pa, m, m->wire_count);
1546 #endif
1547 if (m->wire_count == 0) {
1548 #ifdef MYCROFT_HACK
1549 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1550 pmap, va, pde, pa, m);
1551 #endif
1552 pmap_unmap_in_l1(pmap, va);
1553 uvm_pagefree(m);
1554 --pmap->pm_stats.resident_count;
1555 }
1556 }
1557 #else
1558 #define pmap_pte_addref(pmap, va)
1559 #define pmap_pte_delref(pmap, va)
1560 #endif
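/*
 * With the reference counting stubbed out above, L2 page tables wired in
 * by pmap_enter() are not freed as their last mappings disappear; they
 * are reclaimed wholesale in pmap_release() (see the comment there).
 */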
1561
1562 /*
1563 * Since we have a virtually indexed cache, we may need to inhibit caching if
1564 * there is more than one mapping and at least one of them is writable.
1565 * Since we purge the cache on every context switch, we only need to check for
1566 * other mappings within the same pmap, or kernel_pmap.
1567 * This function is also called when a page is unmapped, to possibly reenable
1568 * caching on any remaining mappings.
1569 */
1570 void
1571 pmap_vac_me_harder(pmap, pv)
1572 pmap_t pmap;
1573 struct pv_entry *pv;
1574 {
1575 struct pv_entry *npv;
1576 pt_entry_t *pte;
1577 int entries = 0;
1578 int writeable = 0;
1579
1580 if (pv->pv_pmap == NULL)
1581 return;
1582
1583 /*
1584 * Count mappings and writable mappings in this pmap.
1585 * Keep a pointer to the first one.
1586 */
1587 for (npv = pv; npv; npv = npv->pv_next) {
1588 /* Count mappings in the same pmap */
1589 if (pmap == npv->pv_pmap) {
1590 if (entries++ == 0)
1591 pv = npv;
1592 /* Writeable mappings */
1593 if (npv->pv_flags & PT_Wr)
1594 ++writeable;
1595 }
1596 }
1597
1598 /*
1599 * Enable or disable caching as necessary.
1600 * We do a quick check of the first PTE to avoid walking the list if
1601 * we're already in the right state.
1602 */
1603 if (entries > 1 && writeable) {
1604 pte = pmap_pte(pmap, pv->pv_va);
1605 if (~*pte & (PT_C | PT_B))
1606 return;
1607 *pte = *pte & ~(PT_C | PT_B);
1608 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1609 if (pmap == npv->pv_pmap) {
1610 pte = pmap_pte(pmap, npv->pv_va);
1611 *pte = *pte & ~(PT_C | PT_B);
1612 }
1613 }
1614 } else if (entries > 0) {
1615 pte = pmap_pte(pmap, pv->pv_va);
1616 if (*pte & (PT_C | PT_B))
1617 return;
1618 *pte = *pte | (PT_C | PT_B);
1619 for (npv = pv->pv_next; npv; npv = npv->pv_next) {
1620 if (pmap == npv->pv_pmap) {
1621 pte = pmap_pte(pmap, npv->pv_va);
1622 *pte = *pte | (PT_C | PT_B);
1623 }
1624 }
1625 }
1626 }
1627
1628 /*
1629 * pmap_remove()
1630 *
1631 * pmap_remove is responsible for nuking a number of mappings for a range
1632 * of virtual address space in the current pmap. To do this efficiently
1633 * is interesting, because in a number of cases a wide virtual address
1634 * range may be supplied that contains few actual mappings. So, the
1635 * optimisations are:
1636 * 1. Try and skip over hunks of address space for which an L1 entry
1637 * does not exist.
1638 * 2. Build up a list of pages we've hit, up to a maximum, so we can
1639 * maybe do just a partial cache clean. This path of execution is
1640 * complicated by the fact that the cache must be flushed _before_
1641 * the PTE is nuked, being a VAC :-)
1642 * 3. Maybe later fast-case a single page, but I don't think this is
1643 * going to make _that_ much difference overall.
1644 */
1645
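/*
 * Up to this many pages are cleaned from the cache individually during a
 * remove; beyond that we purge the whole cache once and ignore the list.
 */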
1646 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
1647
1648 void
1649 pmap_remove(pmap, sva, eva)
1650 pmap_t pmap;
1651 vaddr_t sva;
1652 vaddr_t eva;
1653 {
1654 int cleanlist_idx = 0;
1655 struct pagelist {
1656 vaddr_t va;
1657 pt_entry_t *pte;
1658 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1659 pt_entry_t *pte = 0;
1660 paddr_t pa;
1661 int pmap_active;
1662 struct pv_entry *pv;
1663
1664 /* Exit quick if there is no pmap */
1665 if (!pmap)
1666 return;
1667
1668 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
1669
1670 sva &= PG_FRAME;
1671 eva &= PG_FRAME;
1672
1673 /* Get a page table pointer */
1674 while (sva < eva) {
1675 pte = pmap_pte(pmap, sva);
1676 if (pte)
1677 break;
1678 sva = (sva & PD_MASK) + NBPD;
1679 }
1680
1681 /* Note if the pmap is active thus require cache and tlb cleans */
1682 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1683 || (pmap == kernel_pmap))
1684 pmap_active = 1;
1685 else
1686 pmap_active = 0;
1687
1688 /* Now loop along */
1689 while (sva < eva) {
1690 /* Check if we can move to the next PDE (l1 chunk) */
1691 if (!(sva & PT_MASK))
1692 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1693 sva += NBPD;
1694 pte += arm_byte_to_page(NBPD);
1695 continue;
1696 }
1697
1698 /* We've found a valid PTE, so this page of PTEs has to go. */
1699 if (pmap_pte_v(pte)) {
1700 int bank, off;
1701
1702 /* Update statistics */
1703 --pmap->pm_stats.resident_count;
1704
1705 /*
1706 * Add this page to our cache remove list, if we can.
1707 * If, however the cache remove list is totally full,
1708 * then do a complete cache invalidation taking note
1709 * to backtrack the PTE table beforehand, and ignore
1710 * the lists in future because there's no longer any
1711 * point in bothering with them (we've paid the
1712 * penalty, so will carry on unhindered). Otherwise,
1713 * when we fall out, we just clean the list.
1714 */
1715 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
1716 pa = pmap_pte_pa(pte);
1717
1718 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
1719 /* Add to the clean list. */
1720 cleanlist[cleanlist_idx].pte = pte;
1721 cleanlist[cleanlist_idx].va = sva;
1722 cleanlist_idx++;
1723 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1724 int cnt;
1725
1726 /* Nuke everything if needed. */
1727 if (pmap_active) {
1728 cpu_cache_purgeID();
1729 cpu_tlb_flushID();
1730 }
1731
1732 /*
1733 * Roll back the previous PTE list,
1734 * and zero out the current PTE.
1735 */
1736 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1737 *cleanlist[cnt].pte = 0;
1738 pmap_pte_delref(pmap, cleanlist[cnt].va);
1739 }
1740 *pte = 0;
1741 pmap_pte_delref(pmap, sva);
1742 cleanlist_idx++;
1743 } else {
1744 /*
1745 * We've already nuked the cache and
1746 * TLB, so just carry on regardless,
1747 * and we won't need to do it again
1748 */
1749 *pte = 0;
1750 pmap_pte_delref(pmap, sva);
1751 }
1752
1753 /*
1754 * Update flags. In a number of circumstances,
1755 * we could cluster a lot of these and do a
1756 * number of sequential pages in one go.
1757 */
1758 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1759 pv = &vm_physmem[bank].pmseg.pvent[off];
1760 pmap_remove_pv(pmap, sva, pv);
1761 pmap_vac_me_harder(pmap, pv);
1762 }
1763 }
1764 sva += NBPG;
1765 pte++;
1766 }
1767
1768 /*
   1769 	 * Now, if we've fallen through down to here, there are at most
   1770 	 * PMAP_REMOVE_CLEAN_LIST_SIZE mappings left on the clean list.
1771 */
1772 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
1773 u_int cnt;
1774
1775 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1776 if (pmap_active) {
1777 cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
1778 *cleanlist[cnt].pte = 0;
1779 cpu_tlb_flushID_SE(cleanlist[cnt].va);
1780 } else
1781 *cleanlist[cnt].pte = 0;
1782 pmap_pte_delref(pmap, cleanlist[cnt].va);
1783 }
1784 }
1785 }
1786
1787 /*
1788 * Routine: pmap_remove_all
1789 * Function:
1790 * Removes this physical page from
1791 * all physical maps in which it resides.
1792 * Reflects back modify bits to the pager.
1793 */
1794
1795 void
1796 pmap_remove_all(pa)
1797 paddr_t pa;
1798 {
1799 struct pv_entry *ph, *pv, *npv;
1800 pmap_t pmap;
1801 pt_entry_t *pte;
1802 int s;
1803
1804 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1805
1806 pv = ph = pmap_find_pv(pa);
1807 pmap_clean_page(pv);
1808
1809 s = splvm();
1810
1811 if (ph->pv_pmap == NULL) {
1812 PDEBUG(0, printf("free page\n"));
1813 splx(s);
1814 return;
1815 }
1816
1817 while (pv) {
1818 pmap = pv->pv_pmap;
1819 pte = pmap_pte(pmap, pv->pv_va);
1820
1821 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
1822 pv->pv_va, pv->pv_flags));
1823 #ifdef DEBUG
1824 if (!pte || !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
1825 panic("pmap_remove_all: bad mapping");
1826 #endif /* DEBUG */
1827
1828 /*
1829 * Update statistics
1830 */
1831 --pmap->pm_stats.resident_count;
1832
1833 /* Wired bit */
1834 if (pv->pv_flags & PT_W)
1835 --pmap->pm_stats.wired_count;
1836
1837 /*
1838 * Invalidate the PTEs.
1839 * XXX: should cluster them up and invalidate as many
1840 * as possible at once.
1841 */
1842
1843 #ifdef needednotdone
1844 reduce wiring count on page table pages as references drop
1845 #endif
1846
1847 *pte = 0;
1848 pmap_pte_delref(pmap, pv->pv_va);
1849
1850 npv = pv->pv_next;
1851 if (pv == ph)
1852 ph->pv_pmap = NULL;
1853 else
1854 pmap_free_pv(pv);
1855 pv = npv;
1856 }
1857
1858 splx(s);
1859
1860 PDEBUG(0, printf("done\n"));
1861 cpu_tlb_flushID();
1862 }
1863
1864
1865 /*
1866 * Set the physical protection on the specified range of this map as requested.
1867 */
1868
1869 void
1870 pmap_protect(pmap, sva, eva, prot)
1871 pmap_t pmap;
1872 vaddr_t sva;
1873 vaddr_t eva;
1874 vm_prot_t prot;
1875 {
1876 pt_entry_t *pte = NULL;
1877 int armprot;
1878 int flush = 0;
1879 paddr_t pa;
1880 int bank, off;
1881 struct pv_entry *pv;
1882
1883 /*
1884 * Make sure pmap is valid. -dct
1885 */
1886 if (pmap == NULL)
1887 return;
1888 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
1889 pmap, sva, eva, prot));
1890
1891 if (~prot & VM_PROT_READ) {
1892 /* Just remove the mappings. */
1893 pmap_remove(pmap, sva, eva);
1894 return;
1895 }
1896 if (prot & VM_PROT_WRITE) {
1897 /*
1898 * If this is a read->write transition, just ignore it and let
1899 * uvm_fault() take care of it later.
1900 */
1901 return;
1902 }
1903
1904 sva &= PG_FRAME;
1905 eva &= PG_FRAME;
1906
1907 /*
1908 * We need to acquire a pointer to a page table page before entering
1909 * the following loop.
1910 */
1911 while (sva < eva) {
1912 pte = pmap_pte(pmap, sva);
1913 if (pte)
1914 break;
1915 sva = (sva & PD_MASK) + NBPD;
1916 }
1917
1918 while (sva < eva) {
1919 /* only check once in a while */
1920 if ((sva & PT_MASK) == 0) {
1921 if (!pmap_pde_v(pmap_pde(pmap, sva))) {
1922 /* We can race ahead here, to the next pde. */
1923 sva += NBPD;
1924 pte += arm_byte_to_page(NBPD);
1925 continue;
1926 }
1927 }
1928
1929 if (!pmap_pte_v(pte))
1930 goto next;
1931
1932 flush = 1;
1933
1934 armprot = 0;
1935 if (sva < VM_MAXUSER_ADDRESS)
1936 armprot |= PT_AP(AP_U);
1937 else if (sva < VM_MAX_ADDRESS)
1938 armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
1939 *pte = (*pte & 0xfffff00f) | armprot;
1940
1941 pa = pmap_pte_pa(pte);
1942
1943 /* Get the physical page index */
1944
1945 /* Clear write flag */
1946 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1947 pv = &vm_physmem[bank].pmseg.pvent[off];
1948 (void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
1949 pmap_vac_me_harder(pmap, pv);
1950 }
1951
1952 next:
1953 sva += NBPG;
1954 pte++;
1955 }
1956
1957 if (flush)
1958 cpu_tlb_flushID();
1959 }
1960
1961 /*
1962 * void pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1963 * int flags)
1964 *
1965 * Insert the given physical page (p) at
1966 * the specified virtual address (v) in the
1967 * target physical map with the protection requested.
1968 *
1969 * If specified, the page will be wired down, meaning
1970 * that the related pte can not be reclaimed.
1971 *
1972 * NB: This is the only routine which MAY NOT lazy-evaluate
1973 * or lose information. That is, this routine must actually
1974 * insert this page into the given map NOW.
1975 */
1976
1977 int
1978 pmap_enter(pmap, va, pa, prot, flags)
1979 pmap_t pmap;
1980 vaddr_t va;
1981 paddr_t pa;
1982 vm_prot_t prot;
1983 int flags;
1984 {
1985 pt_entry_t *pte;
1986 u_int npte;
1987 int bank, off;
1988 struct pv_entry *pv = NULL;
1989 paddr_t opa;
1990 int nflags;
1991 boolean_t wired = (flags & PMAP_WIRED) != 0;
1992
1993 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
1994 va, pa, pmap, prot, wired));
1995
1996 #ifdef DIAGNOSTIC
1997 /* Valid address ? */
1998 if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
1999 panic("pmap_enter: too big");
2000 if (pmap != pmap_kernel() && va != 0) {
2001 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2002 panic("pmap_enter: kernel page in user map");
2003 } else {
2004 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2005 panic("pmap_enter: user page in kernel map");
2006 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2007 panic("pmap_enter: entering PT page");
2008 }
2009 #endif
2010
2011 /*
2012 * Get a pointer to the pte for this virtual address. If the
2013 * pte pointer is NULL then we are missing the L2 page table
2014 * so we need to create one.
2015 */
2016 pte = pmap_pte(pmap, va);
2017 if (!pte) {
2018 paddr_t l2pa;
2019 struct vm_page *m;
2020
2021 /* Allocate a page table */
2022 for (;;) {
2023 m = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
2024 if (m != NULL)
2025 break;
2026
2027 /*
2028 * No page available. If we're the kernel
2029 * pmap, we die, since we might not have
2030 * a valid thread context. For user pmaps,
2031 * we assume that we _do_ have a valid thread
2032 * context, so we wait here for the pagedaemon
2033 * to free up some pages.
2034 *
2035 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
2036 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
2037 * XXX SO THIS IS PROBABLY SAFE. In any case,
2038 * XXX other pmap modules claim it is safe to
2039 * XXX sleep here if it's a user pmap.
2040 */
2041 if (pmap == pmap_kernel())
2042 panic("pmap_enter: no free pages");
2043 else
2044 uvm_wait("pmap_enter");
2045 }
2046
2047 /* Wire this page table into the L1. */
2048 l2pa = VM_PAGE_TO_PHYS(m);
2049 pmap_zero_page(l2pa);
2050 pmap_map_in_l1(pmap, va, l2pa);
2051 ++pmap->pm_stats.resident_count;
2052
2053 pte = pmap_pte(pmap, va);
2054 #ifdef DIAGNOSTIC
2055 if (!pte)
2056 panic("pmap_enter: no pte");
2057 #endif
2058 }
2059
2060 nflags = 0;
2061 if (prot & VM_PROT_WRITE)
2062 nflags |= PT_Wr;
2063 if (wired)
2064 nflags |= PT_W;
2065
2066 /* More debugging info */
2067 PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2068 *pte));
2069
2070 /* Is the pte valid ? If so then this page is already mapped */
2071 if (pmap_pte_v(pte)) {
2072 /* Get the physical address of the current page mapped */
2073 opa = pmap_pte_pa(pte);
2074
2075 #ifdef MYCROFT_HACK
2076 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2077 #endif
2078
2079 /* Are we mapping the same page ? */
2080 if (opa == pa) {
2081 /* All we must be doing is changing the protection */
2082 PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2083 va, pa));
2084
2085 /* Has the wiring changed ? */
2086 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2087 pv = &vm_physmem[bank].pmseg.pvent[off];
2088 (void) pmap_modify_pv(pmap, va, pv,
2089 PT_Wr | PT_W, nflags);
2090 }
2091 } else {
2092 /* We are replacing the page with a new one. */
2093 cpu_cache_purgeID_rng(va, NBPG);
2094
2095 PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2096 va, pa, opa));
2097
2098 /*
2099 * If it is part of our managed memory then we
2100 * must remove it from the PV list
2101 */
2102 if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
2103 pv = &vm_physmem[bank].pmseg.pvent[off];
2104 pmap_remove_pv(pmap, va, pv);
2105 }
2106
2107 goto enter;
2108 }
2109 } else {
2110 opa = 0;
2111 pmap_pte_addref(pmap, va);
2112
2113 /* pte is not valid so we must be hooking in a new page */
2114 ++pmap->pm_stats.resident_count;
2115
2116 enter:
2117 /*
2118 * Enter on the PV list if part of our managed memory
2119 */
2120 if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
2121 pv = &vm_physmem[bank].pmseg.pvent[off];
2122 pmap_enter_pv(pmap, va, pv, nflags);
2123 }
2124 }
2125
2126 #ifdef MYCROFT_HACK
2127 if (mycroft_hack)
2128 printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
2129 #endif
2130
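	/*
	 * Note that below a managed page is entered as a valid small page
	 * only to the extent permitted by the access type in `flags';
	 * otherwise the pte is left L2_INVAL (or read-only) so that the
	 * first reference or write faults into pmap_handled_emulation() /
	 * pmap_modified_emulation(), which record PT_H/PT_M in the page
	 * attributes and upgrade the pte.
	 */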
2131 /* Construct the pte, giving the correct access. */
2132 npte = (pa & PG_FRAME);
2133
2134 /* VA 0 is magic. */
2135 if (pmap != pmap_kernel() && va != 0)
2136 npte |= PT_AP(AP_U);
2137
2138 if (bank != -1) {
2139 #ifdef DIAGNOSTIC
2140 if ((flags & VM_PROT_ALL) & ~prot)
2141 panic("pmap_enter: access_type exceeds prot");
2142 #endif
2143 npte |= PT_C | PT_B;
2144 if (flags & VM_PROT_WRITE) {
2145 npte |= L2_SPAGE | PT_AP(AP_W);
2146 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2147 } else if (flags & VM_PROT_ALL) {
2148 npte |= L2_SPAGE;
2149 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2150 } else
2151 npte |= L2_INVAL;
2152 } else {
2153 if (prot & VM_PROT_WRITE)
2154 npte |= L2_SPAGE | PT_AP(AP_W);
2155 else if (prot & VM_PROT_ALL)
2156 npte |= L2_SPAGE;
2157 else
2158 npte |= L2_INVAL;
2159 }
2160
2161 #ifdef MYCROFT_HACK
2162 if (mycroft_hack)
2163 printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2164 #endif
2165
2166 *pte = npte;
2167
2168 if (bank != -1)
2169 pmap_vac_me_harder(pmap, pv);
2170
2171 /* Better flush the TLB ... */
2172 cpu_tlb_flushID_SE(va);
2173
2174 PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2175
2176 return 0;
2177 }
2178
2179 void
2180 pmap_kenter_pa(va, pa, prot)
2181 vaddr_t va;
2182 paddr_t pa;
2183 vm_prot_t prot;
2184 {
2185 pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2186 }
2187
2188 void
2189 pmap_kenter_pgs(va, pgs, npgs)
2190 vaddr_t va;
2191 struct vm_page **pgs;
2192 int npgs;
2193 {
2194 int i;
2195
2196 for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
2197 pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
2198 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
2199 }
2200 }
2201
2202 void
2203 pmap_kremove(va, len)
2204 vaddr_t va;
2205 vsize_t len;
2206 {
2207 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2208 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2209 }
2210 }
2211
2212 /*
2213 * pmap_page_protect:
2214 *
2215 * Lower the permission for all mappings to a given page.
2216 */
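/*
 * Here VM_PROT_READ (with or without EXECUTE) write-protects every mapping
 * via pmap_copy_on_write(), VM_PROT_ALL leaves the mappings alone, and
 * anything less removes all mappings of the page via pmap_remove_all().
 */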
2217
2218 void
2219 pmap_page_protect(pg, prot)
2220 struct vm_page *pg;
2221 vm_prot_t prot;
2222 {
2223 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2224
2225 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
2226
2227 switch(prot) {
2228 case VM_PROT_READ:
2229 case VM_PROT_READ|VM_PROT_EXECUTE:
2230 pmap_copy_on_write(pa);
2231 break;
2232
2233 case VM_PROT_ALL:
2234 break;
2235
2236 default:
2237 pmap_remove_all(pa);
2238 break;
2239 }
2240 }
2241
2242
2243 /*
2244 * Routine: pmap_unwire
2245 * Function: Clear the wired attribute for a map/virtual-address
2246 * pair.
2247 * In/out conditions:
2248 * The mapping must already exist in the pmap.
2249 */
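/*
 * Only the PT_W flag in the page's pv entry is cleared here; the pte
 * itself is left untouched, since wiring does not change the hardware
 * mapping.
 */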
2250
2251 void
2252 pmap_unwire(pmap, va)
2253 pmap_t pmap;
2254 vaddr_t va;
2255 {
2256 pt_entry_t *pte;
2257 paddr_t pa;
2258 int bank, off;
2259 struct pv_entry *pv;
2260
2261 /*
2262 * Make sure pmap is valid. -dct
2263 */
2264 if (pmap == NULL)
2265 return;
2266
2267 /* Get the pte */
2268 pte = pmap_pte(pmap, va);
2269 if (!pte)
2270 return;
2271
2272 /* Extract the physical address of the page */
2273 pa = pmap_pte_pa(pte);
2274
2275 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2276 return;
2277 pv = &vm_physmem[bank].pmseg.pvent[off];
2278 /* Update the wired bit in the pv entry for this page. */
2279 (void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
2280 }
2281
2282 /*
2283 * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
2284 *
2285 * Return the pointer to a page table entry corresponding to the supplied
2286 * virtual address.
2287 *
2288 * The page directory is first checked to make sure that a page table
2289 * for the address in question exists and, if it does, a pointer to the
2290 * entry is returned.
2291 *
2292 * The way this works is that the kernel page tables are mapped
2293 * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
2294 * This allows page tables to be located quickly.
2295 */
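/*
 * With the page tables mapped linearly at a fixed virtual base, the pte
 * for a virtual address is found at a fixed byte offset from that base:
 *
 *	pte = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT - 2)) & ~3));
 */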
2296 pt_entry_t *
2297 pmap_pte(pmap, va)
2298 pmap_t pmap;
2299 vaddr_t va;
2300 {
2301 pt_entry_t *ptp;
2302 pt_entry_t *result;
2303
2304 /* The pmap must be valid */
2305 if (!pmap)
2306 return(NULL);
2307
2308 /* Return the address of the pte */
2309 PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2310 pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2311
2312 /* Do we have a valid pde ? If not we don't have a page table */
2313 if (!pmap_pde_v(pmap_pde(pmap, va))) {
2314 PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2315 pmap_pde(pmap, va)));
2316 return(NULL);
2317 }
2318
2319 PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2320 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2321 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2322 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2323
2324 /*
2325 * If the pmap is the kernel pmap or the pmap is the active one
2326 * then we can just return a pointer to the entry relative to
2327 * PROCESS_PAGE_TBLS_BASE.
2328 * Otherwise we need to map the page tables to an alternative
2329 * address and reference them there.
2330 */
2331 if (pmap == kernel_pmap || pmap->pm_pptpt
2332 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2333 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2334 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2335 ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2336 } else {
2337 struct proc *p = curproc;
2338
2339 /* If we don't have a valid curproc use proc0 */
2340 /* Perhaps we should just use kernel_pmap instead */
2341 if (p == NULL)
2342 p = &proc0;
2343 #ifdef DIAGNOSTIC
2344 /*
2345 * The pmap should always be valid for the process, so
2346 * drop into the debugger if it is not.
2347 */
2348 if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2349 printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2350 va, p, p->p_vmspace);
2351 console_debugger();
2352 }
2353 /*
2354 * The pmap for the current process should be mapped. If it
2355 * is not then we have a problem.
2356 */
2357 if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
2358 (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2359 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2360 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2361 printf("pmap pagetable = P%08lx current = P%08x ",
2362 pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2363 + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2364 (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
2365 PG_FRAME));
2366 printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
2367 panic("pmap_pte: current and pmap mismatch");
2368 }
2369 #endif
2370
2371 ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
2372 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
2373 pmap->pm_pptpt);
2374 cpu_tlb_flushD();
2375 }
2376 PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
2377 ((va >> (PGSHIFT-2)) & ~3)));
2378 result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
2379 return(result);
2380 }
2381
2382 /*
2383 * Routine: pmap_extract
2384 * Function:
2385 * Extract the physical page address associated
2386 * with the given map/virtual_address pair.
2387 */
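/*
 * A typical call, for a caller that only needs the physical address,
 * might look like:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		return;		(no valid mapping at va)
 */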
2388 boolean_t
2389 pmap_extract(pmap, va, pap)
2390 pmap_t pmap;
2391 vaddr_t va;
2392 paddr_t *pap;
2393 {
2394 pt_entry_t *pte;
2395 paddr_t pa;
2396
2397 PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
2398
2399 /*
2400 * Get the pte for this virtual address. If there is no pte
2401 * then there is no page table for this address.
2402 */
2403
2404 pte = pmap_pte(pmap, va);
2405 if (!pte)
2406 return(FALSE);
2407
2408 /* Is the pte valid ? If not then no page is actually mapped here */
2409 if (!pmap_pte_v(pte))
2410 return(FALSE);
2411
2412 /* Return the physical address depending on the PTE type */
2413 /* XXX What about L1 section mappings ? */
2414 if ((*(pte) & L2_MASK) == L2_LPAGE) {
2415 /* Extract the physical address from the pte */
2416 pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
2417
2418 PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
2419 (pa | (va & (L2_LPAGE_SIZE - 1)))));
2420
2421 if (pap != NULL)
2422 *pap = pa | (va & (L2_LPAGE_SIZE - 1));
2423 return (TRUE);
2424 } else {
2425 /* Extract the physical address from the pte */
2426 pa = pmap_pte_pa(pte);
2427
2428 PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
2429 (pa | (va & ~PG_FRAME))));
2430
2431 if (pap != NULL)
2432 *pap = pa | (va & ~PG_FRAME);
2433 return (TRUE);
2434 }
2435 }
2436
2437
2438 /*
2439 * Copy the range specified by src_addr/len from the source map to the
2440 * range dst_addr/len in the destination map.
2441 *
2442 * This routine is only advisory and need not do anything.
2443 */
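/*
 * This implementation takes the advisory option: the call is merely
 * logged via PDEBUG and no mappings are copied.
 */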
2444
2445 void
2446 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2447 pmap_t dst_pmap;
2448 pmap_t src_pmap;
2449 vaddr_t dst_addr;
2450 vsize_t len;
2451 vaddr_t src_addr;
2452 {
2453 PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
2454 dst_pmap, src_pmap, dst_addr, len, src_addr));
2455 }
2456
2457 #if defined(PMAP_DEBUG)
2458 void
2459 pmap_dump_pvlist(phys, m)
2460 vaddr_t phys;
2461 char *m;
2462 {
2463 struct pv_entry *pv;
2464 int bank, off;
2465
2466 if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
2467 printf("INVALID PA\n");
2468 return;
2469 }
2470 pv = &vm_physmem[bank].pmseg.pvent[off];
2471 printf("%s %08lx:", m, phys);
2472 if (pv->pv_pmap == NULL) {
2473 printf(" no mappings\n");
2474 return;
2475 }
2476
2477 for (; pv; pv = pv->pv_next)
2478 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
2479 pv->pv_va, pv->pv_flags);
2480
2481 printf("\n");
2482 }
2483
2484 #endif /* PMAP_DEBUG */
2485
2486 boolean_t
2487 pmap_testbit(pa, setbits)
2488 paddr_t pa;
2489 int setbits;
2490 {
2491 int bank, off;
2492
2493 PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
2494
2495 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2496 return(FALSE);
2497
2498 /*
2499 * Check saved info only
2500 */
2501 if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
2502 PDEBUG(0, printf("pmap_attributes = %02x\n",
2503 vm_physmem[bank].pmseg.attrs[off]));
2504 return(TRUE);
2505 }
2506
2507 return(FALSE);
2508 }
2509
2510
2511 /*
2512 * Modify pte bits for all ptes corresponding to the given physical address.
2513 * We use `maskbits' rather than `clearbits' because we're always passing
2514 * constants and the latter would require an extra inversion at run-time.
2515 */
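/*
 * For example, pmap_clear_modify() below clears the modified bit with
 * pmap_clearbit(pa, PT_M), and pmap_copy_on_write() write-protects a
 * page with pmap_clearbit(pa, PT_Wr).
 */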
2516
2517 void
2518 pmap_clearbit(pa, maskbits)
2519 paddr_t pa;
2520 int maskbits;
2521 {
2522 struct pv_entry *pv;
2523 pt_entry_t *pte;
2524 vaddr_t va;
2525 int bank, off;
2526 int s;
2527
2528 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
2529 pa, maskbits));
2530 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2531 return;
2532 pv = &vm_physmem[bank].pmseg.pvent[off];
2533 s = splvm();
2534
2535 /*
2536 * Clear saved attributes (modify, reference)
2537 */
2538 vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
2539
2540 if (pv->pv_pmap == NULL) {
2541 splx(s);
2542 return;
2543 }
2544
2545 /*
2546 * Loop over all current mappings, setting/clearing as appropriate
2547 */
2548 for (; pv; pv = pv->pv_next) {
2549 va = pv->pv_va;
2550
2551 /*
2552 * XXX don't write protect pager mappings
2553 */
2554 if (va >= uvm.pager_sva && va < uvm.pager_eva) {
2555 printf("pmap_clearbit: bogon alpha\n");
2556 continue;
2557 }
2558
2559 pv->pv_flags &= ~maskbits;
2560 pte = pmap_pte(pv->pv_pmap, va);
2561 if (maskbits & (PT_Wr|PT_M))
2562 *pte = *pte & ~PT_AP(AP_W);
2563 if (maskbits & PT_H)
2564 *pte = (*pte & ~L2_MASK) | L2_INVAL;
2565 }
2566 cpu_tlb_flushID();
2567
2568 splx(s);
2569 }
2570
2571
2572 boolean_t
2573 pmap_clear_modify(pg)
2574 struct vm_page *pg;
2575 {
2576 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2577 boolean_t rv;
2578
2579 PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
2580 rv = pmap_testbit(pa, PT_M);
2581 pmap_clearbit(pa, PT_M);
2582 return rv;
2583 }
2584
2585
2586 boolean_t
2587 pmap_clear_reference(pg)
2588 struct vm_page *pg;
2589 {
2590 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2591 boolean_t rv;
2592
2593 PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
2594 rv = pmap_testbit(pa, PT_H);
2595 pmap_clearbit(pa, PT_H);
2596 return rv;
2597 }
2598
2599
2600 void
2601 pmap_copy_on_write(pa)
2602 paddr_t pa;
2603 {
2604 PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
2605 pmap_clearbit(pa, PT_Wr);
2606 }
2607
2608
2609 boolean_t
2610 pmap_is_modified(pg)
2611 struct vm_page *pg;
2612 {
2613 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2614 boolean_t result;
2615
2616 result = pmap_testbit(pa, PT_M);
2617 PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
2618 return (result);
2619 }
2620
2621
2622 boolean_t
2623 pmap_is_referenced(pg)
2624 struct vm_page *pg;
2625 {
2626 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2627 boolean_t result;
2628
2629 result = pmap_testbit(pa, PT_H);
2630 PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
2631 return (result);
2632 }
2633
2634
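/*
 * The ARM MMU provides no hardware referenced/modified bits, so they are
 * emulated here: pages are initially entered with an invalid or read-only
 * small-page descriptor, and the resulting faults are resolved by
 * pmap_modified_emulation() and pmap_handled_emulation(), which record
 * PT_M/PT_H in the page attributes and then upgrade the pte.
 */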
2635 int
2636 pmap_modified_emulation(pmap, va)
2637 pmap_t pmap;
2638 vaddr_t va;
2639 {
2640 pt_entry_t *pte;
2641 paddr_t pa;
2642 int bank, off;
2643 struct pv_entry *pv;
2644 u_int flags;
2645
2646 PDEBUG(2, printf("pmap_modified_emulation\n"));
2647
2648 /* Get the pte */
2649 pte = pmap_pte(pmap, va);
2650 if (!pte) {
2651 PDEBUG(2, printf("no pte\n"));
2652 return(0);
2653 }
2654
2655 PDEBUG(1, printf("*pte=%08x\n", *pte));
2656
2657 /* Check for a zero pte */
2658 if (*pte == 0)
2659 return(0);
2660
2661 /* This can happen if user code tries to access kernel memory. */
2662 if ((*pte & PT_AP(AP_W)) != 0)
2663 return (0);
2664
2665 /* Extract the physical address of the page */
2666 pa = pmap_pte_pa(pte);
2667 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2668 return(0);
2669
2670 /* Get the current flags for this page. */
2671 pv = &vm_physmem[bank].pmseg.pvent[off];
2672 flags = pmap_modify_pv(pmap, va, pv, 0, 0);
2673 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
2674
2675 /*
2676 * Do the flags say this page is writable ? If not then it is a
2677 * genuine write fault. If yes then the write fault is our fault,
2678 * as we did not reflect the write access in the PTE. Now that we
2679 * know a write has occurred, we can correct this and also set
2680 * the modified bit.
2681 */
2682 if (~flags & PT_Wr)
2683 return(0);
2684
2685 PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
2686 va, pte, *pte));
2687 vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
2688 *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
2689 PDEBUG(0, printf("->(%08x)\n", *pte));
2690
2691 /* Return, indicating the problem has been dealt with */
2692 cpu_tlb_flushID_SE(va);
2693 return(1);
2694 }
2695
2696
2697 int
2698 pmap_handled_emulation(pmap, va)
2699 pmap_t pmap;
2700 vaddr_t va;
2701 {
2702 pt_entry_t *pte;
2703 paddr_t pa;
2704 int bank, off;
2705
2706 PDEBUG(2, printf("pmap_handled_emulation\n"));
2707
2708 /* Get the pte */
2709 pte = pmap_pte(pmap, va);
2710 if (!pte) {
2711 PDEBUG(2, printf("no pte\n"));
2712 return(0);
2713 }
2714
2715 PDEBUG(1, printf("*pte=%08x\n", *pte));
2716
2717 /* Check for a zero pte */
2718 if (*pte == 0)
2719 return(0);
2720
2721 /* This can happen if user code tries to access kernel memory. */
2722 if ((*pte & L2_MASK) != L2_INVAL)
2723 return (0);
2724
2725 /* Extract the physical address of the page */
2726 pa = pmap_pte_pa(pte);
2727 if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
2728 return(0);
2729
2730 /*
2731 * OK, we just enable the pte and mark the attributes as handled
2732 */
2733 PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
2734 va, pte, *pte));
2735 vm_physmem[bank].pmseg.attrs[off] |= PT_H;
2736 *pte = (*pte & ~L2_MASK) | L2_SPAGE;
2737 PDEBUG(0, printf("->(%08x)\n", *pte));
2738
2739 /* Return, indicating the problem has been dealt with */
2740 cpu_tlb_flushID_SE(va);
2741 return(1);
2742 }
2743
2744 /*
2745 * pmap_collect: free resources held by a pmap
2746 *
2747 * => optional function.
2748 * => called when a process is swapped out to free memory.
2749 */
2750
2751 void
2752 pmap_collect(pmap)
2753 pmap_t pmap;
2754 {
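	/* Nothing to do. */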
2755 }
2756
2757 /*
2758 * Routine: pmap_procwr
2759 *
2760 * Function:
2761 * Synchronize caches corresponding to [va, va+len) in process p.
2762 *
2763 */
2764 void
2765 pmap_procwr(p, va, len)
2766 struct proc *p;
2767 vaddr_t va;
2768 int len;
2769 {
2770 /* We only need to do anything if it is the current process. */
2771 if (p == curproc)
2772 cpu_cache_syncI_rng(va, len);
2773 }
2774
2775 /* End of pmap.c */
2776