1 /*	$NetBSD: pmap.c,v 1.20 2003/05/08 18:13:21 thorpej Exp $	*/
2
3 /*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include <sys/param.h>
70 #include <sys/malloc.h>
71 #include <sys/proc.h>
72 #include <sys/user.h>
73 #include <sys/queue.h>
74 #include <sys/systm.h>
75 #include <sys/pool.h>
76 #include <sys/device.h>
77
78 #include <uvm/uvm.h>
79
80 #include <machine/cpu.h>
81 #include <machine/pcb.h>
82 #include <machine/powerpc.h>
83
84 #include <powerpc/spr.h>
85 #include <machine/tlb.h>
86
87 /*
88 * kernmap is an array of PTEs large enough to map in
89 * 4GB. At 16KB/page it is 256K entries or 2MB.
90 */
91 #define KERNMAP_SIZE ((0xffffffffU/PAGE_SIZE)+1)
92 caddr_t kernmap;
93
94 #define MINCTX 2
95 #define NUMCTX 256
96 volatile struct pmap *ctxbusy[NUMCTX];
97
98 #define TLBF_USED 0x1
99 #define TLBF_REF 0x2
100 #define TLBF_LOCKED 0x4
101 #define TLB_LOCKED(i) (tlb_info[(i)].ti_flags & TLBF_LOCKED)
102 typedef struct tlb_info_s {
103 char ti_flags;
104 	char ti_ctx;		/* TLB_PID associated with the entry */
105 u_int ti_va;
106 } tlb_info_t;
107
108 volatile tlb_info_t tlb_info[NTLB];
109 /* We'll use a modified FIFO replacement policy because it's cheap */
110 volatile int tlbnext = TLB_NRESERVED;
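
/*
 * Illustrative sketch (not compiled): a condensed form of the policy as
 * implemented in ppc4xx_tlb_find_victim() below, which additionally
 * honours TLBF_USED and gives the current kernel stack page a second
 * chance.  The helper name is hypothetical.
 */
#if 0
static int
tlb_victim_sketch(void)
{
	for (;;) {
		if (++tlbnext >= NTLB)
			tlbnext = TLB_NRESERVED;	/* wrap past pinned entries */
		if ((tlb_info[tlbnext].ti_flags & (TLBF_LOCKED | TLBF_REF)) == 0)
			return (tlbnext);		/* evict this entry */
		/* Recently referenced: clear the bit, give it another pass. */
		tlb_info[tlbnext].ti_flags &= ~TLBF_REF;
	}
}
#endif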
111
112 u_long dtlb_miss_count = 0;
113 u_long itlb_miss_count = 0;
114 u_long ktlb_miss_count = 0;
115 u_long utlb_miss_count = 0;
116
117 /* Event counters */
118 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
119 NULL, "cpu", "tlbmiss");
120 struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
121 NULL, "cpu", "tlbhit");
122 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
123 NULL, "cpu", "tlbflush");
124 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
125 NULL, "cpu", "tlbenter");
126
127 struct pmap kernel_pmap_;
128
129 int physmem;
130 static int npgs;
131 static u_int nextavail;
132 #ifndef MSGBUFADDR
133 extern paddr_t msgbuf_paddr;
134 #endif
135
136 static struct mem_region *mem, *avail;
137
138 /*
139 * This is a cache of referenced/modified bits.
140 * Bits herein are shifted by ATTRSHFT.
141 */
142 static char *pmap_attrib;
143
144 #define PV_WIRED 0x1
145 #define PV_WIRE(pv) ((pv)->pv_va |= PV_WIRED)
146 #define PV_CMPVA(va,pv) (!(((pv)->pv_va^(va))&(~PV_WIRED)))
147
148 struct pv_entry {
149 struct pv_entry *pv_next; /* Linked list of mappings */
150 vaddr_t pv_va; /* virtual address of mapping */
151 struct pmap *pv_pm;
152 };
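
/*
 * Illustrative sketch (not compiled): mappings are page aligned, so the
 * low bit of pv_va is free to carry PV_WIRED; PV_CMPVA() masks that bit
 * off when comparing virtual addresses.  The helper name is hypothetical.
 */
#if 0
static int
pv_wired_sketch(struct pv_entry *pv, vaddr_t va)
{
	PV_WIRE(pv);			/* sets the wired bit in pv_va */
	return (PV_CMPVA(va, pv));	/* still matches va: bit is masked */
}
#endif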
153
154 struct pv_entry *pv_table;
155 static struct pool pv_pool;
156
157 static int pmap_initialized;
158
159 static int ctx_flush(int);
160
161 inline struct pv_entry *pa_to_pv(paddr_t);
162 static inline char *pa_to_attr(paddr_t);
163
164 static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
165 static inline int pte_enter(struct pmap *, vaddr_t, u_int);
166
167 static void pmap_pinit(pmap_t);
168 static void pmap_release(pmap_t);
169 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t);
170 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
171
172
173 inline struct pv_entry *
174 pa_to_pv(paddr_t pa)
175 {
176 int bank, pg;
177
178 bank = vm_physseg_find(atop(pa), &pg);
179 if (bank == -1)
180 return NULL;
181 return &vm_physmem[bank].pmseg.pvent[pg];
182 }
183
184 static inline char *
185 pa_to_attr(paddr_t pa)
186 {
187 int bank, pg;
188
189 bank = vm_physseg_find(atop(pa), &pg);
190 if (bank == -1)
191 return NULL;
192 return &vm_physmem[bank].pmseg.attrs[pg];
193 }
194
195 /*
196 * Insert PTE into page table.
197 */
198 int
199 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
200 {
201 int seg = STIDX(va);
202 int ptn = PTIDX(va);
203 paddr_t pa;
204
205 if (!pm->pm_ptbl[seg]) {
206 /* Don't allocate a page to clear a non-existent mapping. */
207 if (!pte) return (1);
208 /* Allocate a page XXXX this will sleep! */
209 pa = 0;
210 pm->pm_ptbl[seg] =
211 (uint *)uvm_km_alloc1(kernel_map, PAGE_SIZE, 1);
212 }
213 pm->pm_ptbl[seg][ptn] = pte;
214
215 /* Flush entry. */
216 ppc4xx_tlb_flush(va, pm->pm_ctx);
217 return (1);
218 }
219
220 /*
221 * Get a pointer to a PTE in a page table.
222 */
223 volatile u_int *
224 pte_find(struct pmap *pm, vaddr_t va)
225 {
226 int seg = STIDX(va);
227 int ptn = PTIDX(va);
228
229 if (pm->pm_ptbl[seg])
230 return (&pm->pm_ptbl[seg][ptn]);
231
232 return (NULL);
233 }
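
/*
 * Usage sketch (not compiled): STIDX() selects the per-pmap page table
 * page and PTIDX() the slot within it, so updating a mapping is a
 * pte_find() lookup with pte_enter() as the allocating fallback.  The
 * helper below is hypothetical.
 */
#if 0
static void
pte_update_sketch(struct pmap *pm, vaddr_t va, u_int tte)
{
	volatile u_int *ptp;

	if ((ptp = pte_find(pm, va)) != NULL) {
		*ptp = tte;
		ppc4xx_tlb_flush(va, pm->pm_ctx);	/* evict any stale entry */
	} else
		pte_enter(pm, va, tte);		/* allocates the page table page */
}
#endif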
234
235 /*
236 * This is called during initppc, before the system is really initialized.
237 */
238 void
239 pmap_bootstrap(u_int kernelstart, u_int kernelend)
240 {
241 struct mem_region *mp, *mp1;
242 int cnt, i;
243 u_int s, e, sz;
244
245 /*
246 * Allocate the kernel page table at the end of
247 * kernel space so it's in the locked TTE.
248 */
249 kernmap = (caddr_t)kernelend;
250
251 /*
252 * Initialize kernel page table.
253 */
254 for (i = 0; i < STSZ; i++) {
255 pmap_kernel()->pm_ptbl[i] = 0;
256 }
257 ctxbusy[0] = ctxbusy[1] = pmap_kernel();
258
259 /*
260 	 * Announce the page size to the VM system
261 */
262 uvmexp.pagesize = NBPG;
263 uvm_setpagesize();
264
265 /*
266 * Get memory.
267 */
268 mem_regions(&mem, &avail);
269 for (mp = mem; mp->size; mp++) {
270 physmem += btoc(mp->size);
271 printf("+%lx,",mp->size);
272 }
273 printf("\n");
274 ppc4xx_tlb_init();
275 /*
276 * Count the number of available entries.
277 */
278 for (cnt = 0, mp = avail; mp->size; mp++)
279 cnt++;
280
281 /*
282 * Page align all regions.
283 * Non-page aligned memory isn't very interesting to us.
284 * Also, sort the entries for ascending addresses.
285 */
286 kernelstart &= ~PGOFSET;
287 kernelend = (kernelend + PGOFSET) & ~PGOFSET;
288 for (mp = avail; mp->size; mp++) {
289 s = mp->start;
290 e = mp->start + mp->size;
291 printf("%08x-%08x -> ",s,e);
292 /*
293 * Check whether this region holds all of the kernel.
294 */
295 if (s < kernelstart && e > kernelend) {
296 avail[cnt].start = kernelend;
297 avail[cnt++].size = e - kernelend;
298 e = kernelstart;
299 }
300 /*
301 		 * Look whether this region starts within the kernel.
302 */
303 if (s >= kernelstart && s < kernelend) {
304 if (e <= kernelend)
305 goto empty;
306 s = kernelend;
307 }
308 /*
309 * Now look whether this region ends within the kernel.
310 */
311 if (e > kernelstart && e <= kernelend) {
312 if (s >= kernelstart)
313 goto empty;
314 e = kernelstart;
315 }
316 /*
317 * Now page align the start and size of the region.
318 */
319 s = round_page(s);
320 e = trunc_page(e);
321 if (e < s)
322 e = s;
323 sz = e - s;
324 printf("%08x-%08x = %x\n",s,e,sz);
325 /*
326 * Check whether some memory is left here.
327 */
328 if (sz == 0) {
329 empty:
330 memmove(mp, mp + 1,
331 (cnt - (mp - avail)) * sizeof *mp);
332 cnt--;
333 mp--;
334 continue;
335 }
336 /*
337 * Do an insertion sort.
338 */
339 npgs += btoc(sz);
340 for (mp1 = avail; mp1 < mp; mp1++)
341 if (s < mp1->start)
342 break;
343 if (mp1 < mp) {
344 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
345 mp1->start = s;
346 mp1->size = sz;
347 } else {
348 mp->start = s;
349 mp->size = sz;
350 }
351 }
352
353 /*
354 * We cannot do pmap_steal_memory here,
355 * since we don't run with translation enabled yet.
356 */
357 #ifndef MSGBUFADDR
358 /*
359 * allow for msgbuf
360 */
361 sz = round_page(MSGBUFSIZE);
362 mp = NULL;
363 for (mp1 = avail; mp1->size; mp1++)
364 if (mp1->size >= sz)
365 mp = mp1;
366 if (mp == NULL)
367 panic("not enough memory?");
368
369 npgs -= btoc(sz);
370 msgbuf_paddr = mp->start + mp->size - sz;
371 mp->size -= sz;
372 if (mp->size <= 0)
373 memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
374 #endif
375
376 printf("Loading pages\n");
377 for (mp = avail; mp->size; mp++)
378 uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
379 atop(mp->start), atop(mp->start + mp->size),
380 VM_FREELIST_DEFAULT);
381
382 /*
383 * Initialize kernel pmap and hardware.
384 */
385 	/* Set up the TLB pid allocator so it knows we are already using PID 1 */
386 pmap_kernel()->pm_ctx = KERNEL_PID;
387 nextavail = avail->start;
388
389 /*
390 * Define the boundaries of the managed kernel virtual
391 * address space.
392 */
393 virtual_avail = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
394 virtual_end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
395
396 evcnt_attach_static(&tlbhit_ev);
397 evcnt_attach_static(&tlbmiss_ev);
398 evcnt_attach_static(&tlbflush_ev);
399 evcnt_attach_static(&tlbenter_ev);
400 printf("Done\n");
401 }
402
403 /*
404 * Restrict given range to physical memory
405 *
406 * (Used by /dev/mem)
407 */
408 void
409 pmap_real_memory(paddr_t *start, psize_t *size)
410 {
411 struct mem_region *mp;
412
413 for (mp = mem; mp->size; mp++) {
414 if (*start + *size > mp->start &&
415 *start < mp->start + mp->size) {
416 if (*start < mp->start) {
417 *size -= mp->start - *start;
418 *start = mp->start;
419 }
420 if (*start + *size > mp->start + mp->size)
421 *size = mp->start + mp->size - *start;
422 return;
423 }
424 }
425 *size = 0;
426 }
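
/*
 * Usage sketch (not compiled): a /dev/mem style caller clamps its request
 * to real memory and treats a zero residual size as "no RAM here".  The
 * helper below is hypothetical.
 */
#if 0
static int
mem_range_overlaps_ram(paddr_t pa, psize_t len)
{
	pmap_real_memory(&pa, &len);
	return (len != 0);
}
#endif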
427
428 /*
429 * Initialize anything else for pmap handling.
430 * Called during vm_init().
431 */
432 void
433 pmap_init(void)
434 {
435 struct pv_entry *pv;
436 vsize_t sz;
437 vaddr_t addr;
438 int i, s;
439 int bank;
440 char *attr;
441
442 sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
443 sz = round_page(sz);
444 addr = uvm_km_zalloc(kernel_map, sz);
445 s = splvm();
446 pv = pv_table = (struct pv_entry *)addr;
447 for (i = npgs; --i >= 0;)
448 pv++->pv_pm = NULL;
449 pmap_attrib = (char *)pv;
450 memset(pv, 0, npgs);
451
452 pv = pv_table;
453 attr = pmap_attrib;
454 for (bank = 0; bank < vm_nphysseg; bank++) {
455 sz = vm_physmem[bank].end - vm_physmem[bank].start;
456 vm_physmem[bank].pmseg.pvent = pv;
457 vm_physmem[bank].pmseg.attrs = attr;
458 pv += sz;
459 attr += sz;
460 }
461
462 pmap_initialized = 1;
463 splx(s);
464
465 /* Setup a pool for additional pvlist structures */
466 pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
467 }
468
469 #ifdef PMAP_GROWKERNEL
470 /*
471 * Preallocate kernel page tables to a specified VA.
472 * This simply loops through the first TTE for each
473 * page table from the beginning of the kernel pmap,
474 * reads the entry, and if the result is
475 * zero (either invalid entry or no page table) it stores
476 * a zero there, populating page tables in the process.
477  * This is not the most efficient technique but I don't
478 * expect it to be called that often.
479 */
480 extern struct vm_page *vm_page_alloc1 __P((void));
481 extern void vm_page_free1 __P((struct vm_page *));
482
483 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
484
485 vaddr_t
486 pmap_growkernel(maxkvaddr)
487 vaddr_t maxkvaddr;
488 {
489 int s;
490 int seg;
491 paddr_t pg;
492 struct pmap *pm = pmap_kernel();
493
494 s = splvm();
495
496 /* Align with the start of a page table */
497 for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
498 kbreak += PTMAP) {
499 seg = STIDX(kbreak);
500
501 if (pte_find(pm, kbreak)) continue;
502
503 if (uvm.page_init_done) {
504 pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
505 } else {
506 if (!uvm_page_physget(&pg))
507 panic("pmap_growkernel: no memory");
508 }
509 if (!pg) panic("pmap_growkernel: no pages");
510 pmap_zero_page((paddr_t)pg);
511
512 		/* XXX This is based on all physical memory being addressable */
513 pm->pm_ptbl[seg] = (u_int *)pg;
514 }
515 splx(s);
516 return (kbreak);
517 }
518
519 /*
520 * vm_page_alloc1:
521 *
522 * Allocate and return a memory cell with no associated object.
523 */
524 struct vm_page *
525 vm_page_alloc1()
526 {
527 struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
528 if (pg) {
529 pg->wire_count = 1; /* no mappings yet */
530 pg->flags &= ~PG_BUSY; /* never busy */
531 }
532 return pg;
533 }
534
535 /*
536 * vm_page_free1:
537 *
538 * Returns the given page to the free list,
539  *	disassociating it from any VM object.
540 *
541 * Object and page must be locked prior to entry.
542 */
543 void
544 vm_page_free1(mem)
545 struct vm_page *mem;
546 {
547 #ifdef DIAGNOSTIC
548 if (mem->flags != (PG_CLEAN|PG_FAKE)) {
549 printf("Freeing invalid page %p\n", mem);
550 printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(mem));
551 #ifdef DDB
552 Debugger();
553 #endif
554 return;
555 }
556 #endif
557 mem->flags |= PG_BUSY;
558 mem->wire_count = 0;
559 uvm_pagefree(mem);
560 }
561 #endif
562
563 /*
564 * Create and return a physical map.
565 */
566 struct pmap *
567 pmap_create(void)
568 {
569 struct pmap *pm;
570
571 pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
572 memset((caddr_t)pm, 0, sizeof *pm);
573 pmap_pinit(pm);
574 return pm;
575 }
576
577 /*
578 * Initialize a preallocated and zeroed pmap structure.
579 */
580 void
581 pmap_pinit(struct pmap *pm)
582 {
583 int i;
584
585 /*
586 * Allocate some segment registers for this pmap.
587 */
588 pm->pm_refs = 1;
589 for (i = 0; i < STSZ; i++)
590 pm->pm_ptbl[i] = NULL;
591 }
592
593 /*
594 * Add a reference to the given pmap.
595 */
596 void
597 pmap_reference(struct pmap *pm)
598 {
599
600 pm->pm_refs++;
601 }
602
603 /*
604 * Retire the given pmap from service.
605 * Should only be called if the map contains no valid mappings.
606 */
607 void
608 pmap_destroy(struct pmap *pm)
609 {
610
611 if (--pm->pm_refs == 0) {
612 pmap_release(pm);
613 free((caddr_t)pm, M_VMPMAP);
614 }
615 }
616
617 /*
618 * Release any resources held by the given physical map.
619 * Called when a pmap initialized by pmap_pinit is being released.
620 */
621 static void
622 pmap_release(struct pmap *pm)
623 {
624 int i;
625
626 for (i = 0; i < STSZ; i++)
627 if (pm->pm_ptbl[i]) {
628 uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
629 PAGE_SIZE);
630 pm->pm_ptbl[i] = NULL;
631 }
632 if (pm->pm_ctx) ctx_free(pm);
633 }
634
635 /*
636 * Copy the range specified by src_addr/len
637 * from the source map to the range dst_addr/len
638 * in the destination map.
639 *
640 * This routine is only advisory and need not do anything.
641 */
642 void
643 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
644 vsize_t len, vaddr_t src_addr)
645 {
646 }
647
648 /*
649 * Require that all active physical maps contain no
650 * incorrect entries NOW.
651 */
652 void
653 pmap_update(struct pmap *pmap)
654 {
655 }
656
657 /*
658 * Garbage collects the physical map system for
659 * pages which are no longer used.
660 * Success need not be guaranteed -- that is, there
661 * may well be pages which are not referenced, but
662 * others may be collected.
663 * Called by the pageout daemon when pages are scarce.
664 */
665 void
666 pmap_collect(struct pmap *pm)
667 {
668 }
669
670 /*
671 * Fill the given physical page with zeroes.
672 */
673 void
674 pmap_zero_page(paddr_t pa)
675 {
676
677 #ifdef PPC_4XX_NOCACHE
678 memset((caddr_t)pa, 0, PAGE_SIZE);
679 #else
680 int i;
681
682 for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
683 __asm __volatile ("dcbz 0,%0" :: "r"(pa));
684 pa += CACHELINESIZE;
685 }
686 #endif
687 }
688
689 /*
690 * Copy the given physical source page to its destination.
691 */
692 void
693 pmap_copy_page(paddr_t src, paddr_t dst)
694 {
695
696 memcpy((caddr_t)dst, (caddr_t)src, PAGE_SIZE);
697 dcache_flush_page(dst);
698 }
699
700 /*
701 * This returns whether this is the first mapping of a page.
702 */
703 static inline int
704 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
705 {
706 struct pv_entry *pv, *npv = NULL;
707 int s;
708
709 if (!pmap_initialized)
710 return 0;
711
712 s = splvm();
713
714 pv = pa_to_pv(pa);
715 	for (npv = pv; npv; npv = npv->pv_next)
716 		if (npv->pv_va == va && npv->pv_pm == pm) {
717 			printf("Duplicate pv: va %lx pm %p\n", va, pm);
718 #ifdef DDB
719 			Debugger();
720 #endif
721 			splx(s);	/* don't leak the raised spl */
722 			return (1);
723 		}
724 if (!pv->pv_pm) {
725 /*
726 * No entries yet, use header as the first entry.
727 */
728 pv->pv_va = va;
729 pv->pv_pm = pm;
730 pv->pv_next = NULL;
731 } else {
732 /*
733 * There is at least one other VA mapping this page.
734 * Place this entry after the header.
735 */
736 npv = pool_get(&pv_pool, PR_WAITOK);
737 if (!npv) return (0);
738 npv->pv_va = va;
739 npv->pv_pm = pm;
740 npv->pv_next = pv->pv_next;
741 pv->pv_next = npv;
742 }
743 splx(s);
744 return (1);
745 }
746
747 static void
748 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
749 {
750 struct pv_entry *pv, *npv;
751
752 /*
753 * Remove from the PV table.
754 */
755 pv = pa_to_pv(pa);
756 if (!pv) return;
757
758 /*
759 * If it is the first entry on the list, it is actually
760 * in the header and we must copy the following entry up
761 * to the header. Otherwise we must search the list for
762 * the entry. In either case we free the now unused entry.
763 */
764 if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
765 if ((npv = pv->pv_next)) {
766 *pv = *npv;
767 pool_put(&pv_pool, npv);
768 } else
769 pv->pv_pm = NULL;
770 } else {
771 for (; (npv = pv->pv_next) != NULL; pv = npv)
772 if (pm == npv->pv_pm && PV_CMPVA(va, npv))
773 break;
774 if (npv) {
775 pv->pv_next = npv->pv_next;
776 pool_put(&pv_pool, npv);
777 }
778 }
779 }
780
781 /*
782 * Insert physical page at pa into the given pmap at virtual address va.
783 */
784 int
785 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
786 {
787 int s;
788 u_int tte;
789 int managed;
790
791 /*
792 * Have to remove any existing mapping first.
793 */
794 pmap_remove(pm, va, va + PAGE_SIZE);
795
796 if (flags & PMAP_WIRED) flags |= prot;
797
798 /* If it has no protections don't bother w/the rest */
799 if (!(flags & VM_PROT_ALL))
800 return (0);
801
802 managed = 0;
803 if (vm_physseg_find(atop(pa), NULL) != -1)
804 managed = 1;
805
806 /*
807 * Generate TTE.
808 *
809 * XXXX
810 *
811 * Since the kernel does not handle execution privileges properly,
812 * we will handle read and execute permissions together.
813 */
814 tte = TTE_PA(pa) | TTE_EX;
815 /* XXXX -- need to support multiple page sizes. */
816 tte |= TTE_SZ_16K;
817 #ifdef DIAGNOSTIC
818 if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
819 (PME_NOCACHE | PME_WRITETHROUG))
820 panic("pmap_enter: uncached & writethrough");
821 #endif
822 if (flags & PME_NOCACHE)
823 /* Must be I/O mapping */
824 tte |= TTE_I | TTE_G;
825 #ifdef PPC_4XX_NOCACHE
826 tte |= TTE_I;
827 #else
828 else if (flags & PME_WRITETHROUG)
829 /* Uncached and writethrough are not compatible */
830 tte |= TTE_W;
831 #endif
832 if (pm == pmap_kernel())
833 tte |= TTE_ZONE(ZONE_PRIV);
834 else
835 tte |= TTE_ZONE(ZONE_USER);
836
837 if (flags & VM_PROT_WRITE)
838 tte |= TTE_WR;
839
840 /*
841 * Now record mapping for later back-translation.
842 */
843 if (pmap_initialized && managed) {
844 char *attr;
845
846 if (!pmap_enter_pv(pm, va, pa)) {
847 /* Could not enter pv on a managed page */
848 return 1;
849 }
850
851 /* Now set attributes. */
852 attr = pa_to_attr(pa);
853 #ifdef DIAGNOSTIC
854 if (!attr)
855 panic("managed but no attr");
856 #endif
857 if (flags & VM_PROT_ALL)
858 *attr |= PTE_HI_REF;
859 if (flags & VM_PROT_WRITE)
860 *attr |= PTE_HI_CHG;
861 }
862
863 s = splvm();
864 pm->pm_stats.resident_count++;
865
866 /* Insert page into page table. */
867 pte_enter(pm, va, tte);
868
869 /* If this is a real fault, enter it in the tlb */
870 if (tte && ((flags & PMAP_WIRED) == 0)) {
871 ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
872 }
873 splx(s);
874
875 /* Flush the real memory from the instruction cache. */
876 if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
877 __syncicache((void *)pa, PAGE_SIZE);
878
879 return 0;
880 }
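
/*
 * Illustrative sketch (not compiled): with the flags handled above, a
 * cacheable writable kernel mapping is composed as TTE_PA(pa) | TTE_EX |
 * TTE_SZ_16K | TTE_ZONE(ZONE_PRIV) | TTE_WR, while a PME_NOCACHE I/O
 * mapping additionally carries TTE_I | TTE_G (and never TTE_W).  These
 * macro names are hypothetical.
 */
#if 0
#define KERN_RW_TTE(pa)	(TTE_PA(pa) | TTE_EX | TTE_SZ_16K |	\
			 TTE_ZONE(ZONE_PRIV) | TTE_WR)
#define KERN_IO_TTE(pa)	(KERN_RW_TTE(pa) | TTE_I | TTE_G)
#endif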
881
882 void
883 pmap_unwire(struct pmap *pm, vaddr_t va)
884 {
885 struct pv_entry *pv, *npv;
886 paddr_t pa;
887 	int s;
888 
889 	if (pm == NULL)
890 		return;
891 
892 	if (!pmap_extract(pm, va, &pa))
893 		return;
894 
895 	va |= PV_WIRED;
896 
897 	pv = pa_to_pv(pa);
898 	if (!pv)
899 		return;
900 	s = splvm();
901
902 	/*
903 	 * Walk the PV list for this page and clear the PV_WIRED
904 	 * flag on the entry that matches this pmap and virtual
905 	 * address.  (The loop below starts at pv->pv_next, so the
906 	 * head entry itself is not examined.)
907 	 */
908 for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) {
909 if (pm == npv->pv_pm && PV_CMPVA(va, npv)) {
910 npv->pv_va &= ~PV_WIRED;
911 break;
912 }
913 }
914 splx(s);
915 }
916
917 void
918 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
919 {
920 int s;
921 u_int tte;
922 struct pmap *pm = pmap_kernel();
923
924 /*
925 * Have to remove any existing mapping first.
926 */
927
928 /*
929 * Generate TTE.
930 *
931 * XXXX
932 *
933 * Since the kernel does not handle execution privileges properly,
934 * we will handle read and execute permissions together.
935 */
936 tte = 0;
937 if (prot & VM_PROT_ALL) {
938
939 tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
940 /* XXXX -- need to support multiple page sizes. */
941 tte |= TTE_SZ_16K;
942 #ifdef DIAGNOSTIC
943 if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
944 (PME_NOCACHE | PME_WRITETHROUG))
945 panic("pmap_kenter_pa: uncached & writethrough");
946 #endif
947 if (prot & PME_NOCACHE)
948 /* Must be I/O mapping */
949 tte |= TTE_I | TTE_G;
950 #ifdef PPC_4XX_NOCACHE
951 tte |= TTE_I;
952 #else
953 else if (prot & PME_WRITETHROUG)
954 /* Uncached and writethrough are not compatible */
955 tte |= TTE_W;
956 #endif
957 if (prot & VM_PROT_WRITE)
958 tte |= TTE_WR;
959 }
960
961 s = splvm();
962 pm->pm_stats.resident_count++;
963
964 /* Insert page into page table. */
965 pte_enter(pm, va, tte);
966 splx(s);
967 }
968
969 void
970 pmap_kremove(vaddr_t va, vsize_t len)
971 {
972
973 while (len > 0) {
974 pte_enter(pmap_kernel(), va, 0);
975 va += PAGE_SIZE;
976 len -= PAGE_SIZE;
977 }
978 }
979
980 /*
981 * Remove the given range of mapping entries.
982 */
983 void
984 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
985 {
986 int s;
987 paddr_t pa;
988 volatile u_int *ptp;
989
990 s = splvm();
991 while (va < endva) {
992
993 if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
994 pa = TTE_PA(pa);
995 pmap_remove_pv(pm, va, pa);
996 *ptp = 0;
997 ppc4xx_tlb_flush(va, pm->pm_ctx);
998 pm->pm_stats.resident_count--;
999 }
1000 va += PAGE_SIZE;
1001 }
1002
1003 splx(s);
1004 }
1005
1006 /*
1007 * Get the physical page address for the given pmap/virtual address.
1008 */
1009 boolean_t
1010 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
1011 {
1012 int seg = STIDX(va);
1013 int ptn = PTIDX(va);
1014 u_int pa = 0;
1015 int s = splvm();
1016
1017 if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
1018 *pap = TTE_PA(pa) | (va & PGOFSET);
1019 }
1020 splx(s);
1021 return (pa != 0);
1022 }
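
/*
 * Usage sketch (not compiled): the return value must be checked before
 * the physical address is used, as pmap_unwire() does above.  The helper
 * name is hypothetical.
 */
#if 0
static paddr_t
va_to_pa_sketch(struct pmap *pm, vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pm, va, &pa))
		return (0);		/* no mapping at va */
	return (pa);
}
#endif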
1023
1024 /*
1025 * Lower the protection on the specified range of this pmap.
1026 *
1027 * There are only two cases: either the protection is going to 0,
1028 * or it is going to read-only.
1029 */
1030 void
1031 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1032 {
1033 volatile u_int *ptp;
1034 int s;
1035
1036 if (prot & VM_PROT_READ) {
1037 s = splvm();
1038 while (sva < eva) {
1039 if ((ptp = pte_find(pm, sva)) != NULL) {
1040 *ptp &= ~TTE_WR;
1041 ppc4xx_tlb_flush(sva, pm->pm_ctx);
1042 }
1043 sva += PAGE_SIZE;
1044 }
1045 splx(s);
1046 return;
1047 }
1048 pmap_remove(pm, sva, eva);
1049 }
1050
1051 boolean_t
1052 check_attr(struct vm_page *pg, u_int mask, int clear)
1053 {
1054 paddr_t pa = VM_PAGE_TO_PHYS(pg);
1055 int s;
1056 char *attr;
1057 int rv;
1058
1059 /*
1060 * First modify bits in cache.
1061 */
1062 	attr = pa_to_attr(pa);
1063 	if (attr == NULL)
1064 		return FALSE;
1065 	s = splvm();
1066
1067 rv = ((*attr & mask) != 0);
1068 if (clear) {
1069 *attr &= ~mask;
1070 pmap_page_protect(pg, (mask == PTE_HI_CHG) ? VM_PROT_READ : 0);
1071 }
1072 splx(s);
1073 return rv;
1074 }
1075
1076
1077 /*
1078 * Lower the protection on the specified physical page.
1079 *
1080 * There are only two cases: either the protection is going to 0,
1081 * or it is going to read-only.
1082 */
1083 void
1084 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1085 {
1086 paddr_t pa = VM_PAGE_TO_PHYS(pg);
1087 vaddr_t va;
1088 struct pv_entry *pvh, *pv, *npv;
1089 struct pmap *pm;
1090
1091 pvh = pa_to_pv(pa);
1092 if (pvh == NULL)
1093 return;
1094
1095 /* Handle extra pvs which may be deleted in the operation */
1096 for (pv = pvh->pv_next; pv; pv = npv) {
1097 npv = pv->pv_next;
1098
1099 pm = pv->pv_pm;
1100 va = pv->pv_va;
1101 pmap_protect(pm, va, va+PAGE_SIZE, prot);
1102 }
1103 /* Now check the head pv */
1104 if (pvh->pv_pm) {
1105 pv = pvh;
1106 pm = pv->pv_pm;
1107 va = pv->pv_va;
1108 pmap_protect(pm, va, va+PAGE_SIZE, prot);
1109 }
1110 }
1111
1112 /*
1113 * Activate the address space for the specified process. If the process
1114 * is the current process, load the new MMU context.
1115 */
1116 void
1117 pmap_activate(struct lwp *l)
1118 {
1119 #if 0
1120 struct pcb *pcb = &l->l_proc->p_addr->u_pcb;
1121 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1122
1123 /*
1124 * XXX Normally performed in cpu_fork().
1125 */
1126 printf("pmap_activate(%p), pmap=%p\n",l,pmap);
1127 if (pcb->pcb_pm != pmap) {
1128 pcb->pcb_pm = pmap;
1129 (void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm,
1130 (paddr_t *)&pcb->pcb_pmreal);
1131 }
1132
1133 if (l == curlwp) {
1134 /* Store pointer to new current pmap. */
1135 curpm = pcb->pcb_pmreal;
1136 }
1137 #endif
1138 }
1139
1140 /*
1141 * Deactivate the specified process's address space.
1142 */
1143 void
1144 pmap_deactivate(struct lwp *l)
1145 {
1146 }
1147
1148 /*
1149 * Synchronize caches corresponding to [addr, addr+len) in p.
1150 */
1151 void
1152 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1153 {
1154 struct pmap *pm = p->p_vmspace->vm_map.pmap;
1155 int msr, ctx, opid, step;
1156
1157
1158 step = CACHELINESIZE;
1159
1160 /*
1161 * Need to turn off IMMU and switch to user context.
1162 * (icbi uses DMMU).
1163 */
1164 if (!(ctx = pm->pm_ctx)) {
1165 /* No context -- assign it one */
1166 ctx_alloc(pm);
1167 ctx = pm->pm_ctx;
1168 }
1169 __asm __volatile("mfmsr %0;"
1170 "li %1, 0x20;"
1171 "andc %1,%0,%1;"
1172 "mtmsr %1;"
1173 "sync;isync;"
1174 "mfpid %1;"
1175 "mtpid %2;"
1176 "sync; isync;"
1177 "1:"
1178 "dcbf 0,%3;"
1179 "icbi 0,%3;"
1180 "add %3,%3,%5;"
1181 "addc. %4,%4,%6;"
1182 "bge 1b;"
1183 "mtpid %1;"
1184 "mtmsr %0;"
1185 "sync; isync"
1186 : "=&r" (msr), "=&r" (opid)
1187 : "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step));
1188 }
1189
1190
1191 /* This has to be done in real mode !!! */
1192 void
1193 ppc4xx_tlb_flush(vaddr_t va, int pid)
1194 {
1195 u_long i, found;
1196 u_long msr;
1197
1198 /* If there's no context then it can't be mapped. */
1199 if (!pid) return;
1200
1201 asm("mfpid %1;" /* Save PID */
1202 "mfmsr %2;" /* Save MSR */
1203 "li %0,0;" /* Now clear MSR */
1204 "mtmsr %0;"
1205 "mtpid %4;" /* Set PID */
1206 "sync;"
1207 "tlbsx. %0,0,%3;" /* Search TLB */
1208 "sync;"
1209 "mtpid %1;" /* Restore PID */
1210 "mtmsr %2;" /* Restore MSR */
1211 "sync;isync;"
1212 "li %1,1;"
1213 "beq 1f;"
1214 "li %1,0;"
1215 "1:"
1216 : "=&r" (i), "=&r" (found), "=&r" (msr)
1217 : "r" (va), "r" (pid));
1218 if (found && !TLB_LOCKED(i)) {
1219
1220 /* Now flush translation */
1221 asm volatile(
1222 "tlbwe %0,%1,0;"
1223 "sync;isync;"
1224 : : "r" (0), "r" (i));
1225
1226 tlb_info[i].ti_ctx = 0;
1227 tlb_info[i].ti_flags = 0;
1228 tlbnext = i;
1229 /* Successful flushes */
1230 tlbflush_ev.ev_count++;
1231 }
1232 }
1233
1234 void
1235 ppc4xx_tlb_flush_all(void)
1236 {
1237 u_long i;
1238
1239 for (i = 0; i < NTLB; i++)
1240 if (!TLB_LOCKED(i)) {
1241 asm volatile(
1242 "tlbwe %0,%1,0;"
1243 "sync;isync;"
1244 : : "r" (0), "r" (i));
1245 tlb_info[i].ti_ctx = 0;
1246 tlb_info[i].ti_flags = 0;
1247 }
1248
1249 asm volatile("sync;isync");
1250 }
1251
1252 /* Find a TLB entry to evict. */
1253 static int
1254 ppc4xx_tlb_find_victim(void)
1255 {
1256 int flags;
1257
1258 for (;;) {
1259 if (++tlbnext >= NTLB)
1260 tlbnext = TLB_NRESERVED;
1261 flags = tlb_info[tlbnext].ti_flags;
1262 if (!(flags & TLBF_USED) ||
1263 (flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
1264 u_long va, stack = (u_long)&va;
1265
1266 if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
1267 (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
1268 (flags & TLBF_USED)) {
1269 /* Kernel stack page */
1270 flags |= TLBF_USED;
1271 tlb_info[tlbnext].ti_flags = flags;
1272 } else {
1273 /* Found it! */
1274 return (tlbnext);
1275 }
1276 } else {
1277 tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
1278 }
1279 }
1280 }
1281
1282 void
1283 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
1284 {
1285 u_long th, tl, idx;
1286 tlbpid_t pid;
1287 u_short msr;
1288 paddr_t pa;
1289 int s, sz;
1290
1291 tlbenter_ev.ev_count++;
1292
1293 sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
1294 pa = (pte & TTE_RPN_MASK(sz));
1295 th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
1296 tl = (pte & ~TLB_RPN_MASK) | pa;
1297 tl |= ppc4xx_tlbflags(va, pa);
1298
1299 s = splhigh();
1300 idx = ppc4xx_tlb_find_victim();
1301
1302 #ifdef DIAGNOSTIC
1303 if ((idx < TLB_NRESERVED) || (idx >= NTLB)) {
1304 		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
1305 }
1306 #endif
1307
1308 tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
1309 tlb_info[idx].ti_ctx = ctx;
1310 tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
1311
1312 asm volatile(
1313 "mfmsr %0;" /* Save MSR */
1314 "li %1,0;"
1315 "tlbwe %1,%3,0;" /* Invalidate old entry. */
1316 "mtmsr %1;" /* Clear MSR */
1317 "mfpid %1;" /* Save old PID */
1318 "mtpid %2;" /* Load translation ctx */
1319 "sync; isync;"
1320 #ifdef DEBUG
1321 "andi. %3,%3,63;"
1322 "tweqi %3,0;" /* XXXXX DEBUG trap on index 0 */
1323 #endif
1324 "tlbwe %4,%3,1; tlbwe %5,%3,0;" /* Set TLB */
1325 "sync; isync;"
1326 "mtpid %1; mtmsr %0;" /* Restore PID and MSR */
1327 "sync; isync;"
1328 : "=&r" (msr), "=&r" (pid)
1329 : "r" (ctx), "r" (idx), "r" (tl), "r" (th));
1330 splx(s);
1331 }
1332
1333 void
1334 ppc4xx_tlb_unpin(int i)
1335 {
1336
1337 if (i == -1)
1338 for (i = 0; i < TLB_NRESERVED; i++)
1339 tlb_info[i].ti_flags &= ~TLBF_LOCKED;
1340 else
1341 tlb_info[i].ti_flags &= ~TLBF_LOCKED;
1342 }
1343
1344 void
1345 ppc4xx_tlb_init(void)
1346 {
1347 int i;
1348
1349 /* Mark reserved TLB entries */
1350 for (i = 0; i < TLB_NRESERVED; i++) {
1351 tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
1352 tlb_info[i].ti_ctx = KERNEL_PID;
1353 }
1354
1355 	/* Set up security zones:
1356 	 * Z0 - accessible by kernel only if TLB entry permissions allow
1357 	 * Z1,Z2 - access is controlled by TLB entry permissions
1358 	 * Z3 - full access regardless of TLB entry permissions
1359 	 */
1360
1361 asm volatile(
1362 "mtspr %0,%1;"
1363 "sync;"
1364 :: "K"(SPR_ZPR), "r" (0x1b000000));
1365 }
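
/*
 * Illustrative breakdown (not compiled) of the ZPR value above, assuming
 * the usual 4xx layout of one 2-bit field per zone starting at the most
 * significant bits: the top byte of 0x1b000000 is 0b00011011, i.e.
 * Z0 = 0b00, Z1 = 0b01, Z2 = 0b10, Z3 = 0b11, matching the zone comment
 * just above.  ZPR_FIELD() is a hypothetical helper.
 */
#if 0
#define ZPR_FIELD(zone, bits)	((u_int)(bits) << (30 - 2 * (zone)))
/*
 * ZPR_FIELD(0, 0) | ZPR_FIELD(1, 1) | ZPR_FIELD(2, 2) | ZPR_FIELD(3, 3)
 *	== 0x1b000000
 */
#endif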
1366
1367
1368 /*
1369 * We should pass the ctx in from trap code.
1370 */
1371 int
1372 pmap_tlbmiss(vaddr_t va, int ctx)
1373 {
1374 volatile u_int *pte;
1375 u_long tte;
1376
1377 tlbmiss_ev.ev_count++;
1378
1379 /*
1380 * XXXX We will reserve 0-0x80000000 for va==pa mappings.
1381 */
1382 if (ctx != KERNEL_PID || (va & 0x80000000)) {
1383 pte = pte_find((struct pmap *)ctxbusy[ctx], va);
1384 if (pte == NULL) {
1385 /* Map unmanaged addresses directly for kernel access */
1386 return 1;
1387 }
1388 tte = *pte;
1389 if (tte == 0) {
1390 return 1;
1391 }
1392 } else {
1393 /* Create a 16MB writable mapping. */
1394 #ifdef PPC_4XX_NOCACHE
1395 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR;
1396 #else
1397 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
1398 #endif
1399 }
1400 tlbhit_ev.ev_count++;
1401 ppc4xx_tlb_enter(ctx, va, tte);
1402
1403 return 0;
1404 }
1405
1406 /*
1407 * Flush all the entries matching a context from the TLB.
1408 */
1409 static int
1410 ctx_flush(int cnum)
1411 {
1412 int i;
1413
1414 /* We gotta steal this context */
1415 for (i = TLB_NRESERVED; i < NTLB; i++) {
1416 if (tlb_info[i].ti_ctx == cnum) {
1417 /* Can't steal ctx if it has a locked entry. */
1418 if (TLB_LOCKED(i)) {
1419 #ifdef DIAGNOSTIC
1420 printf("ctx_flush: can't invalidate "
1421 "locked mapping %d "
1422 "for context %d\n", i, cnum);
1423 #ifdef DDB
1424 Debugger();
1425 #endif
1426 #endif
1427 return (1);
1428 }
1429 #ifdef DIAGNOSTIC
1430 if (i < TLB_NRESERVED)
1431 panic("TLB entry %d not locked", i);
1432 #endif
1433 			/* Invalidate this (unlocked) TLB entry. */
1434 asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
1435 tlb_info[i].ti_flags = 0;
1436 }
1437 }
1438 return (0);
1439 }
1440
1441 /*
1442 * Allocate a context. If necessary, steal one from someone else.
1443 *
1444 * The new context is flushed from the TLB before returning.
1445 */
1446 int
1447 ctx_alloc(struct pmap *pm)
1448 {
1449 int s, cnum;
1450 static int next = MINCTX;
1451
1452 if (pm == pmap_kernel()) {
1453 #ifdef DIAGNOSTIC
1454 printf("ctx_alloc: kernel pmap!\n");
1455 #endif
1456 return (0);
1457 }
1458 s = splvm();
1459
1460 /* Find a likely context. */
1461 cnum = next;
1462 do {
1463 		if ((++cnum) >= NUMCTX)
1464 cnum = MINCTX;
1465 } while (ctxbusy[cnum] != NULL && cnum != next);
1466
1467 /* Now clean it out */
1468 oops:
1469 if (cnum < MINCTX)
1470 cnum = MINCTX; /* Never steal ctx 0 or 1 */
1471 if (ctx_flush(cnum)) {
1472 /* oops -- something's wired. */
1473 		if ((++cnum) >= NUMCTX)
1474 cnum = MINCTX;
1475 goto oops;
1476 }
1477
1478 if (ctxbusy[cnum]) {
1479 #ifdef DEBUG
1480 /* We should identify this pmap and clear it */
1481 printf("Warning: stealing context %d\n", cnum);
1482 #endif
1483 ctxbusy[cnum]->pm_ctx = 0;
1484 }
1485 ctxbusy[cnum] = pm;
1486 next = cnum;
1487 splx(s);
1488 pm->pm_ctx = cnum;
1489
1490 return cnum;
1491 }
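
/*
 * Usage sketch (not compiled): contexts are handed out lazily, so a fault
 * or cache-sync path assigns one on demand before loading the PID, as
 * pmap_procwr() does above.  The helper name is hypothetical.
 */
#if 0
static int
ctx_of_sketch(struct pmap *pm)
{
	if (pm->pm_ctx == 0)
		ctx_alloc(pm);	/* may steal and flush another pmap's context */
	return (pm->pm_ctx);
}
#endif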
1492
1493 /*
1494 * Give away a context.
1495 */
1496 void
1497 ctx_free(struct pmap *pm)
1498 {
1499 int oldctx;
1500
1501 oldctx = pm->pm_ctx;
1502
1503 if (oldctx == 0)
1504 panic("ctx_free: freeing kernel context");
1505 #ifdef DIAGNOSTIC
1506 if (ctxbusy[oldctx] == 0)
1507 printf("ctx_free: freeing free context %d\n", oldctx);
1508 if (ctxbusy[oldctx] != pm) {
1509 		printf("ctx_free: freeing someone else's context\n"
1510 "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
1511 oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
1512 #ifdef DDB
1513 Debugger();
1514 #endif
1515 }
1516 #endif
1517 /* We should verify it has not been stolen and reallocated... */
1518 ctxbusy[oldctx] = NULL;
1519 ctx_flush(oldctx);
1520 }
1521
1522
1523 #ifdef DEBUG
1524 /*
1525 * Test ref/modify handling.
1526 */
1527 void pmap_testout __P((void));
1528 void
1529 pmap_testout()
1530 {
1531 vaddr_t va;
1532 volatile int *loc;
1533 int val = 0;
1534 paddr_t pa;
1535 struct vm_page *pg;
1536 int ref, mod;
1537
1538 /* Allocate a page */
1539 va = (vaddr_t)uvm_km_alloc1(kernel_map, PAGE_SIZE, 1);
1540 loc = (int*)va;
1541
1542 pmap_extract(pmap_kernel(), va, &pa);
1543 pg = PHYS_TO_VM_PAGE(pa);
1544 pmap_unwire(pmap_kernel(), va);
1545
1546 pmap_remove(pmap_kernel(), va, va+1);
1547 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1548 pmap_update(pmap_kernel());
1549
1550 /* Now clear reference and modify */
1551 ref = pmap_clear_reference(pg);
1552 mod = pmap_clear_modify(pg);
1553 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1554 (void *)(u_long)va, (long)pa,
1555 ref, mod);
1556
1557 /* Check it's properly cleared */
1558 ref = pmap_is_referenced(pg);
1559 mod = pmap_is_modified(pg);
1560 printf("Checking cleared page: ref %d, mod %d\n",
1561 ref, mod);
1562
1563 /* Reference page */
1564 val = *loc;
1565
1566 ref = pmap_is_referenced(pg);
1567 mod = pmap_is_modified(pg);
1568 printf("Referenced page: ref %d, mod %d val %x\n",
1569 ref, mod, val);
1570
1571 /* Now clear reference and modify */
1572 ref = pmap_clear_reference(pg);
1573 mod = pmap_clear_modify(pg);
1574 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1575 (void *)(u_long)va, (long)pa,
1576 ref, mod);
1577
1578 /* Modify page */
1579 *loc = 1;
1580
1581 ref = pmap_is_referenced(pg);
1582 mod = pmap_is_modified(pg);
1583 printf("Modified page: ref %d, mod %d\n",
1584 ref, mod);
1585
1586 /* Now clear reference and modify */
1587 ref = pmap_clear_reference(pg);
1588 mod = pmap_clear_modify(pg);
1589 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1590 (void *)(u_long)va, (long)pa,
1591 ref, mod);
1592
1593 /* Check it's properly cleared */
1594 ref = pmap_is_referenced(pg);
1595 mod = pmap_is_modified(pg);
1596 printf("Checking cleared page: ref %d, mod %d\n",
1597 ref, mod);
1598
1599 /* Modify page */
1600 *loc = 1;
1601
1602 ref = pmap_is_referenced(pg);
1603 mod = pmap_is_modified(pg);
1604 printf("Modified page: ref %d, mod %d\n",
1605 ref, mod);
1606
1607 /* Check pmap_protect() */
1608 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
1609 pmap_update(pmap_kernel());
1610 ref = pmap_is_referenced(pg);
1611 mod = pmap_is_modified(pg);
1612 printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
1613 ref, mod);
1614
1615 /* Now clear reference and modify */
1616 ref = pmap_clear_reference(pg);
1617 mod = pmap_clear_modify(pg);
1618 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1619 (void *)(u_long)va, (long)pa,
1620 ref, mod);
1621
1622 /* Reference page */
1623 val = *loc;
1624
1625 ref = pmap_is_referenced(pg);
1626 mod = pmap_is_modified(pg);
1627 printf("Referenced page: ref %d, mod %d val %x\n",
1628 ref, mod, val);
1629
1630 /* Now clear reference and modify */
1631 ref = pmap_clear_reference(pg);
1632 mod = pmap_clear_modify(pg);
1633 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1634 (void *)(u_long)va, (long)pa,
1635 ref, mod);
1636
1637 /* Modify page */
1638 #if 0
1639 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1640 pmap_update(pmap_kernel());
1641 #endif
1642 *loc = 1;
1643
1644 ref = pmap_is_referenced(pg);
1645 mod = pmap_is_modified(pg);
1646 printf("Modified page: ref %d, mod %d\n",
1647 ref, mod);
1648
1649 /* Check pmap_protect() */
1650 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
1651 pmap_update(pmap_kernel());
1652 ref = pmap_is_referenced(pg);
1653 mod = pmap_is_modified(pg);
1654 printf("pmap_protect(): ref %d, mod %d\n",
1655 ref, mod);
1656
1657 /* Now clear reference and modify */
1658 ref = pmap_clear_reference(pg);
1659 mod = pmap_clear_modify(pg);
1660 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1661 (void *)(u_long)va, (long)pa,
1662 ref, mod);
1663
1664 /* Reference page */
1665 val = *loc;
1666
1667 ref = pmap_is_referenced(pg);
1668 mod = pmap_is_modified(pg);
1669 printf("Referenced page: ref %d, mod %d val %x\n",
1670 ref, mod, val);
1671
1672 /* Now clear reference and modify */
1673 ref = pmap_clear_reference(pg);
1674 mod = pmap_clear_modify(pg);
1675 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1676 (void *)(u_long)va, (long)pa,
1677 ref, mod);
1678
1679 /* Modify page */
1680 #if 0
1681 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1682 pmap_update(pmap_kernel());
1683 #endif
1684 *loc = 1;
1685
1686 ref = pmap_is_referenced(pg);
1687 mod = pmap_is_modified(pg);
1688 printf("Modified page: ref %d, mod %d\n",
1689 ref, mod);
1690
1691 	/* Check pmap_page_protect() */
1692 pmap_page_protect(pg, VM_PROT_READ);
1693 ref = pmap_is_referenced(pg);
1694 mod = pmap_is_modified(pg);
1695 printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
1696 ref, mod);
1697
1698 /* Now clear reference and modify */
1699 ref = pmap_clear_reference(pg);
1700 mod = pmap_clear_modify(pg);
1701 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1702 (void *)(u_long)va, (long)pa,
1703 ref, mod);
1704
1705 /* Reference page */
1706 val = *loc;
1707
1708 ref = pmap_is_referenced(pg);
1709 mod = pmap_is_modified(pg);
1710 printf("Referenced page: ref %d, mod %d val %x\n",
1711 ref, mod, val);
1712
1713 /* Now clear reference and modify */
1714 ref = pmap_clear_reference(pg);
1715 mod = pmap_clear_modify(pg);
1716 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1717 (void *)(u_long)va, (long)pa,
1718 ref, mod);
1719
1720 /* Modify page */
1721 #if 0
1722 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1723 pmap_update(pmap_kernel());
1724 #endif
1725 *loc = 1;
1726
1727 ref = pmap_is_referenced(pg);
1728 mod = pmap_is_modified(pg);
1729 printf("Modified page: ref %d, mod %d\n",
1730 ref, mod);
1731
1732 	/* Check pmap_page_protect() */
1733 pmap_page_protect(pg, VM_PROT_NONE);
1734 ref = pmap_is_referenced(pg);
1735 mod = pmap_is_modified(pg);
1736 printf("pmap_page_protect(): ref %d, mod %d\n",
1737 ref, mod);
1738
1739 /* Now clear reference and modify */
1740 ref = pmap_clear_reference(pg);
1741 mod = pmap_clear_modify(pg);
1742 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1743 (void *)(u_long)va, (long)pa,
1744 ref, mod);
1745
1746
1747 /* Reference page */
1748 val = *loc;
1749
1750 ref = pmap_is_referenced(pg);
1751 mod = pmap_is_modified(pg);
1752 printf("Referenced page: ref %d, mod %d val %x\n",
1753 ref, mod, val);
1754
1755 /* Now clear reference and modify */
1756 ref = pmap_clear_reference(pg);
1757 mod = pmap_clear_modify(pg);
1758 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1759 (void *)(u_long)va, (long)pa,
1760 ref, mod);
1761
1762 /* Modify page */
1763 #if 0
1764 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1765 pmap_update(pmap_kernel());
1766 #endif
1767 *loc = 1;
1768
1769 ref = pmap_is_referenced(pg);
1770 mod = pmap_is_modified(pg);
1771 printf("Modified page: ref %d, mod %d\n",
1772 ref, mod);
1773
1774 /* Unmap page */
1775 pmap_remove(pmap_kernel(), va, va+1);
1776 pmap_update(pmap_kernel());
1777 ref = pmap_is_referenced(pg);
1778 mod = pmap_is_modified(pg);
1779 printf("Unmapped page: ref %d, mod %d\n", ref, mod);
1780
1781 /* Now clear reference and modify */
1782 ref = pmap_clear_reference(pg);
1783 mod = pmap_clear_modify(pg);
1784 printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1785 (void *)(u_long)va, (long)pa, ref, mod);
1786
1787 /* Check it's properly cleared */
1788 ref = pmap_is_referenced(pg);
1789 mod = pmap_is_modified(pg);
1790 printf("Checking cleared page: ref %d, mod %d\n",
1791 ref, mod);
1792
1793 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
1794 VM_PROT_ALL|PMAP_WIRED);
1795 uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE);
1796 }
1797 #endif
1798