1 /* $NetBSD: pmap.c,v 1.9 2003/05/08 18:13:22 thorpej Exp $ */
2 /*-
3 * Copyright (c) 2001 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include "opt_altivec.h"
70 #include "opt_pmap.h"
71 #include <sys/param.h>
72 #include <sys/malloc.h>
73 #include <sys/proc.h>
74 #include <sys/user.h>
75 #include <sys/pool.h>
76 #include <sys/queue.h>
77 #include <sys/device.h> /* for evcnt */
78 #include <sys/systm.h>
79
80 #if __NetBSD_Version__ < 105010000
81 #include <vm/vm.h>
82 #include <vm/vm_kern.h>
83 #define splvm() splimp()
84 #endif
85
86 #include <uvm/uvm.h>
87
88 #include <machine/pcb.h>
89 #include <machine/powerpc.h>
90 #include <powerpc/spr.h>
91 #include <powerpc/oea/sr_601.h>
92 #if __NetBSD_Version__ > 105010000
93 #include <powerpc/oea/bat.h>
94 #else
95 #include <powerpc/bat.h>
96 #endif
97
98 #if defined(DEBUG) || defined(PMAPCHECK)
99 #define STATIC
100 #else
101 #define STATIC static
102 #endif
103
104 #ifdef ALTIVEC
105 int pmap_use_altivec;
106 #endif
107
108 volatile struct pteg *pmap_pteg_table;
109 unsigned int pmap_pteg_cnt;
110 unsigned int pmap_pteg_mask;
111 paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */
112
113 struct pmap kernel_pmap_;
114 unsigned int pmap_pages_stolen;
115 u_long pmap_pte_valid;
116 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
117 u_long pmap_pvo_enter_depth;
118 u_long pmap_pvo_remove_depth;
119 #endif
120
121 int physmem;
122 #ifndef MSGBUFADDR
123 extern paddr_t msgbuf_paddr;
124 #endif
125
126 static struct mem_region *mem, *avail;
127 static u_int mem_cnt, avail_cnt;
128
129 #ifdef __HAVE_PMAP_PHYSSEG
130 /*
131 * This is a cache of referenced/modified bits.
132 * Bits herein are shifted by ATTR_SHFT.
133 */
134 #define ATTR_SHFT 4
135 struct pmap_physseg pmap_physseg;
136 #endif
137
138 /*
139 * The following structure is exactly 32 bytes long (one cacheline).
140 */
141 struct pvo_entry {
142 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
143 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
144 struct pte pvo_pte; /* Prebuilt PTE */
145 pmap_t pvo_pmap; /* ptr to owning pmap */
146 vaddr_t pvo_vaddr; /* VA of entry */
147 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */
148 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */
149 #define PVO_WIRED 0x0010 /* PVO entry is wired */
150 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */
151 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */
152 };
153 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
154 #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
155 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
156 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
157 #define PVO_PTEGIDX_CLR(pvo) \
158 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
159 #define PVO_PTEGIDX_SET(pvo,i) \
160 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
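/*
 * Size sketch (illustrative, assuming a 32-bit ILP32 build): the two list
 * links are two pointers each (16 bytes total), struct pte is two 32-bit
 * words (8 bytes), and pvo_pmap plus pvo_vaddr add 4 bytes apiece, which
 * is how the structure comes out to the 32 bytes (one cacheline) claimed
 * above.  The low bits of pvo_vaddr are free to hold the PVO_* flags
 * because the VA stored there is always page aligned.
 */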
161
162 TAILQ_HEAD(pvo_tqhead, pvo_entry);
163 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */
164 struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of kernel's unmanaged mappings */
165 struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */
166
167 struct pool pmap_pool; /* pool for pmap structures */
168 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */
169 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */
170
171 /*
172 * We keep a cache of unmanaged pages to be used for pvo entries for
173 * unmanaged pages.
174 */
175 struct pvo_page {
176 SIMPLEQ_ENTRY(pvo_page) pvop_link;
177 };
178 SIMPLEQ_HEAD(pvop_head, pvo_page);
179 struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
180 struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
181 u_long pmap_upvop_free;
182 u_long pmap_upvop_maxfree;
183 u_long pmap_mpvop_free;
184 u_long pmap_mpvop_maxfree;
185
186 STATIC void *pmap_pool_ualloc(struct pool *, int);
187 STATIC void *pmap_pool_malloc(struct pool *, int);
188
189 STATIC void pmap_pool_ufree(struct pool *, void *);
190 STATIC void pmap_pool_mfree(struct pool *, void *);
191
192 static struct pool_allocator pmap_pool_mallocator = {
193 pmap_pool_malloc, pmap_pool_mfree, 0,
194 };
195
196 static struct pool_allocator pmap_pool_uallocator = {
197 pmap_pool_ualloc, pmap_pool_ufree, 0,
198 };
199
200 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
201 void pmap_pte_print(volatile struct pte *);
202 #endif
203
204 #ifdef DDB
205 void pmap_pteg_check(void);
206 void pmap_pteg_dist(void);
207 void pmap_print_pte(pmap_t, vaddr_t);
208 void pmap_print_mmuregs(void);
209 #endif
210
211 #if defined(DEBUG) || defined(PMAPCHECK)
212 #ifdef PMAPCHECK
213 int pmapcheck = 1;
214 #else
215 int pmapcheck = 0;
216 #endif
217 void pmap_pvo_verify(void);
218 STATIC void pmap_pvo_check(const struct pvo_entry *);
219 #define PMAP_PVO_CHECK(pvo) \
220 do { \
221 if (pmapcheck) \
222 pmap_pvo_check(pvo); \
223 } while (0)
224 #else
225 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0)
226 #endif
227 STATIC int pmap_pte_insert(int, struct pte *);
228 STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
229 vaddr_t, paddr_t, register_t, int);
230 STATIC void pmap_pvo_remove(struct pvo_entry *, int);
231 STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
232 STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
233
234 STATIC void tlbia(void);
235
236 STATIC void pmap_release(pmap_t);
237 STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);
238
239 #define VSID_NBPW (sizeof(uint32_t) * 8)
240 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
241
242 static int pmap_initialized;
243
244 #if defined(DEBUG) || defined(PMAPDEBUG)
245 #define PMAPDEBUG_BOOT 0x0001
246 #define PMAPDEBUG_PTE 0x0002
247 #define PMAPDEBUG_EXEC 0x0008
248 #define PMAPDEBUG_PVOENTER 0x0010
249 #define PMAPDEBUG_PVOREMOVE 0x0020
250 #define PMAPDEBUG_ACTIVATE 0x0100
251 #define PMAPDEBUG_CREATE 0x0200
252 #define PMAPDEBUG_ENTER 0x1000
253 #define PMAPDEBUG_KENTER 0x2000
254 #define PMAPDEBUG_KREMOVE 0x4000
255 #define PMAPDEBUG_REMOVE 0x8000
256 unsigned int pmapdebug = 0;
257 # define DPRINTF(x) printf x
258 # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x
259 #else
260 # define DPRINTF(x)
261 # define DPRINTFN(n, x)
262 #endif
263
264
265 #ifdef PMAPCOUNTERS
266 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
267 #define PMAPCOUNT2(ev) ((ev).ev_count++)
268
269 struct evcnt pmap_evcnt_mappings =
270 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
271 "pmap", "pages mapped");
272 struct evcnt pmap_evcnt_unmappings =
273 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
274 "pmap", "pages unmapped");
275
276 struct evcnt pmap_evcnt_kernel_mappings =
277 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
278 "pmap", "kernel pages mapped");
279 struct evcnt pmap_evcnt_kernel_unmappings =
280 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
281 "pmap", "kernel pages unmapped");
282
283 struct evcnt pmap_evcnt_mappings_replaced =
284 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
285 "pmap", "page mappings replaced");
286
287 struct evcnt pmap_evcnt_exec_mappings =
288 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
289 "pmap", "exec pages mapped");
290 struct evcnt pmap_evcnt_exec_cached =
291 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
292 "pmap", "exec pages cached");
293
294 struct evcnt pmap_evcnt_exec_synced =
295 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
296 "pmap", "exec pages synced");
297 struct evcnt pmap_evcnt_exec_synced_clear_modify =
298 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
299 "pmap", "exec pages synced (CM)");
300
301 struct evcnt pmap_evcnt_exec_uncached_page_protect =
302 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
303 "pmap", "exec pages uncached (PP)");
304 struct evcnt pmap_evcnt_exec_uncached_clear_modify =
305 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
306 "pmap", "exec pages uncached (CM)");
307 struct evcnt pmap_evcnt_exec_uncached_zero_page =
308 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
309 "pmap", "exec pages uncached (ZP)");
310 struct evcnt pmap_evcnt_exec_uncached_copy_page =
311 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
312 "pmap", "exec pages uncached (CP)");
313
314 struct evcnt pmap_evcnt_updates =
315 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
316 "pmap", "updates");
317 struct evcnt pmap_evcnt_collects =
318 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
319 "pmap", "collects");
320 struct evcnt pmap_evcnt_copies =
321 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
322 "pmap", "copies");
323
324 struct evcnt pmap_evcnt_ptes_spilled =
325 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
326 "pmap", "ptes spilled from overflow");
327 struct evcnt pmap_evcnt_ptes_unspilled =
328 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
329 "pmap", "ptes not spilled");
330 struct evcnt pmap_evcnt_ptes_evicted =
331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
332 "pmap", "ptes evicted");
333
334 struct evcnt pmap_evcnt_ptes_primary[8] = {
335 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
336 "pmap", "ptes added at primary[0]"),
337 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
338 "pmap", "ptes added at primary[1]"),
339 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
340 "pmap", "ptes added at primary[2]"),
341 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
342 "pmap", "ptes added at primary[3]"),
343
344 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
345 "pmap", "ptes added at primary[4]"),
346 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
347 "pmap", "ptes added at primary[5]"),
348 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
349 "pmap", "ptes added at primary[6]"),
350 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
351 "pmap", "ptes added at primary[7]"),
352 };
353 struct evcnt pmap_evcnt_ptes_secondary[8] = {
354 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
355 "pmap", "ptes added at secondary[0]"),
356 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
357 "pmap", "ptes added at secondary[1]"),
358 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
359 "pmap", "ptes added at secondary[2]"),
360 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
361 "pmap", "ptes added at secondary[3]"),
362
363 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
364 "pmap", "ptes added at secondary[4]"),
365 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
366 "pmap", "ptes added at secondary[5]"),
367 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
368 "pmap", "ptes added at secondary[6]"),
369 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
370 "pmap", "ptes added at secondary[7]"),
371 };
372 struct evcnt pmap_evcnt_ptes_removed =
373 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
374 "pmap", "ptes removed");
375 struct evcnt pmap_evcnt_ptes_changed =
376 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
377 "pmap", "ptes changed");
378
379 /*
380 * From pmap_subr.c
381 */
382 extern struct evcnt pmap_evcnt_zeroed_pages;
383 extern struct evcnt pmap_evcnt_copied_pages;
384 extern struct evcnt pmap_evcnt_idlezeroed_pages;
385 #else
386 #define PMAPCOUNT(ev) ((void) 0)
387 #define PMAPCOUNT2(ev) ((void) 0)
388 #endif
389
390 #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va))
391 #define TLBSYNC() __asm __volatile("tlbsync")
392 #define SYNC() __asm __volatile("sync")
393 #define EIEIO() __asm __volatile("eieio")
394 #define MFMSR() mfmsr()
395 #define MTMSR(psl) mtmsr(psl)
396 #define MFPVR() mfpvr()
397 #define MFSRIN(va) mfsrin(va)
398 #define MFTB() mfrtcltbl()
399
400 static __inline register_t
401 mfsrin(vaddr_t va)
402 {
403 register_t sr;
404 __asm __volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
405 return sr;
406 }
407
408 static __inline register_t
409 pmap_interrupts_off(void)
410 {
411 register_t msr = MFMSR();
412 if (msr & PSL_EE)
413 MTMSR(msr & ~PSL_EE);
414 return msr;
415 }
416
417 static void
418 pmap_interrupts_restore(register_t msr)
419 {
420 if (msr & PSL_EE)
421 MTMSR(msr);
422 }
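/*
 * Typical usage pattern throughout this file (sketch only):
 *
 *	register_t msr;
 *
 *	msr = pmap_interrupts_off();
 *	... walk/modify pmap_pteg_table or pmap_pvo_table ...
 *	pmap_interrupts_restore(msr);
 *
 * Since pmap_interrupts_restore() only writes the MSR when external
 * interrupts were enabled to begin with, a nested off/restore pair
 * inside an already-disabled section is effectively a no-op.
 */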
423
424 static __inline u_int32_t
425 mfrtcltbl(void)
426 {
427
428 if ((MFPVR() >> 16) == MPC601)
429 return (mfrtcl() >> 7);
430 else
431 return (mftbl());
432 }
433
434 /*
435 * These small routines may have to be replaced,
436 * if/when we support processors other than the 604.
437 */
438
439 void
440 tlbia(void)
441 {
442 caddr_t i;
443
444 SYNC();
445 /*
446 * Why not use "tlbia"? Because not all processors implement it.
447 *
448 * This needs to be a per-cpu callback to do the appropriate thing
449 * for the CPU. XXX
450 */
451 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
452 TLBIE(i);
453 EIEIO();
454 SYNC();
455 }
456 TLBSYNC();
457 SYNC();
458 }
459
460 static __inline register_t
461 va_to_vsid(const struct pmap *pm, vaddr_t addr)
462 {
463 return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID);
464 }
465
466 static __inline register_t
467 va_to_pteg(const struct pmap *pm, vaddr_t addr)
468 {
469 register_t hash;
470
471 hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
472 return hash & pmap_pteg_mask;
473 }
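/*
 * Worked example (illustrative numbers only): with the minimum 64KB page
 * table, pmap_pteg_mask is 0x3ff (1024 groups).  For a VSID of 0x12345
 * and an EA of 0x30802000, the page index is
 * (EA & ADDR_PIDX) >> ADDR_PIDX_SHFT = 0x0802, so the primary group is
 * (0x12345 ^ 0x0802) & 0x3ff = 0x347.  The secondary group used elsewhere
 * in this file is simply the primary group XORed with pmap_pteg_mask.
 */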
474
475 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
476 /*
477 * Given a PTE in the page table, calculate the VADDR that hashes to it.
478 * The only bit of magic is that the top 4 bits of the address don't
479 * technically exist in the PTE. But we know we reserved 4 bits of the
480 * VSID for them, so that's how we recover them.
481 */
482 static vaddr_t
483 pmap_pte_to_va(volatile const struct pte *pt)
484 {
485 vaddr_t va;
486 uintptr_t ptaddr = (uintptr_t) pt;
487
488 if (pt->pte_hi & PTE_HID)
489 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
490
491 /* PPC Bits 10-19 */
492 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
493 va <<= ADDR_PIDX_SHFT;
494
495 /* PPC Bits 4-9 */
496 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
497
498 /* PPC Bits 0-3 */
499 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
500
501 return va;
502 }
503 #endif
504
505 static __inline struct pvo_head *
506 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
507 {
508 #ifdef __HAVE_VM_PAGE_MD
509 struct vm_page *pg;
510
511 pg = PHYS_TO_VM_PAGE(pa);
512 if (pg_p != NULL)
513 *pg_p = pg;
514 if (pg == NULL)
515 return &pmap_pvo_unmanaged;
516 return &pg->mdpage.mdpg_pvoh;
517 #endif
518 #ifdef __HAVE_PMAP_PHYSSEG
519 int bank, pg;
520
521 bank = vm_physseg_find(atop(pa), &pg);
522 if (pg_p != NULL)
523 *pg_p = pg;
524 if (bank == -1)
525 return &pmap_pvo_unmanaged;
526 return &vm_physmem[bank].pmseg.pvoh[pg];
527 #endif
528 }
529
530 static __inline struct pvo_head *
531 vm_page_to_pvoh(struct vm_page *pg)
532 {
533 #ifdef __HAVE_VM_PAGE_MD
534 return &pg->mdpage.mdpg_pvoh;
535 #endif
536 #ifdef __HAVE_PMAP_PHYSSEG
537 return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
538 #endif
539 }
540
541
542 #ifdef __HAVE_PMAP_PHYSSEG
543 static __inline char *
544 pa_to_attr(paddr_t pa)
545 {
546 int bank, pg;
547
548 bank = vm_physseg_find(atop(pa), &pg);
549 if (bank == -1)
550 return NULL;
551 return &vm_physmem[bank].pmseg.attrs[pg];
552 }
553 #endif
554
555 static __inline void
556 pmap_attr_clear(struct vm_page *pg, int ptebit)
557 {
558 #ifdef __HAVE_PMAP_PHYSSEG
559 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT);
560 #endif
561 #ifdef __HAVE_VM_PAGE_MD
562 pg->mdpage.mdpg_attrs &= ~ptebit;
563 #endif
564 }
565
566 static __inline int
567 pmap_attr_fetch(struct vm_page *pg)
568 {
569 #ifdef __HAVE_PMAP_PHYSSEG
570 return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT;
571 #endif
572 #ifdef __HAVE_VM_PAGE_MD
573 return pg->mdpage.mdpg_attrs;
574 #endif
575 }
576
577 static __inline void
578 pmap_attr_save(struct vm_page *pg, int ptebit)
579 {
580 #ifdef __HAVE_PMAP_PHYSSEG
581 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT);
582 #endif
583 #ifdef __HAVE_VM_PAGE_MD
584 pg->mdpage.mdpg_attrs |= ptebit;
585 #endif
586 }
587
588 static __inline int
589 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
590 {
591 if (pt->pte_hi == pvo_pt->pte_hi
592 #if 0
593 && ((pt->pte_lo ^ pvo_pt->pte_lo) &
594 ~(PTE_REF|PTE_CHG)) == 0
595 #endif
596 )
597 return 1;
598 return 0;
599 }
600
601 static __inline void
602 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
603 {
604 /*
605 * Construct the PTE. Default to IMB initially. Valid bit
606 * only gets set when the real pte is set in memory.
607 *
608 * Note: Don't set the valid bit for correct operation of tlb update.
609 */
610 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
611 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
612 pt->pte_lo = pte_lo;
613 }
614
615 static __inline void
616 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
617 {
618 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
619 }
620
621 static __inline void
622 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
623 {
624 /*
625 * As shown in Section 7.6.3.2.3
626 */
627 pt->pte_lo &= ~ptebit;
628 TLBIE(va);
629 SYNC();
630 EIEIO();
631 TLBSYNC();
632 SYNC();
633 }
634
635 static __inline void
636 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
637 {
638 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
639 if (pvo_pt->pte_hi & PTE_VALID)
640 panic("pte_set: setting an already valid pte %p", pvo_pt);
641 #endif
642 pvo_pt->pte_hi |= PTE_VALID;
643 /*
644 * Update the PTE as defined in section 7.6.3.1
645 * Note that the REF/CHG bits are from pvo_pt and thus should
646 * have been saved so this routine can restore them (if desired).
647 */
648 pt->pte_lo = pvo_pt->pte_lo;
649 EIEIO();
650 pt->pte_hi = pvo_pt->pte_hi;
651 SYNC();
652 pmap_pte_valid++;
653 }
654
655 static __inline void
656 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
657 {
658 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
659 if ((pvo_pt->pte_hi & PTE_VALID) == 0)
660 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
661 if ((pt->pte_hi & PTE_VALID) == 0)
662 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
663 #endif
664
665 pvo_pt->pte_hi &= ~PTE_VALID;
666 /*
667 * Force the ref & chg bits back into the PTEs.
668 */
669 SYNC();
670 /*
671 * Invalidate the pte ... (Section 7.6.3.3)
672 */
673 pt->pte_hi &= ~PTE_VALID;
674 SYNC();
675 TLBIE(va);
676 SYNC();
677 EIEIO();
678 TLBSYNC();
679 SYNC();
680 /*
681 * Save the ref & chg bits ...
682 */
683 pmap_pte_synch(pt, pvo_pt);
684 pmap_pte_valid--;
685 }
686
687 static __inline void
688 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
689 {
690 /*
691 * Invalidate the PTE
692 */
693 pmap_pte_unset(pt, pvo_pt, va);
694 pmap_pte_set(pt, pvo_pt);
695 }
696
697 /*
698 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
699 * (either primary or secondary location).
700 *
701 * Note: both the destination and source PTEs must not have PTE_VALID set.
702 */
703
704 STATIC int
705 pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
706 {
707 volatile struct pte *pt;
708 int i;
709
710 #if defined(DEBUG)
711 DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%lx 0x%lx\n",
712 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
713 #endif
714 /*
715 * First try primary hash.
716 */
717 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
718 if ((pt->pte_hi & PTE_VALID) == 0) {
719 pvo_pt->pte_hi &= ~PTE_HID;
720 pmap_pte_set(pt, pvo_pt);
721 return i;
722 }
723 }
724
725 /*
726 * Now try secondary hash.
727 */
728 ptegidx ^= pmap_pteg_mask;
729 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
730 if ((pt->pte_hi & PTE_VALID) == 0) {
731 pvo_pt->pte_hi |= PTE_HID;
732 pmap_pte_set(pt, pvo_pt);
733 return i;
734 }
735 }
736 return -1;
737 }
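/*
 * A PTEG on the 32-bit OEA MMU is 64 bytes: eight 8-byte PTEs.  Hence
 * pmap_pte_insert() returns a slot number in the range 0-7 (which the
 * callers record with PVO_PTEGIDX_SET) or -1 when both the primary and
 * the secondary group are full, in which case the mapping is kept as an
 * "evicted" PVO until pmap_pte_spill() can make room for it.
 */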
738
739 /*
740 * Spill handler.
741 *
742 * Tries to spill a page table entry from the overflow area.
743 * This runs in either real mode (if dealing with an exception spill)
744 * or virtual mode when dealing with manually spilling one of the
745 * kernel's pte entries. In either case, interrupts are already
746 * disabled.
747 */
748 int
749 pmap_pte_spill(struct pmap *pm, vaddr_t addr)
750 {
751 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
752 struct pvo_entry *pvo;
753 struct pvo_tqhead *pvoh, *vpvoh;
754 int ptegidx, i, j;
755 volatile struct pteg *pteg;
756 volatile struct pte *pt;
757
758 ptegidx = va_to_pteg(pm, addr);
759
760 /*
761 * Have to substitute some entry. Use the primary hash for this.
762 *
763 * Use low bits of timebase as random generator
764 */
765 pteg = &pmap_pteg_table[ptegidx];
766 i = MFTB() & 7;
767 pt = &pteg->pt[i];
768
769 source_pvo = NULL;
770 victim_pvo = NULL;
771 pvoh = &pmap_pvo_table[ptegidx];
772 TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
773
774 /*
775 * We need to find the pvo entry for this address...
776 */
777 PMAP_PVO_CHECK(pvo); /* sanity check */
778
779 /*
780 * If we haven't found the source and we come to a PVO with
781 * a valid PTE, then we know we can't find it because all
782 * evicted PVOs always come first in the list.
783 */
784 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
785 break;
786 if (source_pvo == NULL && pm == pvo->pvo_pmap &&
787 addr == PVO_VADDR(pvo)) {
788
789 /*
790 * Now we have found the entry to be spilled into the
791 * pteg. Attempt to insert it into the page table.
792 */
793 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
794 if (j >= 0) {
795 PVO_PTEGIDX_SET(pvo, j);
796 PMAP_PVO_CHECK(pvo); /* sanity check */
797 pvo->pvo_pmap->pm_evictions--;
798 PMAPCOUNT(ptes_spilled);
799 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
800 ? pmap_evcnt_ptes_secondary
801 : pmap_evcnt_ptes_primary)[j]);
802
803 /*
804 * Since we keep the evicted entries at the
805 * front of the PVO list, we need to move this
806 * (now resident) PVO after the evicted
807 * entries.
808 */
809 next_pvo = TAILQ_NEXT(pvo, pvo_olink);
810
811 /*
812 * If we don't have to move (either we were the
813 * last entry or the next entry was valid),
814 * don't change our position. Otherwise
815 * move ourselves to the tail of the queue.
816 */
817 if (next_pvo != NULL &&
818 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
819 TAILQ_REMOVE(pvoh, pvo, pvo_olink);
820 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
821 }
822 return 1;
823 }
824 source_pvo = pvo;
825 if (victim_pvo != NULL)
826 break;
827 }
828
829 /*
830 * We also need the pvo entry of the victim we are replacing
831 * so we can save the R & C bits of its PTE.
832 */
833 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
834 pmap_pte_compare(pt, &pvo->pvo_pte)) {
835 vpvoh = pvoh;
836 victim_pvo = pvo;
837 if (source_pvo != NULL)
838 break;
839 }
840 }
841
842 if (source_pvo == NULL) {
843 PMAPCOUNT(ptes_unspilled);
844 return 0;
845 }
846
847 if (victim_pvo == NULL) {
848 if ((pt->pte_hi & PTE_HID) == 0)
849 panic("pmap_pte_spill: victim p-pte (%p) has "
850 "no pvo entry!", pt);
851
852 /*
853 * If this is a secondary PTE, we need to search
854 * its primary pvo bucket for the matching PVO.
855 */
856 vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];
857 TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
858 PMAP_PVO_CHECK(pvo); /* sanity check */
859
860 /*
861 * We also need the pvo entry of the victim we are
862 * replacing so we can save the R & C bits of its PTE.
863 */
864 if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
865 victim_pvo = pvo;
866 break;
867 }
868 }
869 if (victim_pvo == NULL)
870 panic("pmap_pte_spill: victim s-pte (%p) has "
871 "no pvo entry!", pt);
872 }
873
874 /*
875 * We are invalidating the TLB entry for the EA of the
876 * mapping we are replacing even though it's valid; if we don't,
877 * we lose any ref/chg bit changes contained in the TLB
878 * entry.
879 */
880 source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
881
882 /*
883 * To enforce the PVO list ordering constraint that all
884 * evicted entries should come before all valid entries,
885 * move the source PVO to the tail of its list and the
886 * victim PVO to the head of its list (which might not be
887 * the same list, if the victim was using the secondary hash).
888 */
889 TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
890 TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
891 TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
892 TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
893 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
894 pmap_pte_set(pt, &source_pvo->pvo_pte);
895 victim_pvo->pvo_pmap->pm_evictions++;
896 source_pvo->pvo_pmap->pm_evictions--;
897
898 PVO_PTEGIDX_CLR(victim_pvo);
899 PVO_PTEGIDX_SET(source_pvo, i);
900 PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
901 PMAPCOUNT(ptes_spilled);
902 PMAPCOUNT(ptes_evicted);
903 PMAPCOUNT(ptes_removed);
904
905 PMAP_PVO_CHECK(victim_pvo);
906 PMAP_PVO_CHECK(source_pvo);
907 return 1;
908 }
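/*
 * To summarize the ordering invariant relied on above: within each
 * pmap_pvo_table[] bucket, PVOs whose PTE has been evicted from the
 * hardware page table sit at the head of the tailq and resident
 * (PTE_VALID) PVOs at the tail.  pmap_pvo_enter() establishes this by
 * inserting at the head on eviction and at the tail on success, and the
 * spill code above preserves it whenever it moves entries around.
 */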
909
910 /*
911 * Restrict given range to physical memory
912 */
913 void
914 pmap_real_memory(paddr_t *start, psize_t *size)
915 {
916 struct mem_region *mp;
917
918 for (mp = mem; mp->size; mp++) {
919 if (*start + *size > mp->start
920 && *start < mp->start + mp->size) {
921 if (*start < mp->start) {
922 *size -= mp->start - *start;
923 *start = mp->start;
924 }
925 if (*start + *size > mp->start + mp->size)
926 *size = mp->start + mp->size - *start;
927 return;
928 }
929 }
930 *size = 0;
931 }
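/*
 * Example (illustrative values): given a single memory region covering
 * [0x00000000, 0x02000000), a request with *start = 0x01fff000 and
 * *size = 0x4000 overlaps the region, so *start is left alone and *size
 * is clamped to 0x02000000 - 0x01fff000 = 0x1000.  A range that misses
 * every region comes back with *size set to 0.
 */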
932
933 /*
934 * Initialize anything else for pmap handling.
935 * Called during vm_init().
936 */
937 void
938 pmap_init(void)
939 {
940 int s;
941 #ifdef __HAVE_PMAP_PHYSSEG
942 struct pvo_tqhead *pvoh;
943 int bank;
944 long sz;
945 char *attr;
946
947 s = splvm();
948 pvoh = pmap_physseg.pvoh;
949 attr = pmap_physseg.attrs;
950 for (bank = 0; bank < vm_nphysseg; bank++) {
951 sz = vm_physmem[bank].end - vm_physmem[bank].start;
952 vm_physmem[bank].pmseg.pvoh = pvoh;
953 vm_physmem[bank].pmseg.attrs = attr;
954 for (; sz > 0; sz--, pvoh++, attr++) {
955 TAILQ_INIT(pvoh);
956 *attr = 0;
957 }
958 }
959 splx(s);
960 #endif
961
962 s = splvm();
963 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
964 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
965 &pmap_pool_mallocator);
966
967 pool_setlowat(&pmap_mpvo_pool, 1008);
968
969 pmap_initialized = 1;
970 splx(s);
971
972 #ifdef PMAPCOUNTERS
973 evcnt_attach_static(&pmap_evcnt_mappings);
974 evcnt_attach_static(&pmap_evcnt_mappings_replaced);
975 evcnt_attach_static(&pmap_evcnt_unmappings);
976
977 evcnt_attach_static(&pmap_evcnt_kernel_mappings);
978 evcnt_attach_static(&pmap_evcnt_kernel_unmappings);
979
980 evcnt_attach_static(&pmap_evcnt_exec_mappings);
981 evcnt_attach_static(&pmap_evcnt_exec_cached);
982 evcnt_attach_static(&pmap_evcnt_exec_synced);
983 evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify);
984
985 evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect);
986 evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify);
987 evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page);
988 evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page);
989
990 evcnt_attach_static(&pmap_evcnt_zeroed_pages);
991 evcnt_attach_static(&pmap_evcnt_copied_pages);
992 evcnt_attach_static(&pmap_evcnt_idlezeroed_pages);
993
994 evcnt_attach_static(&pmap_evcnt_updates);
995 evcnt_attach_static(&pmap_evcnt_collects);
996 evcnt_attach_static(&pmap_evcnt_copies);
997
998 evcnt_attach_static(&pmap_evcnt_ptes_spilled);
999 evcnt_attach_static(&pmap_evcnt_ptes_unspilled);
1000 evcnt_attach_static(&pmap_evcnt_ptes_evicted);
1001 evcnt_attach_static(&pmap_evcnt_ptes_removed);
1002 evcnt_attach_static(&pmap_evcnt_ptes_changed);
1003 evcnt_attach_static(&pmap_evcnt_ptes_primary[0]);
1004 evcnt_attach_static(&pmap_evcnt_ptes_primary[1]);
1005 evcnt_attach_static(&pmap_evcnt_ptes_primary[2]);
1006 evcnt_attach_static(&pmap_evcnt_ptes_primary[3]);
1007 evcnt_attach_static(&pmap_evcnt_ptes_primary[4]);
1008 evcnt_attach_static(&pmap_evcnt_ptes_primary[5]);
1009 evcnt_attach_static(&pmap_evcnt_ptes_primary[6]);
1010 evcnt_attach_static(&pmap_evcnt_ptes_primary[7]);
1011 evcnt_attach_static(&pmap_evcnt_ptes_secondary[0]);
1012 evcnt_attach_static(&pmap_evcnt_ptes_secondary[1]);
1013 evcnt_attach_static(&pmap_evcnt_ptes_secondary[2]);
1014 evcnt_attach_static(&pmap_evcnt_ptes_secondary[3]);
1015 evcnt_attach_static(&pmap_evcnt_ptes_secondary[4]);
1016 evcnt_attach_static(&pmap_evcnt_ptes_secondary[5]);
1017 evcnt_attach_static(&pmap_evcnt_ptes_secondary[6]);
1018 evcnt_attach_static(&pmap_evcnt_ptes_secondary[7]);
1019 #endif
1020 }
1021
1022 /*
1023 * Allocate, initialize, and return a new physical map.
1024 */
1025 pmap_t
1026 pmap_create(void)
1027 {
1028 pmap_t pm;
1029
1030 pm = pool_get(&pmap_pool, PR_WAITOK);
1031 memset((caddr_t)pm, 0, sizeof *pm);
1032 pmap_pinit(pm);
1033
1034 DPRINTFN(CREATE,("pmap_create: pm %p:\n"
1035 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n"
1036 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n", pm,
1037 pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3],
1038 pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7],
1039 pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11],
1040 pm->pm_sr[12], pm->pm_sr[13], pm->pm_sr[14], pm->pm_sr[15]));
1041 return pm;
1042 }
1043
1044 /*
1045 * Initialize a preallocated and zeroed pmap structure.
1046 */
1047 void
1048 pmap_pinit(pmap_t pm)
1049 {
1050 register_t entropy = MFTB();
1051 register_t mask;
1052 int i;
1053
1054 /*
1055 * Allocate some segment registers for this pmap.
1056 */
1057 pm->pm_refs = 1;
1058 for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1059 static register_t pmap_vsidcontext;
1060 register_t hash;
1061 unsigned int n;
1062
1063 /* Create a new value by multiplying by a prime and adding in
1064 * entropy from the timebase register. This is to make the
1065 * VSID more random so that the PT Hash function collides
1066 * less often. (note that the prime causes gcc to do shifts
1067 * instead of a multiply)
1068 */
1069 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1070 hash = pmap_vsidcontext & (NPMAPS - 1);
1071 if (hash == 0) /* 0 is special, avoid it */
1072 continue;
1073 n = hash >> 5;
1074 mask = 1L << (hash & (VSID_NBPW-1));
1075 hash = pmap_vsidcontext;
1076 if (pmap_vsid_bitmap[n] & mask) { /* collision? */
1077 /* anything free in this bucket? */
1078 if (~pmap_vsid_bitmap[n] == 0) {
1079 entropy = hash >> PTE_VSID_SHFT;
1080 continue;
1081 }
1082 i = ffs(~pmap_vsid_bitmap[n]) - 1;
1083 mask = 1L << i;
1084 hash &= ~(VSID_NBPW-1);
1085 hash |= i;
1086 }
1087 /*
1088 * Make sure to clear out the SR_KEY_LEN bits because we put
1089 * our data in those bits (to identify the segment).
1090 */
1091 hash &= PTE_VSID >> (PTE_VSID_SHFT + SR_KEY_LEN);
1092 pmap_vsid_bitmap[n] |= mask;
1093 for (i = 0; i < 16; i++)
1094 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY;
1095 return;
1096 }
1097 panic("pmap_pinit: out of segments");
1098 }
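/*
 * Illustrative sketch: if the hash chosen above were 0x00123, the final
 * loop fills in
 *
 *	pm->pm_sr[0]  = VSID_MAKE(0,  0x00123) | SR_PRKEY;
 *	...
 *	pm->pm_sr[15] = VSID_MAKE(15, 0x00123) | SR_PRKEY;
 *
 * so all sixteen segments of the address space share a single allocation
 * out of pmap_vsid_bitmap[] yet carry distinct VSIDs, and the segment
 * number can later be recovered from a VSID with VSID_TO_SR() (as
 * pmap_pte_to_va() does).
 */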
1099
1100 /*
1101 * Add a reference to the given pmap.
1102 */
1103 void
1104 pmap_reference(pmap_t pm)
1105 {
1106 pm->pm_refs++;
1107 }
1108
1109 /*
1110 * Retire the given pmap from service.
1111 * Should only be called if the map contains no valid mappings.
1112 */
1113 void
1114 pmap_destroy(pmap_t pm)
1115 {
1116 if (--pm->pm_refs == 0) {
1117 pmap_release(pm);
1118 pool_put(&pmap_pool, pm);
1119 }
1120 }
1121
1122 /*
1123 * Release any resources held by the given physical map.
1124 * Called when a pmap initialized by pmap_pinit is being released.
1125 */
1126 void
1127 pmap_release(pmap_t pm)
1128 {
1129 int idx, mask;
1130
1131 if (pm->pm_sr[0] == 0)
1132 panic("pmap_release");
1133 idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS-1);
1134 mask = 1 << (idx % VSID_NBPW);
1135 idx /= VSID_NBPW;
1136 pmap_vsid_bitmap[idx] &= ~mask;
1137 }
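/*
 * Worked example (illustrative value only): if VSID_TO_HASH() on the
 * first segment register yielded 0x123 (and NPMAPS is larger than that),
 * then idx = 0x123 = 291, mask = 1 << (291 % 32) = 1 << 3, and bit 3 of
 * pmap_vsid_bitmap[291 / 32] = pmap_vsid_bitmap[9] is cleared, returning
 * the VSID group that pmap_pinit() allocated.
 */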
1138
1139 /*
1140 * Copy the range specified by src_addr/len
1141 * from the source map to the range dst_addr/len
1142 * in the destination map.
1143 *
1144 * This routine is only advisory and need not do anything.
1145 */
1146 void
1147 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
1148 vsize_t len, vaddr_t src_addr)
1149 {
1150 PMAPCOUNT(copies);
1151 }
1152
1153 /*
1154 * Require that all active physical maps contain no
1155 * incorrect entries NOW.
1156 */
1157 void
1158 pmap_update(struct pmap *pmap)
1159 {
1160 PMAPCOUNT(updates);
1161 TLBSYNC();
1162 }
1163
1164 /*
1165 * Garbage collects the physical map system for
1166 * pages which are no longer used.
1167 * Success need not be guaranteed -- that is, there
1168 * may well be pages which are not referenced, but
1169 * others may be collected.
1170 * Called by the pageout daemon when pages are scarce.
1171 */
1172 void
1173 pmap_collect(pmap_t pm)
1174 {
1175 PMAPCOUNT(collects);
1176 }
1177
1178 static __inline int
1179 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1180 {
1181 int pteidx;
1182 /*
1183 * We can find the actual pte entry without searching by
1184 * grabbing the PTEG slot index saved in the low bits of
1185 * pvo_vaddr (PVO_PTEGIDX_GET) and by noticing the HID bit.
1186 */
1187 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1188 if (pvo->pvo_pte.pte_hi & PTE_HID)
1189 pteidx ^= pmap_pteg_mask * 8;
1190 return pteidx;
1191 }
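/*
 * Example (illustrative numbers): a PVO in group 0x347 whose PTE landed
 * in slot 5 of the primary group has pteidx = 0x347 * 8 + 5 = 0x1a3d.
 * If PTE_HID is set, the same slot in the secondary group is meant
 * instead, which is what the XOR with pmap_pteg_mask * 8 computes (the
 * low three slot bits are untouched).  The caller then finds the PTE
 * itself at pmap_pteg_table[pteidx >> 3].pt[pteidx & 7].
 */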
1192
1193 volatile struct pte *
1194 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1195 {
1196 volatile struct pte *pt;
1197
1198 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1199 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1200 return NULL;
1201 #endif
1202
1203 /*
1204 * If we haven't been supplied the pteidx, calculate it.
1205 */
1206 if (pteidx == -1) {
1207 int ptegidx;
1208 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1209 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1210 }
1211
1212 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1213
1214 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1215 return pt;
1216 #else
1217 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1218 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1219 "pvo but no valid pte index", pvo);
1220 }
1221 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1222 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1223 "pvo but no valid pte", pvo);
1224 }
1225
1226 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1227 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1228 #if defined(DEBUG) || defined(PMAPCHECK)
1229 pmap_pte_print(pt);
1230 #endif
1231 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1232 "pmap_pteg_table %p but invalid in pvo",
1233 pvo, pt);
1234 }
1235 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
1236 #if defined(DEBUG) || defined(PMAPCHECK)
1237 pmap_pte_print(pt);
1238 #endif
1239 panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1240 "not match pte %p in pmap_pteg_table",
1241 pvo, pt);
1242 }
1243 return pt;
1244 }
1245
1246 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1247 #if defined(DEBUG) || defined(PMAPCHECK)
1248 pmap_pte_print(pt);
1249 #endif
1250 panic("pmap_pvo_to_pte: pvo %p: has invalid pte %p in "
1251 "pmap_pteg_table but valid in pvo", pvo, pt);
1252 }
1253 return NULL;
1254 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1255 }
1256
1257 struct pvo_entry *
1258 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1259 {
1260 struct pvo_entry *pvo;
1261 int ptegidx;
1262
1263 va &= ~ADDR_POFF;
1264 ptegidx = va_to_pteg(pm, va);
1265
1266 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1267 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1268 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1269 panic("pmap_pvo_find_va: invalid pvo %p on "
1270 "list %#x (%p)", pvo, ptegidx,
1271 &pmap_pvo_table[ptegidx]);
1272 #endif
1273 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1274 if (pteidx_p)
1275 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1276 return pvo;
1277 }
1278 }
1279 return NULL;
1280 }
1281
1282 #if defined(DEBUG) || defined(PMAPCHECK)
1283 void
1284 pmap_pvo_check(const struct pvo_entry *pvo)
1285 {
1286 struct pvo_head *pvo_head;
1287 struct pvo_entry *pvo0;
1288 volatile struct pte *pt;
1289 int failed = 0;
1290
1291 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1292 panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1293
1294 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1295 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1296 pvo, pvo->pvo_pmap);
1297 failed = 1;
1298 }
1299
1300 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1301 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1302 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1303 pvo, TAILQ_NEXT(pvo, pvo_olink));
1304 failed = 1;
1305 }
1306
1307 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1308 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1309 printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
1310 pvo, LIST_NEXT(pvo, pvo_vlink));
1311 failed = 1;
1312 }
1313
1314 if (pvo->pvo_vaddr & PVO_MANAGED) {
1315 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1316 } else {
1317 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1318 printf("pmap_pvo_check: pvo %p: non kernel address "
1319 "on kernel unmanaged list\n", pvo);
1320 failed = 1;
1321 }
1322 pvo_head = &pmap_pvo_kunmanaged;
1323 }
1324 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1325 if (pvo0 == pvo)
1326 break;
1327 }
1328 if (pvo0 == NULL) {
1329 printf("pmap_pvo_check: pvo %p: not present "
1330 "on its vlist head %p\n", pvo, pvo_head);
1331 failed = 1;
1332 }
1333 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1334 printf("pmap_pvo_check: pvo %p: not present "
1335 "on its olist head\n", pvo);
1336 failed = 1;
1337 }
1338 pt = pmap_pvo_to_pte(pvo, -1);
1339 if (pt == NULL) {
1340 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1341 printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1342 "no PTE\n", pvo);
1343 failed = 1;
1344 }
1345 } else {
1346 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1347 (uintptr_t) pt >=
1348 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1349 printf("pmap_pvo_check: pvo %p: pte %p not in "
1350 "pteg table\n", pvo, pt);
1351 failed = 1;
1352 }
1353 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1354 printf("pmap_pvo_check: pvo %p: PVO_PTEGIDX does not "
1355 "match PTE slot\n", pvo);
1356 failed = 1;
1357 }
1358 if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1359 printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1360 "%#lx/%#lx\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi);
1361 failed = 1;
1362 }
1363 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1364 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1365 printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1366 "%#lx/%#lx\n", pvo,
1367 pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN),
1368 pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN));
1369 failed = 1;
1370 }
1371 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1372 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx"
1373 " does not match PVO's VA %#lx\n",
1374 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1375 failed = 1;
1376 }
1377 if (failed)
1378 pmap_pte_print(pt);
1379 }
1380 if (failed)
1381 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1382 pvo->pvo_pmap);
1383 }
1384 #endif /* DEBUG || PMAPCHECK */
1385
1386 /*
1387 * This returns whether this is the first mapping of a page.
1388 */
1389 int
1390 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1391 vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1392 {
1393 struct pvo_entry *pvo;
1394 struct pvo_tqhead *pvoh;
1395 register_t msr;
1396 int ptegidx;
1397 int i;
1398 int poolflags = PR_NOWAIT;
1399
1400 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1401 if (pmap_pvo_remove_depth > 0)
1402 panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1403 if (++pmap_pvo_enter_depth > 1)
1404 panic("pmap_pvo_enter: called recursively!");
1405 #endif
1406
1407 /*
1408 * Compute the PTE Group index.
1409 */
1410 va &= ~ADDR_POFF;
1411 ptegidx = va_to_pteg(pm, va);
1412
1413 msr = pmap_interrupts_off();
1414 /*
1415 * Remove any existing mapping for this page. Reuse the
1416 * pvo entry if there is a mapping.
1417 */
1418 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1419 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1420 #ifdef DEBUG
1421 if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1422 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1423 ~(PTE_REF|PTE_CHG)) == 0 &&
1424 va < VM_MIN_KERNEL_ADDRESS) {
1425 printf("pmap_pvo_enter: pvo %p: dup %#lx/%#lx\n",
1426 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1427 printf("pmap_pvo_enter: pte_hi=%#lx sr=%#lx\n",
1428 pvo->pvo_pte.pte_hi,
1429 pm->pm_sr[va >> ADDR_SR_SHFT]);
1430 pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1431 #ifdef DDBX
1432 Debugger();
1433 #endif
1434 }
1435 #endif
1436 PMAPCOUNT(mappings_replaced);
1437 pmap_pvo_remove(pvo, -1);
1438 break;
1439 }
1440 }
1441
1442 /*
1443 * If we aren't overwriting a mapping, try to allocate a new pvo entry.
1444 */
1445 pmap_interrupts_restore(msr);
1446 pvo = pool_get(pl, poolflags);
1447 msr = pmap_interrupts_off();
1448 if (pvo == NULL) {
1449 #if 0
1450 pvo = pmap_pvo_reclaim(pm);
1451 if (pvo == NULL) {
1452 #endif
1453 if ((flags & PMAP_CANFAIL) == 0)
1454 panic("pmap_pvo_enter: failed");
1455 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1456 pmap_pvo_enter_depth--;
1457 #endif
1458 pmap_interrupts_restore(msr);
1459 return ENOMEM;
1460 #if 0
1461 }
1462 #endif
1463 }
1464 pvo->pvo_vaddr = va;
1465 pvo->pvo_pmap = pm;
1466 pvo->pvo_vaddr &= ~ADDR_POFF;
1467 if (flags & VM_PROT_EXECUTE) {
1468 PMAPCOUNT(exec_mappings);
1469 pvo->pvo_vaddr |= PVO_EXECUTABLE;
1470 }
1471 if (flags & PMAP_WIRED)
1472 pvo->pvo_vaddr |= PVO_WIRED;
1473 if (pvo_head != &pmap_pvo_kunmanaged) {
1474 pvo->pvo_vaddr |= PVO_MANAGED;
1475 PMAPCOUNT(mappings);
1476 } else {
1477 PMAPCOUNT(kernel_mappings);
1478 }
1479 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1480
1481 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1482 if (pvo->pvo_pte.pte_lo & PVO_WIRED)
1483 pvo->pvo_pmap->pm_stats.wired_count++;
1484 pvo->pvo_pmap->pm_stats.resident_count++;
1485 #if defined(DEBUG)
1486 if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS)
1487 DPRINTFN(PVOENTER,
1488 ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n",
1489 pvo, pm, va, pa));
1490 #endif
1491
1492 /*
1493 * We hope this succeeds but it isn't required.
1494 */
1495 pvoh = &pmap_pvo_table[ptegidx];
1496 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1497 if (i >= 0) {
1498 PVO_PTEGIDX_SET(pvo, i);
1499 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1500 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1501 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1502 } else {
1503
1504 /*
1505 * Since we didn't have room for this entry (which makes it
1506 * an evicted entry), place it at the head of the list.
1507 */
1508 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1509 PMAPCOUNT(ptes_evicted);
1510 pm->pm_evictions++;
1511 }
1512 PMAP_PVO_CHECK(pvo); /* sanity check */
1513 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1514 pmap_pvo_enter_depth--;
1515 #endif
1516 pmap_interrupts_restore(msr);
1517 return 0;
1518 }
1519
1520 void
1521 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1522 {
1523 volatile struct pte *pt;
1524 int ptegidx;
1525
1526 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1527 if (++pmap_pvo_remove_depth > 1)
1528 panic("pmap_pvo_remove: called recursively!");
1529 #endif
1530
1531 /*
1532 * If we haven't been supplied the pteidx, calculate it.
1533 */
1534 if (pteidx == -1) {
1535 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1536 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1537 } else {
1538 ptegidx = pteidx >> 3;
1539 if (pvo->pvo_pte.pte_hi & PTE_HID)
1540 ptegidx ^= pmap_pteg_mask;
1541 }
1542 PMAP_PVO_CHECK(pvo); /* sanity check */
1543
1544 /*
1545 * If there is an active pte entry, we need to deactivate it
1546 * (and save the ref & chg bits).
1547 */
1548 pt = pmap_pvo_to_pte(pvo, pteidx);
1549 if (pt != NULL) {
1550 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1551 PVO_PTEGIDX_CLR(pvo);
1552 PMAPCOUNT(ptes_removed);
1553 } else {
1554 KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1555 pvo->pvo_pmap->pm_evictions--;
1556 }
1557
1558 /*
1559 * Update our statistics
1560 */
1561 pvo->pvo_pmap->pm_stats.resident_count--;
1562 if (pvo->pvo_pte.pte_lo & PVO_WIRED)
1563 pvo->pvo_pmap->pm_stats.wired_count--;
1564
1565 /*
1566 * Save the REF/CHG bits into their cache if the page is managed.
1567 */
1568 if (pvo->pvo_vaddr & PVO_MANAGED) {
1569 register_t ptelo = pvo->pvo_pte.pte_lo;
1570 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1571
1572 if (pg != NULL) {
1573 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1574 }
1575 PMAPCOUNT(unmappings);
1576 } else {
1577 PMAPCOUNT(kernel_unmappings);
1578 }
1579
1580 /*
1581 * Remove the PVO from its lists and return it to the pool.
1582 */
1583 LIST_REMOVE(pvo, pvo_vlink);
1584 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1585 pool_put(pvo->pvo_vaddr & PVO_MANAGED
1586 ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1587 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1588 pmap_pvo_remove_depth--;
1589 #endif
1590 }
1591
1592 /*
1593 * Insert physical page at pa into the given pmap at virtual address va.
1594 */
1595 int
1596 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1597 {
1598 struct mem_region *mp;
1599 struct pvo_head *pvo_head;
1600 struct vm_page *pg;
1601 struct pool *pl;
1602 register_t pte_lo;
1603 int s;
1604 int error;
1605 u_int pvo_flags;
1606 u_int was_exec = 0;
1607
1608 if (__predict_false(!pmap_initialized)) {
1609 pvo_head = &pmap_pvo_kunmanaged;
1610 pl = &pmap_upvo_pool;
1611 pvo_flags = 0;
1612 pg = NULL;
1613 was_exec = PTE_EXEC;
1614 } else {
1615 pvo_head = pa_to_pvoh(pa, &pg);
1616 pl = &pmap_mpvo_pool;
1617 pvo_flags = PVO_MANAGED;
1618 }
1619
1620 DPRINTFN(ENTER,
1621 ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):",
1622 pm, va, pa, prot, flags));
1623
1624 /*
1625 * If this is a managed page, and it's the first reference to the
1626 * page, clear the execness of the page. Otherwise fetch the execness.
1627 */
1628 if (pg != NULL)
1629 was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1630
1631 DPRINTFN(ENTER, (" was_exec=%d", was_exec));
1632
1633 /*
1634 * Assume the page is cache inhibited and access is guarded unless
1635 * it's in our available memory array. If it is in the memory array,
1636 * assume it's memory-coherent.
1637 */
1638 pte_lo = PTE_IG;
1639 if ((flags & PMAP_NC) == 0) {
1640 for (mp = mem; mp->size; mp++) {
1641 if (pa >= mp->start && pa < mp->start + mp->size) {
1642 pte_lo = PTE_M;
1643 break;
1644 }
1645 }
1646 }
1647
1648 if (prot & VM_PROT_WRITE)
1649 pte_lo |= PTE_BW;
1650 else
1651 pte_lo |= PTE_BR;
1652
1653 /*
1654 * If this was in response to a fault, "pre-fault" the PTE's
1655 * changed/referenced bit appropriately.
1656 */
1657 if (flags & VM_PROT_WRITE)
1658 pte_lo |= PTE_CHG;
1659 if (flags & (VM_PROT_READ|VM_PROT_WRITE))
1660 pte_lo |= PTE_REF;
1661
1662 #if 0
1663 if (pm == pmap_kernel()) {
1664 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ)
1665 printf("pmap_pvo_enter: Kernel RO va %#lx pa %#lx\n",
1666 va, pa);
1667 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_NONE)
1668 printf("pmap_pvo_enter: Kernel N/A va %#lx pa %#lx\n",
1669 va, pa);
1670 }
1671 #endif
1672
1673 /*
1674 * We need to know if this page can be executable
1675 */
1676 flags |= (prot & VM_PROT_EXECUTE);
1677
1678 /*
1679 * Record mapping for later back-translation and pte spilling.
1680 * This will overwrite any existing mapping.
1681 */
1682 s = splvm();
1683 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1684 splx(s);
1685
1686 /*
1687 * Flush the real page from the instruction cache if this page is
1688 * mapped executable and cacheable and has not been flushed since
1689 * the last time it was modified.
1690 */
1691 if (error == 0 &&
1692 (flags & VM_PROT_EXECUTE) &&
1693 (pte_lo & PTE_I) == 0 &&
1694 was_exec == 0) {
1695 DPRINTFN(ENTER, (" syncicache"));
1696 PMAPCOUNT(exec_synced);
1697 pmap_syncicache(pa, PAGE_SIZE);
1698 if (pg != NULL) {
1699 pmap_attr_save(pg, PTE_EXEC);
1700 PMAPCOUNT(exec_cached);
1701 #if defined(DEBUG) || defined(PMAPDEBUG)
1702 if (pmapdebug & PMAPDEBUG_ENTER)
1703 printf(" marked-as-exec");
1704 else if (pmapdebug & PMAPDEBUG_EXEC)
1705 printf("[pmap_enter: %#lx: marked-as-exec]\n",
1706 pg->phys_addr);
1707
1708 #endif
1709 }
1710 }
1711
1712 DPRINTFN(ENTER, (": error=%d\n", error));
1713
1714 return error;
1715 }
1716
1717 void
1718 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1719 {
1720 struct mem_region *mp;
1721 register_t pte_lo;
1722 register_t msr;
1723 int error;
1724 int s;
1725
1726 if (va < VM_MIN_KERNEL_ADDRESS)
1727 panic("pmap_kenter_pa: attempt to enter "
1728 "non-kernel address %#lx!", va);
1729
1730 DPRINTFN(KENTER,
1731 ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
1732
1733 /*
1734 * Assume the page is cache inhibited and access is guarded unless
1735 * it's in our available memory array. If it is in the memory array,
1736 * assume it's memory-coherent.
1737 */
1738 pte_lo = PTE_IG;
1739 if ((prot & PMAP_NC) == 0) {
1740 for (mp = mem; mp->size; mp++) {
1741 if (pa >= mp->start && pa < mp->start + mp->size) {
1742 pte_lo = PTE_M;
1743 break;
1744 }
1745 }
1746 }
1747
1748 if (prot & VM_PROT_WRITE)
1749 pte_lo |= PTE_BW;
1750 else
1751 pte_lo |= PTE_BR;
1752
1753 /*
1754 * We don't care about REF/CHG on PVOs on the unmanaged list.
1755 */
1756 s = splvm();
1757 msr = pmap_interrupts_off();
1758 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
1759 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
1760 pmap_interrupts_restore(msr);
1761 splx(s);
1762
1763 if (error != 0)
1764 panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
1765 va, pa, error);
1766 }
1767
1768 void
1769 pmap_kremove(vaddr_t va, vsize_t len)
1770 {
1771 if (va < VM_MIN_KERNEL_ADDRESS)
1772 panic("pmap_kremove: attempt to remove "
1773 "non-kernel address %#lx!", va);
1774
1775 DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len));
1776 pmap_remove(pmap_kernel(), va, va + len);
1777 }
1778
1779 /*
1780 * Remove the given range of mapping entries.
1781 */
1782 void
1783 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
1784 {
1785 struct pvo_entry *pvo;
1786 register_t msr;
1787 int pteidx;
1788 int s;
1789
1790 for (; va < endva; va += PAGE_SIZE) {
1791 s = splvm();
1792 msr = pmap_interrupts_off();
1793 pvo = pmap_pvo_find_va(pm, va, &pteidx);
1794 if (pvo != NULL) {
1795 pmap_pvo_remove(pvo, pteidx);
1796 }
1797 pmap_interrupts_restore(msr);
1798 splx(s);
1799 }
1800 }
1801
1802 /*
1803 * Get the physical page address for the given pmap/virtual address.
1804 */
1805 boolean_t
1806 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
1807 {
1808 struct pvo_entry *pvo;
1809 register_t msr;
1810 int s;
1811
1812 /*
1813 * If this is a kernel pmap lookup, also check the battable
1814 * and if we get a hit, translate the VA to a PA using the
1815 * BAT entries. Don't check against VM_MAX_KERNEL_ADDRESS when
1816 * it would wrap back to 0.
1817 */
1818 if (pm == pmap_kernel() &&
1819 (va < VM_MIN_KERNEL_ADDRESS ||
1820 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
1821 register_t batu = battable[va >> ADDR_SR_SHFT].batu;
1822 KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
1823 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
1824 register_t batl = battable[va >> ADDR_SR_SHFT].batl;
1825 register_t mask = (~(batu & BAT_BL) << 15) & ~0x1ffffL;
1826 *pap = (batl & mask) | (va & ~mask);
1827 return TRUE;
1828 }
1829 return FALSE;
1830 }
1831
1832 s = splvm();
1833 msr = pmap_interrupts_off();
1834 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1835 if (pvo != NULL) {
1836 PMAP_PVO_CHECK(pvo); /* sanity check */
1837 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1838 }
1839 pmap_interrupts_restore(msr);
1840 splx(s);
1841 return pvo != NULL;
1842 }
1843
1844 /*
1845 * Lower the protection on the specified range of this pmap.
1846 *
1847 * There are only two cases: either the protection is going to 0,
1848 * or it is going to read-only.
1849 */
1850 void
1851 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
1852 {
1853 struct pvo_entry *pvo;
1854 volatile struct pte *pt;
1855 register_t msr;
1856 int s;
1857 int pteidx;
1858
1859 /*
1860 * Since this routine only downgrades protection, we should
1861 * always be called without WRITE permission.
1862 */
1863 KASSERT((prot & VM_PROT_WRITE) == 0);
1864
1865 /*
1866 * If there is no protection, this is equivalent to
1867 * removing the range from the pmap.
1868 */
1869 if ((prot & VM_PROT_READ) == 0) {
1870 pmap_remove(pm, va, endva);
1871 return;
1872 }
1873
1874 s = splvm();
1875 msr = pmap_interrupts_off();
1876
1877 for (; va < endva; va += PAGE_SIZE) {
1878 pvo = pmap_pvo_find_va(pm, va, &pteidx);
1879 if (pvo == NULL)
1880 continue;
1881 PMAP_PVO_CHECK(pvo); /* sanity check */
1882
1883 /*
1884 * Revoke executable if asked to do so.
1885 */
1886 if ((prot & VM_PROT_EXECUTE) == 0)
1887 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1888
1889 #if 0
1890 /*
1891 * If the page is already read-only, no change
1892 * needs to be made.
1893 */
1894 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
1895 continue;
1896 #endif
1897 /*
1898 * Grab the PTE pointer before we diddle with
1899 * the cached PTE copy.
1900 */
1901 pt = pmap_pvo_to_pte(pvo, pteidx);
1902 /*
1903 * Change the protection of the page.
1904 */
1905 pvo->pvo_pte.pte_lo &= ~PTE_PP;
1906 pvo->pvo_pte.pte_lo |= PTE_BR;
1907
1908 /*
1909 * If the PVO is in the page table, update
1910 		 * that pte as well.
1911 */
1912 if (pt != NULL) {
1913 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1914 PMAPCOUNT(ptes_changed);
1915 }
1916
1917 PMAP_PVO_CHECK(pvo); /* sanity check */
1918 }
1919
1920 pmap_interrupts_restore(msr);
1921 splx(s);
1922 }
1923
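/*
 * Clear the wired attribute of the mapping for the given virtual address.
 */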
1924 void
1925 pmap_unwire(pmap_t pm, vaddr_t va)
1926 {
1927 struct pvo_entry *pvo;
1928 register_t msr;
1929 int s;
1930
1931 s = splvm();
1932 msr = pmap_interrupts_off();
1933
1934 pvo = pmap_pvo_find_va(pm, va, NULL);
1935 if (pvo != NULL) {
1936 if (pvo->pvo_vaddr & PVO_WIRED) {
1937 pvo->pvo_vaddr &= ~PVO_WIRED;
1938 pm->pm_stats.wired_count--;
1939 }
1940 PMAP_PVO_CHECK(pvo); /* sanity check */
1941 }
1942
1943 pmap_interrupts_restore(msr);
1944 splx(s);
1945 }
1946
1947 /*
1948 * Lower the protection on the specified physical page.
1949 *
1950 * There are only two cases: either the protection is going to 0,
1951 * or it is going to read-only.
1952 */
1953 void
1954 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1955 {
1956 struct pvo_head *pvo_head;
1957 struct pvo_entry *pvo, *next_pvo;
1958 volatile struct pte *pt;
1959 register_t msr;
1960 int s;
1961
1962 /*
1963 * Since this routine only downgrades protection, if the
1964 * maximal protection is desired, there isn't any change
1965 * to be made.
1966 */
1967 KASSERT((prot & VM_PROT_WRITE) == 0);
1968 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
1969 return;
1970
1971 s = splvm();
1972 msr = pmap_interrupts_off();
1973
1974 /*
1975 * When UVM reuses a page, it does a pmap_page_protect with
1976 * VM_PROT_NONE. At that point, we can clear the exec flag
1977 * since we know the page will have different contents.
1978 */
1979 if ((prot & VM_PROT_READ) == 0) {
1980 DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
1981 pg->phys_addr));
1982 if (pmap_attr_fetch(pg) & PTE_EXEC) {
1983 PMAPCOUNT(exec_uncached_page_protect);
1984 pmap_attr_clear(pg, PTE_EXEC);
1985 }
1986 }
1987
1988 pvo_head = vm_page_to_pvoh(pg);
1989 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1990 next_pvo = LIST_NEXT(pvo, pvo_vlink);
1991 PMAP_PVO_CHECK(pvo); /* sanity check */
1992
1993 /*
1994 * Downgrading to no mapping at all, we just remove the entry.
1995 */
1996 if ((prot & VM_PROT_READ) == 0) {
1997 pmap_pvo_remove(pvo, -1);
1998 continue;
1999 }
2000
2001 /*
2002 * If EXEC permission is being revoked, just clear the
2003 * flag in the PVO.
2004 */
2005 if ((prot & VM_PROT_EXECUTE) == 0)
2006 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
2007
2008 /*
2009 * If this entry is already RO, don't diddle with the
2010 * page table.
2011 */
2012 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2013 PMAP_PVO_CHECK(pvo);
2014 continue;
2015 }
2016
2017 /*
2018 		 * Grab the PTE before we diddle the bits so
2019 * pvo_to_pte can verify the pte contents are as
2020 * expected.
2021 */
2022 pt = pmap_pvo_to_pte(pvo, -1);
2023 pvo->pvo_pte.pte_lo &= ~PTE_PP;
2024 pvo->pvo_pte.pte_lo |= PTE_BR;
2025 if (pt != NULL) {
2026 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2027 PMAPCOUNT(ptes_changed);
2028 }
2029 PMAP_PVO_CHECK(pvo); /* sanity check */
2030 }
2031
2032 pmap_interrupts_restore(msr);
2033 splx(s);
2034 }
2035
2036 /*
2037 * Activate the address space for the specified process. If the process
2038 * is the current process, load the new MMU context.
2039 */
2040 void
2041 pmap_activate(struct lwp *l)
2042 {
2043 struct pcb *pcb = &l->l_addr->u_pcb;
2044 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2045
2046 DPRINTFN(ACTIVATE,
2047 ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
2048
2049 /*
2050 * XXX Normally performed in cpu_fork().
2051 */
2052 if (pcb->pcb_pm != pmap) {
2053 pcb->pcb_pm = pmap;
2054 pcb->pcb_pmreal = pmap;
2055 }
2056
2057 /*
2058 	 * In theory, the SR registers need only be valid on return
2059 	 * to user space, so we could wait and load them there.
2060 */
2061 if (l == curlwp) {
2062 /* Store pointer to new current pmap. */
2063 curpm = pmap;
2064 }
2065 }
2066
2067 /*
2068 * Deactivate the specified process's address space.
2069 */
2070 void
2071 pmap_deactivate(struct lwp *l)
2072 {
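	/* Nothing to do. */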
2073 }
2074
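/*
 * Return whether the given REF/CHG attribute bit is set for the page,
 * checking the cached attributes first and then the PTEs themselves.
 */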
2075 boolean_t
2076 pmap_query_bit(struct vm_page *pg, int ptebit)
2077 {
2078 struct pvo_entry *pvo;
2079 volatile struct pte *pt;
2080 register_t msr;
2081 int s;
2082
2083 if (pmap_attr_fetch(pg) & ptebit)
2084 return TRUE;
2085 s = splvm();
2086 msr = pmap_interrupts_off();
2087 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2088 PMAP_PVO_CHECK(pvo); /* sanity check */
2089 /*
2090 		 * See if we saved the bit off.  If so, cache it and return
2091 * success.
2092 */
2093 if (pvo->pvo_pte.pte_lo & ptebit) {
2094 pmap_attr_save(pg, ptebit);
2095 PMAP_PVO_CHECK(pvo); /* sanity check */
2096 pmap_interrupts_restore(msr);
2097 splx(s);
2098 return TRUE;
2099 }
2100 }
2101 /*
2102 * No luck, now go thru the hard part of looking at the ptes
2103 * themselves. Sync so any pending REF/CHG bits are flushed
2104 * to the PTEs.
2105 */
2106 SYNC();
2107 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2108 PMAP_PVO_CHECK(pvo); /* sanity check */
2109 /*
2110 		 * See if this pvo has a valid PTE.  If so, fetch the
2111 		 * REF/CHG bits from the valid PTE.  If the appropriate
2112 		 * ptebit is set, cache it and return success.
2113 */
2114 pt = pmap_pvo_to_pte(pvo, -1);
2115 if (pt != NULL) {
2116 pmap_pte_synch(pt, &pvo->pvo_pte);
2117 if (pvo->pvo_pte.pte_lo & ptebit) {
2118 pmap_attr_save(pg, ptebit);
2119 PMAP_PVO_CHECK(pvo); /* sanity check */
2120 pmap_interrupts_restore(msr);
2121 splx(s);
2122 return TRUE;
2123 }
2124 }
2125 }
2126 pmap_interrupts_restore(msr);
2127 splx(s);
2128 return FALSE;
2129 }
2130
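/*
 * Clear the given REF/CHG attribute bit for the page, in both the cached
 * attributes and any valid PTEs, and return whether it was set.
 */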
2131 boolean_t
2132 pmap_clear_bit(struct vm_page *pg, int ptebit)
2133 {
2134 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2135 struct pvo_entry *pvo;
2136 volatile struct pte *pt;
2137 register_t msr;
2138 int rv = 0;
2139 int s;
2140
2141 s = splvm();
2142 msr = pmap_interrupts_off();
2143
2144 /*
2145 	 * Fetch the cached attribute value.
2146 */
2147 rv |= pmap_attr_fetch(pg);
2148
2149 /*
2150 * Clear the cached value.
2151 */
2152 pmap_attr_clear(pg, ptebit);
2153
2154 /*
2155 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2156 * can reset the right ones). Note that since the pvo entries and
2157 * list heads are accessed via BAT0 and are never placed in the
2158 * page table, we don't have to worry about further accesses setting
2159 * the REF/CHG bits.
2160 */
2161 SYNC();
2162
2163 /*
2164 	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has
2165 	 * a valid PTE, also clear the ptebit from that PTE.
2166 */
2167 LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2168 PMAP_PVO_CHECK(pvo); /* sanity check */
2169 pt = pmap_pvo_to_pte(pvo, -1);
2170 if (pt != NULL) {
2171 /*
2172 * Only sync the PTE if the bit we are looking
2173 * for is not already set.
2174 */
2175 if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2176 pmap_pte_synch(pt, &pvo->pvo_pte);
2177 /*
2178 * If the bit we are looking for was already set,
2179 * clear that bit in the pte.
2180 */
2181 if (pvo->pvo_pte.pte_lo & ptebit)
2182 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2183 }
2184 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2185 pvo->pvo_pte.pte_lo &= ~ptebit;
2186 PMAP_PVO_CHECK(pvo); /* sanity check */
2187 }
2188 pmap_interrupts_restore(msr);
2189 splx(s);
2190 /*
2191 * If we are clearing the modify bit and this page was marked EXEC
2192 * and the user of the page thinks the page was modified, then we
2193 * need to clean it from the icache if it's mapped or clear the EXEC
2194 * bit if it's not mapped. The page itself might not have the CHG
2195 * bit set if the modification was done via DMA to the page.
2196 */
2197 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2198 if (LIST_EMPTY(pvoh)) {
2199 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
2200 pg->phys_addr));
2201 pmap_attr_clear(pg, PTE_EXEC);
2202 PMAPCOUNT(exec_uncached_clear_modify);
2203 } else {
2204 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
2205 pg->phys_addr));
2206 pmap_syncicache(pg->phys_addr, PAGE_SIZE);
2207 PMAPCOUNT(exec_synced_clear_modify);
2208 }
2209 }
2210 return (rv & ptebit) != 0;
2211 }
2212
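/*
 * Synchronize the instruction cache with the given range of the process's
 * address space.  Only mappings marked executable are flushed.
 */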
2213 void
2214 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2215 {
2216 struct pvo_entry *pvo;
2217 size_t offset = va & ADDR_POFF;
2218 int s;
2219
2220 s = splvm();
2221 while (len > 0) {
2222 size_t seglen = PAGE_SIZE - offset;
2223 if (seglen > len)
2224 seglen = len;
2225 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2226 if (pvo != NULL && PVO_ISEXECUTABLE(pvo)) {
2227 pmap_syncicache(
2228 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2229 PMAP_PVO_CHECK(pvo);
2230 }
2231 va += seglen;
2232 len -= seglen;
2233 offset = 0;
2234 }
2235 splx(s);
2236 }
2237
2238 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2239 void
2240 pmap_pte_print(volatile struct pte *pt)
2241 {
2242 printf("PTE %p: ", pt);
2243 /* High word: */
2244 printf("0x%08lx: [", pt->pte_hi);
2245 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2246 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2247 printf("0x%06lx 0x%02lx",
2248 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2249 pt->pte_hi & PTE_API);
2250 printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
2251 /* Low word: */
2252 printf(" 0x%08lx: [", pt->pte_lo);
2253 printf("0x%05lx... ", pt->pte_lo >> 12);
2254 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2255 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2256 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2257 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2258 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2259 printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2260 switch (pt->pte_lo & PTE_PP) {
2261 case PTE_BR: printf("br]\n"); break;
2262 case PTE_BW: printf("bw]\n"); break;
2263 case PTE_SO: printf("so]\n"); break;
2264 case PTE_SW: printf("sw]\n"); break;
2265 }
2266 }
2267 #endif
2268
2269 #if defined(DDB)
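/*
 * Count the valid primary, valid secondary and invalid PTEs in the
 * PTEG table (DDB helper).
 */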
2270 void
2271 pmap_pteg_check(void)
2272 {
2273 volatile struct pte *pt;
2274 int i;
2275 int ptegidx;
2276 u_int p_valid = 0;
2277 u_int s_valid = 0;
2278 u_int invalid = 0;
2279
2280 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2281 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2282 if (pt->pte_hi & PTE_VALID) {
2283 if (pt->pte_hi & PTE_HID)
2284 s_valid++;
2285 else
2286 p_valid++;
2287 } else
2288 invalid++;
2289 }
2290 }
2291 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2292 p_valid, p_valid, s_valid, s_valid,
2293 invalid, invalid);
2294 }
2295
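/*
 * Dump the MMU state: SDR1, the segment registers and the BAT registers.
 */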
2296 void
2297 pmap_print_mmuregs(void)
2298 {
2299 int i;
2300 u_int cpuvers;
2301 vaddr_t addr;
2302 register_t soft_sr[16];
2303 struct bat soft_ibat[4];
2304 struct bat soft_dbat[4];
2305 register_t sdr1;
2306
2307 cpuvers = MFPVR() >> 16;
2308
2309 __asm __volatile ("mfsdr1 %0" : "=r"(sdr1));
2310 	for (i = 0, addr = 0; i < 16; i++) {
2311 soft_sr[i] = MFSRIN(addr);
2312 addr += (1 << ADDR_SR_SHFT);
2313 }
2314
2315 /* read iBAT (601: uBAT) registers */
2316 __asm __volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2317 __asm __volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2318 __asm __volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2319 __asm __volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2320 __asm __volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2321 __asm __volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2322 __asm __volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2323 __asm __volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2324
2325
2326 if (cpuvers != MPC601) {
2327 /* read dBAT registers */
2328 __asm __volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2329 __asm __volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2330 __asm __volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2331 __asm __volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2332 __asm __volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2333 __asm __volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2334 __asm __volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2335 __asm __volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2336 }
2337
2338 printf("SDR1:\t%#lx\n", sdr1);
2339 printf("SR[]:\t");
2340 addr = 0;
2341 for (i=0; i<4; i++)
2342 printf("0x%08lx, ", soft_sr[i]);
2343 printf("\n\t");
2344 for ( ; i<8; i++)
2345 printf("0x%08lx, ", soft_sr[i]);
2346 printf("\n\t");
2347 for ( ; i<12; i++)
2348 printf("0x%08lx, ", soft_sr[i]);
2349 printf("\n\t");
2350 for ( ; i<16; i++)
2351 printf("0x%08lx, ", soft_sr[i]);
2352 printf("\n");
2353
2354 printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2355 for (i=0; i<4; i++) {
2356 printf("0x%08lx 0x%08lx, ",
2357 soft_ibat[i].batu, soft_ibat[i].batl);
2358 if (i == 1)
2359 printf("\n\t");
2360 }
2361 if (cpuvers != MPC601) {
2362 printf("\ndBAT[]:\t");
2363 for (i=0; i<4; i++) {
2364 printf("0x%08lx 0x%08lx, ",
2365 soft_dbat[i].batu, soft_dbat[i].batl);
2366 if (i == 1)
2367 printf("\n\t");
2368 }
2369 }
2370 printf("\n");
2371 }
2372
2373 void
2374 pmap_print_pte(pmap_t pm, vaddr_t va)
2375 {
2376 struct pvo_entry *pvo;
2377 volatile struct pte *pt;
2378 int pteidx;
2379
2380 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2381 if (pvo != NULL) {
2382 pt = pmap_pvo_to_pte(pvo, pteidx);
2383 if (pt != NULL) {
2384 printf("VA %#lx -> %p -> %s %#lx, %#lx\n",
2385 va, pt,
2386 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2387 pt->pte_hi, pt->pte_lo);
2388 } else {
2389 printf("No valid PTE found\n");
2390 }
2391 } else {
2392 printf("Address not in pmap\n");
2393 }
2394 }
2395
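/*
 * Print a histogram of PVO chain depths across the PTEG buckets
 * (DDB helper).
 */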
2396 void
2397 pmap_pteg_dist(void)
2398 {
2399 struct pvo_entry *pvo;
2400 int ptegidx;
2401 int depth;
2402 int max_depth = 0;
2403 unsigned int depths[64];
2404
2405 memset(depths, 0, sizeof(depths));
2406 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2407 depth = 0;
2408 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2409 depth++;
2410 }
2411 if (depth > max_depth)
2412 max_depth = depth;
2413 if (depth > 63)
2414 depth = 63;
2415 depths[depth]++;
2416 }
2417
2418 for (depth = 0; depth < 64; depth++) {
2419 printf(" [%2d]: %8u", depth, depths[depth]);
2420 if ((depth & 3) == 3)
2421 printf("\n");
2422 if (depth == max_depth)
2423 break;
2424 }
2425 if ((depth & 3) != 3)
2426 printf("\n");
2427 printf("Max depth found was %d\n", max_depth);
2428 }
2429 #endif /* DDB */
2430
2431 #if defined(PMAPCHECK) || defined(DEBUG)
2432 void
2433 pmap_pvo_verify(void)
2434 {
2435 int ptegidx;
2436 int s;
2437
2438 s = splvm();
2439 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2440 struct pvo_entry *pvo;
2441 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2442 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2443 panic("pmap_pvo_verify: invalid pvo %p "
2444 "on list %#x", pvo, ptegidx);
2445 pmap_pvo_check(pvo);
2446 }
2447 }
2448 splx(s);
2449 }
2450 #endif /* PMAPCHECK || DEBUG */
2451
2452
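/*
 * Pool backend allocator for unmanaged PVO entries: reuse a page from our
 * own free list if possible, steal from uvm_pageboot_alloc() before UVM is
 * initialized, and otherwise fall back to the managed allocator below.
 */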
2453 void *
2454 pmap_pool_ualloc(struct pool *pp, int flags)
2455 {
2456 struct pvo_page *pvop;
2457
2458 pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2459 if (pvop != NULL) {
2460 pmap_upvop_free--;
2461 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
2462 return pvop;
2463 }
2464 if (uvm.page_init_done != TRUE) {
2465 return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2466 }
2467 return pmap_pool_malloc(pp, flags);
2468 }
2469
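/*
 * Pool backend allocator for managed PVO entries.  Pages are taken from
 * the first 256MB so that their physical address can be used directly as
 * a virtual address (that range is BAT-mapped).
 */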
2470 void *
2471 pmap_pool_malloc(struct pool *pp, int flags)
2472 {
2473 struct pvo_page *pvop;
2474 struct vm_page *pg;
2475
2476 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2477 if (pvop != NULL) {
2478 pmap_mpvop_free--;
2479 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
2480 return pvop;
2481 }
2482 again:
2483 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2484 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2485 if (__predict_false(pg == NULL)) {
2486 if (flags & PR_WAITOK) {
2487 uvm_wait("plpg");
2488 goto again;
2489 } else {
2490 return (0);
2491 }
2492 }
2493 return (void *) VM_PAGE_TO_PHYS(pg);
2494 }
2495
2496 void
2497 pmap_pool_ufree(struct pool *pp, void *va)
2498 {
2499 struct pvo_page *pvop;
2500 #if 0
2501 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2502 pmap_pool_mfree(va, size, tag);
2503 return;
2504 }
2505 #endif
2506 pvop = va;
2507 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2508 pmap_upvop_free++;
2509 if (pmap_upvop_free > pmap_upvop_maxfree)
2510 pmap_upvop_maxfree = pmap_upvop_free;
2511 }
2512
2513 void
2514 pmap_pool_mfree(struct pool *pp, void *va)
2515 {
2516 struct pvo_page *pvop;
2517
2518 pvop = va;
2519 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2520 pmap_mpvop_free++;
2521 if (pmap_mpvop_free > pmap_mpvop_maxfree)
2522 pmap_mpvop_maxfree = pmap_mpvop_free;
2523 #if 0
2524 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2525 #endif
2526 }
2527
2528 /*
2529  * This routine is used during bootstrapping to steal to-be-managed
2530  * memory (which will then be unmanaged).  We use it to grab from the
2531  * first 256MB for our pmap needs and above 256MB for other stuff.
2532 */
2533 vaddr_t
2534 pmap_steal_memory(vsize_t vsize)
2535 {
2536 vsize_t size;
2537 vaddr_t va;
2538 paddr_t pa = 0;
2539 int npgs, bank;
2540 struct vm_physseg *ps;
2541
2542 if (uvm.page_init_done == TRUE)
2543 panic("pmap_steal_memory: called _after_ bootstrap");
2544
2545 size = round_page(vsize);
2546 npgs = atop(size);
2547
2548 /*
2549 * PA 0 will never be among those given to UVM so we can use it
2550 * to indicate we couldn't steal any memory.
2551 */
2552 for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
2553 if (ps->free_list == VM_FREELIST_FIRST256 &&
2554 ps->avail_end - ps->avail_start >= npgs) {
2555 pa = ptoa(ps->avail_start);
2556 break;
2557 }
2558 }
2559
2560 if (pa == 0)
2561 		panic("pmap_steal_memory: no appropriate memory to steal!");
2562
2563 ps->avail_start += npgs;
2564 ps->start += npgs;
2565
2566 /*
2567 * If we've used up all the pages in the segment, remove it and
2568 * compact the list.
2569 */
2570 if (ps->avail_start == ps->end) {
2571 /*
2572 * If this was the last one, then a very bad thing has occurred
2573 */
2574 if (--vm_nphysseg == 0)
2575 panic("pmap_steal_memory: out of memory!");
2576
2577 printf("pmap_steal_memory: consumed bank %d\n", bank);
2578 for (; bank < vm_nphysseg; bank++, ps++) {
2579 ps[0] = ps[1];
2580 }
2581 }
2582
2583 va = (vaddr_t) pa;
2584 memset((caddr_t) va, 0, size);
2585 pmap_pages_stolen += npgs;
2586 #ifdef DEBUG
2587 if (pmapdebug && npgs > 1) {
2588 u_int cnt = 0;
2589 for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
2590 cnt += ps->avail_end - ps->avail_start;
2591 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2592 npgs, pmap_pages_stolen, cnt);
2593 }
2594 #endif
2595
2596 return va;
2597 }
2598
2599 /*
2600  * Find a chunk of memory with the right size and alignment.
2601 */
2602 void *
2603 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2604 {
2605 struct mem_region *mp;
2606 paddr_t s, e;
2607 int i, j;
2608
2609 size = round_page(size);
2610
2611 DPRINTFN(BOOT,
2612 ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
2613 size, alignment, at_end));
2614
2615 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2616 panic("pmap_boot_find_memory: invalid alignment %lx",
2617 alignment);
2618
2619 if (at_end) {
2620 if (alignment != PAGE_SIZE)
2621 panic("pmap_boot_find_memory: invalid ending "
2622 "alignment %lx", alignment);
2623
2624 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
2625 s = mp->start + mp->size - size;
2626 if (s >= mp->start && mp->size >= size) {
2627 DPRINTFN(BOOT,(": %lx\n", s));
2628 DPRINTFN(BOOT,
2629 ("pmap_boot_find_memory: b-avail[%d] start "
2630 "0x%lx size 0x%lx\n", mp - avail,
2631 mp->start, mp->size));
2632 mp->size -= size;
2633 DPRINTFN(BOOT,
2634 ("pmap_boot_find_memory: a-avail[%d] start "
2635 "0x%lx size 0x%lx\n", mp - avail,
2636 mp->start, mp->size));
2637 return (void *) s;
2638 }
2639 }
2640 panic("pmap_boot_find_memory: no available memory");
2641 }
2642
2643 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2644 s = (mp->start + alignment - 1) & ~(alignment-1);
2645 e = s + size;
2646
2647 /*
2648 		 * Is the calculated block entirely within the region?
2649 */
2650 if (s < mp->start || e > mp->start + mp->size)
2651 continue;
2652
2653 DPRINTFN(BOOT,(": %lx\n", s));
2654 if (s == mp->start) {
2655 /*
2656 			 * If the block starts at the beginning of the
2657 			 * region, adjust the size & start.  (the region may
2658 			 * now be zero in length)
2659 */
2660 DPRINTFN(BOOT,
2661 ("pmap_boot_find_memory: b-avail[%d] start "
2662 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2663 mp->start += size;
2664 mp->size -= size;
2665 DPRINTFN(BOOT,
2666 ("pmap_boot_find_memory: a-avail[%d] start "
2667 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2668 } else if (e == mp->start + mp->size) {
2669 /*
2670 			 * If the block ends at the end of the region,
2671 			 * adjust only the size.
2672 */
2673 DPRINTFN(BOOT,
2674 ("pmap_boot_find_memory: b-avail[%d] start "
2675 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2676 mp->size -= size;
2677 DPRINTFN(BOOT,
2678 ("pmap_boot_find_memory: a-avail[%d] start "
2679 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2680 } else {
2681 /*
2682 * Block is in the middle of the region, so we
2683 * have to split it in two.
2684 */
2685 for (j = avail_cnt; j > i + 1; j--) {
2686 avail[j] = avail[j-1];
2687 }
2688 DPRINTFN(BOOT,
2689 ("pmap_boot_find_memory: b-avail[%d] start "
2690 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2691 mp[1].start = e;
2692 mp[1].size = mp[0].start + mp[0].size - e;
2693 mp[0].size = s - mp[0].start;
2694 avail_cnt++;
2695 for (; i < avail_cnt; i++) {
2696 DPRINTFN(BOOT,
2697 ("pmap_boot_find_memory: a-avail[%d] "
2698 "start 0x%lx size 0x%lx\n", i,
2699 avail[i].start, avail[i].size));
2700 }
2701 }
2702 return (void *) s;
2703 }
2704 panic("pmap_boot_find_memory: not enough memory for "
2705 "%lx/%lx allocation?", size, alignment);
2706 }
2707
2708 /*
2709 * This is not part of the defined PMAP interface and is specific to the
2710 * PowerPC architecture. This is called during initppc, before the system
2711 * is really initialized.
2712 */
2713 void
2714 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
2715 {
2716 struct mem_region *mp, tmp;
2717 paddr_t s, e;
2718 psize_t size;
2719 int i, j;
2720
2721 /*
2722 * Define the boundaries of the managed kernel virtual address
2723 * space. For now, reserve one segment (minus some overhead)
2724 * for kernel virtual memory.
2725 */
2726 virtual_avail = VM_MIN_KERNEL_ADDRESS;
2727 virtual_end = VM_MAX_KERNEL_ADDRESS;
2728
2729 /*
2730 * Get memory.
2731 */
2732 mem_regions(&mem, &avail);
2733 #if defined(DEBUG)
2734 if (pmapdebug & PMAPDEBUG_BOOT) {
2735 printf("pmap_bootstrap: memory configuration:\n");
2736 for (mp = mem; mp->size; mp++) {
2737 printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
2738 mp->start, mp->size);
2739 }
2740 for (mp = avail; mp->size; mp++) {
2741 printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
2742 mp->start, mp->size);
2743 }
2744 }
2745 #endif
2746
2747 /*
2748 * Find out how much physical memory we have and in how many chunks.
2749 */
2750 for (mem_cnt = 0, mp = mem; mp->size; mp++) {
2751 if (mp->start >= pmap_memlimit)
2752 continue;
2753 if (mp->start + mp->size > pmap_memlimit) {
2754 size = pmap_memlimit - mp->start;
2755 physmem += btoc(size);
2756 } else {
2757 physmem += btoc(mp->size);
2758 }
2759 mem_cnt++;
2760 }
2761
2762 /*
2763 * Count the number of available entries.
2764 */
2765 for (avail_cnt = 0, mp = avail; mp->size; mp++)
2766 avail_cnt++;
2767
2768 /*
2769 * Page align all regions.
2770 */
2771 kernelstart = trunc_page(kernelstart);
2772 kernelend = round_page(kernelend);
2773 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2774 s = round_page(mp->start);
2775 mp->size -= (s - mp->start);
2776 mp->size = trunc_page(mp->size);
2777 mp->start = s;
2778 e = mp->start + mp->size;
2779
2780 DPRINTFN(BOOT,
2781 ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
2782 i, mp->start, mp->size));
2783
2784 /*
2785 * Don't allow the end to run beyond our artificial limit
2786 */
2787 if (e > pmap_memlimit)
2788 e = pmap_memlimit;
2789
2790 /*
2791 		 * If this region is empty or strange, skip it.
2792 */
2793 if (e <= s) {
2794 mp->start = 0;
2795 mp->size = 0;
2796 continue;
2797 }
2798
2799 /*
2800 		 * Does this region overlap the beginning of the kernel?
2801 		 * Does it extend past the end of the kernel?
2802 */
2803 else if (s < kernelstart && e > kernelstart) {
2804 if (e > kernelend) {
2805 avail[avail_cnt].start = kernelend;
2806 avail[avail_cnt].size = e - kernelend;
2807 avail_cnt++;
2808 }
2809 mp->size = kernelstart - s;
2810 }
2811 /*
2812 * Check whether this region overlaps the end of the kernel.
2813 */
2814 else if (s < kernelend && e > kernelend) {
2815 mp->start = kernelend;
2816 mp->size = e - kernelend;
2817 }
2818 /*
2819 		 * Check whether this region lies completely inside the kernel.
2820 		 * Nuke it if it does.
2821 */
2822 else if (s >= kernelstart && e <= kernelend) {
2823 mp->start = 0;
2824 mp->size = 0;
2825 }
2826 /*
2827 * If the user imposed a memory limit, enforce it.
2828 */
2829 else if (s >= pmap_memlimit) {
2830 			mp->start = -PAGE_SIZE;	/* so we know why */
2831 mp->size = 0;
2832 }
2833 else {
2834 mp->start = s;
2835 mp->size = e - s;
2836 }
2837 DPRINTFN(BOOT,
2838 ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
2839 i, mp->start, mp->size));
2840 }
2841
2842 /*
2843 	 * Move (and uncount) all the null regions to the end.
2844 */
2845 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2846 if (mp->size == 0) {
2847 tmp = avail[i];
2848 avail[i] = avail[--avail_cnt];
2849 			avail[avail_cnt] = tmp;
2850 }
2851 }
2852
2853 /*
2854 	 * (Bubble)sort them into ascending order.
2855 */
2856 for (i = 0; i < avail_cnt; i++) {
2857 for (j = i + 1; j < avail_cnt; j++) {
2858 if (avail[i].start > avail[j].start) {
2859 tmp = avail[i];
2860 avail[i] = avail[j];
2861 avail[j] = tmp;
2862 }
2863 }
2864 }
2865
2866 /*
2867 * Make sure they don't overlap.
2868 */
2869 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
2870 if (mp[0].start + mp[0].size > mp[1].start) {
2871 mp[0].size = mp[1].start - mp[0].start;
2872 }
2873 DPRINTFN(BOOT,
2874 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
2875 i, mp->start, mp->size));
2876 }
2877 DPRINTFN(BOOT,
2878 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
2879 i, mp->start, mp->size));
2880
2881 #ifdef PTEGCOUNT
2882 pmap_pteg_cnt = PTEGCOUNT;
2883 #else /* PTEGCOUNT */
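	/*
	 * Size the PTEG table: start at 0x1000 PTEGs and scale it as a
	 * power of two based on the number of physical pages.
	 */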
2884 pmap_pteg_cnt = 0x1000;
2885
2886 while (pmap_pteg_cnt < physmem)
2887 pmap_pteg_cnt <<= 1;
2888
2889 pmap_pteg_cnt >>= 1;
2890 #endif /* PTEGCOUNT */
2891
2892 /*
2893 * Find suitably aligned memory for PTEG hash table.
2894 */
2895 size = pmap_pteg_cnt * sizeof(struct pteg);
2896 pmap_pteg_table = pmap_boot_find_memory(size, size, 0);
2897 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2898 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
2899 panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB",
2900 pmap_pteg_table, size);
2901 #endif
2902
2903 memset((void *)pmap_pteg_table, 0, pmap_pteg_cnt * sizeof(struct pteg));
2904 pmap_pteg_mask = pmap_pteg_cnt - 1;
2905
2906 /*
2907 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
2908 * with pages. So we just steal them before giving them to UVM.
2909 */
2910 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
2911 pmap_pvo_table = pmap_boot_find_memory(size, PAGE_SIZE, 0);
2912 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2913 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
2914 panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB",
2915 pmap_pvo_table, size);
2916 #endif
2917
2918 for (i = 0; i < pmap_pteg_cnt; i++)
2919 TAILQ_INIT(&pmap_pvo_table[i]);
2920
2921 #ifndef MSGBUFADDR
2922 /*
2923 * Allocate msgbuf in high memory.
2924 */
2925 msgbuf_paddr =
2926 (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
2927 #endif
2928
2929 #ifdef __HAVE_PMAP_PHYSSEG
2930 {
2931 u_int npgs = 0;
2932 for (i = 0, mp = avail; i < avail_cnt; i++, mp++)
2933 npgs += btoc(mp->size);
2934 size = (sizeof(struct pvo_head) + 1) * npgs;
2935 pmap_physseg.pvoh = pmap_boot_find_memory(size, PAGE_SIZE, 0);
2936 pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs];
2937 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2938 if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH)
2939 panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB",
2940 pmap_physseg.pvoh, size);
2941 #endif
2942 }
2943 #endif
2944
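	/*
	 * Hand the available regions to UVM, keeping memory below 256MB
	 * (SEGMENT_LENGTH) on its own free list and splitting any region
	 * that straddles that boundary.
	 */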
2945 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
2946 paddr_t pfstart = atop(mp->start);
2947 paddr_t pfend = atop(mp->start + mp->size);
2948 if (mp->size == 0)
2949 continue;
2950 if (mp->start + mp->size <= SEGMENT_LENGTH) {
2951 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2952 VM_FREELIST_FIRST256);
2953 } else if (mp->start >= SEGMENT_LENGTH) {
2954 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2955 VM_FREELIST_DEFAULT);
2956 } else {
2957 pfend = atop(SEGMENT_LENGTH);
2958 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2959 VM_FREELIST_FIRST256);
2960 pfstart = atop(SEGMENT_LENGTH);
2961 pfend = atop(mp->start + mp->size);
2962 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2963 VM_FREELIST_DEFAULT);
2964 }
2965 }
2966
2967 /*
2968 * Make sure kernel vsid is allocated as well as VSID 0.
2969 */
2970 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
2971 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
2972 pmap_vsid_bitmap[0] |= 1;
2973
2974 /*
2975 * Initialize kernel pmap and hardware.
2976 */
2977 for (i = 0; i < 16; i++) {
2978 pmap_kernel()->pm_sr[i] = EMPTY_SEGMENT;
2979 __asm __volatile ("mtsrin %0,%1"
2980 :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
2981 }
2982
2983 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
2984 __asm __volatile ("mtsr %0,%1"
2985 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
2986 #ifdef KERNEL2_SR
2987 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
2988 __asm __volatile ("mtsr %0,%1"
2989 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
2990 #endif
2991 for (i = 0; i < 16; i++) {
2992 if (iosrtable[i] & SR601_T) {
2993 pmap_kernel()->pm_sr[i] = iosrtable[i];
2994 __asm __volatile ("mtsrin %0,%1"
2995 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
2996 }
2997 }
2998
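	/*
	 * Point SDR1 at the PTEG table; its low-order bits hold the
	 * hash table mask.
	 */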
2999 __asm __volatile ("sync; mtsdr1 %0; isync"
3000 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
3001 tlbia();
3002
3003 #ifdef ALTIVEC
3004 pmap_use_altivec = cpu_altivec;
3005 #endif
3006
3007 #ifdef DEBUG
3008 if (pmapdebug & PMAPDEBUG_BOOT) {
3009 u_int cnt;
3010 int bank;
3011 char pbuf[9];
3012 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
3013 cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
3014 printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n",
3015 bank,
3016 ptoa(vm_physmem[bank].avail_start),
3017 ptoa(vm_physmem[bank].avail_end),
3018 ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
3019 }
3020 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3021 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3022 pbuf, cnt);
3023 }
3024 #endif
3025
3026 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3027 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
3028 &pmap_pool_uallocator);
3029
3030 pool_setlowat(&pmap_upvo_pool, 252);
3031
3032 pool_init(&pmap_pool, sizeof(struct pmap),
3033 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator);
3034 }
3035