1 /* $NetBSD: pmap.c,v 1.3 2003/03/14 06:25:58 matt Exp $ */
2 /*-
3 * Copyright (c) 2001 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include "opt_altivec.h"
70 #include "opt_pmap.h"
71 #include <sys/param.h>
72 #include <sys/malloc.h>
73 #include <sys/proc.h>
74 #include <sys/user.h>
75 #include <sys/pool.h>
76 #include <sys/queue.h>
77 #include <sys/device.h> /* for evcnt */
78 #include <sys/systm.h>
79
80 #if __NetBSD_Version__ < 105010000
81 #include <vm/vm.h>
82 #include <vm/vm_kern.h>
83 #define splvm() splimp()
84 #endif
85
86 #include <uvm/uvm.h>
87
88 #include <machine/pcb.h>
89 #include <machine/powerpc.h>
90 #include <powerpc/spr.h>
91 #include <powerpc/oea/sr_601.h>
92 #if __NetBSD_Version__ > 105010000
93 #include <powerpc/oea/bat.h>
94 #else
95 #include <powerpc/bat.h>
96 #endif
97
98 #if defined(DEBUG) || defined(PMAPCHECK)
99 #define STATIC
100 #else
101 #define STATIC static
102 #endif
103
104 #ifdef ALTIVEC
105 int pmap_use_altivec;
106 #endif
107
108 volatile struct pteg *pmap_pteg_table;
109 unsigned int pmap_pteg_cnt;
110 unsigned int pmap_pteg_mask;
111 paddr_t pmap_memlimit = -NBPG; /* there is no limit */
112
113 struct pmap kernel_pmap_;
114 unsigned int pmap_pages_stolen;
115 u_long pmap_pte_valid;
116 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
117 u_long pmap_pvo_enter_depth;
118 u_long pmap_pvo_remove_depth;
119 #endif
120
121 int physmem;
122 #ifndef MSGBUFADDR
123 extern paddr_t msgbuf_paddr;
124 #endif
125
126 static struct mem_region *mem, *avail;
127 static u_int mem_cnt, avail_cnt;
128
129 #ifdef __HAVE_PMAP_PHYSSEG
130 /*
131 * This is a cache of referenced/modified bits.
132 * Bits herein are shifted by ATTR_SHFT.
133 */
134 #define ATTR_SHFT 4
135 struct pmap_physseg pmap_physseg;
136 #endif
137
138 /*
139 * The following structure is exactly 32 bytes long (one cacheline).
140 */
141 struct pvo_entry {
142 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
143 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
144 struct pte pvo_pte; /* Prebuilt PTE */
145 pmap_t pvo_pmap; /* ptr to owning pmap */
146 vaddr_t pvo_vaddr; /* VA of entry */
147 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */
148 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */
149 #define PVO_WIRED 0x0010 /* PVO entry is wired */
150 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */
151 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */
152 };
153 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
154 #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
155 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
156 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
157 #define PVO_PTEGIDX_CLR(pvo) \
158 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
159 #define PVO_PTEGIDX_SET(pvo,i) \
160 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
161
162 TAILQ_HEAD(pvo_tqhead, pvo_entry);
163 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */
164 struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of kernel's unmanaged mappings */
165 struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */
166
167 struct pool pmap_pool; /* pool for pmap structures */
168 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */
169 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */
170
171 /*
172 * We keep a cache of unmanaged pages to be used for pvo entries for
173 * unmanaged pages.
174 */
175 struct pvo_page {
176 SIMPLEQ_ENTRY(pvo_page) pvop_link;
177 };
178 SIMPLEQ_HEAD(pvop_head, pvo_page);
179 struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
180 struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
181 u_long pmap_upvop_free;
182 u_long pmap_upvop_maxfree;
183 u_long pmap_mpvop_free;
184 u_long pmap_mpvop_maxfree;
185
186 STATIC void *pmap_pool_ualloc(struct pool *, int);
187 STATIC void *pmap_pool_malloc(struct pool *, int);
188
189 STATIC void pmap_pool_ufree(struct pool *, void *);
190 STATIC void pmap_pool_mfree(struct pool *, void *);
191
192 static struct pool_allocator pmap_pool_mallocator = {
193 pmap_pool_malloc, pmap_pool_mfree, 0,
194 };
195
196 static struct pool_allocator pmap_pool_uallocator = {
197 pmap_pool_ualloc, pmap_pool_ufree, 0,
198 };
199
200 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
201 void pmap_pte_print(volatile struct pte *);
202 #endif
203
204 #ifdef DDB
205 void pmap_pteg_check(void);
206 void pmap_pteg_dist(void);
207 void pmap_print_pte(pmap_t, vaddr_t);
208 void pmap_print_mmuregs(void);
209 #endif
210
211 #if defined(DEBUG) || defined(PMAPCHECK)
212 #ifdef PMAPCHECK
213 int pmapcheck = 1;
214 #else
215 int pmapcheck = 0;
216 #endif
217 void pmap_pvo_verify(void);
218 STATIC void pmap_pvo_check(const struct pvo_entry *);
219 #define PMAP_PVO_CHECK(pvo) \
220 do { \
221 if (pmapcheck) \
222 pmap_pvo_check(pvo); \
223 } while (0)
224 #else
225 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0)
226 #endif
227 STATIC int pmap_pte_insert(int, struct pte *);
228 STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
229 vaddr_t, paddr_t, register_t, int);
230 STATIC void pmap_pvo_remove(struct pvo_entry *, int);
231 STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
232 STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
233
234 STATIC void tlbia(void);
235
236 STATIC void pmap_release(pmap_t);
237 STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);
238
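/*
 * Bitmap of VSID hash values in use: one bit per possible value (NPMAPS
 * of them), packed VSID_NBPW to a word.  pmap_pinit() sets a bit when it
 * allocates a set of segment registers; pmap_release() clears it.
 */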
239 #define VSID_NBPW (sizeof(uint32_t) * 8)
240 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
241
242 static int pmap_initialized;
243
244 #if defined(DEBUG) || defined(PMAPDEBUG)
245 #define PMAPDEBUG_BOOT 0x0001
246 #define PMAPDEBUG_PTE 0x0002
247 #define PMAPDEBUG_EXEC 0x0008
248 #define PMAPDEBUG_PVOENTER 0x0010
249 #define PMAPDEBUG_PVOREMOVE 0x0020
250 #define PMAPDEBUG_ACTIVATE 0x0100
251 #define PMAPDEBUG_CREATE 0x0200
252 #define PMAPDEBUG_ENTER 0x1000
253 #define PMAPDEBUG_KENTER 0x2000
254 #define PMAPDEBUG_KREMOVE 0x4000
255 #define PMAPDEBUG_REMOVE 0x8000
256 unsigned int pmapdebug = 0;
257 # define DPRINTF(x) printf x
258 # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x
259 #else
260 # define DPRINTF(x)
261 # define DPRINTFN(n, x)
262 #endif
263
264
265 #ifdef PMAPCOUNTERS
266 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
267 #define PMAPCOUNT2(ev) ((ev).ev_count++)
268
269 struct evcnt pmap_evcnt_mappings =
270 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
271 "pmap", "pages mapped");
272 struct evcnt pmap_evcnt_unmappings =
273 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
274 "pmap", "pages unmapped");
275
276 struct evcnt pmap_evcnt_kernel_mappings =
277 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
278 "pmap", "kernel pages mapped");
279 struct evcnt pmap_evcnt_kernel_unmappings =
280 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
281 "pmap", "kernel pages unmapped");
282
283 struct evcnt pmap_evcnt_mappings_replaced =
284 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
285 "pmap", "page mappings replaced");
286
287 struct evcnt pmap_evcnt_exec_mappings =
288 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
289 "pmap", "exec pages mapped");
290 struct evcnt pmap_evcnt_exec_cached =
291 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
292 "pmap", "exec pages cached");
293
294 struct evcnt pmap_evcnt_exec_synced =
295 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
296 "pmap", "exec pages synced");
297 struct evcnt pmap_evcnt_exec_synced_clear_modify =
298 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
299 "pmap", "exec pages synced (CM)");
300
301 struct evcnt pmap_evcnt_exec_uncached_page_protect =
302 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
303 "pmap", "exec pages uncached (PP)");
304 struct evcnt pmap_evcnt_exec_uncached_clear_modify =
305 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
306 "pmap", "exec pages uncached (CM)");
307 struct evcnt pmap_evcnt_exec_uncached_zero_page =
308 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
309 "pmap", "exec pages uncached (ZP)");
310 struct evcnt pmap_evcnt_exec_uncached_copy_page =
311 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
312 "pmap", "exec pages uncached (CP)");
313
314 struct evcnt pmap_evcnt_updates =
315 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
316 "pmap", "updates");
317 struct evcnt pmap_evcnt_collects =
318 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
319 "pmap", "collects");
320 struct evcnt pmap_evcnt_copies =
321 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
322 "pmap", "copies");
323
324 struct evcnt pmap_evcnt_ptes_spilled =
325 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
326 "pmap", "ptes spilled from overflow");
327 struct evcnt pmap_evcnt_ptes_unspilled =
328 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
329 "pmap", "ptes not spilled");
330 struct evcnt pmap_evcnt_ptes_evicted =
331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
332 "pmap", "ptes evicted");
333
334 struct evcnt pmap_evcnt_ptes_primary[8] = {
335 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
336 "pmap", "ptes added at primary[0]"),
337 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
338 "pmap", "ptes added at primary[1]"),
339 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
340 "pmap", "ptes added at primary[2]"),
341 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
342 "pmap", "ptes added at primary[3]"),
343
344 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
345 "pmap", "ptes added at primary[4]"),
346 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
347 "pmap", "ptes added at primary[5]"),
348 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
349 "pmap", "ptes added at primary[6]"),
350 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
351 "pmap", "ptes added at primary[7]"),
352 };
353 struct evcnt pmap_evcnt_ptes_secondary[8] = {
354 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
355 "pmap", "ptes added at secondary[0]"),
356 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
357 "pmap", "ptes added at secondary[1]"),
358 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
359 "pmap", "ptes added at secondary[2]"),
360 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
361 "pmap", "ptes added at secondary[3]"),
362
363 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
364 "pmap", "ptes added at secondary[4]"),
365 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
366 "pmap", "ptes added at secondary[5]"),
367 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
368 "pmap", "ptes added at secondary[6]"),
369 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
370 "pmap", "ptes added at secondary[7]"),
371 };
372 struct evcnt pmap_evcnt_ptes_removed =
373 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
374 "pmap", "ptes removed");
375 struct evcnt pmap_evcnt_ptes_changed =
376 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
377 "pmap", "ptes changed");
378
379 /*
380 * From pmap_subr.c
381 */
382 extern struct evcnt pmap_evcnt_zeroed_pages;
383 extern struct evcnt pmap_evcnt_copied_pages;
384 extern struct evcnt pmap_evcnt_idlezeroed_pages;
385 #else
386 #define PMAPCOUNT(ev) ((void) 0)
387 #define PMAPCOUNT2(ev) ((void) 0)
388 #endif
389
390 #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va))
391 #define TLBSYNC() __asm __volatile("tlbsync")
392 #define SYNC() __asm __volatile("sync")
393 #define EIEIO() __asm __volatile("eieio")
394 #define MFMSR() mfmsr()
395 #define MTMSR(psl) mtmsr(psl)
396 #define MFPVR() mfpvr()
397 #define MFSRIN(va) mfsrin(va)
398 #define MFTB() mfrtcltbl()
399
400 static __inline register_t
401 mfsrin(vaddr_t va)
402 {
403 register_t sr;
404 __asm __volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
405 return sr;
406 }
407
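/*
 * Disable external interrupts by clearing MSR[EE] and return the previous
 * MSR value so the caller can hand it back to pmap_interrupts_restore().
 */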
408 static __inline register_t
409 pmap_interrupts_off(void)
410 {
411 register_t msr = MFMSR();
412 if (msr & PSL_EE)
413 MTMSR(msr & ~PSL_EE);
414 return msr;
415 }
416
417 static void
418 pmap_interrupts_restore(register_t msr)
419 {
420 if (msr & PSL_EE)
421 MTMSR(msr);
422 }
423
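/*
 * Read the low word of the timebase.  The MPC601 has no timebase
 * register, so its real-time clock (RTCL) is used instead, shifted
 * down to give a comparable free-running count.
 */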
424 static __inline u_int32_t
425 mfrtcltbl(void)
426 {
427
428 if ((MFPVR() >> 16) == MPC601)
429 return (mfrtcl() >> 7);
430 else
431 return (mftbl());
432 }
433
434 /*
435 * These small routines may have to be replaced,
436 * if/when we support processors other than the 604.
437 */
438
439 void
440 tlbia(void)
441 {
442 caddr_t i;
443
444 SYNC();
445 /*
446 * Why not use "tlbia"? Because not all processors implement it.
447 *
448 * This needs to be a per-cpu callback to do the appropriate thing
449 * for the CPU. XXX
450 */
451 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
452 TLBIE(i);
453 EIEIO();
454 SYNC();
455 }
456 TLBSYNC();
457 SYNC();
458 }
459
460 static __inline register_t
461 va_to_vsid(const struct pmap *pm, vaddr_t addr)
462 {
463 return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID);
464 }
465
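/*
 * Compute the primary PTEG index for a VA: hash the VSID against the
 * page-index bits of the effective address and mask by the number of
 * PTE groups, per the OEA hashed page table scheme.
 */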
466 static __inline register_t
467 va_to_pteg(const struct pmap *pm, vaddr_t addr)
468 {
469 register_t hash;
470
471 hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
472 return hash & pmap_pteg_mask;
473 }
474
475 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
476 /*
477 * Given a PTE in the page table, calculate the VADDR that hashes to it.
478 * The only bit of magic is that the top 4 bits of the address don't
479 * technically exist in the PTE. But we know we reserved 4 bits of the
480 * VSID for it so that's how we get it.
481 */
482 static vaddr_t
483 pmap_pte_to_va(volatile const struct pte *pt)
484 {
485 vaddr_t va;
486 uintptr_t ptaddr = (uintptr_t) pt;
487
488 if (pt->pte_hi & PTE_HID)
489 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
490
491 /* PPC Bits 10-19 */
492 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
493 va <<= ADDR_PIDX_SHFT;
494
495 /* PPC Bits 4-9 */
496 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
497
498 /* PPC Bits 0-3 */
499 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
500
501 return va;
502 }
503 #endif
504
505 static __inline struct pvo_head *
506 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
507 {
508 #ifdef __HAVE_VM_PAGE_MD
509 struct vm_page *pg;
510
511 pg = PHYS_TO_VM_PAGE(pa);
512 if (pg_p != NULL)
513 *pg_p = pg;
514 if (pg == NULL)
515 return &pmap_pvo_unmanaged;
516 return &pg->mdpage.mdpg_pvoh;
517 #endif
518 #ifdef __HAVE_PMAP_PHYSSEG
519 int bank, pg;
520
521 bank = vm_physseg_find(atop(pa), &pg);
522 if (pg_p != NULL)
523 *pg_p = pg;
524 if (bank == -1)
525 return &pmap_pvo_unmanaged;
526 return &vm_physmem[bank].pmseg.pvoh[pg];
527 #endif
528 }
529
530 static __inline struct pvo_head *
531 vm_page_to_pvoh(struct vm_page *pg)
532 {
533 #ifdef __HAVE_VM_PAGE_MD
534 return &pg->mdpage.mdpg_pvoh;
535 #endif
536 #ifdef __HAVE_PMAP_PHYSSEG
537 return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
538 #endif
539 }
540
541
542 #ifdef __HAVE_PMAP_PHYSSEG
543 static __inline char *
544 pa_to_attr(paddr_t pa)
545 {
546 int bank, pg;
547
548 bank = vm_physseg_find(atop(pa), &pg);
549 if (bank == -1)
550 return NULL;
551 return &vm_physmem[bank].pmseg.attrs[pg];
552 }
553 #endif
554
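/*
 * The referenced/modified attribute cache for managed pages lives either
 * in each page's mdpage (__HAVE_VM_PAGE_MD) or in the physseg attribute
 * array (__HAVE_PMAP_PHYSSEG, where the bits are shifted by ATTR_SHFT).
 */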
555 static __inline void
556 pmap_attr_clear(struct vm_page *pg, int ptebit)
557 {
558 #ifdef __HAVE_PMAP_PHYSSEG
559 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT);
560 #endif
561 #ifdef __HAVE_VM_PAGE_MD
562 pg->mdpage.mdpg_attrs &= ~ptebit;
563 #endif
564 }
565
566 static __inline int
567 pmap_attr_fetch(struct vm_page *pg)
568 {
569 #ifdef __HAVE_PMAP_PHYSSEG
570 return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT;
571 #endif
572 #ifdef __HAVE_VM_PAGE_MD
573 return pg->mdpage.mdpg_attrs;
574 #endif
575 }
576
577 static __inline void
578 pmap_attr_save(struct vm_page *pg, int ptebit)
579 {
580 #ifdef __HAVE_PMAP_PHYSSEG
581 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT);
582 #endif
583 #ifdef __HAVE_VM_PAGE_MD
584 pg->mdpage.mdpg_attrs |= ptebit;
585 #endif
586 }
587
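/*
 * Does the real PTE match the cached copy?  Only pte_hi is compared;
 * the pte_lo comparison is compiled out.
 */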
588 static __inline int
589 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
590 {
591 if (pt->pte_hi == pvo_pt->pte_hi
592 #if 0
593 && ((pt->pte_lo ^ pvo_pt->pte_lo) &
594 ~(PTE_REF|PTE_CHG)) == 0
595 #endif
596 )
597 return 1;
598 return 0;
599 }
600
601 static __inline void
602 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
603 {
604 /*
605 * Construct the PTE. Default to IMB initially. Valid bit
606 * only gets set when the real pte is set in memory.
607 *
608 * Note: Don't set the valid bit for correct operation of tlb update.
609 */
610 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
611 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
612 pt->pte_lo = pte_lo;
613 }
614
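/*
 * Fold the hardware-maintained REF/CHG bits from the real PTE back into
 * the cached copy held in the PVO.
 */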
615 static __inline void
616 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
617 {
618 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
619 }
620
621 static __inline void
622 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
623 {
624 /*
625 * As shown in Section 7.6.3.2.3
626 */
627 pt->pte_lo &= ~ptebit;
628 TLBIE(va);
629 SYNC();
630 EIEIO();
631 TLBSYNC();
632 SYNC();
633 }
634
635 static __inline void
636 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
637 {
638 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
639 if (pvo_pt->pte_hi & PTE_VALID)
640 panic("pte_set: setting an already valid pte %p", pvo_pt);
641 #endif
642 pvo_pt->pte_hi |= PTE_VALID;
643 /*
644 * Update the PTE as defined in section 7.6.3.1
645 * Note that the REF/CHG bits are from pvo_pt and thus should
646 * have been saved so this routine can restore them (if desired).
647 */
648 pt->pte_lo = pvo_pt->pte_lo;
649 EIEIO();
650 pt->pte_hi = pvo_pt->pte_hi;
651 SYNC();
652 pmap_pte_valid++;
653 }
654
655 static __inline void
656 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
657 {
658 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
659 if ((pvo_pt->pte_hi & PTE_VALID) == 0)
660 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
661 if ((pt->pte_hi & PTE_VALID) == 0)
662 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
663 #endif
664
665 pvo_pt->pte_hi &= ~PTE_VALID;
666 /*
667 * Force the ref & chg bits back into the PTEs.
668 */
669 SYNC();
670 /*
671 * Invalidate the pte ... (Section 7.6.3.3)
672 */
673 pt->pte_hi &= ~PTE_VALID;
674 SYNC();
675 TLBIE(va);
676 SYNC();
677 EIEIO();
678 TLBSYNC();
679 SYNC();
680 /*
681 * Save the ref & chg bits ...
682 */
683 pmap_pte_synch(pt, pvo_pt);
684 pmap_pte_valid--;
685 }
686
687 static __inline void
688 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
689 {
690 /*
691 * Invalidate the PTE
692 */
693 pmap_pte_unset(pt, pvo_pt, va);
694 pmap_pte_set(pt, pvo_pt);
695 }
696
697 /*
698 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
699 * (either primary or secondary location).
700 *
701 * Note: neither the destination nor the source PTE may have PTE_VALID set.
702 */
703
704 STATIC int
705 pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
706 {
707 volatile struct pte *pt;
708 int i;
709
710 #if defined(DEBUG)
711 DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%lx 0x%lx\n",
712 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
713 #endif
714 /*
715 * First try primary hash.
716 */
717 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
718 if ((pt->pte_hi & PTE_VALID) == 0) {
719 pvo_pt->pte_hi &= ~PTE_HID;
720 pmap_pte_set(pt, pvo_pt);
721 return i;
722 }
723 }
724
725 /*
726 * Now try secondary hash.
727 */
728 ptegidx ^= pmap_pteg_mask;
729 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
730 if ((pt->pte_hi & PTE_VALID) == 0) {
731 pvo_pt->pte_hi |= PTE_HID;
732 pmap_pte_set(pt, pvo_pt);
733 return i;
734 }
735 }
736 return -1;
737 }
738
739 /*
740 * Spill handler.
741 *
742 * Tries to spill a page table entry from the overflow area.
743 * This runs in either real mode (if dealing with an exception spill)
744 * or virtual mode when dealing with manually spilling one of the
745 * kernel's pte entries. In either case, interrupts are already
746 * disabled.
747 */
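/*
 * The overflow lists in pmap_pvo_table are kept ordered so that evicted
 * (non-resident) PVOs precede resident ones; the search below relies on
 * this to stop at the first PVO with a valid PTE.
 */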
748 int
749 pmap_pte_spill(struct pmap *pm, vaddr_t addr)
750 {
751 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
752 struct pvo_entry *pvo;
753 struct pvo_tqhead *pvoh, *vpvoh;
754 int ptegidx, i, j;
755 volatile struct pteg *pteg;
756 volatile struct pte *pt;
757
758 ptegidx = va_to_pteg(pm, addr);
759
760 /*
761 * Have to substitute some entry. Use the primary hash for this.
762 *
763 * Use low bits of timebase as random generator
764 */
765 pteg = &pmap_pteg_table[ptegidx];
766 i = MFTB() & 7;
767 pt = &pteg->pt[i];
768
769 source_pvo = NULL;
770 victim_pvo = NULL;
771 pvoh = &pmap_pvo_table[ptegidx];
772 TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
773
774 /*
775 * We need to find pvo entry for this address...
776 */
777 PMAP_PVO_CHECK(pvo); /* sanity check */
778
779 /*
780 * If we haven't found the source and we come to a PVO with
781 * a valid PTE, then we know we can't find it because all
782 * evicted PVOs are always first in the list.
783 */
784 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
785 break;
786 if (source_pvo == NULL && pm == pvo->pvo_pmap &&
787 addr == PVO_VADDR(pvo)) {
788
789 /*
790 * Now we have found the entry to be spilled into the
791 * pteg. Attempt to insert it into the page table.
792 */
793 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
794 if (j >= 0) {
795 PVO_PTEGIDX_SET(pvo, j);
796 PMAP_PVO_CHECK(pvo); /* sanity check */
797 pvo->pvo_pmap->pm_evictions--;
798 PMAPCOUNT(ptes_spilled);
799 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
800 ? pmap_evcnt_ptes_secondary
801 : pmap_evcnt_ptes_primary)[j]);
802
803 /*
804 * Since we keep the evicted entries at the
805 * front of the PVO list, we need to move this
806 * (now resident) PVO after the evicted
807 * entries.
808 */
809 next_pvo = TAILQ_NEXT(pvo, pvo_olink);
810
811 /*
812 * If we don't have to move (either we were
813 * the last entry or the next entry was valid),
814 * don't change our position. Otherwise
815 * move ourselves to the tail of the queue.
816 */
817 if (next_pvo != NULL &&
818 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
819 TAILQ_REMOVE(pvoh, pvo, pvo_olink);
820 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
821 }
822 return 1;
823 }
824 source_pvo = pvo;
825 if (victim_pvo != NULL)
826 break;
827 }
828
829 /*
830 * We also need the pvo entry of the victim we are replacing
831 * so save the R & C bits of the PTE.
832 */
833 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
834 pmap_pte_compare(pt, &pvo->pvo_pte)) {
835 vpvoh = pvoh;
836 victim_pvo = pvo;
837 if (source_pvo != NULL)
838 break;
839 }
840 }
841
842 if (source_pvo == NULL) {
843 PMAPCOUNT(ptes_unspilled);
844 return 0;
845 }
846
847 if (victim_pvo == NULL) {
848 if ((pt->pte_hi & PTE_HID) == 0)
849 panic("pmap_pte_spill: victim p-pte (%p) has "
850 "no pvo entry!", pt);
851
852 /*
853 * If this is a secondary PTE, we need to search
854 * its primary pvo bucket for the matching PVO.
855 */
856 vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];
857 TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
858 PMAP_PVO_CHECK(pvo); /* sanity check */
859
860 /*
861 * We also need the pvo entry of the victim we are
862 * replacing so save the R & C bits of the PTE.
863 */
864 if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
865 victim_pvo = pvo;
866 break;
867 }
868 }
869 if (victim_pvo == NULL)
870 panic("pmap_pte_spill: victim s-pte (%p) has "
871 "no pvo entry!", pt);
872 }
873
874 /*
875 * We are invalidating the TLB entry for the EA of the PVO
876 * we are replacing even though it's valid; if we don't,
877 * we lose any ref/chg bit changes contained in the TLB
878 * entry.
879 */
880 source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
881
882 /*
883 * To enforce the PVO list ordering constraint that all
884 * evicted entries should come before all valid entries,
885 * move the source PVO to the tail of its list and the
886 * victim PVO to the head of its list (which might not be
887 * the same list, if the victim was using the secondary hash).
888 */
889 TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
890 TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
891 TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
892 TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
893 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
894 pmap_pte_set(pt, &source_pvo->pvo_pte);
895 victim_pvo->pvo_pmap->pm_evictions++;
896 source_pvo->pvo_pmap->pm_evictions--;
897
898 PVO_PTEGIDX_CLR(victim_pvo);
899 PVO_PTEGIDX_SET(source_pvo, i);
900 PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
901 PMAPCOUNT(ptes_spilled);
902 PMAPCOUNT(ptes_evicted);
903 PMAPCOUNT(ptes_removed);
904
905 PMAP_PVO_CHECK(victim_pvo);
906 PMAP_PVO_CHECK(source_pvo);
907 return 1;
908 }
909
910 /*
911 * Restrict given range to physical memory
912 */
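/*
 * The range is clamped to the physical memory region that overlaps it;
 * if no region overlaps, *size is set to 0.  For example (hypothetical
 * values), *start = 0x00fff000, *size = 0x3000 against a single region
 * [0, 0x01000000) yields *start = 0x00fff000, *size = 0x1000.
 */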
913 void
914 pmap_real_memory(paddr_t *start, psize_t *size)
915 {
916 struct mem_region *mp;
917
918 for (mp = mem; mp->size; mp++) {
919 if (*start + *size > mp->start
920 && *start < mp->start + mp->size) {
921 if (*start < mp->start) {
922 *size -= mp->start - *start;
923 *start = mp->start;
924 }
925 if (*start + *size > mp->start + mp->size)
926 *size = mp->start + mp->size - *start;
927 return;
928 }
929 }
930 *size = 0;
931 }
932
933 /*
934 * Initialize anything else for pmap handling.
935 * Called during vm_init().
936 */
937 void
938 pmap_init(void)
939 {
940 int s;
941 #ifdef __HAVE_PMAP_PHYSSEG
942 struct pvo_tqhead *pvoh;
943 int bank;
944 long sz;
945 char *attr;
946
947 s = splvm();
948 pvoh = pmap_physseg.pvoh;
949 attr = pmap_physseg.attrs;
950 for (bank = 0; bank < vm_nphysseg; bank++) {
951 sz = vm_physmem[bank].end - vm_physmem[bank].start;
952 vm_physmem[bank].pmseg.pvoh = pvoh;
953 vm_physmem[bank].pmseg.attrs = attr;
954 for (; sz > 0; sz--, pvoh++, attr++) {
955 TAILQ_INIT(pvoh);
956 *attr = 0;
957 }
958 }
959 splx(s);
960 #endif
961
962 s = splvm();
963 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
964 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
965 &pmap_pool_mallocator);
966
967 pool_setlowat(&pmap_mpvo_pool, 1008);
968
969 pmap_initialized = 1;
970 splx(s);
971
972 #ifdef PMAPCOUNTERS
973 evcnt_attach_static(&pmap_evcnt_mappings);
974 evcnt_attach_static(&pmap_evcnt_mappings_replaced);
975 evcnt_attach_static(&pmap_evcnt_unmappings);
976
977 evcnt_attach_static(&pmap_evcnt_kernel_mappings);
978 evcnt_attach_static(&pmap_evcnt_kernel_unmappings);
979
980 evcnt_attach_static(&pmap_evcnt_exec_mappings);
981 evcnt_attach_static(&pmap_evcnt_exec_cached);
982 evcnt_attach_static(&pmap_evcnt_exec_synced);
983 evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify);
984
985 evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect);
986 evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify);
987 evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page);
988 evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page);
989
990 evcnt_attach_static(&pmap_evcnt_zeroed_pages);
991 evcnt_attach_static(&pmap_evcnt_copied_pages);
992 evcnt_attach_static(&pmap_evcnt_idlezeroed_pages);
993
994 evcnt_attach_static(&pmap_evcnt_updates);
995 evcnt_attach_static(&pmap_evcnt_collects);
996 evcnt_attach_static(&pmap_evcnt_copies);
997
998 evcnt_attach_static(&pmap_evcnt_ptes_spilled);
999 evcnt_attach_static(&pmap_evcnt_ptes_unspilled);
1000 evcnt_attach_static(&pmap_evcnt_ptes_evicted);
1001 evcnt_attach_static(&pmap_evcnt_ptes_removed);
1002 evcnt_attach_static(&pmap_evcnt_ptes_changed);
1003 evcnt_attach_static(&pmap_evcnt_ptes_primary[0]);
1004 evcnt_attach_static(&pmap_evcnt_ptes_primary[1]);
1005 evcnt_attach_static(&pmap_evcnt_ptes_primary[2]);
1006 evcnt_attach_static(&pmap_evcnt_ptes_primary[3]);
1007 evcnt_attach_static(&pmap_evcnt_ptes_primary[4]);
1008 evcnt_attach_static(&pmap_evcnt_ptes_primary[5]);
1009 evcnt_attach_static(&pmap_evcnt_ptes_primary[6]);
1010 evcnt_attach_static(&pmap_evcnt_ptes_primary[7]);
1011 evcnt_attach_static(&pmap_evcnt_ptes_secondary[0]);
1012 evcnt_attach_static(&pmap_evcnt_ptes_secondary[1]);
1013 evcnt_attach_static(&pmap_evcnt_ptes_secondary[2]);
1014 evcnt_attach_static(&pmap_evcnt_ptes_secondary[3]);
1015 evcnt_attach_static(&pmap_evcnt_ptes_secondary[4]);
1016 evcnt_attach_static(&pmap_evcnt_ptes_secondary[5]);
1017 evcnt_attach_static(&pmap_evcnt_ptes_secondary[6]);
1018 evcnt_attach_static(&pmap_evcnt_ptes_secondary[7]);
1019 #endif
1020 }
1021
1022 /*
1023 * How much virtual space does the kernel get?
1024 */
1025 void
1026 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1027 {
1028 /*
1029 * For now, reserve one segment (minus some overhead) for kernel
1030 * virtual memory
1031 */
1032 *start = VM_MIN_KERNEL_ADDRESS;
1033 *end = VM_MAX_KERNEL_ADDRESS;
1034 }
1035
1036 /*
1037 * Allocate, initialize, and return a new physical map.
1038 */
1039 pmap_t
1040 pmap_create(void)
1041 {
1042 pmap_t pm;
1043
1044 pm = pool_get(&pmap_pool, PR_WAITOK);
1045 memset((caddr_t)pm, 0, sizeof *pm);
1046 pmap_pinit(pm);
1047
1048 DPRINTFN(CREATE,("pmap_create: pm %p:\n"
1049 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n"
1050 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n", pm,
1051 pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3],
1052 pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7],
1053 pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11],
1054 pm->pm_sr[12], pm->pm_sr[13], pm->pm_sr[14], pm->pm_sr[15]));
1055 return pm;
1056 }
1057
1058 /*
1059 * Initialize a preallocated and zeroed pmap structure.
1060 */
1061 void
1062 pmap_pinit(pmap_t pm)
1063 {
1064 register_t entropy = MFTB();
1065 register_t mask;
1066 int i;
1067
1068 /*
1069 * Allocate some segment registers for this pmap.
1070 */
1071 pm->pm_refs = 1;
1072 for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1073 static register_t pmap_vsidcontext;
1074 register_t hash;
1075 unsigned int n;
1076
1077 /* Create a new value by multiplying by a prime and adding in
1078 * entropy from the timebase register. This is to make the
1079 * VSID more random so that the PT Hash function collides
1080 * less often. (note that the prime causes gcc to do shifts
1081 * instead of a multiply)
1082 */
1083 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1084 hash = pmap_vsidcontext & (NPMAPS - 1);
1085 if (hash == 0) /* 0 is special, avoid it */
1086 continue;
1087 n = hash >> 5;
1088 mask = 1L << (hash & (VSID_NBPW-1));
1089 hash = pmap_vsidcontext;
1090 if (pmap_vsid_bitmap[n] & mask) { /* collision? */
1091 /* anything free in this bucket? */
1092 if (~pmap_vsid_bitmap[n] == 0) {
1093 entropy = hash >> PTE_VSID_SHFT;
1094 continue;
1095 }
1096 i = ffs(~pmap_vsid_bitmap[n]) - 1;
1097 mask = 1L << i;
1098 hash &= ~(VSID_NBPW-1);
1099 hash |= i;
1100 }
1101 /*
1102 * Make sure to clear out the SR_KEY_LEN bits because we put
1103 * our data in those bits (to identify the segment).
1104 */
1105 hash &= PTE_VSID >> (PTE_VSID_SHFT + SR_KEY_LEN);
1106 pmap_vsid_bitmap[n] |= mask;
1107 for (i = 0; i < 16; i++)
1108 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY;
1109 return;
1110 }
1111 panic("pmap_pinit: out of segments");
1112 }
1113
1114 /*
1115 * Add a reference to the given pmap.
1116 */
1117 void
1118 pmap_reference(pmap_t pm)
1119 {
1120 pm->pm_refs++;
1121 }
1122
1123 /*
1124 * Retire the given pmap from service.
1125 * Should only be called if the map contains no valid mappings.
1126 */
1127 void
1128 pmap_destroy(pmap_t pm)
1129 {
1130 if (--pm->pm_refs == 0) {
1131 pmap_release(pm);
1132 pool_put(&pmap_pool, pm);
1133 }
1134 }
1135
1136 /*
1137 * Release any resources held by the given physical map.
1138 * Called when a pmap initialized by pmap_pinit is being released.
1139 */
1140 void
1141 pmap_release(pmap_t pm)
1142 {
1143 int idx, mask;
1144
1145 if (pm->pm_sr[0] == 0)
1146 panic("pmap_release");
1147 idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS-1);
1148 mask = 1 << (idx % VSID_NBPW);
1149 idx /= VSID_NBPW;
1150 pmap_vsid_bitmap[idx] &= ~mask;
1151 }
1152
1153 /*
1154 * Copy the range specified by src_addr/len
1155 * from the source map to the range dst_addr/len
1156 * in the destination map.
1157 *
1158 * This routine is only advisory and need not do anything.
1159 */
1160 void
1161 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
1162 vsize_t len, vaddr_t src_addr)
1163 {
1164 PMAPCOUNT(copies);
1165 }
1166
1167 /*
1168 * Require that all active physical maps contain no
1169 * incorrect entries NOW.
1170 */
1171 void
1172 pmap_update(struct pmap *pmap)
1173 {
1174 PMAPCOUNT(updates);
1175 TLBSYNC();
1176 }
1177
1178 /*
1179 * Garbage collects the physical map system for
1180 * pages which are no longer used.
1181 * Success need not be guaranteed -- that is, there
1182 * may well be pages which are not referenced, but
1183 * others may be collected.
1184 * Called by the pageout daemon when pages are scarce.
1185 */
1186 void
1187 pmap_collect(pmap_t pm)
1188 {
1189 PMAPCOUNT(collects);
1190 }
1191
1192 static __inline int
1193 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1194 {
1195 int pteidx;
1196 /*
1197 * We can find the actual pte entry without searching by
1198 * grabbing the PTEG slot index stored in the low bits of pvo_vaddr
1199 * and by noticing the HID bit.
1200 */
1201 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1202 if (pvo->pvo_pte.pte_hi & PTE_HID)
1203 pteidx ^= pmap_pteg_mask * 8;
1204 return pteidx;
1205 }
1206
1207 volatile struct pte *
1208 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1209 {
1210 volatile struct pte *pt;
1211
1212 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1213 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1214 return NULL;
1215 #endif
1216
1217 /*
1218 * If we haven't been supplied the pteidx, calculate it.
1219 */
1220 if (pteidx == -1) {
1221 int ptegidx;
1222 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1223 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1224 }
1225
1226 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1227
1228 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1229 return pt;
1230 #else
1231 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1232 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1233 "pvo but no valid pte index", pvo);
1234 }
1235 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1236 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1237 "pvo but no valid pte", pvo);
1238 }
1239
1240 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1241 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1242 #if defined(DEBUG) || defined(PMAPCHECK)
1243 pmap_pte_print(pt);
1244 #endif
1245 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1246 "pmap_pteg_table %p but invalid in pvo",
1247 pvo, pt);
1248 }
1249 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
1250 #if defined(DEBUG) || defined(PMAPCHECK)
1251 pmap_pte_print(pt);
1252 #endif
1253 panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1254 "not match pte %p in pmap_pteg_table",
1255 pvo, pt);
1256 }
1257 return pt;
1258 }
1259
1260 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1261 #if defined(DEBUG) || defined(PMAPCHECK)
1262 pmap_pte_print(pt);
1263 #endif
1264 panic("pmap_pvo_to_pte: pvo %p: has invalid pte %p in "
1265 "pmap_pteg_table but valid in pvo", pvo, pt);
1266 }
1267 return NULL;
1268 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1269 }
1270
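/*
 * Look up the PVO for a pmap/VA pair by walking the overflow list of its
 * primary PTEG; optionally return the PTE index through pteidx_p.
 */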
1271 struct pvo_entry *
1272 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1273 {
1274 struct pvo_entry *pvo;
1275 int ptegidx;
1276
1277 va &= ~ADDR_POFF;
1278 ptegidx = va_to_pteg(pm, va);
1279
1280 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1281 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1282 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1283 panic("pmap_pvo_find_va: invalid pvo %p on "
1284 "list %#x (%p)", pvo, ptegidx,
1285 &pmap_pvo_table[ptegidx]);
1286 #endif
1287 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1288 if (pteidx_p)
1289 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1290 return pvo;
1291 }
1292 }
1293 return NULL;
1294 }
1295
1296 #if defined(DEBUG) || defined(PMAPCHECK)
1297 void
1298 pmap_pvo_check(const struct pvo_entry *pvo)
1299 {
1300 struct pvo_head *pvo_head;
1301 struct pvo_entry *pvo0;
1302 volatile struct pte *pt;
1303 int failed = 0;
1304
1305 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1306 panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1307
1308 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1309 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1310 pvo, pvo->pvo_pmap);
1311 failed = 1;
1312 }
1313
1314 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1315 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1316 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1317 pvo, TAILQ_NEXT(pvo, pvo_olink));
1318 failed = 1;
1319 }
1320
1321 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1322 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1323 printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
1324 pvo, LIST_NEXT(pvo, pvo_vlink));
1325 failed = 1;
1326 }
1327
1328 if (pvo->pvo_vaddr & PVO_MANAGED) {
1329 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1330 } else {
1331 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1332 printf("pmap_pvo_check: pvo %p: non kernel address "
1333 "on kernel unmanaged list\n", pvo);
1334 failed = 1;
1335 }
1336 pvo_head = &pmap_pvo_kunmanaged;
1337 }
1338 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1339 if (pvo0 == pvo)
1340 break;
1341 }
1342 if (pvo0 == NULL) {
1343 printf("pmap_pvo_check: pvo %p: not present "
1344 "on its vlist head %p\n", pvo, pvo_head);
1345 failed = 1;
1346 }
1347 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1348 printf("pmap_pvo_check: pvo %p: not present "
1349 "on its olist head\n", pvo);
1350 failed = 1;
1351 }
1352 pt = pmap_pvo_to_pte(pvo, -1);
1353 if (pt == NULL) {
1354 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1355 printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1356 "no PTE\n", pvo);
1357 failed = 1;
1358 }
1359 } else {
1360 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1361 (uintptr_t) pt >=
1362 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1363 printf("pmap_pvo_check: pvo %p: pte %p not in "
1364 "pteg table\n", pvo, pt);
1365 failed = 1;
1366 }
1367 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1368 printf("pmap_pvo_check: pvo %p: pte slot does not "
1369 "match PVO_PTEGIDX\n", pvo);
1370 failed = 1;
1371 }
1372 if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1373 printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1374 "%#lx/%#lx\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi);
1375 failed = 1;
1376 }
1377 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1378 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1379 printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1380 "%#lx/%#lx\n", pvo,
1381 pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN),
1382 pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN));
1383 failed = 1;
1384 }
1385 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1386 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx"
1387 " doesn't not match PVO's VA %#lx\n",
1388 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1389 failed = 1;
1390 }
1391 if (failed)
1392 pmap_pte_print(pt);
1393 }
1394 if (failed)
1395 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1396 pvo->pvo_pmap);
1397 }
1398 #endif /* DEBUG || PMAPCHECK */
1399
1400 /*
1401 * Enter a PVO for this mapping; returns 0 on success or ENOMEM.
1402 */
1403 int
1404 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1405 vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1406 {
1407 struct pvo_entry *pvo;
1408 struct pvo_tqhead *pvoh;
1409 register_t msr;
1410 int ptegidx;
1411 int i;
1412 int poolflags = PR_NOWAIT;
1413
1414 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1415 if (pmap_pvo_remove_depth > 0)
1416 panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1417 if (++pmap_pvo_enter_depth > 1)
1418 panic("pmap_pvo_enter: called recursively!");
1419 #endif
1420
1421 /*
1422 * Compute the PTE Group index.
1423 */
1424 va &= ~ADDR_POFF;
1425 ptegidx = va_to_pteg(pm, va);
1426
1427 msr = pmap_interrupts_off();
1428 /*
1429 * Remove any existing mapping for this page; a replacement
1430 * pvo entry will be allocated below.
1431 */
1432 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1433 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1434 #ifdef DEBUG
1435 if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1436 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1437 ~(PTE_REF|PTE_CHG)) == 0 &&
1438 va < VM_MIN_KERNEL_ADDRESS) {
1439 printf("pmap_pvo_enter: pvo %p: dup %#lx/%#lx\n",
1440 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1441 printf("pmap_pvo_enter: pte_hi=%#lx sr=%#lx\n",
1442 pvo->pvo_pte.pte_hi,
1443 pm->pm_sr[va >> ADDR_SR_SHFT]);
1444 pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1445 #ifdef DDBX
1446 Debugger();
1447 #endif
1448 }
1449 #endif
1450 PMAPCOUNT(mappings_replaced);
1451 pmap_pvo_remove(pvo, -1);
1452 break;
1453 }
1454 }
1455
1456 /*
1457 * Allocate a pvo entry for the new mapping.
1458 */
1459 pmap_interrupts_restore(msr);
1460 pvo = pool_get(pl, poolflags);
1461 msr = pmap_interrupts_off();
1462 if (pvo == NULL) {
1463 #if 0
1464 pvo = pmap_pvo_reclaim(pm);
1465 if (pvo == NULL) {
1466 #endif
1467 if ((flags & PMAP_CANFAIL) == 0)
1468 panic("pmap_pvo_enter: failed");
1469 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1470 pmap_pvo_enter_depth--;
1471 #endif
1472 pmap_interrupts_restore(msr);
1473 return ENOMEM;
1474 #if 0
1475 }
1476 #endif
1477 }
1478 pvo->pvo_vaddr = va;
1479 pvo->pvo_pmap = pm;
1480 pvo->pvo_vaddr &= ~ADDR_POFF;
1481 if (flags & VM_PROT_EXECUTE) {
1482 PMAPCOUNT(exec_mappings);
1483 pvo->pvo_vaddr |= PVO_EXECUTABLE;
1484 }
1485 if (flags & PMAP_WIRED)
1486 pvo->pvo_vaddr |= PVO_WIRED;
1487 if (pvo_head != &pmap_pvo_kunmanaged) {
1488 pvo->pvo_vaddr |= PVO_MANAGED;
1489 PMAPCOUNT(mappings);
1490 } else {
1491 PMAPCOUNT(kernel_mappings);
1492 }
1493 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1494
1495 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1496 if (pvo->pvo_vaddr & PVO_WIRED)
1497 pvo->pvo_pmap->pm_stats.wired_count++;
1498 pvo->pvo_pmap->pm_stats.resident_count++;
1499 #if defined(DEBUG)
1500 if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS)
1501 DPRINTFN(PVOENTER,
1502 ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n",
1503 pvo, pm, va, pa));
1504 #endif
1505
1506 /*
1507 * We hope this succeeds but it isn't required.
1508 */
1509 pvoh = &pmap_pvo_table[ptegidx];
1510 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1511 if (i >= 0) {
1512 PVO_PTEGIDX_SET(pvo, i);
1513 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1514 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1515 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1516 } else {
1517
1518 /*
1519 * Since we didn't have room for this entry (which makes it
1520 * an evicted entry), place it at the head of the list.
1521 */
1522 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1523 PMAPCOUNT(ptes_evicted);
1524 pm->pm_evictions++;
1525 }
1526 PMAP_PVO_CHECK(pvo); /* sanity check */
1527 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1528 pmap_pvo_enter_depth--;
1529 #endif
1530 pmap_interrupts_restore(msr);
1531 return 0;
1532 }
1533
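/*
 * Tear down a PVO: unset its PTE if it is resident, update the pmap
 * statistics, save the REF/CHG bits for managed pages, and return the
 * entry to its pool.
 */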
1534 void
1535 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1536 {
1537 volatile struct pte *pt;
1538 int ptegidx;
1539
1540 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1541 if (++pmap_pvo_remove_depth > 1)
1542 panic("pmap_pvo_remove: called recursively!");
1543 #endif
1544
1545 /*
1546 * If we haven't been supplied the pteidx, calculate it.
1547 */
1548 if (pteidx == -1) {
1549 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1550 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1551 } else {
1552 ptegidx = pteidx >> 3;
1553 if (pvo->pvo_pte.pte_hi & PTE_HID)
1554 ptegidx ^= pmap_pteg_mask;
1555 }
1556 PMAP_PVO_CHECK(pvo); /* sanity check */
1557
1558 /*
1559 * If there is an active pte entry, we need to deactivate it
1560 * (and save the ref & chg bits).
1561 */
1562 pt = pmap_pvo_to_pte(pvo, pteidx);
1563 if (pt != NULL) {
1564 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1565 PVO_PTEGIDX_CLR(pvo);
1566 PMAPCOUNT(ptes_removed);
1567 } else {
1568 KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1569 pvo->pvo_pmap->pm_evictions--;
1570 }
1571
1572 /*
1573 * Update our statistics
1574 */
1575 pvo->pvo_pmap->pm_stats.resident_count--;
1576 if (pvo->pvo_vaddr & PVO_WIRED)
1577 pvo->pvo_pmap->pm_stats.wired_count--;
1578
1579 /*
1580 * Save the REF/CHG bits into their cache if the page is managed.
1581 */
1582 if (pvo->pvo_vaddr & PVO_MANAGED) {
1583 register_t ptelo = pvo->pvo_pte.pte_lo;
1584 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1585
1586 if (pg != NULL) {
1587 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1588 }
1589 PMAPCOUNT(unmappings);
1590 } else {
1591 PMAPCOUNT(kernel_unmappings);
1592 }
1593
1594 /*
1595 * Remove the PVO from its lists and return it to the pool.
1596 */
1597 LIST_REMOVE(pvo, pvo_vlink);
1598 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1599 pool_put(pvo->pvo_vaddr & PVO_MANAGED
1600 ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1601 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1602 pmap_pvo_remove_depth--;
1603 #endif
1604 }
1605
1606 /*
1607 * Insert physical page at pa into the given pmap at virtual address va.
1608 */
1609 int
1610 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1611 {
1612 struct mem_region *mp;
1613 struct pvo_head *pvo_head;
1614 struct vm_page *pg;
1615 struct pool *pl;
1616 register_t pte_lo;
1617 int s;
1618 int error;
1619 u_int pvo_flags;
1620 u_int was_exec = 0;
1621
1622 if (__predict_false(!pmap_initialized)) {
1623 pvo_head = &pmap_pvo_kunmanaged;
1624 pl = &pmap_upvo_pool;
1625 pvo_flags = 0;
1626 pg = NULL;
1627 was_exec = PTE_EXEC;
1628 } else {
1629 pvo_head = pa_to_pvoh(pa, &pg);
1630 pl = &pmap_mpvo_pool;
1631 pvo_flags = PVO_MANAGED;
1632 }
1633
1634 DPRINTFN(ENTER,
1635 ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):",
1636 pm, va, pa, prot, flags));
1637
1638 /*
1639 * If this is a managed page, and it's the first reference to the
1640 * page, clear the execness of the page. Otherwise fetch the execness.
1641 */
1642 if (pg != NULL)
1643 was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1644
1645 DPRINTFN(ENTER, (" was_exec=%d", was_exec));
1646
1647 /*
1648 * Assume the page is cache inhibited and access is guarded unless
1649 * it's in our available memory array. If it is in the memory array,
1650 * assume it's in memory-coherent memory.
1651 */
1652 pte_lo = PTE_IG;
1653 if ((flags & PMAP_NC) == 0) {
1654 for (mp = mem; mp->size; mp++) {
1655 if (pa >= mp->start && pa < mp->start + mp->size) {
1656 pte_lo = PTE_M;
1657 break;
1658 }
1659 }
1660 }
1661
1662 if (prot & VM_PROT_WRITE)
1663 pte_lo |= PTE_BW;
1664 else
1665 pte_lo |= PTE_BR;
1666
1667 /*
1668 * If this was in response to a fault, "pre-fault" the PTE's
1669 * changed/referenced bit appropriately.
1670 */
1671 if (flags & VM_PROT_WRITE)
1672 pte_lo |= PTE_CHG;
1673 if (flags & (VM_PROT_READ|VM_PROT_WRITE))
1674 pte_lo |= PTE_REF;
1675
1676 #if 0
1677 if (pm == pmap_kernel()) {
1678 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ)
1679 printf("pmap_pvo_enter: Kernel RO va %#lx pa %#lx\n",
1680 va, pa);
1681 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_NONE)
1682 printf("pmap_pvo_enter: Kernel N/A va %#lx pa %#lx\n",
1683 va, pa);
1684 }
1685 #endif
1686
1687 /*
1688 * We need to know if this page can be executable
1689 */
1690 flags |= (prot & VM_PROT_EXECUTE);
1691
1692 /*
1693 * Record mapping for later back-translation and pte spilling.
1694 * This will overwrite any existing mapping.
1695 */
1696 s = splvm();
1697 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1698 splx(s);
1699
1700 /*
1701 * Flush the real page from the instruction cache if this page is
1702 * mapped executable and cacheable and has not been flushed since
1703 * the last time it was modified.
1704 */
1705 if (error == 0 &&
1706 (flags & VM_PROT_EXECUTE) &&
1707 (pte_lo & PTE_I) == 0 &&
1708 was_exec == 0) {
1709 DPRINTFN(ENTER, (" syncicache"));
1710 PMAPCOUNT(exec_synced);
1711 pmap_syncicache(pa, NBPG);
1712 if (pg != NULL) {
1713 pmap_attr_save(pg, PTE_EXEC);
1714 PMAPCOUNT(exec_cached);
1715 #if defined(DEBUG) || defined(PMAPDEBUG)
1716 if (pmapdebug & PMAPDEBUG_ENTER)
1717 printf(" marked-as-exec");
1718 else if (pmapdebug & PMAPDEBUG_EXEC)
1719 printf("[pmap_enter: %#lx: marked-as-exec]\n",
1720 pg->phys_addr);
1721
1722 #endif
1723 }
1724 }
1725
1726 DPRINTFN(ENTER, (": error=%d\n", error));
1727
1728 return error;
1729 }
1730
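/*
 * Enter an unmanaged, wired kernel mapping.  These go on the
 * pmap_pvo_kunmanaged list and bypass the managed-page bookkeeping.
 */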
1731 void
1732 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1733 {
1734 struct mem_region *mp;
1735 register_t pte_lo;
1736 register_t msr;
1737 int error;
1738 int s;
1739
1740 if (va < VM_MIN_KERNEL_ADDRESS)
1741 panic("pmap_kenter_pa: attempt to enter "
1742 "non-kernel address %#lx!", va);
1743
1744 DPRINTFN(KENTER,
1745 ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
1746
1747 /*
1748 * Assume the page is cache inhibited and access is guarded unless
1749 * it's in our available memory array. If it is in the memory array,
1750 * assume it's in memory-coherent memory.
1751 */
1752 pte_lo = PTE_IG;
1753 for (mp = mem; mp->size; mp++) {
1754 if (pa >= mp->start && pa < mp->start + mp->size) {
1755 pte_lo = PTE_M;
1756 break;
1757 }
1758 }
1759
1760 if (prot & VM_PROT_WRITE)
1761 pte_lo |= PTE_BW;
1762 else
1763 pte_lo |= PTE_BR;
1764
1765 /*
1766 * We don't care about REF/CHG on PVOs on the unmanaged list.
1767 */
1768 s = splvm();
1769 msr = pmap_interrupts_off();
1770 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
1771 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
1772 pmap_interrupts_restore(msr);
1773 splx(s);
1774
1775 if (error != 0)
1776 panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
1777 va, pa, error);
1778 }
1779
1780 void
1781 pmap_kremove(vaddr_t va, vsize_t len)
1782 {
1783 if (va < VM_MIN_KERNEL_ADDRESS)
1784 panic("pmap_kremove: attempt to remove "
1785 "non-kernel address %#lx!", va);
1786
1787 DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len));
1788 pmap_remove(pmap_kernel(), va, va + len);
1789 }
1790
1791 /*
1792 * Remove the given range of mapping entries.
1793 */
1794 void
1795 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
1796 {
1797 struct pvo_entry *pvo;
1798 register_t msr;
1799 int pteidx;
1800 int s;
1801
1802 for (; va < endva; va += PAGE_SIZE) {
1803 s = splvm();
1804 msr = pmap_interrupts_off();
1805 pvo = pmap_pvo_find_va(pm, va, &pteidx);
1806 if (pvo != NULL) {
1807 pmap_pvo_remove(pvo, pteidx);
1808 }
1809 pmap_interrupts_restore(msr);
1810 splx(s);
1811 }
1812 }
1813
1814 /*
1815 * Get the physical page address for the given pmap/virtual address.
1816 */
1817 boolean_t
1818 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
1819 {
1820 struct pvo_entry *pvo;
1821 register_t msr;
1822 int s;
1823
1824 s = splvm();
1825 msr = pmap_interrupts_off();
1826 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1827 if (pvo != NULL) {
1828 PMAP_PVO_CHECK(pvo); /* sanity check */
1829 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1830 }
1831 pmap_interrupts_restore(msr);
1832 splx(s);
1833 return pvo != NULL;
1834 }
1835
1836 /*
1837 * Lower the protection on the specified range of this pmap.
1838 *
1839 * There are only two cases: either the protection is going to 0,
1840 * or it is going to read-only.
1841 */
1842 void
1843 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
1844 {
1845 struct pvo_entry *pvo;
1846 volatile struct pte *pt;
1847 register_t msr;
1848 int s;
1849 int pteidx;
1850
1851 /*
1852 * Since this routine only downgrades protection, we should
1853 * always be called without WRITE permission.
1854 */
1855 KASSERT((prot & VM_PROT_WRITE) == 0);
1856
1857 /*
1858 * If there is no protection, this is equivalent to
1859 * removing the range from the pmap.
1860 */
1861 if ((prot & VM_PROT_READ) == 0) {
1862 pmap_remove(pm, va, endva);
1863 return;
1864 }
1865
1866 s = splvm();
1867 msr = pmap_interrupts_off();
1868
1869 for (; va < endva; va += NBPG) {
1870 pvo = pmap_pvo_find_va(pm, va, &pteidx);
1871 if (pvo == NULL)
1872 continue;
1873 PMAP_PVO_CHECK(pvo); /* sanity check */
1874
1875 /*
1876 * Revoke executable if asked to do so.
1877 */
1878 if ((prot & VM_PROT_EXECUTE) == 0)
1879 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1880
1881 #if 0
1882 /*
1883 * If the page is already read-only, no change
1884 * needs to be made.
1885 */
1886 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
1887 continue;
1888 #endif
1889 /*
1890 * Grab the PTE pointer before we diddle with
1891 * the cached PTE copy.
1892 */
1893 pt = pmap_pvo_to_pte(pvo, pteidx);
1894 /*
1895 * Change the protection of the page.
1896 */
1897 pvo->pvo_pte.pte_lo &= ~PTE_PP;
1898 pvo->pvo_pte.pte_lo |= PTE_BR;
1899
1900 /*
1901 * If the PVO is in the page table, update
1902 * that PTE as well.
1903 */
1904 if (pt != NULL) {
1905 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1906 PMAPCOUNT(ptes_changed);
1907 }
1908
1909 PMAP_PVO_CHECK(pvo); /* sanity check */
1910 }
1911
1912 pmap_interrupts_restore(msr);
1913 splx(s);
1914 }
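/*
 * Illustrative sketch (not compiled, added for exposition; the
 * example_* helper is hypothetical): downgrading a range of mappings
 * to read-only.  Passing VM_PROT_NONE instead would remove the
 * mappings entirely via pmap_remove(), as the code above shows.
 */
#if 0
static void
example_write_protect(pmap_t pm, vaddr_t start, vaddr_t end)
{
	pmap_protect(pm, start, end, VM_PROT_READ);
}
#endif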
1915
1916 void
1917 pmap_unwire(pmap_t pm, vaddr_t va)
1918 {
1919 struct pvo_entry *pvo;
1920 register_t msr;
1921 int s;
1922
1923 s = splvm();
1924 msr = pmap_interrupts_off();
1925
1926 pvo = pmap_pvo_find_va(pm, va, NULL);
1927 if (pvo != NULL) {
1928 if (pvo->pvo_vaddr & PVO_WIRED) {
1929 pvo->pvo_vaddr &= ~PVO_WIRED;
1930 pm->pm_stats.wired_count--;
1931 }
1932 PMAP_PVO_CHECK(pvo); /* sanity check */
1933 }
1934
1935 pmap_interrupts_restore(msr);
1936 splx(s);
1937 }
1938
1939 /*
1940 * Lower the protection on the specified physical page.
1941 *
1942 * There are only two cases: either the protection is going to 0,
1943 * or it is going to read-only.
1944 */
1945 void
1946 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1947 {
1948 struct pvo_head *pvo_head;
1949 struct pvo_entry *pvo, *next_pvo;
1950 volatile struct pte *pt;
1951 register_t msr;
1952 int s;
1953
1954 /*
1955 * Since this routine only downgrades protection, if the
1956 * maximal protection is desired, there isn't any change
1957 * to be made.
1958 */
1959 KASSERT((prot & VM_PROT_WRITE) == 0);
1960 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
1961 return;
1962
1963 s = splvm();
1964 msr = pmap_interrupts_off();
1965
1966 /*
1967 * When UVM reuses a page, it does a pmap_page_protect with
1968 * VM_PROT_NONE. At that point, we can clear the exec flag
1969 * since we know the page will have different contents.
1970 */
1971 if ((prot & VM_PROT_READ) == 0) {
1972 DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
1973 pg->phys_addr));
1974 if (pmap_attr_fetch(pg) & PTE_EXEC) {
1975 PMAPCOUNT(exec_uncached_page_protect);
1976 pmap_attr_clear(pg, PTE_EXEC);
1977 }
1978 }
1979
1980 pvo_head = vm_page_to_pvoh(pg);
1981 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1982 next_pvo = LIST_NEXT(pvo, pvo_vlink);
1983 PMAP_PVO_CHECK(pvo); /* sanity check */
1984
1985 /*
1986 * Downgrading to no mapping at all, we just remove the entry.
1987 */
1988 if ((prot & VM_PROT_READ) == 0) {
1989 pmap_pvo_remove(pvo, -1);
1990 continue;
1991 }
1992
1993 /*
1994 * If EXEC permission is being revoked, just clear the
1995 * flag in the PVO.
1996 */
1997 if ((prot & VM_PROT_EXECUTE) == 0)
1998 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1999
2000 /*
2001 * If this entry is already RO, don't diddle with the
2002 * page table.
2003 */
2004 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2005 PMAP_PVO_CHECK(pvo);
2006 continue;
2007 }
2008
2009 /*
2010 * Grab the PTE before we diddle the bits so
2011 * pvo_to_pte can verify the pte contents are as
2012 * expected.
2013 */
2014 pt = pmap_pvo_to_pte(pvo, -1);
2015 pvo->pvo_pte.pte_lo &= ~PTE_PP;
2016 pvo->pvo_pte.pte_lo |= PTE_BR;
2017 if (pt != NULL) {
2018 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2019 PMAPCOUNT(ptes_changed);
2020 }
2021 PMAP_PVO_CHECK(pvo); /* sanity check */
2022 }
2023
2024 pmap_interrupts_restore(msr);
2025 splx(s);
2026 }
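/*
 * Illustrative sketch (not compiled, added for exposition; the
 * example_* helper is hypothetical): how the VM system revokes access
 * to a physical page before reusing it.  VM_PROT_NONE removes every
 * mapping of the page; VM_PROT_READ merely downgrades them all.
 */
#if 0
static void
example_revoke_all_mappings(struct vm_page *pg)
{
	pmap_page_protect(pg, VM_PROT_NONE);
}
#endif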
2027
2028 /*
2029 * Activate the address space for the specified process. If the process
2030 * is the current process, load the new MMU context.
2031 */
2032 void
2033 pmap_activate(struct lwp *l)
2034 {
2035 struct pcb *pcb = &l->l_addr->u_pcb;
2036 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2037
2038 DPRINTFN(ACTIVATE,
2039 ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
2040
2041 /*
2042 * XXX Normally performed in cpu_fork().
2043 */
2044 if (pcb->pcb_pm != pmap) {
2045 pcb->pcb_pm = pmap;
2046 pcb->pcb_pmreal = pmap;
2047 }
2048
2049 /*
2050 * In theory, the SR registers need only be valid on return
2051 * to user space, so we could wait and load them there.
2052 */
2053 if (l == curlwp) {
2054 /* Store pointer to new current pmap. */
2055 curpm = pmap;
2056 }
2057 }
2058
2059 /*
2060 * Deactivate the specified process's address space.
2061 */
2062 void
2063 pmap_deactivate(struct lwp *l)
2064 {
2065 }
2066
2067 boolean_t
2068 pmap_query_bit(struct vm_page *pg, int ptebit)
2069 {
2070 struct pvo_entry *pvo;
2071 volatile struct pte *pt;
2072 register_t msr;
2073 int s;
2074
2075 if (pmap_attr_fetch(pg) & ptebit)
2076 return TRUE;
2077 s = splvm();
2078 msr = pmap_interrupts_off();
2079 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2080 PMAP_PVO_CHECK(pvo); /* sanity check */
2081 /*
2082 * See if we saved the bit off. If so, cache it and return
2083 * success.
2084 */
2085 if (pvo->pvo_pte.pte_lo & ptebit) {
2086 pmap_attr_save(pg, ptebit);
2087 PMAP_PVO_CHECK(pvo); /* sanity check */
2088 pmap_interrupts_restore(msr);
2089 splx(s);
2090 return TRUE;
2091 }
2092 }
2093 /*
2094 * No luck. Now go through the hard part of looking at the PTEs
2095 * themselves. Sync so any pending REF/CHG bits are flushed
2096 * to the PTEs.
2097 */
2098 SYNC();
2099 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2100 PMAP_PVO_CHECK(pvo); /* sanity check */
2101 /*
2102 * See if this pvo has a valid PTE. If so, fetch the
2103 * REF/CHG bits from the valid PTE. If the appropriate
2104 * ptebit is set, cache it and return success.
2105 */
2106 pt = pmap_pvo_to_pte(pvo, -1);
2107 if (pt != NULL) {
2108 pmap_pte_synch(pt, &pvo->pvo_pte);
2109 if (pvo->pvo_pte.pte_lo & ptebit) {
2110 pmap_attr_save(pg, ptebit);
2111 PMAP_PVO_CHECK(pvo); /* sanity check */
2112 pmap_interrupts_restore(msr);
2113 splx(s);
2114 return TRUE;
2115 }
2116 }
2117 }
2118 pmap_interrupts_restore(msr);
2119 splx(s);
2120 return FALSE;
2121 }
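/*
 * Illustrative sketch (not compiled, added for exposition; the
 * example_* helper is hypothetical): the machine-independent
 * referenced/modified queries are expected to reduce to ptebit
 * queries like this one (PTE_REF for referenced, PTE_CHG for
 * modified).
 */
#if 0
static boolean_t
example_page_is_referenced(struct vm_page *pg)
{
	return pmap_query_bit(pg, PTE_REF);
}
#endif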
2122
2123 boolean_t
2124 pmap_clear_bit(struct vm_page *pg, int ptebit)
2125 {
2126 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2127 struct pvo_entry *pvo;
2128 volatile struct pte *pt;
2129 register_t msr;
2130 int rv = 0;
2131 int s;
2132
2133 s = splvm();
2134 msr = pmap_interrupts_off();
2135
2136 /*
2137 * Fetch the cached value.
2138 */
2139 rv |= pmap_attr_fetch(pg);
2140
2141 /*
2142 * Clear the cached value.
2143 */
2144 pmap_attr_clear(pg, ptebit);
2145
2146 /*
2147 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2148 * can reset the right ones). Note that since the pvo entries and
2149 * list heads are accessed via BAT0 and are never placed in the
2150 * page table, we don't have to worry about further accesses setting
2151 * the REF/CHG bits.
2152 */
2153 SYNC();
2154
2155 /*
2156 * For each pvo entry, clear the pvo's ptebit. If this pvo has
2157 * a valid PTE, also clear the ptebit from the valid PTE.
2158 */
2159 LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2160 PMAP_PVO_CHECK(pvo); /* sanity check */
2161 pt = pmap_pvo_to_pte(pvo, -1);
2162 if (pt != NULL) {
2163 /*
2164 * Only sync the PTE if the bit we are looking
2165 * for is not already set.
2166 */
2167 if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2168 pmap_pte_synch(pt, &pvo->pvo_pte);
2169 /*
2170 * If the bit we are looking for was already set,
2171 * clear that bit in the pte.
2172 */
2173 if (pvo->pvo_pte.pte_lo & ptebit)
2174 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2175 }
2176 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2177 pvo->pvo_pte.pte_lo &= ~ptebit;
2178 PMAP_PVO_CHECK(pvo); /* sanity check */
2179 }
2180 pmap_interrupts_restore(msr);
2181 splx(s);
2182 /*
2183 * If we are clearing the modify bit and this page was marked EXEC
2184 * and the user of the page thinks the page was modified, then we
2185 * need to clean it from the icache if it's mapped or clear the EXEC
2186 * bit if it's not mapped. The page itself might not have the CHG
2187 * bit set if the modification was done via DMA to the page.
2188 */
2189 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2190 if (LIST_EMPTY(pvoh)) {
2191 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
2192 pg->phys_addr));
2193 pmap_attr_clear(pg, PTE_EXEC);
2194 PMAPCOUNT(exec_uncached_clear_modify);
2195 } else {
2196 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
2197 pg->phys_addr));
2198 pmap_syncicache(pg->phys_addr, NBPG);
2199 PMAPCOUNT(exec_synced_clear_modify);
2200 }
2201 }
2202 return (rv & ptebit) != 0;
2203 }
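/*
 * Illustrative sketch (not compiled, added for exposition; the
 * example_* helper is hypothetical): clearing the modify (CHG) bit
 * for a page.  The return value reports whether any mapping of the
 * page had the bit set.
 */
#if 0
static boolean_t
example_clear_modify(struct vm_page *pg)
{
	return pmap_clear_bit(pg, PTE_CHG);
}
#endif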
2204
2205 void
2206 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2207 {
2208 struct pvo_entry *pvo;
2209 size_t offset = va & ADDR_POFF;
2210 int s;
2211
2212 s = splvm();
2213 while (len > 0) {
2214 size_t seglen = NBPG - offset;
2215 if (seglen > len)
2216 seglen = len;
2217 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2218 if (pvo != NULL && PVO_ISEXECUTABLE(pvo)) {
2219 pmap_syncicache(
2220 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2221 PMAP_PVO_CHECK(pvo);
2222 }
2223 va += seglen;
2224 len -= seglen;
2225 offset = 0;
2226 }
2227 splx(s);
2228 }
2229
2230 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2231 void
2232 pmap_pte_print(volatile struct pte *pt)
2233 {
2234 printf("PTE %p: ", pt);
2235 /* High word: */
2236 printf("0x%08lx: [", pt->pte_hi);
2237 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2238 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2239 printf("0x%06lx 0x%02lx",
2240 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2241 pt->pte_hi & PTE_API);
2242 printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
2243 /* Low word: */
2244 printf(" 0x%08lx: [", pt->pte_lo);
2245 printf("0x%05lx... ", pt->pte_lo >> 12);
2246 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2247 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2248 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2249 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2250 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2251 printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2252 switch (pt->pte_lo & PTE_PP) {
2253 case PTE_BR: printf("br]\n"); break;
2254 case PTE_BW: printf("bw]\n"); break;
2255 case PTE_SO: printf("so]\n"); break;
2256 case PTE_SW: printf("sw]\n"); break;
2257 }
2258 }
2259 #endif
2260
2261 #if defined(DDB)
2262 void
2263 pmap_pteg_check(void)
2264 {
2265 volatile struct pte *pt;
2266 int i;
2267 int ptegidx;
2268 u_int p_valid = 0;
2269 u_int s_valid = 0;
2270 u_int invalid = 0;
2271
2272 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2273 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2274 if (pt->pte_hi & PTE_VALID) {
2275 if (pt->pte_hi & PTE_HID)
2276 s_valid++;
2277 else
2278 p_valid++;
2279 } else
2280 invalid++;
2281 }
2282 }
2283 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2284 p_valid, p_valid, s_valid, s_valid,
2285 invalid, invalid);
2286 }
2287
2288 void
2289 pmap_print_mmuregs(void)
2290 {
2291 int i;
2292 u_int cpuvers;
2293 vaddr_t addr;
2294 register_t soft_sr[16];
2295 struct bat soft_ibat[4];
2296 struct bat soft_dbat[4];
2297 register_t sdr1;
2298
2299 cpuvers = MFPVR() >> 16;
2300
2301 __asm __volatile ("mfsdr1 %0" : "=r"(sdr1));
2302 for (addr = 0, i = 0; i < 16; i++) {
2303 soft_sr[i] = MFSRIN(addr);
2304 addr += (1 << ADDR_SR_SHFT);
2305 }
2306
2307 /* read iBAT (601: uBAT) registers */
2308 __asm __volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2309 __asm __volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2310 __asm __volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2311 __asm __volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2312 __asm __volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2313 __asm __volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2314 __asm __volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2315 __asm __volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2316
2317
2318 if (cpuvers != MPC601) {
2319 /* read dBAT registers */
2320 __asm __volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2321 __asm __volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2322 __asm __volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2323 __asm __volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2324 __asm __volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2325 __asm __volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2326 __asm __volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2327 __asm __volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2328 }
2329
2330 printf("SDR1:\t%#lx\n", sdr1);
2331 printf("SR[]:\t");
2332 addr = 0;
2333 for (i=0; i<4; i++)
2334 printf("0x%08lx, ", soft_sr[i]);
2335 printf("\n\t");
2336 for ( ; i<8; i++)
2337 printf("0x%08lx, ", soft_sr[i]);
2338 printf("\n\t");
2339 for ( ; i<12; i++)
2340 printf("0x%08lx, ", soft_sr[i]);
2341 printf("\n\t");
2342 for ( ; i<16; i++)
2343 printf("0x%08lx, ", soft_sr[i]);
2344 printf("\n");
2345
2346 printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2347 for (i=0; i<4; i++) {
2348 printf("0x%08lx 0x%08lx, ",
2349 soft_ibat[i].batu, soft_ibat[i].batl);
2350 if (i == 1)
2351 printf("\n\t");
2352 }
2353 if (cpuvers != MPC601) {
2354 printf("\ndBAT[]:\t");
2355 for (i=0; i<4; i++) {
2356 printf("0x%08lx 0x%08lx, ",
2357 soft_dbat[i].batu, soft_dbat[i].batl);
2358 if (i == 1)
2359 printf("\n\t");
2360 }
2361 }
2362 printf("\n");
2363 }
2364
2365 void
2366 pmap_print_pte(pmap_t pm, vaddr_t va)
2367 {
2368 struct pvo_entry *pvo;
2369 volatile struct pte *pt;
2370 int pteidx;
2371
2372 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2373 if (pvo != NULL) {
2374 pt = pmap_pvo_to_pte(pvo, pteidx);
2375 if (pt != NULL) {
2376 printf("VA %#lx -> %p -> %s %#lx, %#lx\n",
2377 va, pt,
2378 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2379 pt->pte_hi, pt->pte_lo);
2380 } else {
2381 printf("No valid PTE found\n");
2382 }
2383 } else {
2384 printf("Address not in pmap\n");
2385 }
2386 }
2387
2388 void
2389 pmap_pteg_dist(void)
2390 {
2391 struct pvo_entry *pvo;
2392 int ptegidx;
2393 int depth;
2394 int max_depth = 0;
2395 unsigned int depths[64];
2396
2397 memset(depths, 0, sizeof(depths));
2398 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2399 depth = 0;
2400 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2401 depth++;
2402 }
2403 if (depth > max_depth)
2404 max_depth = depth;
2405 if (depth > 63)
2406 depth = 63;
2407 depths[depth]++;
2408 }
2409
2410 for (depth = 0; depth < 64; depth++) {
2411 printf(" [%2d]: %8u", depth, depths[depth]);
2412 if ((depth & 3) == 3)
2413 printf("\n");
2414 if (depth == max_depth)
2415 break;
2416 }
2417 if ((depth & 3) != 3)
2418 printf("\n");
2419 printf("Max depth found was %d\n", max_depth);
2420 }
2421 #endif /* DDB */
2422
2423 #if defined(PMAPCHECK) || defined(DEBUG)
2424 void
2425 pmap_pvo_verify(void)
2426 {
2427 int ptegidx;
2428 int s;
2429
2430 s = splvm();
2431 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2432 struct pvo_entry *pvo;
2433 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2434 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2435 panic("pmap_pvo_verify: invalid pvo %p "
2436 "on list %#x", pvo, ptegidx);
2437 pmap_pvo_check(pvo);
2438 }
2439 }
2440 splx(s);
2441 }
2442 #endif /* PMAPCHECK || DEBUG */
2443
2444
2445 void *
2446 pmap_pool_ualloc(struct pool *pp, int flags)
2447 {
2448 struct pvo_page *pvop;
2449
2450 pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2451 if (pvop != NULL) {
2452 pmap_upvop_free--;
2453 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
2454 return pvop;
2455 }
2456 if (uvm.page_init_done != TRUE) {
2457 return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2458 }
2459 return pmap_pool_malloc(pp, flags);
2460 }
2461
2462 void *
2463 pmap_pool_malloc(struct pool *pp, int flags)
2464 {
2465 struct pvo_page *pvop;
2466 struct vm_page *pg;
2467
2468 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2469 if (pvop != NULL) {
2470 pmap_mpvop_free--;
2471 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
2472 return pvop;
2473 }
2474 again:
2475 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2476 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2477 if (__predict_false(pg == NULL)) {
2478 if (flags & PR_WAITOK) {
2479 uvm_wait("plpg");
2480 goto again;
2481 } else {
2482 return (0);
2483 }
2484 }
2485 return (void *) VM_PAGE_TO_PHYS(pg);
2486 }
2487
2488 void
2489 pmap_pool_ufree(struct pool *pp, void *va)
2490 {
2491 struct pvo_page *pvop;
2492 #if 0
2493 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2494 pmap_pool_mfree(va, size, tag);
2495 return;
2496 }
2497 #endif
2498 pvop = va;
2499 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2500 pmap_upvop_free++;
2501 if (pmap_upvop_free > pmap_upvop_maxfree)
2502 pmap_upvop_maxfree = pmap_upvop_free;
2503 }
2504
2505 void
2506 pmap_pool_mfree(struct pool *pp, void *va)
2507 {
2508 struct pvo_page *pvop;
2509
2510 pvop = va;
2511 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2512 pmap_mpvop_free++;
2513 if (pmap_mpvop_free > pmap_mpvop_maxfree)
2514 pmap_mpvop_maxfree = pmap_mpvop_free;
2515 #if 0
2516 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2517 #endif
2518 }
2519
2520 /*
2521 * This routine is used during bootstrapping to steal to-be-managed memory
2522 * (which will then be unmanaged). We use it to grab memory from the first
2523 * 256MB for our pmap needs and from above 256MB for other stuff.
2524 */
2525 vaddr_t
2526 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2527 {
2528 vsize_t size;
2529 vaddr_t va;
2530 paddr_t pa = 0;
2531 int npgs, bank;
2532 struct vm_physseg *ps;
2533
2534 if (uvm.page_init_done == TRUE)
2535 panic("pmap_steal_memory: called _after_ bootstrap");
2536
2537 *vstartp = VM_MIN_KERNEL_ADDRESS;
2538 *vendp = VM_MAX_KERNEL_ADDRESS;
2539
2540 size = round_page(vsize);
2541 npgs = atop(size);
2542
2543 /*
2544 * PA 0 will never be among those given to UVM so we can use it
2545 * to indicate we couldn't steal any memory.
2546 */
2547 for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
2548 if (ps->free_list == VM_FREELIST_FIRST256 &&
2549 ps->avail_end - ps->avail_start >= npgs) {
2550 pa = ptoa(ps->avail_start);
2551 break;
2552 }
2553 }
2554
2555 if (pa == 0)
2556 panic("pmap_steal_memory: no appropriate memory to steal!");
2557
2558 ps->avail_start += npgs;
2559 ps->start += npgs;
2560
2561 /*
2562 * If we've used up all the pages in the segment, remove it and
2563 * compact the list.
2564 */
2565 if (ps->avail_start == ps->end) {
2566 /*
2567 * If this was the last one, then a very bad thing has occurred
2568 */
2569 if (--vm_nphysseg == 0)
2570 panic("pmap_steal_memory: out of memory!");
2571
2572 printf("pmap_steal_memory: consumed bank %d\n", bank);
2573 for (; bank < vm_nphysseg; bank++, ps++) {
2574 ps[0] = ps[1];
2575 }
2576 }
2577
2578 va = (vaddr_t) pa;
2579 memset((caddr_t) va, 0, size);
2580 pmap_pages_stolen += npgs;
2581 #ifdef DEBUG
2582 if (pmapdebug && npgs > 1) {
2583 u_int cnt = 0;
2584 for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
2585 cnt += ps->avail_end - ps->avail_start;
2586 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2587 npgs, pmap_pages_stolen, cnt);
2588 }
2589 #endif
2590
2591 return va;
2592 }
2593
2594 /*
2595 * Find a chunk of memory with the right size and alignment.
2596 */
2597 void *
2598 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2599 {
2600 struct mem_region *mp;
2601 paddr_t s, e;
2602 int i, j;
2603
2604 size = round_page(size);
2605
2606 DPRINTFN(BOOT,
2607 ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
2608 size, alignment, at_end));
2609
2610 if (alignment < NBPG || (alignment & (alignment-1)) != 0)
2611 panic("pmap_boot_find_memory: invalid alignment %lx",
2612 alignment);
2613
2614 if (at_end) {
2615 if (alignment != NBPG)
2616 panic("pmap_boot_find_memory: invalid ending "
2617 "alignment %lx", alignment);
2618
2619 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
2620 s = mp->start + mp->size - size;
2621 if (s >= mp->start && mp->size >= size) {
2622 DPRINTFN(BOOT,(": %lx\n", s));
2623 DPRINTFN(BOOT,
2624 ("pmap_boot_find_memory: b-avail[%d] start "
2625 "0x%lx size 0x%lx\n", mp - avail,
2626 mp->start, mp->size));
2627 mp->size -= size;
2628 DPRINTFN(BOOT,
2629 ("pmap_boot_find_memory: a-avail[%d] start "
2630 "0x%lx size 0x%lx\n", mp - avail,
2631 mp->start, mp->size));
2632 return (void *) s;
2633 }
2634 }
2635 panic("pmap_boot_find_memory: no available memory");
2636 }
2637
2638 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2639 s = (mp->start + alignment - 1) & ~(alignment-1);
2640 e = s + size;
2641
2642 /*
2643 * Is the calculated block entirely within this region?
2644 */
2645 if (s < mp->start || e > mp->start + mp->size)
2646 continue;
2647
2648 DPRINTFN(BOOT,(": %lx\n", s));
2649 if (s == mp->start) {
2650 /*
2651 * If the block starts at the beginning of the region,
2652 * adjust both the start and the size. (The region may
2653 * now be zero in length.)
2654 */
2655 DPRINTFN(BOOT,
2656 ("pmap_boot_find_memory: b-avail[%d] start "
2657 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2658 mp->start += size;
2659 mp->size -= size;
2660 DPRINTFN(BOOT,
2661 ("pmap_boot_find_memory: a-avail[%d] start "
2662 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2663 } else if (e == mp->start + mp->size) {
2664 /*
2665 * If the block ends at the end of the region,
2666 * adjust only the size.
2667 */
2668 DPRINTFN(BOOT,
2669 ("pmap_boot_find_memory: b-avail[%d] start "
2670 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2671 mp->size -= size;
2672 DPRINTFN(BOOT,
2673 ("pmap_boot_find_memory: a-avail[%d] start "
2674 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2675 } else {
2676 /*
2677 * Block is in the middle of the region, so we
2678 * have to split it in two.
2679 */
2680 for (j = avail_cnt; j > i + 1; j--) {
2681 avail[j] = avail[j-1];
2682 }
2683 DPRINTFN(BOOT,
2684 ("pmap_boot_find_memory: b-avail[%d] start "
2685 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2686 mp[1].start = e;
2687 mp[1].size = mp[0].start + mp[0].size - e;
2688 mp[0].size = s - mp[0].start;
2689 avail_cnt++;
2690 for (; i < avail_cnt; i++) {
2691 DPRINTFN(BOOT,
2692 ("pmap_boot_find_memory: a-avail[%d] "
2693 "start 0x%lx size 0x%lx\n", i,
2694 avail[i].start, avail[i].size));
2695 }
2696 }
2697 return (void *) s;
2698 }
2699 panic("pmap_boot_find_memory: not enough memory for "
2700 "%lx/%lx allocation?", size, alignment);
2701 }
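/*
 * Worked example (illustrative, added for exposition): the alignment
 * round-up used in the loop above.  With mp->start = 0x3456 and
 * alignment = 0x1000 (one page),
 *
 *	s = (0x3456 + 0x1000 - 1) & ~(0x1000 - 1) = 0x4000
 *
 * i.e. the candidate block is rounded up to the next alignment
 * boundary before checking that it still fits within the region.
 */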
2702
2703 /*
2704 * This is not part of the defined PMAP interface and is specific to the
2705 * PowerPC architecture. This is called during initppc, before the system
2706 * is really initialized.
2707 */
2708 void
2709 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
2710 {
2711 struct mem_region *mp, tmp;
2712 paddr_t s, e;
2713 psize_t size;
2714 int i, j;
2715
2716 /*
2717 * Get memory.
2718 */
2719 mem_regions(&mem, &avail);
2720 #if defined(DEBUG)
2721 if (pmapdebug & PMAPDEBUG_BOOT) {
2722 printf("pmap_bootstrap: memory configuration:\n");
2723 for (mp = mem; mp->size; mp++) {
2724 printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
2725 mp->start, mp->size);
2726 }
2727 for (mp = avail; mp->size; mp++) {
2728 printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
2729 mp->start, mp->size);
2730 }
2731 }
2732 #endif
2733
2734 /*
2735 * Find out how much physical memory we have and in how many chunks.
2736 */
2737 for (mem_cnt = 0, mp = mem; mp->size; mp++) {
2738 if (mp->start >= pmap_memlimit)
2739 continue;
2740 if (mp->start + mp->size > pmap_memlimit) {
2741 size = pmap_memlimit - mp->start;
2742 physmem += btoc(size);
2743 } else {
2744 physmem += btoc(mp->size);
2745 }
2746 mem_cnt++;
2747 }
2748
2749 /*
2750 * Count the number of available entries.
2751 */
2752 for (avail_cnt = 0, mp = avail; mp->size; mp++)
2753 avail_cnt++;
2754
2755 /*
2756 * Page align all regions.
2757 */
2758 kernelstart = trunc_page(kernelstart);
2759 kernelend = round_page(kernelend);
2760 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2761 s = round_page(mp->start);
2762 mp->size -= (s - mp->start);
2763 mp->size = trunc_page(mp->size);
2764 mp->start = s;
2765 e = mp->start + mp->size;
2766
2767 DPRINTFN(BOOT,
2768 ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
2769 i, mp->start, mp->size));
2770
2771 /*
2772 * Don't allow the end to run beyond our artificial limit
2773 */
2774 if (e > pmap_memlimit)
2775 e = pmap_memlimit;
2776
2777 /*
2778 * Is this region empty or strange? If so, skip it.
2779 */
2780 if (e <= s) {
2781 mp->start = 0;
2782 mp->size = 0;
2783 continue;
2784 }
2785
2786 /*
2787 * Does this overlap the beginning of the kernel?
2788 * Does it extend past the end of the kernel?
2789 */
2790 else if (s < kernelstart && e > kernelstart) {
2791 if (e > kernelend) {
2792 avail[avail_cnt].start = kernelend;
2793 avail[avail_cnt].size = e - kernelend;
2794 avail_cnt++;
2795 }
2796 mp->size = kernelstart - s;
2797 }
2798 /*
2799 * Check whether this region overlaps the end of the kernel.
2800 */
2801 else if (s < kernelend && e > kernelend) {
2802 mp->start = kernelend;
2803 mp->size = e - kernelend;
2804 }
2805 /*
2806 * Check whether this region is completely inside the kernel.
2807 * Nuke it if it is.
2808 */
2809 else if (s >= kernelstart && e <= kernelend) {
2810 mp->start = 0;
2811 mp->size = 0;
2812 }
2813 /*
2814 * If the user imposed a memory limit, enforce it.
2815 */
2816 else if (s >= pmap_memlimit) {
2817 mp->start = -NBPG; /* so we can tell why it was dropped */
2818 mp->size = 0;
2819 }
2820 else {
2821 mp->start = s;
2822 mp->size = e - s;
2823 }
2824 DPRINTFN(BOOT,
2825 ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
2826 i, mp->start, mp->size));
2827 }
2828
2829 /*
2830 * Move (and uncount) all the null regions to the end.
2831 */
2832 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2833 if (mp->size == 0) {
2834 tmp = avail[i];
2835 avail[i] = avail[--avail_cnt];
2836 avail[avail_cnt] = tmp;
2837 }
2838 }
2839
2840 /*
2841 * (Bubble)sort them into ascending order.
2842 */
2843 for (i = 0; i < avail_cnt; i++) {
2844 for (j = i + 1; j < avail_cnt; j++) {
2845 if (avail[i].start > avail[j].start) {
2846 tmp = avail[i];
2847 avail[i] = avail[j];
2848 avail[j] = tmp;
2849 }
2850 }
2851 }
2852
2853 /*
2854 * Make sure they don't overlap.
2855 */
2856 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
2857 if (mp[0].start + mp[0].size > mp[1].start) {
2858 mp[0].size = mp[1].start - mp[0].start;
2859 }
2860 DPRINTFN(BOOT,
2861 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
2862 i, mp->start, mp->size));
2863 }
2864 DPRINTFN(BOOT,
2865 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
2866 i, mp->start, mp->size));
2867
2868 #ifdef PTEGCOUNT
2869 pmap_pteg_cnt = PTEGCOUNT;
2870 #else /* PTEGCOUNT */
2871 pmap_pteg_cnt = 0x1000;
2872
2873 while (pmap_pteg_cnt < physmem)
2874 pmap_pteg_cnt <<= 1;
2875
2876 pmap_pteg_cnt >>= 1;
2877 #endif /* PTEGCOUNT */
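	/*
	 * Worked example (illustrative, added for exposition): with
	 * physmem = 0x8000 pages (128MB of 4KB pages), the loop above
	 * stops at 0x8000 and the final shift leaves
	 * pmap_pteg_cnt = 0x4000; at 64 bytes per PTEG that is a 1MB
	 * hash table.
	 */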
2878
2879 /*
2880 * Find suitably aligned memory for PTEG hash table.
2881 */
2882 size = pmap_pteg_cnt * sizeof(struct pteg);
2883 pmap_pteg_table = pmap_boot_find_memory(size, size, 0);
2884 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2885 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
2886 panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB",
2887 pmap_pteg_table, size);
2888 #endif
2889
2890 memset((void *)pmap_pteg_table, 0, pmap_pteg_cnt * sizeof(struct pteg));
2891 pmap_pteg_mask = pmap_pteg_cnt - 1;
2892
2893 /*
2894 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
2895 * with pages. So we just steal them before giving them to UVM.
2896 */
2897 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
2898 pmap_pvo_table = pmap_boot_find_memory(size, NBPG, 0);
2899 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2900 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
2901 panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB",
2902 pmap_pvo_table, size);
2903 #endif
2904
2905 for (i = 0; i < pmap_pteg_cnt; i++)
2906 TAILQ_INIT(&pmap_pvo_table[i]);
2907
2908 #ifndef MSGBUFADDR
2909 /*
2910 * Allocate msgbuf in high memory.
2911 */
2912 msgbuf_paddr = (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, NBPG, 1);
2913 #endif
2914
2915 #ifdef __HAVE_PMAP_PHYSSEG
2916 {
2917 u_int npgs = 0;
2918 for (i = 0, mp = avail; i < avail_cnt; i++, mp++)
2919 npgs += btoc(mp->size);
2920 size = (sizeof(struct pvo_head) + 1) * npgs;
2921 pmap_physseg.pvoh = pmap_boot_find_memory(size, NBPG, 0);
2922 pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs];
2923 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2924 if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH)
2925 panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB",
2926 pmap_physseg.pvoh, size);
2927 #endif
2928 }
2929 #endif
2930
2931 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
2932 paddr_t pfstart = atop(mp->start);
2933 paddr_t pfend = atop(mp->start + mp->size);
2934 if (mp->size == 0)
2935 continue;
2936 if (mp->start + mp->size <= SEGMENT_LENGTH) {
2937 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2938 VM_FREELIST_FIRST256);
2939 } else if (mp->start >= SEGMENT_LENGTH) {
2940 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2941 VM_FREELIST_DEFAULT);
2942 } else {
2943 pfend = atop(SEGMENT_LENGTH);
2944 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2945 VM_FREELIST_FIRST256);
2946 pfstart = atop(SEGMENT_LENGTH);
2947 pfend = atop(mp->start + mp->size);
2948 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2949 VM_FREELIST_DEFAULT);
2950 }
2951 }
2952
2953 /*
2954 * Make sure kernel vsid is allocated as well as VSID 0.
2955 */
2956 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
2957 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
2958 pmap_vsid_bitmap[0] |= 1;
2959
2960 /*
2961 * Initialize kernel pmap and hardware.
2962 */
2963 for (i = 0; i < 16; i++) {
2964 pmap_kernel()->pm_sr[i] = EMPTY_SEGMENT;
2965 __asm __volatile ("mtsrin %0,%1"
2966 :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
2967 }
2968
2969 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
2970 __asm __volatile ("mtsr %0,%1"
2971 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
2972 #ifdef KERNEL2_SR
2973 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
2974 __asm __volatile ("mtsr %0,%1"
2975 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
2976 #endif
2977 for (i = 0; i < 16; i++) {
2978 if (iosrtable[i] & SR601_T) {
2979 pmap_kernel()->pm_sr[i] = iosrtable[i];
2980 __asm __volatile ("mtsrin %0,%1"
2981 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
2982 }
2983 }
2984
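	/*
	 * Note (illustrative, added for exposition): SDR1 combines the
	 * physical base of the hash table with HTABMASK.  For example,
	 * with pmap_pteg_cnt = 0x4000, pmap_pteg_mask = 0x3fff and
	 * (pmap_pteg_mask >> 10) = 0xf, the extra hash bits beyond the
	 * minimum 64KB (1024-PTEG) table.
	 */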
2985 __asm __volatile ("sync; mtsdr1 %0; isync"
2986 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
2987 tlbia();
2988
2989 #ifdef ALTIVEC
2990 pmap_use_altivec = cpu_altivec;
2991 #endif
2992
2993 #ifdef DEBUG
2994 if (pmapdebug & PMAPDEBUG_BOOT) {
2995 u_int cnt;
2996 int bank;
2997 char pbuf[9];
2998 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
2999 cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
3000 printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n",
3001 bank,
3002 ptoa(vm_physmem[bank].avail_start),
3003 ptoa(vm_physmem[bank].avail_end),
3004 ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
3005 }
3006 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3007 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3008 pbuf, cnt);
3009 }
3010 #endif
3011
3012 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3013 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
3014 &pmap_pool_uallocator);
3015
3016 pool_setlowat(&pmap_upvo_pool, 252);
3017
3018 pool_init(&pmap_pool, sizeof(struct pmap),
3019 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator);
3020 }
3021