   1 /* $NetBSD: pmap.c,v 1.6 2003/04/02 02:47:19 thorpej Exp $ */
2 /*-
3 * Copyright (c) 2001 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 * derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 #include "opt_altivec.h"
70 #include "opt_pmap.h"
71 #include <sys/param.h>
72 #include <sys/malloc.h>
73 #include <sys/proc.h>
74 #include <sys/user.h>
75 #include <sys/pool.h>
76 #include <sys/queue.h>
77 #include <sys/device.h> /* for evcnt */
78 #include <sys/systm.h>
79
80 #if __NetBSD_Version__ < 105010000
81 #include <vm/vm.h>
82 #include <vm/vm_kern.h>
83 #define splvm() splimp()
84 #endif
85
86 #include <uvm/uvm.h>
87
88 #include <machine/pcb.h>
89 #include <machine/powerpc.h>
90 #include <powerpc/spr.h>
91 #include <powerpc/oea/sr_601.h>
92 #if __NetBSD_Version__ > 105010000
93 #include <powerpc/oea/bat.h>
94 #else
95 #include <powerpc/bat.h>
96 #endif
97
98 #if defined(DEBUG) || defined(PMAPCHECK)
99 #define STATIC
100 #else
101 #define STATIC static
102 #endif
103
104 #ifdef ALTIVEC
105 int pmap_use_altivec;
106 #endif
107
108 volatile struct pteg *pmap_pteg_table;
109 unsigned int pmap_pteg_cnt;
110 unsigned int pmap_pteg_mask;
111 paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */
112
113 struct pmap kernel_pmap_;
114 unsigned int pmap_pages_stolen;
115 u_long pmap_pte_valid;
116 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
117 u_long pmap_pvo_enter_depth;
118 u_long pmap_pvo_remove_depth;
119 #endif
120
121 int physmem;
122 #ifndef MSGBUFADDR
123 extern paddr_t msgbuf_paddr;
124 #endif
125
126 static struct mem_region *mem, *avail;
127 static u_int mem_cnt, avail_cnt;
128
129 #ifdef __HAVE_PMAP_PHYSSEG
130 /*
131 * This is a cache of referenced/modified bits.
 132  * Bits herein are shifted by ATTR_SHFT.
133 */
134 #define ATTR_SHFT 4
135 struct pmap_physseg pmap_physseg;
136 #endif
137
138 /*
139 * The following structure is exactly 32 bytes long (one cacheline).
140 */
141 struct pvo_entry {
142 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
143 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
144 struct pte pvo_pte; /* Prebuilt PTE */
145 pmap_t pvo_pmap; /* ptr to owning pmap */
146 vaddr_t pvo_vaddr; /* VA of entry */
147 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */
148 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */
149 #define PVO_WIRED 0x0010 /* PVO entry is wired */
150 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */
151 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */
152 };
153 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
154 #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
155 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
156 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
157 #define PVO_PTEGIDX_CLR(pvo) \
158 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
159 #define PVO_PTEGIDX_SET(pvo,i) \
160 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
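/*
 * Note that the flag and PTEG-slot-index bits above live in the low
 * (page-offset) bits of pvo_vaddr, which are otherwise unused because
 * mappings are page aligned; PVO_VADDR() masks them off to recover
 * the real virtual address.
 */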
161
162 TAILQ_HEAD(pvo_tqhead, pvo_entry);
163 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */
164 struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */
165 struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */
166
167 struct pool pmap_pool; /* pool for pmap structures */
168 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */
169 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */
170
171 /*
172 * We keep a cache of unmanaged pages to be used for pvo entries for
173 * unmanaged pages.
174 */
175 struct pvo_page {
176 SIMPLEQ_ENTRY(pvo_page) pvop_link;
177 };
178 SIMPLEQ_HEAD(pvop_head, pvo_page);
179 struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
180 struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
181 u_long pmap_upvop_free;
182 u_long pmap_upvop_maxfree;
183 u_long pmap_mpvop_free;
184 u_long pmap_mpvop_maxfree;
185
186 STATIC void *pmap_pool_ualloc(struct pool *, int);
187 STATIC void *pmap_pool_malloc(struct pool *, int);
188
189 STATIC void pmap_pool_ufree(struct pool *, void *);
190 STATIC void pmap_pool_mfree(struct pool *, void *);
191
192 static struct pool_allocator pmap_pool_mallocator = {
193 pmap_pool_malloc, pmap_pool_mfree, 0,
194 };
195
196 static struct pool_allocator pmap_pool_uallocator = {
197 pmap_pool_ualloc, pmap_pool_ufree, 0,
198 };
199
200 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
201 void pmap_pte_print(volatile struct pte *);
202 #endif
203
204 #ifdef DDB
205 void pmap_pteg_check(void);
206 void pmap_pteg_dist(void);
207 void pmap_print_pte(pmap_t, vaddr_t);
208 void pmap_print_mmuregs(void);
209 #endif
210
211 #if defined(DEBUG) || defined(PMAPCHECK)
212 #ifdef PMAPCHECK
213 int pmapcheck = 1;
214 #else
215 int pmapcheck = 0;
216 #endif
217 void pmap_pvo_verify(void);
218 STATIC void pmap_pvo_check(const struct pvo_entry *);
219 #define PMAP_PVO_CHECK(pvo) \
220 do { \
221 if (pmapcheck) \
222 pmap_pvo_check(pvo); \
223 } while (0)
224 #else
225 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0)
226 #endif
227 STATIC int pmap_pte_insert(int, struct pte *);
228 STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
229 vaddr_t, paddr_t, register_t, int);
230 STATIC void pmap_pvo_remove(struct pvo_entry *, int);
231 STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
232 STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
233
234 STATIC void tlbia(void);
235
236 STATIC void pmap_release(pmap_t);
237 STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);
238
239 #define VSID_NBPW (sizeof(uint32_t) * 8)
240 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
241
242 static int pmap_initialized;
243
244 #if defined(DEBUG) || defined(PMAPDEBUG)
245 #define PMAPDEBUG_BOOT 0x0001
246 #define PMAPDEBUG_PTE 0x0002
247 #define PMAPDEBUG_EXEC 0x0008
248 #define PMAPDEBUG_PVOENTER 0x0010
249 #define PMAPDEBUG_PVOREMOVE 0x0020
250 #define PMAPDEBUG_ACTIVATE 0x0100
251 #define PMAPDEBUG_CREATE 0x0200
252 #define PMAPDEBUG_ENTER 0x1000
253 #define PMAPDEBUG_KENTER 0x2000
254 #define PMAPDEBUG_KREMOVE 0x4000
255 #define PMAPDEBUG_REMOVE 0x8000
256 unsigned int pmapdebug = 0;
257 # define DPRINTF(x) printf x
258 # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x
259 #else
260 # define DPRINTF(x)
261 # define DPRINTFN(n, x)
262 #endif
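/*
 * Usage sketch: DPRINTFN(ENTER, ("pmap_enter ...")) expands to a
 * printf that fires only when PMAPDEBUG_ENTER is set in pmapdebug;
 * without DEBUG/PMAPDEBUG both macros compile to nothing.
 */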
263
264
265 #ifdef PMAPCOUNTERS
266 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
267 #define PMAPCOUNT2(ev) ((ev).ev_count++)
268
269 struct evcnt pmap_evcnt_mappings =
270 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
271 "pmap", "pages mapped");
272 struct evcnt pmap_evcnt_unmappings =
273 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
274 "pmap", "pages unmapped");
275
276 struct evcnt pmap_evcnt_kernel_mappings =
277 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
278 "pmap", "kernel pages mapped");
279 struct evcnt pmap_evcnt_kernel_unmappings =
280 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
281 "pmap", "kernel pages unmapped");
282
283 struct evcnt pmap_evcnt_mappings_replaced =
284 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
285 "pmap", "page mappings replaced");
286
287 struct evcnt pmap_evcnt_exec_mappings =
288 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
289 "pmap", "exec pages mapped");
290 struct evcnt pmap_evcnt_exec_cached =
291 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
292 "pmap", "exec pages cached");
293
294 struct evcnt pmap_evcnt_exec_synced =
295 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
296 "pmap", "exec pages synced");
297 struct evcnt pmap_evcnt_exec_synced_clear_modify =
298 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
299 "pmap", "exec pages synced (CM)");
300
301 struct evcnt pmap_evcnt_exec_uncached_page_protect =
302 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
303 "pmap", "exec pages uncached (PP)");
304 struct evcnt pmap_evcnt_exec_uncached_clear_modify =
305 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
306 "pmap", "exec pages uncached (CM)");
307 struct evcnt pmap_evcnt_exec_uncached_zero_page =
308 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
309 "pmap", "exec pages uncached (ZP)");
310 struct evcnt pmap_evcnt_exec_uncached_copy_page =
311 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
312 "pmap", "exec pages uncached (CP)");
313
314 struct evcnt pmap_evcnt_updates =
315 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
316 "pmap", "updates");
317 struct evcnt pmap_evcnt_collects =
318 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
319 "pmap", "collects");
320 struct evcnt pmap_evcnt_copies =
321 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
322 "pmap", "copies");
323
324 struct evcnt pmap_evcnt_ptes_spilled =
325 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
326 "pmap", "ptes spilled from overflow");
327 struct evcnt pmap_evcnt_ptes_unspilled =
328 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
329 "pmap", "ptes not spilled");
330 struct evcnt pmap_evcnt_ptes_evicted =
331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
332 "pmap", "ptes evicted");
333
334 struct evcnt pmap_evcnt_ptes_primary[8] = {
335 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
336 "pmap", "ptes added at primary[0]"),
337 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
338 "pmap", "ptes added at primary[1]"),
339 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
340 "pmap", "ptes added at primary[2]"),
341 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
342 "pmap", "ptes added at primary[3]"),
343
344 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
345 "pmap", "ptes added at primary[4]"),
346 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
347 "pmap", "ptes added at primary[5]"),
348 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
349 "pmap", "ptes added at primary[6]"),
350 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
351 "pmap", "ptes added at primary[7]"),
352 };
353 struct evcnt pmap_evcnt_ptes_secondary[8] = {
354 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
355 "pmap", "ptes added at secondary[0]"),
356 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
357 "pmap", "ptes added at secondary[1]"),
358 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
359 "pmap", "ptes added at secondary[2]"),
360 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
361 "pmap", "ptes added at secondary[3]"),
362
363 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
364 "pmap", "ptes added at secondary[4]"),
365 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
366 "pmap", "ptes added at secondary[5]"),
367 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
368 "pmap", "ptes added at secondary[6]"),
369 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
370 "pmap", "ptes added at secondary[7]"),
371 };
372 struct evcnt pmap_evcnt_ptes_removed =
373 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
374 "pmap", "ptes removed");
375 struct evcnt pmap_evcnt_ptes_changed =
376 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
377 "pmap", "ptes changed");
378
379 /*
380 * From pmap_subr.c
381 */
382 extern struct evcnt pmap_evcnt_zeroed_pages;
383 extern struct evcnt pmap_evcnt_copied_pages;
384 extern struct evcnt pmap_evcnt_idlezeroed_pages;
385 #else
386 #define PMAPCOUNT(ev) ((void) 0)
387 #define PMAPCOUNT2(ev) ((void) 0)
388 #endif
389
390 #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va))
391 #define TLBSYNC() __asm __volatile("tlbsync")
392 #define SYNC() __asm __volatile("sync")
393 #define EIEIO() __asm __volatile("eieio")
394 #define MFMSR() mfmsr()
395 #define MTMSR(psl) mtmsr(psl)
396 #define MFPVR() mfpvr()
397 #define MFSRIN(va) mfsrin(va)
398 #define MFTB() mfrtcltbl()
399
400 static __inline register_t
401 mfsrin(vaddr_t va)
402 {
403 register_t sr;
404 __asm __volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
405 return sr;
406 }
407
408 static __inline register_t
409 pmap_interrupts_off(void)
410 {
411 register_t msr = MFMSR();
412 if (msr & PSL_EE)
413 MTMSR(msr & ~PSL_EE);
414 return msr;
415 }
416
417 static void
418 pmap_interrupts_restore(register_t msr)
419 {
420 if (msr & PSL_EE)
421 MTMSR(msr);
422 }
423
424 static __inline u_int32_t
425 mfrtcltbl(void)
426 {
427
428 if ((MFPVR() >> 16) == MPC601)
429 return (mfrtcl() >> 7);
430 else
431 return (mftbl());
432 }
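/*
 * The MPC601 has no timebase register, so the low word of its
 * real-time clock is read instead; the >> 7 above is (we assume)
 * intended to scale it to tick at a rate comparable to the timebase
 * on later processors.
 */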
433
434 /*
435 * These small routines may have to be replaced,
 436  * if/when we support processors other than the 604.
437 */
438
439 void
440 tlbia(void)
441 {
442 caddr_t i;
443
444 SYNC();
445 /*
446 * Why not use "tlbia"? Because not all processors implement it.
447 *
448 * This needs to be a per-cpu callback to do the appropriate thing
449 * for the CPU. XXX
450 */
451 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
452 TLBIE(i);
453 EIEIO();
454 SYNC();
455 }
456 TLBSYNC();
457 SYNC();
458 }
459
460 static __inline register_t
461 va_to_vsid(const struct pmap *pm, vaddr_t addr)
462 {
463 return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID);
464 }
465
466 static __inline register_t
467 va_to_pteg(const struct pmap *pm, vaddr_t addr)
468 {
469 register_t hash;
470
471 hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
472 return hash & pmap_pteg_mask;
473 }
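/*
 * Illustrative example (numbers made up): with pmap_pteg_mask == 0x3ff,
 * VSID 0x12345 and a page index of 0x0a7 give
 *	(0x12345 ^ 0x0a7) & 0x3ff == 0x3e2
 * as the primary PTEG index; the corresponding secondary group used
 * by pmap_pte_insert() is 0x3e2 ^ 0x3ff == 0x01d.
 */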
474
475 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
476 /*
477 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 478  * The only bit of magic is that the top 4 bits of the address don't
 479  * technically exist in the PTE.  But we know we reserved 4 bits of the
 480  * VSID for them, so that's how we recover them.
481 */
482 static vaddr_t
483 pmap_pte_to_va(volatile const struct pte *pt)
484 {
485 vaddr_t va;
486 uintptr_t ptaddr = (uintptr_t) pt;
487
488 if (pt->pte_hi & PTE_HID)
489 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
490
491 /* PPC Bits 10-19 */
492 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
493 va <<= ADDR_PIDX_SHFT;
494
495 /* PPC Bits 4-9 */
496 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
497
498 /* PPC Bits 0-3 */
499 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
500
501 return va;
502 }
503 #endif
504
505 static __inline struct pvo_head *
506 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
507 {
508 #ifdef __HAVE_VM_PAGE_MD
509 struct vm_page *pg;
510
511 pg = PHYS_TO_VM_PAGE(pa);
512 if (pg_p != NULL)
513 *pg_p = pg;
514 if (pg == NULL)
515 return &pmap_pvo_unmanaged;
516 return &pg->mdpage.mdpg_pvoh;
517 #endif
518 #ifdef __HAVE_PMAP_PHYSSEG
519 int bank, pg;
520
521 bank = vm_physseg_find(atop(pa), &pg);
522 if (pg_p != NULL)
523 *pg_p = pg;
524 if (bank == -1)
525 return &pmap_pvo_unmanaged;
526 return &vm_physmem[bank].pmseg.pvoh[pg];
527 #endif
528 }
529
530 static __inline struct pvo_head *
531 vm_page_to_pvoh(struct vm_page *pg)
532 {
533 #ifdef __HAVE_VM_PAGE_MD
534 return &pg->mdpage.mdpg_pvoh;
535 #endif
536 #ifdef __HAVE_PMAP_PHYSSEG
537 return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
538 #endif
539 }
540
541
542 #ifdef __HAVE_PMAP_PHYSSEG
543 static __inline char *
544 pa_to_attr(paddr_t pa)
545 {
546 int bank, pg;
547
548 bank = vm_physseg_find(atop(pa), &pg);
549 if (bank == -1)
550 return NULL;
551 return &vm_physmem[bank].pmseg.attrs[pg];
552 }
553 #endif
554
555 static __inline void
556 pmap_attr_clear(struct vm_page *pg, int ptebit)
557 {
558 #ifdef __HAVE_PMAP_PHYSSEG
559 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT);
560 #endif
561 #ifdef __HAVE_VM_PAGE_MD
562 pg->mdpage.mdpg_attrs &= ~ptebit;
563 #endif
564 }
565
566 static __inline int
567 pmap_attr_fetch(struct vm_page *pg)
568 {
569 #ifdef __HAVE_PMAP_PHYSSEG
570 return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT;
571 #endif
572 #ifdef __HAVE_VM_PAGE_MD
573 return pg->mdpage.mdpg_attrs;
574 #endif
575 }
576
577 static __inline void
578 pmap_attr_save(struct vm_page *pg, int ptebit)
579 {
580 #ifdef __HAVE_PMAP_PHYSSEG
581 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT);
582 #endif
583 #ifdef __HAVE_VM_PAGE_MD
584 pg->mdpage.mdpg_attrs |= ptebit;
585 #endif
586 }
587
588 static __inline int
589 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
590 {
591 if (pt->pte_hi == pvo_pt->pte_hi
592 #if 0
593 && ((pt->pte_lo ^ pvo_pt->pte_lo) &
594 ~(PTE_REF|PTE_CHG)) == 0
595 #endif
596 )
597 return 1;
598 return 0;
599 }
600
601 static __inline void
602 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
603 {
604 /*
605 * Construct the PTE. Default to IMB initially. Valid bit
606 * only gets set when the real pte is set in memory.
607 *
608 * Note: Don't set the valid bit for correct operation of tlb update.
609 */
610 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
611 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
612 pt->pte_lo = pte_lo;
613 }
614
615 static __inline void
616 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
617 {
618 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
619 }
620
621 static __inline void
622 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
623 {
624 /*
625 * As shown in Section 7.6.3.2.3
626 */
627 pt->pte_lo &= ~ptebit;
628 TLBIE(va);
629 SYNC();
630 EIEIO();
631 TLBSYNC();
632 SYNC();
633 }
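/*
 * The TLB invalidation and synchronization sequence above follows the
 * R/C-bit update recipe the comment cites: once the in-memory PTE is
 * changed, the matching TLB entry is invalidated and the invalidation
 * is made visible system-wide before we return.
 */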
634
635 static __inline void
636 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
637 {
638 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
639 if (pvo_pt->pte_hi & PTE_VALID)
640 panic("pte_set: setting an already valid pte %p", pvo_pt);
641 #endif
642 pvo_pt->pte_hi |= PTE_VALID;
643 /*
644 * Update the PTE as defined in section 7.6.3.1
645 * Note that the REF/CHG bits are from pvo_pt and thus should
646 * have been saved so this routine can restore them (if desired).
647 */
648 pt->pte_lo = pvo_pt->pte_lo;
649 EIEIO();
650 pt->pte_hi = pvo_pt->pte_hi;
651 SYNC();
652 pmap_pte_valid++;
653 }
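/*
 * Ordering note: pte_lo is written and eieio'd before pte_hi, so the
 * MMU never sees a PTE marked valid whose low word is still stale;
 * the final sync makes the complete update visible before we return.
 */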
654
655 static __inline void
656 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
657 {
658 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
659 if ((pvo_pt->pte_hi & PTE_VALID) == 0)
660 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
661 if ((pt->pte_hi & PTE_VALID) == 0)
662 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
663 #endif
664
665 pvo_pt->pte_hi &= ~PTE_VALID;
666 /*
667 * Force the ref & chg bits back into the PTEs.
668 */
669 SYNC();
670 /*
671 * Invalidate the pte ... (Section 7.6.3.3)
672 */
673 pt->pte_hi &= ~PTE_VALID;
674 SYNC();
675 TLBIE(va);
676 SYNC();
677 EIEIO();
678 TLBSYNC();
679 SYNC();
680 /*
681 * Save the ref & chg bits ...
682 */
683 pmap_pte_synch(pt, pvo_pt);
684 pmap_pte_valid--;
685 }
686
687 static __inline void
688 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
689 {
690 /*
691 * Invalidate the PTE
692 */
693 pmap_pte_unset(pt, pvo_pt, va);
694 pmap_pte_set(pt, pvo_pt);
695 }
696
697 /*
698 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
699 * (either primary or secondary location).
700 *
701 * Note: both the destination and source PTEs must not have PTE_VALID set.
702 */
703
704 STATIC int
705 pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
706 {
707 volatile struct pte *pt;
708 int i;
709
710 #if defined(DEBUG)
711 DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%lx 0x%lx\n",
712 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
713 #endif
714 /*
715 * First try primary hash.
716 */
717 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
718 if ((pt->pte_hi & PTE_VALID) == 0) {
719 pvo_pt->pte_hi &= ~PTE_HID;
720 pmap_pte_set(pt, pvo_pt);
721 return i;
722 }
723 }
724
725 /*
726 * Now try secondary hash.
727 */
728 ptegidx ^= pmap_pteg_mask;
729 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
730 if ((pt->pte_hi & PTE_VALID) == 0) {
731 pvo_pt->pte_hi |= PTE_HID;
732 pmap_pte_set(pt, pvo_pt);
733 return i;
734 }
735 }
736 return -1;
737 }
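/*
 * The secondary group is reached by XORing the primary index with
 * pmap_pteg_mask, and PTE_HID records which hash a resident PTE used.
 * E.g. (illustrative) with pmap_pteg_mask == 0x3ff, primary index
 * 0x123 has secondary index 0x123 ^ 0x3ff == 0x2dc.
 */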
738
739 /*
740 * Spill handler.
741 *
742 * Tries to spill a page table entry from the overflow area.
 743  * This runs in either real mode (if dealing with an exception spill)
 744  * or virtual mode when manually spilling one of the
745 * kernel's pte entries. In either case, interrupts are already
746 * disabled.
747 */
748 int
749 pmap_pte_spill(struct pmap *pm, vaddr_t addr)
750 {
751 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
752 struct pvo_entry *pvo;
753 struct pvo_tqhead *pvoh, *vpvoh;
754 int ptegidx, i, j;
755 volatile struct pteg *pteg;
756 volatile struct pte *pt;
757
758 ptegidx = va_to_pteg(pm, addr);
759
760 /*
761 * Have to substitute some entry. Use the primary hash for this.
762 *
763 * Use low bits of timebase as random generator
764 */
765 pteg = &pmap_pteg_table[ptegidx];
766 i = MFTB() & 7;
767 pt = &pteg->pt[i];
768
769 source_pvo = NULL;
770 victim_pvo = NULL;
771 pvoh = &pmap_pvo_table[ptegidx];
772 TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
773
774 /*
 775 		 * We need to find the pvo entry for this address...
776 */
777 PMAP_PVO_CHECK(pvo); /* sanity check */
778
779 /*
780 * If we haven't found the source and we come to a PVO with
 781 		 * a valid PTE, then we know we can't find it, because
 782 		 * evicted PVOs always come first in the list.
783 */
784 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
785 break;
786 if (source_pvo == NULL && pm == pvo->pvo_pmap &&
787 addr == PVO_VADDR(pvo)) {
788
789 /*
790 * Now we have found the entry to be spilled into the
791 * pteg. Attempt to insert it into the page table.
792 */
793 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
794 if (j >= 0) {
795 PVO_PTEGIDX_SET(pvo, j);
796 PMAP_PVO_CHECK(pvo); /* sanity check */
797 pvo->pvo_pmap->pm_evictions--;
798 PMAPCOUNT(ptes_spilled);
799 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
800 ? pmap_evcnt_ptes_secondary
801 : pmap_evcnt_ptes_primary)[j]);
802
803 /*
804 * Since we keep the evicted entries at the
 805 				 * front of the PVO list, we need to move this
806 * (now resident) PVO after the evicted
807 * entries.
808 */
809 next_pvo = TAILQ_NEXT(pvo, pvo_olink);
810
811 /*
812 * If we don't have to move (either we were the
813 * last entry or the next entry was valid),
814 * don't change our position. Otherwise
815 * move ourselves to the tail of the queue.
816 */
817 if (next_pvo != NULL &&
818 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
819 TAILQ_REMOVE(pvoh, pvo, pvo_olink);
820 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
821 }
822 return 1;
823 }
824 source_pvo = pvo;
825 if (victim_pvo != NULL)
826 break;
827 }
828
829 /*
830 * We also need the pvo entry of the victim we are replacing
 831 		 * so we can save the R & C bits of its PTE.
832 */
833 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
834 pmap_pte_compare(pt, &pvo->pvo_pte)) {
835 vpvoh = pvoh;
836 victim_pvo = pvo;
837 if (source_pvo != NULL)
838 break;
839 }
840 }
841
842 if (source_pvo == NULL) {
843 PMAPCOUNT(ptes_unspilled);
844 return 0;
845 }
846
847 if (victim_pvo == NULL) {
848 if ((pt->pte_hi & PTE_HID) == 0)
849 panic("pmap_pte_spill: victim p-pte (%p) has "
850 "no pvo entry!", pt);
851
852 /*
853 * If this is a secondary PTE, we need to search
854 * its primary pvo bucket for the matching PVO.
855 */
856 vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];
857 TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
858 PMAP_PVO_CHECK(pvo); /* sanity check */
859
860 /*
861 * We also need the pvo entry of the victim we are
 862 			 * replacing so we can save the R & C bits of its PTE.
863 */
864 if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
865 victim_pvo = pvo;
866 break;
867 }
868 }
869 if (victim_pvo == NULL)
870 panic("pmap_pte_spill: victim s-pte (%p) has "
871 "no pvo entry!", pt);
872 }
873
874 /*
 875 	 * We are invalidating the TLB entry for the EA of the PTE
 876 	 * we are replacing even though it's valid; if we don't,
877 * we lose any ref/chg bit changes contained in the TLB
878 * entry.
879 */
880 source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
881
882 /*
883 * To enforce the PVO list ordering constraint that all
884 * evicted entries should come before all valid entries,
885 * move the source PVO to the tail of its list and the
886 * victim PVO to the head of its list (which might not be
887 * the same list, if the victim was using the secondary hash).
888 */
889 TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
890 TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
891 TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
892 TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
893 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
894 pmap_pte_set(pt, &source_pvo->pvo_pte);
895 victim_pvo->pvo_pmap->pm_evictions++;
896 source_pvo->pvo_pmap->pm_evictions--;
897
898 PVO_PTEGIDX_CLR(victim_pvo);
899 PVO_PTEGIDX_SET(source_pvo, i);
900 PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
901 PMAPCOUNT(ptes_spilled);
902 PMAPCOUNT(ptes_evicted);
903 PMAPCOUNT(ptes_removed);
904
905 PMAP_PVO_CHECK(victim_pvo);
906 PMAP_PVO_CHECK(source_pvo);
907 return 1;
908 }
909
910 /*
911 * Restrict given range to physical memory
912 */
913 void
914 pmap_real_memory(paddr_t *start, psize_t *size)
915 {
916 struct mem_region *mp;
917
918 for (mp = mem; mp->size; mp++) {
919 if (*start + *size > mp->start
920 && *start < mp->start + mp->size) {
921 if (*start < mp->start) {
922 *size -= mp->start - *start;
923 *start = mp->start;
924 }
925 if (*start + *size > mp->start + mp->size)
926 *size = mp->start + mp->size - *start;
927 return;
928 }
929 }
930 *size = 0;
931 }
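/*
 * Example (illustrative values): with a single region [0, 0x10000000),
 * a request of *start == 0x0ffff000, *size == 0x2000 is clipped to
 * *size == 0x1000, while a range that misses every region comes back
 * with *size == 0.
 */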
932
933 /*
934 * Initialize anything else for pmap handling.
935 * Called during vm_init().
936 */
937 void
938 pmap_init(void)
939 {
940 int s;
941 #ifdef __HAVE_PMAP_PHYSSEG
942 struct pvo_tqhead *pvoh;
943 int bank;
944 long sz;
945 char *attr;
946
947 s = splvm();
948 pvoh = pmap_physseg.pvoh;
949 attr = pmap_physseg.attrs;
950 for (bank = 0; bank < vm_nphysseg; bank++) {
951 sz = vm_physmem[bank].end - vm_physmem[bank].start;
952 vm_physmem[bank].pmseg.pvoh = pvoh;
953 vm_physmem[bank].pmseg.attrs = attr;
954 for (; sz > 0; sz--, pvoh++, attr++) {
955 TAILQ_INIT(pvoh);
956 *attr = 0;
957 }
958 }
959 splx(s);
960 #endif
961
962 s = splvm();
963 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
964 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
965 &pmap_pool_mallocator);
966
967 pool_setlowat(&pmap_mpvo_pool, 1008);
968
969 pmap_initialized = 1;
970 splx(s);
971
972 #ifdef PMAPCOUNTERS
973 evcnt_attach_static(&pmap_evcnt_mappings);
974 evcnt_attach_static(&pmap_evcnt_mappings_replaced);
975 evcnt_attach_static(&pmap_evcnt_unmappings);
976
977 evcnt_attach_static(&pmap_evcnt_kernel_mappings);
978 evcnt_attach_static(&pmap_evcnt_kernel_unmappings);
979
980 evcnt_attach_static(&pmap_evcnt_exec_mappings);
981 evcnt_attach_static(&pmap_evcnt_exec_cached);
982 evcnt_attach_static(&pmap_evcnt_exec_synced);
983 evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify);
984
985 evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect);
986 evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify);
987 evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page);
988 evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page);
989
990 evcnt_attach_static(&pmap_evcnt_zeroed_pages);
991 evcnt_attach_static(&pmap_evcnt_copied_pages);
992 evcnt_attach_static(&pmap_evcnt_idlezeroed_pages);
993
994 evcnt_attach_static(&pmap_evcnt_updates);
995 evcnt_attach_static(&pmap_evcnt_collects);
996 evcnt_attach_static(&pmap_evcnt_copies);
997
998 evcnt_attach_static(&pmap_evcnt_ptes_spilled);
999 evcnt_attach_static(&pmap_evcnt_ptes_unspilled);
1000 evcnt_attach_static(&pmap_evcnt_ptes_evicted);
1001 evcnt_attach_static(&pmap_evcnt_ptes_removed);
1002 evcnt_attach_static(&pmap_evcnt_ptes_changed);
1003 evcnt_attach_static(&pmap_evcnt_ptes_primary[0]);
1004 evcnt_attach_static(&pmap_evcnt_ptes_primary[1]);
1005 evcnt_attach_static(&pmap_evcnt_ptes_primary[2]);
1006 evcnt_attach_static(&pmap_evcnt_ptes_primary[3]);
1007 evcnt_attach_static(&pmap_evcnt_ptes_primary[4]);
1008 evcnt_attach_static(&pmap_evcnt_ptes_primary[5]);
1009 evcnt_attach_static(&pmap_evcnt_ptes_primary[6]);
1010 evcnt_attach_static(&pmap_evcnt_ptes_primary[7]);
1011 evcnt_attach_static(&pmap_evcnt_ptes_secondary[0]);
1012 evcnt_attach_static(&pmap_evcnt_ptes_secondary[1]);
1013 evcnt_attach_static(&pmap_evcnt_ptes_secondary[2]);
1014 evcnt_attach_static(&pmap_evcnt_ptes_secondary[3]);
1015 evcnt_attach_static(&pmap_evcnt_ptes_secondary[4]);
1016 evcnt_attach_static(&pmap_evcnt_ptes_secondary[5]);
1017 evcnt_attach_static(&pmap_evcnt_ptes_secondary[6]);
1018 evcnt_attach_static(&pmap_evcnt_ptes_secondary[7]);
1019 #endif
1020 }
1021
1022 /*
1023 * How much virtual space does the kernel get?
1024 */
1025 void
1026 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1027 {
1028 /*
1029 * For now, reserve one segment (minus some overhead) for kernel
1030 * virtual memory
1031 */
1032 *start = VM_MIN_KERNEL_ADDRESS;
1033 *end = VM_MAX_KERNEL_ADDRESS;
1034 }
1035
1036 /*
1037 * Allocate, initialize, and return a new physical map.
1038 */
1039 pmap_t
1040 pmap_create(void)
1041 {
1042 pmap_t pm;
1043
1044 pm = pool_get(&pmap_pool, PR_WAITOK);
1045 memset((caddr_t)pm, 0, sizeof *pm);
1046 pmap_pinit(pm);
1047
1048 DPRINTFN(CREATE,("pmap_create: pm %p:\n"
1049 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n"
1050 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n", pm,
1051 pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3],
1052 pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7],
1053 pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11],
1054 pm->pm_sr[12], pm->pm_sr[13], pm->pm_sr[14], pm->pm_sr[15]));
1055 return pm;
1056 }
1057
1058 /*
1059 * Initialize a preallocated and zeroed pmap structure.
1060 */
1061 void
1062 pmap_pinit(pmap_t pm)
1063 {
1064 register_t entropy = MFTB();
1065 register_t mask;
1066 int i;
1067
1068 /*
1069 * Allocate some segment registers for this pmap.
1070 */
1071 pm->pm_refs = 1;
1072 for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1073 static register_t pmap_vsidcontext;
1074 register_t hash;
1075 unsigned int n;
1076
1077 		/* Create a new value by multiplying by a prime and adding in
1078 * entropy from the timebase register. This is to make the
1079 * VSID more random so that the PT Hash function collides
1080 * less often. (note that the prime causes gcc to do shifts
1081 * instead of a multiply)
1082 */
1083 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1084 hash = pmap_vsidcontext & (NPMAPS - 1);
1085 if (hash == 0) /* 0 is special, avoid it */
1086 continue;
1087 n = hash >> 5;
1088 mask = 1L << (hash & (VSID_NBPW-1));
1089 hash = pmap_vsidcontext;
1090 if (pmap_vsid_bitmap[n] & mask) { /* collision? */
1091 /* anything free in this bucket? */
1092 if (~pmap_vsid_bitmap[n] == 0) {
1093 entropy = hash >> PTE_VSID_SHFT;
1094 continue;
1095 }
1096 i = ffs(~pmap_vsid_bitmap[n]) - 1;
1097 mask = 1L << i;
1098 hash &= ~(VSID_NBPW-1);
1099 hash |= i;
1100 }
1101 /*
1102 		 * Make sure to clear out the SR_KEY_LEN bits because we put
1103 * our data in those bits (to identify the segment).
1104 */
1105 hash &= PTE_VSID >> (PTE_VSID_SHFT + SR_KEY_LEN);
1106 pmap_vsid_bitmap[n] |= mask;
1107 for (i = 0; i < 16; i++)
1108 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY;
1109 return;
1110 }
1111 panic("pmap_pinit: out of segments");
1112 }
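/*
 * A sketch of what the loop above accomplishes: each pmap claims one
 * free bit in pmap_vsid_bitmap, chosen from a pseudo-random hash, and
 * the pmap's 16 segment registers share that base value, with
 * VSID_MAKE() folding in the segment number so each segment still
 * receives a distinct VSID.
 */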
1113
1114 /*
1115 * Add a reference to the given pmap.
1116 */
1117 void
1118 pmap_reference(pmap_t pm)
1119 {
1120 pm->pm_refs++;
1121 }
1122
1123 /*
1124 * Retire the given pmap from service.
1125 * Should only be called if the map contains no valid mappings.
1126 */
1127 void
1128 pmap_destroy(pmap_t pm)
1129 {
1130 if (--pm->pm_refs == 0) {
1131 pmap_release(pm);
1132 pool_put(&pmap_pool, pm);
1133 }
1134 }
1135
1136 /*
1137 * Release any resources held by the given physical map.
1138 * Called when a pmap initialized by pmap_pinit is being released.
1139 */
1140 void
1141 pmap_release(pmap_t pm)
1142 {
1143 int idx, mask;
1144
1145 if (pm->pm_sr[0] == 0)
1146 panic("pmap_release");
1147 idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS-1);
1148 mask = 1 << (idx % VSID_NBPW);
1149 idx /= VSID_NBPW;
1150 pmap_vsid_bitmap[idx] &= ~mask;
1151 }
1152
1153 /*
1154 * Copy the range specified by src_addr/len
1155 * from the source map to the range dst_addr/len
1156 * in the destination map.
1157 *
1158 * This routine is only advisory and need not do anything.
1159 */
1160 void
1161 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
1162 vsize_t len, vaddr_t src_addr)
1163 {
1164 PMAPCOUNT(copies);
1165 }
1166
1167 /*
1168 * Require that all active physical maps contain no
1169 * incorrect entries NOW.
1170 */
1171 void
1172 pmap_update(struct pmap *pmap)
1173 {
1174 PMAPCOUNT(updates);
1175 TLBSYNC();
1176 }
1177
1178 /*
1179 * Garbage collects the physical map system for
1180 * pages which are no longer used.
1181 * Success need not be guaranteed -- that is, there
1182 * may well be pages which are not referenced, but
1183 * others may be collected.
1184 * Called by the pageout daemon when pages are scarce.
1185 */
1186 void
1187 pmap_collect(pmap_t pm)
1188 {
1189 PMAPCOUNT(collects);
1190 }
1191
1192 static __inline int
1193 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1194 {
1195 int pteidx;
1196 /*
1197 	 * We can find the actual pte entry without searching by
1198 	 * grabbing the PTEG slot index from the unused low bits of pvo_vaddr
1199 * and by noticing the HID bit.
1200 */
1201 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1202 if (pvo->pvo_pte.pte_hi & PTE_HID)
1203 pteidx ^= pmap_pteg_mask * 8;
1204 return pteidx;
1205 }
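/*
 * Worked example (numbers illustrative): for ptegidx 0x123 and PTEG
 * slot 5, pteidx == 0x123 * 8 + 5 == 0x91d.  If the PTE was installed
 * via the secondary hash (PTE_HID set) and pmap_pteg_mask == 0x3ff,
 * the XOR with pmap_pteg_mask * 8 gives 0x91d ^ 0x1ff8 == 0x16e5,
 * i.e. slot 5 of PTEG 0x2dc.
 */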
1206
1207 volatile struct pte *
1208 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1209 {
1210 volatile struct pte *pt;
1211
1212 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1213 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1214 return NULL;
1215 #endif
1216
1217 /*
1218 	 * If we haven't been supplied the pteidx, calculate it.
1219 */
1220 if (pteidx == -1) {
1221 int ptegidx;
1222 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1223 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1224 }
1225
1226 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1227
1228 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1229 return pt;
1230 #else
1231 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1232 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1233 "pvo but no valid pte index", pvo);
1234 }
1235 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1236 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1237 "pvo but no valid pte", pvo);
1238 }
1239
1240 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1241 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1242 #if defined(DEBUG) || defined(PMAPCHECK)
1243 pmap_pte_print(pt);
1244 #endif
1245 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1246 "pmap_pteg_table %p but invalid in pvo",
1247 pvo, pt);
1248 }
1249 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
1250 #if defined(DEBUG) || defined(PMAPCHECK)
1251 pmap_pte_print(pt);
1252 #endif
1253 panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1254 "not match pte %p in pmap_pteg_table",
1255 pvo, pt);
1256 }
1257 return pt;
1258 }
1259
1260 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1261 #if defined(DEBUG) || defined(PMAPCHECK)
1262 pmap_pte_print(pt);
1263 #endif
1264 panic("pmap_pvo_to_pte: pvo %p: has invalid pte %p in "
1265 "pmap_pteg_table but valid in pvo", pvo, pt);
1266 }
1267 return NULL;
1268 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1269 }
1270
1271 struct pvo_entry *
1272 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1273 {
1274 struct pvo_entry *pvo;
1275 int ptegidx;
1276
1277 va &= ~ADDR_POFF;
1278 ptegidx = va_to_pteg(pm, va);
1279
1280 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1281 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1282 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1283 panic("pmap_pvo_find_va: invalid pvo %p on "
1284 "list %#x (%p)", pvo, ptegidx,
1285 &pmap_pvo_table[ptegidx]);
1286 #endif
1287 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1288 if (pteidx_p)
1289 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1290 return pvo;
1291 }
1292 }
1293 return NULL;
1294 }
1295
1296 #if defined(DEBUG) || defined(PMAPCHECK)
1297 void
1298 pmap_pvo_check(const struct pvo_entry *pvo)
1299 {
1300 struct pvo_head *pvo_head;
1301 struct pvo_entry *pvo0;
1302 volatile struct pte *pt;
1303 int failed = 0;
1304
1305 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1306 panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1307
1308 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1309 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1310 pvo, pvo->pvo_pmap);
1311 failed = 1;
1312 }
1313
1314 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1315 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1316 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1317 pvo, TAILQ_NEXT(pvo, pvo_olink));
1318 failed = 1;
1319 }
1320
1321 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1322 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1323 		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
1324 pvo, LIST_NEXT(pvo, pvo_vlink));
1325 failed = 1;
1326 }
1327
1328 if (pvo->pvo_vaddr & PVO_MANAGED) {
1329 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1330 } else {
1331 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1332 printf("pmap_pvo_check: pvo %p: non kernel address "
1333 "on kernel unmanaged list\n", pvo);
1334 failed = 1;
1335 }
1336 pvo_head = &pmap_pvo_kunmanaged;
1337 }
1338 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1339 if (pvo0 == pvo)
1340 break;
1341 }
1342 if (pvo0 == NULL) {
1343 printf("pmap_pvo_check: pvo %p: not present "
1344 "on its vlist head %p\n", pvo, pvo_head);
1345 failed = 1;
1346 }
1347 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1348 printf("pmap_pvo_check: pvo %p: not present "
1349 "on its olist head\n", pvo);
1350 failed = 1;
1351 }
1352 pt = pmap_pvo_to_pte(pvo, -1);
1353 if (pt == NULL) {
1354 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1355 printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1356 "no PTE\n", pvo);
1357 failed = 1;
1358 }
1359 } else {
1360 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1361 (uintptr_t) pt >=
1362 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1363 printf("pmap_pvo_check: pvo %p: pte %p not in "
1364 "pteg table\n", pvo, pt);
1365 failed = 1;
1366 }
1367 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1368 			printf("pmap_pvo_check: pvo %p: PVO_PTEGIDX does not "
1369 			    "match the PTE slot\n", pvo);
1370 failed = 1;
1371 }
1372 if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1373 printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1374 "%#lx/%#lx\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi);
1375 failed = 1;
1376 }
1377 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1378 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1379 printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1380 "%#lx/%#lx\n", pvo,
1381 pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN),
1382 pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN));
1383 failed = 1;
1384 }
1385 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1386 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx"
1387 " doesn't not match PVO's VA %#lx\n",
1388 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1389 failed = 1;
1390 }
1391 if (failed)
1392 pmap_pte_print(pt);
1393 }
1394 if (failed)
1395 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1396 pvo->pvo_pmap);
1397 }
1398 #endif /* DEBUG || PMAPCHECK */
1399
1400 /*
1401 * This returns whether this is the first mapping of a page.
1402 */
1403 int
1404 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1405 vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1406 {
1407 struct pvo_entry *pvo;
1408 struct pvo_tqhead *pvoh;
1409 register_t msr;
1410 int ptegidx;
1411 int i;
1412 int poolflags = PR_NOWAIT;
1413
1414 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1415 if (pmap_pvo_remove_depth > 0)
1416 panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1417 if (++pmap_pvo_enter_depth > 1)
1418 panic("pmap_pvo_enter: called recursively!");
1419 #endif
1420
1421 /*
1422 * Compute the PTE Group index.
1423 */
1424 va &= ~ADDR_POFF;
1425 ptegidx = va_to_pteg(pm, va);
1426
1427 msr = pmap_interrupts_off();
1428 /*
1429 * Remove any existing mapping for this page. Reuse the
1430 	 * pvo entry if there is a mapping.
1431 */
1432 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1433 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1434 #ifdef DEBUG
1435 if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1436 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1437 ~(PTE_REF|PTE_CHG)) == 0 &&
1438 va < VM_MIN_KERNEL_ADDRESS) {
1439 printf("pmap_pvo_enter: pvo %p: dup %#lx/%#lx\n",
1440 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1441 printf("pmap_pvo_enter: pte_hi=%#lx sr=%#lx\n",
1442 pvo->pvo_pte.pte_hi,
1443 pm->pm_sr[va >> ADDR_SR_SHFT]);
1444 pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1445 #ifdef DDBX
1446 Debugger();
1447 #endif
1448 }
1449 #endif
1450 PMAPCOUNT(mappings_replaced);
1451 pmap_pvo_remove(pvo, -1);
1452 break;
1453 }
1454 }
1455
1456 /*
1457 	 * If we aren't overwriting a mapping, try to allocate
1458 */
1459 pmap_interrupts_restore(msr);
1460 pvo = pool_get(pl, poolflags);
1461 msr = pmap_interrupts_off();
1462 if (pvo == NULL) {
1463 #if 0
1464 pvo = pmap_pvo_reclaim(pm);
1465 if (pvo == NULL) {
1466 #endif
1467 if ((flags & PMAP_CANFAIL) == 0)
1468 panic("pmap_pvo_enter: failed");
1469 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1470 pmap_pvo_enter_depth--;
1471 #endif
1472 pmap_interrupts_restore(msr);
1473 return ENOMEM;
1474 #if 0
1475 }
1476 #endif
1477 }
1478 pvo->pvo_vaddr = va;
1479 pvo->pvo_pmap = pm;
1480 pvo->pvo_vaddr &= ~ADDR_POFF;
1481 if (flags & VM_PROT_EXECUTE) {
1482 PMAPCOUNT(exec_mappings);
1483 pvo->pvo_vaddr |= PVO_EXECUTABLE;
1484 }
1485 if (flags & PMAP_WIRED)
1486 pvo->pvo_vaddr |= PVO_WIRED;
1487 if (pvo_head != &pmap_pvo_kunmanaged) {
1488 pvo->pvo_vaddr |= PVO_MANAGED;
1489 PMAPCOUNT(mappings);
1490 } else {
1491 PMAPCOUNT(kernel_mappings);
1492 }
1493 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1494
1495 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1496 	if (pvo->pvo_vaddr & PVO_WIRED)
1497 pvo->pvo_pmap->pm_stats.wired_count++;
1498 pvo->pvo_pmap->pm_stats.resident_count++;
1499 #if defined(DEBUG)
1500 if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS)
1501 DPRINTFN(PVOENTER,
1502 ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n",
1503 pvo, pm, va, pa));
1504 #endif
1505
1506 /*
1507 * We hope this succeeds but it isn't required.
1508 */
1509 pvoh = &pmap_pvo_table[ptegidx];
1510 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1511 if (i >= 0) {
1512 PVO_PTEGIDX_SET(pvo, i);
1513 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1514 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1515 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1516 } else {
1517
1518 /*
1519 * Since we didn't have room for this entry (which makes it
1520 		 * an evicted entry), place it at the head of the list.
1521 */
1522 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1523 PMAPCOUNT(ptes_evicted);
1524 pm->pm_evictions++;
1525 }
1526 PMAP_PVO_CHECK(pvo); /* sanity check */
1527 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1528 pmap_pvo_enter_depth--;
1529 #endif
1530 pmap_interrupts_restore(msr);
1531 return 0;
1532 }
1533
1534 void
1535 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1536 {
1537 volatile struct pte *pt;
1538 int ptegidx;
1539
1540 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1541 if (++pmap_pvo_remove_depth > 1)
1542 panic("pmap_pvo_remove: called recursively!");
1543 #endif
1544
1545 /*
1546 	 * If we haven't been supplied the pteidx, calculate it.
1547 */
1548 if (pteidx == -1) {
1549 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1550 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1551 } else {
1552 ptegidx = pteidx >> 3;
1553 if (pvo->pvo_pte.pte_hi & PTE_HID)
1554 ptegidx ^= pmap_pteg_mask;
1555 }
1556 PMAP_PVO_CHECK(pvo); /* sanity check */
1557
1558 /*
1559 * If there is an active pte entry, we need to deactivate it
1560 * (and save the ref & chg bits).
1561 */
1562 pt = pmap_pvo_to_pte(pvo, pteidx);
1563 if (pt != NULL) {
1564 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1565 PVO_PTEGIDX_CLR(pvo);
1566 PMAPCOUNT(ptes_removed);
1567 } else {
1568 KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1569 pvo->pvo_pmap->pm_evictions--;
1570 }
1571
1572 /*
1573 * Update our statistics
1574 */
1575 pvo->pvo_pmap->pm_stats.resident_count--;
1576 	if (pvo->pvo_vaddr & PVO_WIRED)
1577 pvo->pvo_pmap->pm_stats.wired_count--;
1578
1579 /*
1580 * Save the REF/CHG bits into their cache if the page is managed.
1581 */
1582 if (pvo->pvo_vaddr & PVO_MANAGED) {
1583 register_t ptelo = pvo->pvo_pte.pte_lo;
1584 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1585
1586 if (pg != NULL) {
1587 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1588 }
1589 PMAPCOUNT(unmappings);
1590 } else {
1591 PMAPCOUNT(kernel_unmappings);
1592 }
1593
1594 /*
1595 * Remove the PVO from its lists and return it to the pool.
1596 */
1597 LIST_REMOVE(pvo, pvo_vlink);
1598 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1599 pool_put(pvo->pvo_vaddr & PVO_MANAGED
1600 ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1601 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1602 pmap_pvo_remove_depth--;
1603 #endif
1604 }
1605
1606 /*
1607 * Insert physical page at pa into the given pmap at virtual address va.
1608 */
1609 int
1610 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1611 {
1612 struct mem_region *mp;
1613 struct pvo_head *pvo_head;
1614 struct vm_page *pg;
1615 struct pool *pl;
1616 register_t pte_lo;
1617 int s;
1618 int error;
1619 u_int pvo_flags;
1620 u_int was_exec = 0;
1621
1622 if (__predict_false(!pmap_initialized)) {
1623 pvo_head = &pmap_pvo_kunmanaged;
1624 pl = &pmap_upvo_pool;
1625 pvo_flags = 0;
1626 pg = NULL;
1627 was_exec = PTE_EXEC;
1628 } else {
1629 pvo_head = pa_to_pvoh(pa, &pg);
1630 pl = &pmap_mpvo_pool;
1631 pvo_flags = PVO_MANAGED;
1632 }
1633
1634 DPRINTFN(ENTER,
1635 ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):",
1636 pm, va, pa, prot, flags));
1637
1638 /*
1639 	 * If this is a managed page and it's the first reference to the
1640 	 * page, clear the execness of the page; otherwise fetch the execness.
1641 */
1642 if (pg != NULL)
1643 was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1644
1645 DPRINTFN(ENTER, (" was_exec=%d", was_exec));
1646
1647 /*
1648 * Assume the page is cache inhibited and access is guarded unless
1649 * it's in our available memory array. If it is in the memory array,
1650 	 * assume it's in memory-coherent memory.
1651 */
1652 pte_lo = PTE_IG;
1653 if ((flags & PMAP_NC) == 0) {
1654 for (mp = mem; mp->size; mp++) {
1655 if (pa >= mp->start && pa < mp->start + mp->size) {
1656 pte_lo = PTE_M;
1657 break;
1658 }
1659 }
1660 }
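	/*
	 * At this point pte_lo carries the WIMG attributes: PTE_IG
	 * (cache-inhibited, guarded) for anything outside the physical
	 * memory regions, PTE_M (memory coherent) for ordinary RAM.
	 */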
1661
1662 if (prot & VM_PROT_WRITE)
1663 pte_lo |= PTE_BW;
1664 else
1665 pte_lo |= PTE_BR;
1666
1667 /*
1668 * If this was in response to a fault, "pre-fault" the PTE's
1669 	 * changed/referenced bits appropriately.
1670 */
1671 if (flags & VM_PROT_WRITE)
1672 pte_lo |= PTE_CHG;
1673 if (flags & (VM_PROT_READ|VM_PROT_WRITE))
1674 pte_lo |= PTE_REF;
1675
1676 #if 0
1677 if (pm == pmap_kernel()) {
1678 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ)
1679 printf("pmap_pvo_enter: Kernel RO va %#lx pa %#lx\n",
1680 va, pa);
1681 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_NONE)
1682 printf("pmap_pvo_enter: Kernel N/A va %#lx pa %#lx\n",
1683 va, pa);
1684 }
1685 #endif
1686
1687 /*
1688 * We need to know if this page can be executable
1689 */
1690 flags |= (prot & VM_PROT_EXECUTE);
1691
1692 /*
1693 * Record mapping for later back-translation and pte spilling.
1694 * This will overwrite any existing mapping.
1695 */
1696 s = splvm();
1697 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1698 splx(s);
1699
1700 /*
1701 * Flush the real page from the instruction cache if this page is
1702 * mapped executable and cacheable and has not been flushed since
1703 * the last time it was modified.
1704 */
1705 if (error == 0 &&
1706 (flags & VM_PROT_EXECUTE) &&
1707 (pte_lo & PTE_I) == 0 &&
1708 was_exec == 0) {
1709 DPRINTFN(ENTER, (" syncicache"));
1710 PMAPCOUNT(exec_synced);
1711 pmap_syncicache(pa, PAGE_SIZE);
1712 if (pg != NULL) {
1713 pmap_attr_save(pg, PTE_EXEC);
1714 PMAPCOUNT(exec_cached);
1715 #if defined(DEBUG) || defined(PMAPDEBUG)
1716 if (pmapdebug & PMAPDEBUG_ENTER)
1717 printf(" marked-as-exec");
1718 else if (pmapdebug & PMAPDEBUG_EXEC)
1719 printf("[pmap_enter: %#lx: marked-as-exec]\n",
1720 pg->phys_addr);
1721
1722 #endif
1723 }
1724 }
1725
1726 DPRINTFN(ENTER, (": error=%d\n", error));
1727
1728 return error;
1729 }
1730
1731 void
1732 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1733 {
1734 struct mem_region *mp;
1735 register_t pte_lo;
1736 register_t msr;
1737 int error;
1738 int s;
1739
1740 if (va < VM_MIN_KERNEL_ADDRESS)
1741 panic("pmap_kenter_pa: attempt to enter "
1742 "non-kernel address %#lx!", va);
1743
1744 DPRINTFN(KENTER,
1745 ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
1746
1747 /*
1748 * Assume the page is cache inhibited and access is guarded unless
1749 * it's in our available memory array. If it is in the memory array,
1750 	 * assume it's in memory-coherent memory.
1751 */
1752 pte_lo = PTE_IG;
1753 if ((prot & PMAP_NC) == 0) {
1754 for (mp = mem; mp->size; mp++) {
1755 if (pa >= mp->start && pa < mp->start + mp->size) {
1756 pte_lo = PTE_M;
1757 break;
1758 }
1759 }
1760 }
1761
1762 if (prot & VM_PROT_WRITE)
1763 pte_lo |= PTE_BW;
1764 else
1765 pte_lo |= PTE_BR;
1766
1767 /*
1768 * We don't care about REF/CHG on PVOs on the unmanaged list.
1769 */
1770 s = splvm();
1771 msr = pmap_interrupts_off();
1772 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
1773 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
1774 pmap_interrupts_restore(msr);
1775 splx(s);
1776
1777 if (error != 0)
1778 panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
1779 va, pa, error);
1780 }
1781
1782 void
1783 pmap_kremove(vaddr_t va, vsize_t len)
1784 {
1785 if (va < VM_MIN_KERNEL_ADDRESS)
1786 panic("pmap_kremove: attempt to remove "
1787 "non-kernel address %#lx!", va);
1788
1789 DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len));
1790 pmap_remove(pmap_kernel(), va, va + len);
1791 }
1792
1793 /*
1794 * Remove the given range of mapping entries.
1795 */
1796 void
1797 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
1798 {
1799 struct pvo_entry *pvo;
1800 register_t msr;
1801 int pteidx;
1802 int s;
1803
1804 for (; va < endva; va += PAGE_SIZE) {
1805 s = splvm();
1806 msr = pmap_interrupts_off();
1807 pvo = pmap_pvo_find_va(pm, va, &pteidx);
1808 if (pvo != NULL) {
1809 pmap_pvo_remove(pvo, pteidx);
1810 }
1811 pmap_interrupts_restore(msr);
1812 splx(s);
1813 }
1814 }
1815
1816 /*
1817 * Get the physical page address for the given pmap/virtual address.
1818 */
1819 boolean_t
1820 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
1821 {
1822 struct pvo_entry *pvo;
1823 register_t msr;
1824 int s;
1825
1826 s = splvm();
1827 msr = pmap_interrupts_off();
1828 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1829 if (pvo != NULL) {
1830 PMAP_PVO_CHECK(pvo); /* sanity check */
1831 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1832 }
1833 pmap_interrupts_restore(msr);
1834 splx(s);
1835 return pvo != NULL;
1836 }
1837
1838 /*
1839 * Lower the protection on the specified range of this pmap.
1840 *
1841 * There are only two cases: either the protection is going to 0,
1842 * or it is going to read-only.
1843 */
1844 void
1845 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
1846 {
1847 struct pvo_entry *pvo;
1848 volatile struct pte *pt;
1849 register_t msr;
1850 int s;
1851 int pteidx;
1852
1853 /*
1854 * Since this routine only downgrades protection, we should
1855 	 * always be called without WRITE permission.
1856 */
1857 KASSERT((prot & VM_PROT_WRITE) == 0);
1858
1859 /*
1860 * If there is no protection, this is equivalent to
1861 	 * removing the range from the pmap.
1862 */
1863 if ((prot & VM_PROT_READ) == 0) {
1864 pmap_remove(pm, va, endva);
1865 return;
1866 }
1867
1868 s = splvm();
1869 msr = pmap_interrupts_off();
1870
1871 for (; va < endva; va += PAGE_SIZE) {
1872 pvo = pmap_pvo_find_va(pm, va, &pteidx);
1873 if (pvo == NULL)
1874 continue;
1875 PMAP_PVO_CHECK(pvo); /* sanity check */
1876
1877 /*
1878 * Revoke executable if asked to do so.
1879 */
1880 if ((prot & VM_PROT_EXECUTE) == 0)
1881 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1882
1883 #if 0
1884 /*
1885 * If the page is already read-only, no change
1886 * needs to be made.
1887 */
1888 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
1889 continue;
1890 #endif
1891 /*
1892 * Grab the PTE pointer before we diddle with
1893 * the cached PTE copy.
1894 */
1895 pt = pmap_pvo_to_pte(pvo, pteidx);
1896 /*
1897 * Change the protection of the page.
1898 */
1899 pvo->pvo_pte.pte_lo &= ~PTE_PP;
1900 pvo->pvo_pte.pte_lo |= PTE_BR;
1901
1902 /*
1903 * If the PVO is in the page table, update
1904 		 * that PTE as well.
1905 */
1906 if (pt != NULL) {
1907 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1908 PMAPCOUNT(ptes_changed);
1909 }
1910
1911 PMAP_PVO_CHECK(pvo); /* sanity check */
1912 }
1913
1914 pmap_interrupts_restore(msr);
1915 splx(s);
1916 }
1917
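/*
 * Clear the wired attribute on the mapping for the given virtual
 * address, if any, and update the pmap's wired-page count.
 */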
1918 void
1919 pmap_unwire(pmap_t pm, vaddr_t va)
1920 {
1921 struct pvo_entry *pvo;
1922 register_t msr;
1923 int s;
1924
1925 s = splvm();
1926 msr = pmap_interrupts_off();
1927
1928 pvo = pmap_pvo_find_va(pm, va, NULL);
1929 if (pvo != NULL) {
1930 if (pvo->pvo_vaddr & PVO_WIRED) {
1931 pvo->pvo_vaddr &= ~PVO_WIRED;
1932 pm->pm_stats.wired_count--;
1933 }
1934 PMAP_PVO_CHECK(pvo); /* sanity check */
1935 }
1936
1937 pmap_interrupts_restore(msr);
1938 splx(s);
1939 }
1940
1941 /*
1942 * Lower the protection on the specified physical page.
1943 *
1944 * There are only two cases: either the protection is going to 0,
1945 * or it is going to read-only.
1946 */
1947 void
1948 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1949 {
1950 struct pvo_head *pvo_head;
1951 struct pvo_entry *pvo, *next_pvo;
1952 volatile struct pte *pt;
1953 register_t msr;
1954 int s;
1955
1956 /*
1957 * Since this routine only downgrades protection, if the
1958 * maximal protection is desired, there isn't any change
1959 * to be made.
1960 */
1961 KASSERT((prot & VM_PROT_WRITE) == 0);
1962 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
1963 return;
1964
1965 s = splvm();
1966 msr = pmap_interrupts_off();
1967
1968 /*
1969 * When UVM reuses a page, it does a pmap_page_protect with
1970 * VM_PROT_NONE. At that point, we can clear the exec flag
1971 * since we know the page will have different contents.
1972 */
1973 if ((prot & VM_PROT_READ) == 0) {
1974 DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
1975 pg->phys_addr));
1976 if (pmap_attr_fetch(pg) & PTE_EXEC) {
1977 PMAPCOUNT(exec_uncached_page_protect);
1978 pmap_attr_clear(pg, PTE_EXEC);
1979 }
1980 }
1981
1982 pvo_head = vm_page_to_pvoh(pg);
1983 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1984 next_pvo = LIST_NEXT(pvo, pvo_vlink);
1985 PMAP_PVO_CHECK(pvo); /* sanity check */
1986
1987 /*
1988 * Downgrading to no mapping at all, we just remove the entry.
1989 */
1990 if ((prot & VM_PROT_READ) == 0) {
1991 pmap_pvo_remove(pvo, -1);
1992 continue;
1993 }
1994
1995 /*
1996 * If EXEC permission is being revoked, just clear the
1997 * flag in the PVO.
1998 */
1999 if ((prot & VM_PROT_EXECUTE) == 0)
2000 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
2001
2002 /*
2003 * If this entry is already RO, don't diddle with the
2004 * page table.
2005 */
2006 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2007 PMAP_PVO_CHECK(pvo);
2008 continue;
2009 }
2010
2011 /*
2012 		 * Grab the PTE before we diddle the bits so
2013 * pvo_to_pte can verify the pte contents are as
2014 * expected.
2015 */
2016 pt = pmap_pvo_to_pte(pvo, -1);
2017 pvo->pvo_pte.pte_lo &= ~PTE_PP;
2018 pvo->pvo_pte.pte_lo |= PTE_BR;
2019 if (pt != NULL) {
2020 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2021 PMAPCOUNT(ptes_changed);
2022 }
2023 PMAP_PVO_CHECK(pvo); /* sanity check */
2024 }
2025
2026 pmap_interrupts_restore(msr);
2027 splx(s);
2028 }
2029
2030 /*
2031 * Activate the address space for the specified process. If the process
2032 * is the current process, load the new MMU context.
2033 */
2034 void
2035 pmap_activate(struct lwp *l)
2036 {
2037 struct pcb *pcb = &l->l_addr->u_pcb;
2038 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2039
2040 DPRINTFN(ACTIVATE,
2041 ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
2042
2043 /*
2044 * XXX Normally performed in cpu_fork().
2045 */
2046 if (pcb->pcb_pm != pmap) {
2047 pcb->pcb_pm = pmap;
2048 pcb->pcb_pmreal = pmap;
2049 }
2050
2051 /*
2052 * In theory, the SR registers need only be valid on return
2053 	 * to user space, so we could wait and load them there.
2054 */
2055 if (l == curlwp) {
2056 /* Store pointer to new current pmap. */
2057 curpm = pmap;
2058 }
2059 }
2060
2061 /*
2062 * Deactivate the specified process's address space.
2063 */
2064 void
2065 pmap_deactivate(struct lwp *l)
2066 {
2067 }
2068
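/*
 * Test whether the given REF/CHG attribute bit (PTE_REF or PTE_CHG) is
 * set for a physical page.  The cached attributes are checked first;
 * failing that, the PTEs of all mappings of the page are examined.
 *
 * Illustrative use (sketch, not from this file): a "has this page been
 * modified?" test would be pmap_query_bit(pg, PTE_CHG).
 */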
2069 boolean_t
2070 pmap_query_bit(struct vm_page *pg, int ptebit)
2071 {
2072 struct pvo_entry *pvo;
2073 volatile struct pte *pt;
2074 register_t msr;
2075 int s;
2076
2077 if (pmap_attr_fetch(pg) & ptebit)
2078 return TRUE;
2079 s = splvm();
2080 msr = pmap_interrupts_off();
2081 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2082 PMAP_PVO_CHECK(pvo); /* sanity check */
2083 /*
2084 		 * See if we saved the bit off.  If so, cache it and return
2085 * success.
2086 */
2087 if (pvo->pvo_pte.pte_lo & ptebit) {
2088 pmap_attr_save(pg, ptebit);
2089 PMAP_PVO_CHECK(pvo); /* sanity check */
2090 pmap_interrupts_restore(msr);
2091 splx(s);
2092 return TRUE;
2093 }
2094 }
2095 /*
2096 * No luck, now go thru the hard part of looking at the ptes
2097 * themselves. Sync so any pending REF/CHG bits are flushed
2098 * to the PTEs.
2099 */
2100 SYNC();
2101 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2102 PMAP_PVO_CHECK(pvo); /* sanity check */
2103 /*
2104 		 * See if this pvo has a valid PTE.  If so, fetch the
2105 		 * REF/CHG bits from the valid PTE.  If the appropriate
2106 		 * ptebit is set, cache it and return success.
2107 */
2108 pt = pmap_pvo_to_pte(pvo, -1);
2109 if (pt != NULL) {
2110 pmap_pte_synch(pt, &pvo->pvo_pte);
2111 if (pvo->pvo_pte.pte_lo & ptebit) {
2112 pmap_attr_save(pg, ptebit);
2113 PMAP_PVO_CHECK(pvo); /* sanity check */
2114 pmap_interrupts_restore(msr);
2115 splx(s);
2116 return TRUE;
2117 }
2118 }
2119 }
2120 pmap_interrupts_restore(msr);
2121 splx(s);
2122 return FALSE;
2123 }
2124
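/*
 * Clear the given REF/CHG attribute bit for a physical page, both in
 * the cached attributes and in any valid PTEs mapping the page.
 * Returns TRUE if the bit was set.
 */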
2125 boolean_t
2126 pmap_clear_bit(struct vm_page *pg, int ptebit)
2127 {
2128 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2129 struct pvo_entry *pvo;
2130 volatile struct pte *pt;
2131 register_t msr;
2132 int rv = 0;
2133 int s;
2134
2135 s = splvm();
2136 msr = pmap_interrupts_off();
2137
2138 /*
2139 	 * Fetch the cached value.
2140 */
2141 rv |= pmap_attr_fetch(pg);
2142
2143 /*
2144 * Clear the cached value.
2145 */
2146 pmap_attr_clear(pg, ptebit);
2147
2148 /*
2149 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2150 * can reset the right ones). Note that since the pvo entries and
2151 * list heads are accessed via BAT0 and are never placed in the
2152 * page table, we don't have to worry about further accesses setting
2153 * the REF/CHG bits.
2154 */
2155 SYNC();
2156
2157 /*
2158 	 * For each pvo entry, clear the ptebit in its cached copy.  If the
2159 	 * pvo has a valid PTE, clear the ptebit from that PTE as well.
2160 */
2161 LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2162 PMAP_PVO_CHECK(pvo); /* sanity check */
2163 pt = pmap_pvo_to_pte(pvo, -1);
2164 if (pt != NULL) {
2165 /*
2166 * Only sync the PTE if the bit we are looking
2167 * for is not already set.
2168 */
2169 if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2170 pmap_pte_synch(pt, &pvo->pvo_pte);
2171 /*
2172 * If the bit we are looking for was already set,
2173 * clear that bit in the pte.
2174 */
2175 if (pvo->pvo_pte.pte_lo & ptebit)
2176 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2177 }
2178 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2179 pvo->pvo_pte.pte_lo &= ~ptebit;
2180 PMAP_PVO_CHECK(pvo); /* sanity check */
2181 }
2182 pmap_interrupts_restore(msr);
2183 splx(s);
2184 /*
2185 * If we are clearing the modify bit and this page was marked EXEC
2186 * and the user of the page thinks the page was modified, then we
2187 * need to clean it from the icache if it's mapped or clear the EXEC
2188 * bit if it's not mapped. The page itself might not have the CHG
2189 * bit set if the modification was done via DMA to the page.
2190 */
2191 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2192 if (LIST_EMPTY(pvoh)) {
2193 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
2194 pg->phys_addr));
2195 pmap_attr_clear(pg, PTE_EXEC);
2196 PMAPCOUNT(exec_uncached_clear_modify);
2197 } else {
2198 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
2199 pg->phys_addr));
2200 pmap_syncicache(pg->phys_addr, PAGE_SIZE);
2201 PMAPCOUNT(exec_synced_clear_modify);
2202 }
2203 }
2204 return (rv & ptebit) != 0;
2205 }
2206
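/*
 * Synchronize the instruction cache for a range of a process's address
 * space, typically after the kernel has written instructions into it
 * (e.g. via ptrace(2) or procfs).  Only executable mappings are
 * flushed.
 */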
2207 void
2208 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2209 {
2210 struct pvo_entry *pvo;
2211 size_t offset = va & ADDR_POFF;
2212 int s;
2213
2214 s = splvm();
2215 while (len > 0) {
2216 size_t seglen = PAGE_SIZE - offset;
2217 if (seglen > len)
2218 seglen = len;
2219 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2220 if (pvo != NULL && PVO_ISEXECUTABLE(pvo)) {
2221 pmap_syncicache(
2222 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2223 PMAP_PVO_CHECK(pvo);
2224 }
2225 va += seglen;
2226 len -= seglen;
2227 offset = 0;
2228 }
2229 splx(s);
2230 }
2231
2232 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2233 void
2234 pmap_pte_print(volatile struct pte *pt)
2235 {
2236 printf("PTE %p: ", pt);
2237 /* High word: */
2238 printf("0x%08lx: [", pt->pte_hi);
2239 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2240 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2241 printf("0x%06lx 0x%02lx",
2242 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2243 pt->pte_hi & PTE_API);
2244 printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
2245 /* Low word: */
2246 printf(" 0x%08lx: [", pt->pte_lo);
2247 printf("0x%05lx... ", pt->pte_lo >> 12);
2248 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2249 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2250 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2251 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2252 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2253 printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2254 switch (pt->pte_lo & PTE_PP) {
2255 case PTE_BR: printf("br]\n"); break;
2256 case PTE_BW: printf("bw]\n"); break;
2257 case PTE_SO: printf("so]\n"); break;
2258 case PTE_SW: printf("sw]\n"); break;
2259 }
2260 }
2261 #endif
2262
2263 #if defined(DDB)
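/*
 * DDB helper: walk the PTEG table and report how many PTEs are valid
 * in their primary hash location, valid in their secondary (HID)
 * location, and invalid.
 */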
2264 void
2265 pmap_pteg_check(void)
2266 {
2267 volatile struct pte *pt;
2268 int i;
2269 int ptegidx;
2270 u_int p_valid = 0;
2271 u_int s_valid = 0;
2272 u_int invalid = 0;
2273
2274 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2275 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2276 if (pt->pte_hi & PTE_VALID) {
2277 if (pt->pte_hi & PTE_HID)
2278 s_valid++;
2279 else
2280 p_valid++;
2281 } else
2282 invalid++;
2283 }
2284 }
2285 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2286 p_valid, p_valid, s_valid, s_valid,
2287 invalid, invalid);
2288 }
2289
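/*
 * DDB helper: dump the MMU state -- SDR1, the sixteen segment
 * registers, and the instruction/data BAT registers (unified BATs
 * on the 601).
 */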
2290 void
2291 pmap_print_mmuregs(void)
2292 {
2293 int i;
2294 u_int cpuvers;
2295 vaddr_t addr;
2296 register_t soft_sr[16];
2297 struct bat soft_ibat[4];
2298 struct bat soft_dbat[4];
2299 register_t sdr1;
2300
2301 cpuvers = MFPVR() >> 16;
2302
2303 __asm __volatile ("mfsdr1 %0" : "=r"(sdr1));
2304 	for (addr = 0, i=0; i<16; i++) {
2305 soft_sr[i] = MFSRIN(addr);
2306 addr += (1 << ADDR_SR_SHFT);
2307 }
2308
2309 /* read iBAT (601: uBAT) registers */
2310 __asm __volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2311 __asm __volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2312 __asm __volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2313 __asm __volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2314 __asm __volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2315 __asm __volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2316 __asm __volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2317 __asm __volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2318
2319
2320 if (cpuvers != MPC601) {
2321 /* read dBAT registers */
2322 __asm __volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2323 __asm __volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2324 __asm __volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2325 __asm __volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2326 __asm __volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2327 __asm __volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2328 __asm __volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2329 __asm __volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2330 }
2331
2332 printf("SDR1:\t%#lx\n", sdr1);
2333 printf("SR[]:\t");
2334 addr = 0;
2335 for (i=0; i<4; i++)
2336 printf("0x%08lx, ", soft_sr[i]);
2337 printf("\n\t");
2338 for ( ; i<8; i++)
2339 printf("0x%08lx, ", soft_sr[i]);
2340 printf("\n\t");
2341 for ( ; i<12; i++)
2342 printf("0x%08lx, ", soft_sr[i]);
2343 printf("\n\t");
2344 for ( ; i<16; i++)
2345 printf("0x%08lx, ", soft_sr[i]);
2346 printf("\n");
2347
2348 printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2349 for (i=0; i<4; i++) {
2350 printf("0x%08lx 0x%08lx, ",
2351 soft_ibat[i].batu, soft_ibat[i].batl);
2352 if (i == 1)
2353 printf("\n\t");
2354 }
2355 if (cpuvers != MPC601) {
2356 printf("\ndBAT[]:\t");
2357 for (i=0; i<4; i++) {
2358 printf("0x%08lx 0x%08lx, ",
2359 soft_dbat[i].batu, soft_dbat[i].batl);
2360 if (i == 1)
2361 printf("\n\t");
2362 }
2363 }
2364 printf("\n");
2365 }
2366
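/*
 * DDB helper: look up the PVO for the given pmap/virtual address and,
 * if it has a PTE in the page table, print that PTE.
 */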
2367 void
2368 pmap_print_pte(pmap_t pm, vaddr_t va)
2369 {
2370 struct pvo_entry *pvo;
2371 volatile struct pte *pt;
2372 int pteidx;
2373
2374 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2375 if (pvo != NULL) {
2376 pt = pmap_pvo_to_pte(pvo, pteidx);
2377 if (pt != NULL) {
2378 printf("VA %#lx -> %p -> %s %#lx, %#lx\n",
2379 va, pt,
2380 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2381 pt->pte_hi, pt->pte_lo);
2382 } else {
2383 printf("No valid PTE found\n");
2384 }
2385 } else {
2386 printf("Address not in pmap\n");
2387 }
2388 }
2389
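/*
 * DDB helper: print a histogram of PVO overflow-list depths per PTEG,
 * which shows how evenly the hash table is being used.
 */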
2390 void
2391 pmap_pteg_dist(void)
2392 {
2393 struct pvo_entry *pvo;
2394 int ptegidx;
2395 int depth;
2396 int max_depth = 0;
2397 unsigned int depths[64];
2398
2399 memset(depths, 0, sizeof(depths));
2400 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2401 depth = 0;
2402 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2403 depth++;
2404 }
2405 if (depth > max_depth)
2406 max_depth = depth;
2407 if (depth > 63)
2408 depth = 63;
2409 depths[depth]++;
2410 }
2411
2412 for (depth = 0; depth < 64; depth++) {
2413 printf(" [%2d]: %8u", depth, depths[depth]);
2414 if ((depth & 3) == 3)
2415 printf("\n");
2416 if (depth == max_depth)
2417 break;
2418 }
2419 if ((depth & 3) != 3)
2420 printf("\n");
2421 printf("Max depth found was %d\n", max_depth);
2422 }
2423 #endif /* DEBUG */
2424
2425 #if defined(PMAPCHECK) || defined(DEBUG)
2426 void
2427 pmap_pvo_verify(void)
2428 {
2429 int ptegidx;
2430 int s;
2431
2432 s = splvm();
2433 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2434 struct pvo_entry *pvo;
2435 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2436 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2437 panic("pmap_pvo_verify: invalid pvo %p "
2438 "on list %#x", pvo, ptegidx);
2439 pmap_pvo_check(pvo);
2440 }
2441 }
2442 splx(s);
2443 }
2444 #endif /* PMAPCHECK */
2445
2446
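/*
 * Backend allocators for the PVO entry pools.  Pages are taken from
 * the first 256MB (VM_FREELIST_FIRST256) and used via their physical
 * address, since PVOs must stay accessible through BAT0 and are never
 * entered into the page table.  The "u" (unmanaged) variants may also
 * draw from uvm_pageboot_alloc() before UVM page initialization.
 */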
2447 void *
2448 pmap_pool_ualloc(struct pool *pp, int flags)
2449 {
2450 struct pvo_page *pvop;
2451
2452 pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2453 if (pvop != NULL) {
2454 pmap_upvop_free--;
2455 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
2456 return pvop;
2457 }
2458 if (uvm.page_init_done != TRUE) {
2459 return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2460 }
2461 return pmap_pool_malloc(pp, flags);
2462 }
2463
2464 void *
2465 pmap_pool_malloc(struct pool *pp, int flags)
2466 {
2467 struct pvo_page *pvop;
2468 struct vm_page *pg;
2469
2470 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2471 if (pvop != NULL) {
2472 pmap_mpvop_free--;
2473 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
2474 return pvop;
2475 }
2476 again:
2477 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2478 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2479 if (__predict_false(pg == NULL)) {
2480 if (flags & PR_WAITOK) {
2481 uvm_wait("plpg");
2482 goto again;
2483 } else {
2484 return (0);
2485 }
2486 }
2487 return (void *) VM_PAGE_TO_PHYS(pg);
2488 }
2489
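/*
 * The free routines never return pages to UVM (note the #if 0 code);
 * freed PVO pages are cached on per-pool free lists for reuse, with
 * the *_maxfree counters tracking the high-water marks.
 */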
2490 void
2491 pmap_pool_ufree(struct pool *pp, void *va)
2492 {
2493 struct pvo_page *pvop;
2494 #if 0
2495 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2496 pmap_pool_mfree(va, size, tag);
2497 return;
2498 }
2499 #endif
2500 pvop = va;
2501 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2502 pmap_upvop_free++;
2503 if (pmap_upvop_free > pmap_upvop_maxfree)
2504 pmap_upvop_maxfree = pmap_upvop_free;
2505 }
2506
2507 void
2508 pmap_pool_mfree(struct pool *pp, void *va)
2509 {
2510 struct pvo_page *pvop;
2511
2512 pvop = va;
2513 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2514 pmap_mpvop_free++;
2515 if (pmap_mpvop_free > pmap_mpvop_maxfree)
2516 pmap_mpvop_maxfree = pmap_mpvop_free;
2517 #if 0
2518 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2519 #endif
2520 }
2521
2522 /*
2523  * This routine is used during bootstrapping to steal to-be-managed memory
2524  * (which will then be unmanaged).  We use it to grab memory from the first
2525  * 256MB for our pmap needs, leaving memory above 256MB for other stuff.
2526 */
2527 vaddr_t
2528 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2529 {
2530 vsize_t size;
2531 vaddr_t va;
2532 paddr_t pa = 0;
2533 int npgs, bank;
2534 struct vm_physseg *ps;
2535
2536 if (uvm.page_init_done == TRUE)
2537 panic("pmap_steal_memory: called _after_ bootstrap");
2538
2539 *vstartp = VM_MIN_KERNEL_ADDRESS;
2540 *vendp = VM_MAX_KERNEL_ADDRESS;
2541
2542 size = round_page(vsize);
2543 npgs = atop(size);
2544
2545 /*
2546 * PA 0 will never be among those given to UVM so we can use it
2547 * to indicate we couldn't steal any memory.
2548 */
2549 for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
2550 if (ps->free_list == VM_FREELIST_FIRST256 &&
2551 ps->avail_end - ps->avail_start >= npgs) {
2552 pa = ptoa(ps->avail_start);
2553 break;
2554 }
2555 }
2556
2557 if (pa == 0)
2558 		panic("pmap_steal_memory: no appropriate memory to steal!");
2559
2560 ps->avail_start += npgs;
2561 ps->start += npgs;
2562
2563 /*
2564 * If we've used up all the pages in the segment, remove it and
2565 * compact the list.
2566 */
2567 if (ps->avail_start == ps->end) {
2568 /*
2569 		 * If this was the last one, then a very bad thing has occurred.
2570 */
2571 if (--vm_nphysseg == 0)
2572 panic("pmap_steal_memory: out of memory!");
2573
2574 printf("pmap_steal_memory: consumed bank %d\n", bank);
2575 for (; bank < vm_nphysseg; bank++, ps++) {
2576 ps[0] = ps[1];
2577 }
2578 }
2579
2580 va = (vaddr_t) pa;
2581 memset((caddr_t) va, 0, size);
2582 pmap_pages_stolen += npgs;
2583 #ifdef DEBUG
2584 if (pmapdebug && npgs > 1) {
2585 u_int cnt = 0;
2586 for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
2587 cnt += ps->avail_end - ps->avail_start;
2588 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2589 npgs, pmap_pages_stolen, cnt);
2590 }
2591 #endif
2592
2593 return va;
2594 }
2595
2596 /*
2597  * Find a chunk of memory with the right size and alignment.
2598 */
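/*
 * The chunk is carved out of the boot-time avail[] region list: if it
 * comes from the start or end of a region that region is simply
 * trimmed, while an allocation from the middle splits the region in
 * two (growing avail[]).  With at_end set, the chunk is taken from the
 * end of the highest suitable region and the alignment must be
 * PAGE_SIZE.
 */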
2599 void *
2600 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2601 {
2602 struct mem_region *mp;
2603 paddr_t s, e;
2604 int i, j;
2605
2606 size = round_page(size);
2607
2608 DPRINTFN(BOOT,
2609 ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
2610 size, alignment, at_end));
2611
2612 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2613 panic("pmap_boot_find_memory: invalid alignment %lx",
2614 alignment);
2615
2616 if (at_end) {
2617 if (alignment != PAGE_SIZE)
2618 panic("pmap_boot_find_memory: invalid ending "
2619 "alignment %lx", alignment);
2620
2621 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
2622 s = mp->start + mp->size - size;
2623 if (s >= mp->start && mp->size >= size) {
2624 DPRINTFN(BOOT,(": %lx\n", s));
2625 DPRINTFN(BOOT,
2626 ("pmap_boot_find_memory: b-avail[%d] start "
2627 "0x%lx size 0x%lx\n", mp - avail,
2628 mp->start, mp->size));
2629 mp->size -= size;
2630 DPRINTFN(BOOT,
2631 ("pmap_boot_find_memory: a-avail[%d] start "
2632 "0x%lx size 0x%lx\n", mp - avail,
2633 mp->start, mp->size));
2634 return (void *) s;
2635 }
2636 }
2637 panic("pmap_boot_find_memory: no available memory");
2638 }
2639
2640 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2641 s = (mp->start + alignment - 1) & ~(alignment-1);
2642 e = s + size;
2643
2644 /*
2645 		 * Does the aligned block lie entirely within this region?
2646 */
2647 if (s < mp->start || e > mp->start + mp->size)
2648 continue;
2649
2650 DPRINTFN(BOOT,(": %lx\n", s));
2651 if (s == mp->start) {
2652 /*
2653 			 * If the block starts at the beginning of the region,
2654 * adjust the size & start. (the region may now be
2655 * zero in length)
2656 */
2657 DPRINTFN(BOOT,
2658 ("pmap_boot_find_memory: b-avail[%d] start "
2659 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2660 mp->start += size;
2661 mp->size -= size;
2662 DPRINTFN(BOOT,
2663 ("pmap_boot_find_memory: a-avail[%d] start "
2664 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2665 } else if (e == mp->start + mp->size) {
2666 /*
2667 			 * If the block ends at the end of the region,
2668 * adjust only the size.
2669 */
2670 DPRINTFN(BOOT,
2671 ("pmap_boot_find_memory: b-avail[%d] start "
2672 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2673 mp->size -= size;
2674 DPRINTFN(BOOT,
2675 ("pmap_boot_find_memory: a-avail[%d] start "
2676 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2677 } else {
2678 /*
2679 * Block is in the middle of the region, so we
2680 * have to split it in two.
2681 */
2682 for (j = avail_cnt; j > i + 1; j--) {
2683 avail[j] = avail[j-1];
2684 }
2685 DPRINTFN(BOOT,
2686 ("pmap_boot_find_memory: b-avail[%d] start "
2687 "0x%lx size 0x%lx\n", i, mp->start, mp->size));
2688 mp[1].start = e;
2689 mp[1].size = mp[0].start + mp[0].size - e;
2690 mp[0].size = s - mp[0].start;
2691 avail_cnt++;
2692 for (; i < avail_cnt; i++) {
2693 DPRINTFN(BOOT,
2694 ("pmap_boot_find_memory: a-avail[%d] "
2695 "start 0x%lx size 0x%lx\n", i,
2696 avail[i].start, avail[i].size));
2697 }
2698 }
2699 return (void *) s;
2700 }
2701 panic("pmap_boot_find_memory: not enough memory for "
2702 "%lx/%lx allocation?", size, alignment);
2703 }
2704
2705 /*
2706 * This is not part of the defined PMAP interface and is specific to the
2707 * PowerPC architecture. This is called during initppc, before the system
2708 * is really initialized.
2709 */
2710 void
2711 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
2712 {
2713 struct mem_region *mp, tmp;
2714 paddr_t s, e;
2715 psize_t size;
2716 int i, j;
2717
2718 /*
2719 * Get memory.
2720 */
2721 mem_regions(&mem, &avail);
2722 #if defined(DEBUG)
2723 if (pmapdebug & PMAPDEBUG_BOOT) {
2724 printf("pmap_bootstrap: memory configuration:\n");
2725 for (mp = mem; mp->size; mp++) {
2726 printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
2727 mp->start, mp->size);
2728 }
2729 for (mp = avail; mp->size; mp++) {
2730 printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
2731 mp->start, mp->size);
2732 }
2733 }
2734 #endif
2735
2736 /*
2737 * Find out how much physical memory we have and in how many chunks.
2738 */
2739 for (mem_cnt = 0, mp = mem; mp->size; mp++) {
2740 if (mp->start >= pmap_memlimit)
2741 continue;
2742 if (mp->start + mp->size > pmap_memlimit) {
2743 size = pmap_memlimit - mp->start;
2744 physmem += btoc(size);
2745 } else {
2746 physmem += btoc(mp->size);
2747 }
2748 mem_cnt++;
2749 }
2750
2751 /*
2752 * Count the number of available entries.
2753 */
2754 for (avail_cnt = 0, mp = avail; mp->size; mp++)
2755 avail_cnt++;
2756
2757 /*
2758 * Page align all regions.
2759 */
2760 kernelstart = trunc_page(kernelstart);
2761 kernelend = round_page(kernelend);
2762 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2763 s = round_page(mp->start);
2764 mp->size -= (s - mp->start);
2765 mp->size = trunc_page(mp->size);
2766 mp->start = s;
2767 e = mp->start + mp->size;
2768
2769 DPRINTFN(BOOT,
2770 ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
2771 i, mp->start, mp->size));
2772
2773 /*
2774 * Don't allow the end to run beyond our artificial limit
2775 */
2776 if (e > pmap_memlimit)
2777 e = pmap_memlimit;
2778
2779 /*
2780 		 * Is this region empty or strange?  If so, skip it.
2781 */
2782 if (e <= s) {
2783 mp->start = 0;
2784 mp->size = 0;
2785 continue;
2786 }
2787
2788 /*
2789 		 * Does this region overlap the start of the kernel?
2790 		 * Does it also extend past the end of the kernel?
2791 */
2792 else if (s < kernelstart && e > kernelstart) {
2793 if (e > kernelend) {
2794 avail[avail_cnt].start = kernelend;
2795 avail[avail_cnt].size = e - kernelend;
2796 avail_cnt++;
2797 }
2798 mp->size = kernelstart - s;
2799 }
2800 /*
2801 * Check whether this region overlaps the end of the kernel.
2802 */
2803 else if (s < kernelend && e > kernelend) {
2804 mp->start = kernelend;
2805 mp->size = e - kernelend;
2806 }
2807 /*
2808 		 * Check whether this region is completely inside the kernel.
2809 		 * Nuke it if it is.
2810 */
2811 else if (s >= kernelstart && e <= kernelend) {
2812 mp->start = 0;
2813 mp->size = 0;
2814 }
2815 /*
2816 * If the user imposed a memory limit, enforce it.
2817 */
2818 else if (s >= pmap_memlimit) {
2819 			mp->start = -PAGE_SIZE;	/* lets us know why */
2820 mp->size = 0;
2821 }
2822 else {
2823 mp->start = s;
2824 mp->size = e - s;
2825 }
2826 DPRINTFN(BOOT,
2827 ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
2828 i, mp->start, mp->size));
2829 }
2830
2831 /*
2832 	 * Move (and uncount) all the null regions to the end.
2833 */
2834 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
2835 if (mp->size == 0) {
2836 tmp = avail[i];
2837 avail[i] = avail[--avail_cnt];
2838 			avail[avail_cnt] = tmp;
2839 }
2840 }
2841
2842 /*
2843 	 * (Bubble)sort them into ascending order.
2844 */
2845 for (i = 0; i < avail_cnt; i++) {
2846 for (j = i + 1; j < avail_cnt; j++) {
2847 if (avail[i].start > avail[j].start) {
2848 tmp = avail[i];
2849 avail[i] = avail[j];
2850 avail[j] = tmp;
2851 }
2852 }
2853 }
2854
2855 /*
2856 * Make sure they don't overlap.
2857 */
2858 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
2859 if (mp[0].start + mp[0].size > mp[1].start) {
2860 mp[0].size = mp[1].start - mp[0].start;
2861 }
2862 DPRINTFN(BOOT,
2863 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
2864 i, mp->start, mp->size));
2865 }
2866 DPRINTFN(BOOT,
2867 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
2868 i, mp->start, mp->size));
2869
2870 #ifdef PTEGCOUNT
2871 pmap_pteg_cnt = PTEGCOUNT;
2872 #else /* PTEGCOUNT */
2873 pmap_pteg_cnt = 0x1000;
2874
2875 while (pmap_pteg_cnt < physmem)
2876 pmap_pteg_cnt <<= 1;
2877
2878 pmap_pteg_cnt >>= 1;
2879 #endif /* PTEGCOUNT */
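	/*
	 * When PTEGCOUNT is not overridden, the loop above picks a
	 * power-of-two PTEG count of roughly half the number of physical
	 * pages (and never fewer than 0x800 PTEGs), which matches the
	 * architecture's recommended minimum of one PTEG per two physical
	 * page frames.
	 */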
2880
2881 /*
2882 * Find suitably aligned memory for PTEG hash table.
2883 */
2884 size = pmap_pteg_cnt * sizeof(struct pteg);
2885 pmap_pteg_table = pmap_boot_find_memory(size, size, 0);
2886 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2887 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
2888 panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB",
2889 pmap_pteg_table, size);
2890 #endif
2891
2892 memset((void *)pmap_pteg_table, 0, pmap_pteg_cnt * sizeof(struct pteg));
2893 pmap_pteg_mask = pmap_pteg_cnt - 1;
2894
2895 /*
2896 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
2897 * with pages. So we just steal them before giving them to UVM.
2898 */
2899 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
2900 pmap_pvo_table = pmap_boot_find_memory(size, PAGE_SIZE, 0);
2901 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2902 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
2903 panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB",
2904 pmap_pvo_table, size);
2905 #endif
2906
2907 for (i = 0; i < pmap_pteg_cnt; i++)
2908 TAILQ_INIT(&pmap_pvo_table[i]);
2909
2910 #ifndef MSGBUFADDR
2911 /*
2912 * Allocate msgbuf in high memory.
2913 */
2914 msgbuf_paddr =
2915 (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
2916 #endif
2917
2918 #ifdef __HAVE_PMAP_PHYSSEG
2919 {
2920 u_int npgs = 0;
2921 for (i = 0, mp = avail; i < avail_cnt; i++, mp++)
2922 npgs += btoc(mp->size);
2923 size = (sizeof(struct pvo_head) + 1) * npgs;
2924 pmap_physseg.pvoh = pmap_boot_find_memory(size, PAGE_SIZE, 0);
2925 pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs];
2926 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
2927 if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH)
2928 panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB",
2929 pmap_physseg.pvoh, size);
2930 #endif
2931 }
2932 #endif
2933
2934 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
2935 paddr_t pfstart = atop(mp->start);
2936 paddr_t pfend = atop(mp->start + mp->size);
2937 if (mp->size == 0)
2938 continue;
2939 if (mp->start + mp->size <= SEGMENT_LENGTH) {
2940 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2941 VM_FREELIST_FIRST256);
2942 } else if (mp->start >= SEGMENT_LENGTH) {
2943 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2944 VM_FREELIST_DEFAULT);
2945 } else {
2946 pfend = atop(SEGMENT_LENGTH);
2947 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2948 VM_FREELIST_FIRST256);
2949 pfstart = atop(SEGMENT_LENGTH);
2950 pfend = atop(mp->start + mp->size);
2951 uvm_page_physload(pfstart, pfend, pfstart, pfend,
2952 VM_FREELIST_DEFAULT);
2953 }
2954 }
2955
2956 /*
2957 * Make sure kernel vsid is allocated as well as VSID 0.
2958 */
2959 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
2960 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
2961 pmap_vsid_bitmap[0] |= 1;
2962
2963 /*
2964 * Initialize kernel pmap and hardware.
2965 */
2966 for (i = 0; i < 16; i++) {
2967 pmap_kernel()->pm_sr[i] = EMPTY_SEGMENT;
2968 __asm __volatile ("mtsrin %0,%1"
2969 :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
2970 }
2971
2972 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
2973 __asm __volatile ("mtsr %0,%1"
2974 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
2975 #ifdef KERNEL2_SR
2976 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
2977 __asm __volatile ("mtsr %0,%1"
2978 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
2979 #endif
2980 for (i = 0; i < 16; i++) {
2981 if (iosrtable[i] & SR601_T) {
2982 pmap_kernel()->pm_sr[i] = iosrtable[i];
2983 __asm __volatile ("mtsrin %0,%1"
2984 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
2985 }
2986 }
2987
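	/*
	 * Point the MMU at the hash table: SDR1 holds the physical base
	 * address of the PTEG table in its upper bits and HTABMASK in its
	 * low bits.  Since the architectural minimum table is 1024 PTEGs,
	 * HTABMASK only covers the hash bits above the low 10, hence
	 * pmap_pteg_mask >> 10.  Then flush the TLB.
	 */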
2988 __asm __volatile ("sync; mtsdr1 %0; isync"
2989 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
2990 tlbia();
2991
2992 #ifdef ALTIVEC
2993 pmap_use_altivec = cpu_altivec;
2994 #endif
2995
2996 #ifdef DEBUG
2997 if (pmapdebug & PMAPDEBUG_BOOT) {
2998 u_int cnt;
2999 int bank;
3000 char pbuf[9];
3001 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
3002 cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
3003 printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n",
3004 bank,
3005 ptoa(vm_physmem[bank].avail_start),
3006 ptoa(vm_physmem[bank].avail_end),
3007 ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
3008 }
3009 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3010 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3011 pbuf, cnt);
3012 }
3013 #endif
3014
3015 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3016 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
3017 &pmap_pool_uallocator);
3018
3019 pool_setlowat(&pmap_upvo_pool, 252);
3020
3021 pool_init(&pmap_pool, sizeof(struct pmap),
3022 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator);
3023 }
3024