/*	$NetBSD: pmap.c,v 1.87 2012/10/20 14:42:15 kiyohara Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl (at) kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.87 2012/10/20 14:42:15 kiyohara Exp $");

#define	PMAP_NOOPNAMES

#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

#include <machine/powerpc.h>
#include <powerpc/bat.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

#ifdef ALTIVEC
extern int pmap_use_altivec;
#endif

#ifdef PMAP_MEMLIMIT
static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
static paddr_t pmap_memlimit = -PAGE_SIZE;	/* there is no limit */
#endif

extern struct pmap kernel_pmap_;
static unsigned int pmap_pages_stolen;
static u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
static u_long pmap_pvo_enter_depth;
static u_long pmap_pvo_remove_depth;
#endif

#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
# define	PMAP_OEA 1
#endif

#if defined(PMAP_OEA)
#define	_PRIxpte	"lx"
#else
#define	_PRIxpte	PRIx64
#endif
#define	_PRIxpa		"lx"
#define	_PRIxva		"lx"
#define	_PRIsr		"lx"

#ifdef PMAP_NEEDS_FIXUP
#if defined(PMAP_OEA)
#define	PMAPNAME(name)	pmap32_##name
#elif defined(PMAP_OEA64)
#define	PMAPNAME(name)	pmap64_##name
#elif defined(PMAP_OEA64_BRIDGE)
#define	PMAPNAME(name)	pmap64bridge_##name
#else
#error unknown variant for pmap
#endif
#endif /* PMAP_NEEDS_FIXUP */

#ifdef PMAPNAME
#define	STATIC			static
#define pmap_pte_spill		PMAPNAME(pte_spill)
#define pmap_real_memory	PMAPNAME(real_memory)
#define pmap_init		PMAPNAME(init)
#define pmap_virtual_space	PMAPNAME(virtual_space)
#define pmap_create		PMAPNAME(create)
#define pmap_reference		PMAPNAME(reference)
#define pmap_destroy		PMAPNAME(destroy)
#define pmap_copy		PMAPNAME(copy)
#define pmap_update		PMAPNAME(update)
#define pmap_enter		PMAPNAME(enter)
#define pmap_remove		PMAPNAME(remove)
#define pmap_kenter_pa		PMAPNAME(kenter_pa)
#define pmap_kremove		PMAPNAME(kremove)
#define pmap_extract		PMAPNAME(extract)
#define pmap_protect		PMAPNAME(protect)
#define pmap_unwire		PMAPNAME(unwire)
#define pmap_page_protect	PMAPNAME(page_protect)
#define pmap_query_bit		PMAPNAME(query_bit)
#define pmap_clear_bit		PMAPNAME(clear_bit)

#define pmap_activate		PMAPNAME(activate)
#define pmap_deactivate		PMAPNAME(deactivate)

#define pmap_pinit		PMAPNAME(pinit)
#define pmap_procwr		PMAPNAME(procwr)

#define pmap_pool		PMAPNAME(pool)
#define pmap_upvo_pool		PMAPNAME(upvo_pool)
#define pmap_mpvo_pool		PMAPNAME(mpvo_pool)
#define pmap_pvo_table		PMAPNAME(pvo_table)
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
#define pmap_pte_print		PMAPNAME(pte_print)
#define pmap_pteg_check		PMAPNAME(pteg_check)
#define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
#define pmap_print_pte		PMAPNAME(print_pte)
#define pmap_pteg_dist		PMAPNAME(pteg_dist)
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define pmap_pvo_verify		PMAPNAME(pvo_verify)
#define pmapcheck		PMAPNAME(check)
#endif
#if defined(DEBUG) || defined(PMAPDEBUG)
#define pmapdebug		PMAPNAME(debug)
#endif
#define pmap_steal_memory	PMAPNAME(steal_memory)
#define pmap_bootstrap		PMAPNAME(bootstrap)
#else
#define	STATIC			/* nothing */
#endif /* PMAPNAME */

STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
STATIC void pmap_real_memory(paddr_t *, psize_t *);
STATIC void pmap_init(void);
STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
STATIC pmap_t pmap_create(void);
STATIC void pmap_reference(pmap_t);
STATIC void pmap_destroy(pmap_t);
STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
STATIC void pmap_update(pmap_t);
STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_kremove(vaddr_t, vsize_t);
STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);

STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
STATIC void pmap_unwire(pmap_t, vaddr_t);
STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
STATIC bool pmap_query_bit(struct vm_page *, int);
STATIC bool pmap_clear_bit(struct vm_page *, int);

STATIC void pmap_activate(struct lwp *);
STATIC void pmap_deactivate(struct lwp *);

STATIC void pmap_pinit(pmap_t pm);
STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
STATIC void pmap_pte_print(volatile struct pte *);
STATIC void pmap_pteg_check(void);
STATIC void pmap_print_mmuregs(void);
STATIC void pmap_print_pte(pmap_t, vaddr_t);
STATIC void pmap_pteg_dist(void);
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
STATIC void pmap_pvo_verify(void);
#endif
STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
STATIC void pmap_bootstrap(paddr_t, paddr_t);

#ifdef PMAPNAME
const struct pmap_ops PMAPNAME(ops) = {
	.pmapop_pte_spill = pmap_pte_spill,
	.pmapop_real_memory = pmap_real_memory,
	.pmapop_init = pmap_init,
	.pmapop_virtual_space = pmap_virtual_space,
	.pmapop_create = pmap_create,
	.pmapop_reference = pmap_reference,
	.pmapop_destroy = pmap_destroy,
	.pmapop_copy = pmap_copy,
	.pmapop_update = pmap_update,
	.pmapop_enter = pmap_enter,
	.pmapop_remove = pmap_remove,
	.pmapop_kenter_pa = pmap_kenter_pa,
	.pmapop_kremove = pmap_kremove,
	.pmapop_extract = pmap_extract,
	.pmapop_protect = pmap_protect,
	.pmapop_unwire = pmap_unwire,
	.pmapop_page_protect = pmap_page_protect,
	.pmapop_query_bit = pmap_query_bit,
	.pmapop_clear_bit = pmap_clear_bit,
	.pmapop_activate = pmap_activate,
	.pmapop_deactivate = pmap_deactivate,
	.pmapop_pinit = pmap_pinit,
	.pmapop_procwr = pmap_procwr,
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
	.pmapop_pte_print = pmap_pte_print,
	.pmapop_pteg_check = pmap_pteg_check,
	.pmapop_print_mmuregs = pmap_print_mmuregs,
	.pmapop_print_pte = pmap_print_pte,
	.pmapop_pteg_dist = pmap_pteg_dist,
#else
	.pmapop_pte_print = NULL,
	.pmapop_pteg_check = NULL,
	.pmapop_print_mmuregs = NULL,
	.pmapop_print_pte = NULL,
	.pmapop_pteg_dist = NULL,
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
	.pmapop_pvo_verify = pmap_pvo_verify,
#else
	.pmapop_pvo_verify = NULL,
#endif
	.pmapop_steal_memory = pmap_steal_memory,
	.pmapop_bootstrap = pmap_bootstrap,
};
#endif /* PMAPNAME */

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0		/* PVO has been inserted */
#define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
#define	PVO_SPILL_SET		2		/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
#define	PVO_PMAP_PROTECT	5		/* PVO has changed */
#define	PVO_REMOVE		6		/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
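/*
 * For debugging, record in bits 8..11 of pvo_vaddr which of the PVO_*
 * operations above last changed this PVO's PTE state.
 */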
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
static u_long pmap_upvop_free;
static u_long pmap_upvop_maxfree;
static u_long pmap_mpvop_free;
static u_long pmap_mpvop_maxfree;

static void *pmap_pool_ualloc(struct pool *, int);
static void *pmap_pool_malloc(struct pool *, int);

static void pmap_pool_ufree(struct pool *, void *);
static void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
static void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)			\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
static int pmap_pte_insert(int, struct pte *);
static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
static void pmap_pvo_free(struct pvo_entry *);
static void pmap_pvo_free_list(struct pvo_head *);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
static void pvo_set_exec(struct pvo_entry *);
static void pvo_clear_exec(struct pvo_entry *);

static void tlbia(void);

static void pmap_release(pmap_t);
static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

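/* Number of bits in each word of the VSID allocation bitmap below. */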
#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x, ...)	printf(x, __VA_ARGS__)
# define DPRINTFN(n, x, ...)	do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0)
#else
# define DPRINTF(x, ...)	do { } while (0)
# define DPRINTFN(n, x, ...)	do { } while (0)
#endif


#ifdef PMAPCOUNTERS
/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_mappings;
extern struct evcnt pmap_evcnt_unmappings;

extern struct evcnt pmap_evcnt_kernel_mappings;
extern struct evcnt pmap_evcnt_kernel_unmappings;

extern struct evcnt pmap_evcnt_mappings_replaced;

extern struct evcnt pmap_evcnt_exec_mappings;
extern struct evcnt pmap_evcnt_exec_cached;

extern struct evcnt pmap_evcnt_exec_synced;
extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;

extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;

extern struct evcnt pmap_evcnt_updates;
extern struct evcnt pmap_evcnt_collects;
extern struct evcnt pmap_evcnt_copies;

extern struct evcnt pmap_evcnt_ptes_spilled;
extern struct evcnt pmap_evcnt_ptes_unspilled;
extern struct evcnt pmap_evcnt_ptes_evicted;

extern struct evcnt pmap_evcnt_ptes_primary[8];
extern struct evcnt pmap_evcnt_ptes_secondary[8];
extern struct evcnt pmap_evcnt_ptes_removed;
extern struct evcnt pmap_evcnt_ptes_changed;
extern struct evcnt pmap_evcnt_pvos_reclaimed;
extern struct evcnt pmap_evcnt_pvos_failed;

extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

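/*
 * Shorthands for the PowerPC TLB/cache management instructions and the
 * SPR accessors used throughout this file.
 */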
#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#ifdef MD_TLBSYNC
#define	TLBSYNC()	MD_TLBSYNC()
#else
#define	TLBSYNC()	__asm volatile("tlbsync")
#endif
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */

#if defined (PMAP_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PMAP_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

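/*
 * Block external interrupts by clearing MSR[EE] so PTE/PVO updates
 * cannot be interrupted by a fault that would recurse into the pmap;
 * the previous MSR value is returned for pmap_interrupts_restore().
 */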
static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}

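/*
 * Read the low word of the timebase.  The 601 has no timebase, so its
 * real-time clock (RTCL) is read and shifted instead.  The result is
 * used only as a cheap source of pseudo-randomness.
 */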
static inline u_int32_t
mfrtcltbl(void)
{
#ifdef PPC_OEA601
	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
#endif
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

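/*
 * Compute the primary PTE group index for a virtual address: the low
 * bits of the VSID are XORed with the page index within the segment
 * and then masked by the hash table size, per the OEA hashed page
 * table architecture.
 */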
static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PMAP_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PMAP_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif

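/*
 * Return the PVO list for a physical page (and optionally the vm_page
 * itself); addresses without a vm_page fall on the shared "unmanaged"
 * list.
 */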
static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
	struct vm_page *pg;
	struct vm_page_md *md;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	md = VM_PAGE_TO_MD(pg);
	return &md->mdpg_pvoh;
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return &md->mdpg_pvoh;
}


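/*
 * The REF/CHG bits of torn-down mappings are cached in a per-page
 * attribute word; these helpers clear, read, and set those cached
 * attribute bits.
 */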
static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs &= ~ptebit;
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return md->mdpg_attrs;
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs |= ptebit;
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
	        ~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PMAP_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#endif /* PMAP_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

static int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
	    ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo);
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

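/*
 * Outline (see also the comments in the body): look up the PVO for the
 * faulting address among the evicted entries at the head of its PTEG's
 * overflow list, pick a pseudo-random non-kernel victim slot in the
 * PTEG, unset the victim's PTE, and install the spilled one.
 */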
int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry. Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
		    < PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs always are first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				PMAP_UNLOCK();
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		PMAP_UNLOCK();
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];	/* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA we are replacing
	 * even though it's valid; if we don't, we lose any ref/chg bit
	 * changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	PMAP_UNLOCK();
	return 1;
}

/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator, IPL_NONE);

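	/*
	 * Keep a generous number of PVO entries pre-allocated: the pool
	 * low-water mark presumably keeps allocations made with
	 * interrupts disabled from having to fall back on reclaiming an
	 * existing mapping (the exact figure looks historical).
	 */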
	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;

}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS);
	memset((void *)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE, "pmap_create: pm %p:\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
	    pm,
	    pm->pm_sr[0], pm->pm_sr[1],
	    pm->pm_sr[2], pm->pm_sr[3],
	    pm->pm_sr[4], pm->pm_sr[5],
	    pm->pm_sr[6], pm->pm_sr[7],
	    pm->pm_sr[8], pm->pm_sr[9],
	    pm->pm_sr[10], pm->pm_sr[11],
	    pm->pm_sr[12], pm->pm_sr[13],
	    pm->pm_sr[14], pm->pm_sr[15]);
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	register_t entropy = MFTB();
	register_t mask;
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	PMAP_LOCK();
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		static register_t pmap_vsidcontext;
		register_t hash;
		unsigned int n;

		/* Create a new value by multiplying by a prime and adding
		 * in entropy from the timebase register.  This is to make
		 * the VSID more random so that the PT Hash function collides
		 * less often.  (note that the prime causes gcc to do shifts
		 * instead of a multiply)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0) {		/* 0 is special, avoid it */
			entropy += 0xbadf00d;
			continue;
		}
		n = hash >> 5;
		mask = 1L << (hash & (VSID_NBPW-1));
		hash = pmap_vsidcontext;
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (~pmap_vsid_bitmap[n] == 0) {
				entropy = hash ^ (hash >> 16);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1L << i;
			hash &= ~(VSID_NBPW-1);
			hash |= i;
		}
		hash &= PTE_VSID >> PTE_VSID_SHFT;
		pmap_vsid_bitmap[n] |= mask;
		pm->pm_vsid = hash;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
			    SR_NOEXEC;
#endif
		PMAP_UNLOCK();
		return;
	}
	PMAP_UNLOCK();
	panic("pmap_pinit: out of segments");
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	atomic_inc_uint(&pm->pm_refs);
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);

	PMAP_LOCK();
	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = pm->pm_vsid & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;

	KASSERT(pmap_vsid_bitmap[idx] & mask);
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_UNLOCK();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

static inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by
	 * grabbing the PTEG slot index from 3 unused bits in pvo_vaddr
	 * and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}

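/*
 * Worked example (values assumed for illustration): with
 * pmap_pteg_mask == 0x3ff, a PVO in PTEG 0x42, slot 3 yields
 * pteidx = 0x42 * 8 + 3 = 0x213.  If PTE_HID is set, the XOR with
 * pmap_pteg_mask * 8 flips the index into the secondary group:
 * 0x213 ^ 0x1ff8 = 0x1deb, i.e. PTEG 0x42 ^ 0x3ff = 0x3bd, slot 3.
 */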
volatile struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile struct pte *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}
	return NULL;
#endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
}

struct pvo_entry *
pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;

	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
			panic("pmap_pvo_find_va: invalid pvo %p on "
			    "list %#x (%p)", pvo, ptegidx,
			    &pmap_pvo_table[ptegidx]);
#endif
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
			return pvo;
		}
	}
	if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
		panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
		    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
	return NULL;
}

#if defined(DEBUG) || defined(PMAPCHECK)
void
pmap_pvo_check(const struct pvo_entry *pvo)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo0;
	volatile struct pte *pt;
	int failed = 0;

	PMAP_LOCK();

	if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
		panic("pmap_pvo_check: pvo %p: invalid address", pvo);

	if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
		printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
		    pvo, pvo->pvo_pmap);
		failed = 1;
	}

	if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
	    (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
		printf("pmap_pvo_check: pvo %p: invalid olink address %p\n",
		    pvo, TAILQ_NEXT(pvo, pvo_olink));
		failed = 1;
	}

	if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
	    (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
		    pvo, LIST_NEXT(pvo, pvo_vlink));
		failed = 1;
	}

	if (PVO_MANAGED_P(pvo)) {
		pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
	} else {
		if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
			printf("pmap_pvo_check: pvo %p: non kernel address "
			    "on kernel unmanaged list\n", pvo);
			failed = 1;
		}
		pvo_head = &pmap_pvo_kunmanaged;
	}
	LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
		if (pvo0 == pvo)
			break;
	}
	if (pvo0 == NULL) {
		printf("pmap_pvo_check: pvo %p: not present "
		    "on its vlist head %p\n", pvo, pvo_head);
		failed = 1;
	}
	if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
		printf("pmap_pvo_check: pvo %p: not present "
		    "on its olist head\n", pvo);
		failed = 1;
	}
	pt = pmap_pvo_to_pte(pvo, -1);
	if (pt == NULL) {
		if (pvo->pvo_pte.pte_hi & PTE_VALID) {
			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
			    "no PTE\n", pvo);
			failed = 1;
		}
	} else {
		if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
		    (uintptr_t) pt >=
		    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
			printf("pmap_pvo_check: pvo %p: pte %p not in "
			    "pteg table\n", pvo, pt);
			failed = 1;
		}
		if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
			printf("pmap_pvo_check: pvo %p: pte index mismatch "
			    "between PTE and PVO\n", pvo);
			failed = 1;
		}
		if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
			printf("pmap_pvo_check: pvo %p: pte_hi differ: "
			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
			    pvo->pvo_pte.pte_hi,
			    pt->pte_hi);
			failed = 1;
		}
		if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
		    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
			printf("pmap_pvo_check: pvo %p: pte_lo differ: "
			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
			    (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
			    (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
			failed = 1;
		}
		if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
			printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
			    " does not match PVO's VA %#" _PRIxva "\n",
			    pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
			failed = 1;
		}
		if (failed)
			pmap_pte_print(pt);
	}
	if (failed)
		panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
		    pvo->pvo_pmap);

	PMAP_UNLOCK();
}
#endif /* DEBUG || PMAPCHECK */

/*
 * Search the PVO table looking for a non-wired entry.
 * If we find one, remove it and return it.
 */

struct pvo_entry *
pmap_pvo_reclaim(struct pmap *pm)
{
	struct pvo_tqhead *pvoh;
	struct pvo_entry *pvo;
	uint32_t idx, endidx;

	endidx = pmap_pvo_reclaim_nextidx;
	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
	     idx = (idx + 1) & pmap_pteg_mask) {
		pvoh = &pmap_pvo_table[idx];
		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
			if (!PVO_WIRED_P(pvo)) {
				pmap_pvo_remove(pvo, -1, NULL);
				pmap_pvo_reclaim_nextidx = idx;
				PMAPCOUNT(pvos_reclaimed);
				return pvo;
			}
		}
	}
	return NULL;
}

/*
 * Enter a mapping.  Returns 0 on success or ENOMEM if no pvo entry
 * could be allocated and PMAP_CANFAIL was set.
 */
int
pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh;
	register_t msr;
	int ptegidx;
	int i;
	int poolflags = PR_NOWAIT;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	msr = pmap_interrupts_off();

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pmap_pvo_remove_depth > 0)
		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
	if (++pmap_pvo_enter_depth > 1)
		panic("pmap_pvo_enter: called recursively!");
#endif

	/*
	 * Remove any existing mapping for this page.  Reuse the
	 * pvo entry if there is a mapping.
	 */
	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
#ifdef DEBUG
			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
			    ~(PTE_REF|PTE_CHG)) == 0 &&
			    va < VM_MIN_KERNEL_ADDRESS) {
				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
				    pvo->pvo_pte.pte_hi,
				    pm->pm_sr[va >> ADDR_SR_SHFT]);
				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
#ifdef DDBX
				Debugger();
#endif
			}
#endif
			PMAPCOUNT(mappings_replaced);
			pmap_pvo_remove(pvo, -1, NULL);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	--pmap_pvo_enter_depth;
#endif
	pmap_interrupts_restore(msr);
	if (pvo) {
		pmap_pvo_free(pvo);
	}
	pvo = pool_get(pl, poolflags);
	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);

#ifdef DEBUG
	/*
	 * Exercise pmap_pvo_reclaim() a little.
	 */
	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
		pool_put(pl, pvo);
		pvo = NULL;
	}
#endif

	msr = pmap_interrupts_off();
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	++pmap_pvo_enter_depth;
#endif
	if (pvo == NULL) {
		pvo = pmap_pvo_reclaim(pm);
		if (pvo == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_pvo_enter: failed");
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
			pmap_pvo_enter_depth--;
#endif
			PMAPCOUNT(pvos_failed);
			pmap_interrupts_restore(msr);
			return ENOMEM;
		}
	}

	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE) {
		PMAPCOUNT(exec_mappings);
		pvo_set_exec(pvo);
	}
	if (flags & PMAP_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged) {
		pvo->pvo_vaddr |= PVO_MANAGED;
		PMAPCOUNT(mappings);
	} else {
		PMAPCOUNT(kernel_mappings);
	}
	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;
#if defined(DEBUG)
	/* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
	DPRINTFN(PVOENTER,
	    "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
	    pvo, pm, va, pa);
#endif

	/*
	 * We hope this succeeds but it isn't required.
	 */
	pvoh = &pmap_pvo_table[ptegidx];
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
		PVO_WHERE(pvo, ENTER_INSERT);
		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);

	} else {
		/*
		 * Since we didn't have room for this entry (which makes it
		 * an evicted entry), place it at the head of the list.
		 */
		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
		PMAPCOUNT(ptes_evicted);
		pm->pm_evictions++;
		/*
		 * If this is a kernel page, make sure it's active.
		 */
		if (pm == pmap_kernel()) {
			i = pmap_pte_spill(pm, va, false);
			KASSERT(i);
		}
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_enter_depth--;
#endif
	pmap_interrupts_restore(msr);
	return 0;
}

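/*
 * Tear down the given mapping: unset its PTE if resident, update the
 * exec accounting and pmap statistics, save the REF/CHG bits into the
 * page attribute cache, and unlink the PVO.  If pvol is non-NULL the
 * PVO is placed on that list for the caller to free later.
 */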
1717 static void
1718 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1719 {
1720 volatile struct pte *pt;
1721 int ptegidx;
1722
1723 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1724 if (++pmap_pvo_remove_depth > 1)
1725 panic("pmap_pvo_remove: called recursively!");
1726 #endif
1727
1728 /*
1729 * If we haven't been supplied the ptegidx, calculate it.
1730 */
1731 if (pteidx == -1) {
1732 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1733 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1734 } else {
1735 ptegidx = pteidx >> 3;
1736 if (pvo->pvo_pte.pte_hi & PTE_HID)
1737 ptegidx ^= pmap_pteg_mask;
1738 }
1739 PMAP_PVO_CHECK(pvo); /* sanity check */
1740
1741 /*
1742 * If there is an active pte entry, we need to deactivate it
1743 * (and save the ref & chg bits).
1744 */
1745 pt = pmap_pvo_to_pte(pvo, pteidx);
1746 if (pt != NULL) {
1747 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1748 PVO_WHERE(pvo, REMOVE);
1749 PVO_PTEGIDX_CLR(pvo);
1750 PMAPCOUNT(ptes_removed);
1751 } else {
1752 KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1753 pvo->pvo_pmap->pm_evictions--;
1754 }
1755
1756 /*
1757 * Account for executable mappings.
1758 */
1759 if (PVO_EXECUTABLE_P(pvo))
1760 pvo_clear_exec(pvo);
1761
1762 /*
1763 * Update our statistics.
1764 */
1765 pvo->pvo_pmap->pm_stats.resident_count--;
1766 if (PVO_WIRED_P(pvo))
1767 pvo->pvo_pmap->pm_stats.wired_count--;
1768
1769 /*
1770 * Save the REF/CHG bits into their cache if the page is managed.
1771 */
1772 if (PVO_MANAGED_P(pvo)) {
1773 register_t ptelo = pvo->pvo_pte.pte_lo;
1774 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1775
1776 if (pg != NULL) {
1777 /*
1778 * If this page was changed and it is mapped exec,
1779 * invalidate it.
1780 */
1781 if ((ptelo & PTE_CHG) &&
1782 (pmap_attr_fetch(pg) & PTE_EXEC)) {
1783 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
1784 if (LIST_EMPTY(pvoh)) {
1785 DPRINTFN(EXEC, "[pmap_pvo_remove: "
1786 "%#" _PRIxpa ": clear-exec]\n",
1787 VM_PAGE_TO_PHYS(pg));
1788 pmap_attr_clear(pg, PTE_EXEC);
1789 PMAPCOUNT(exec_uncached_pvo_remove);
1790 } else {
1791 DPRINTFN(EXEC, "[pmap_pvo_remove: "
1792 "%#" _PRIxpa ": syncicache]\n",
1793 VM_PAGE_TO_PHYS(pg));
1794 pmap_syncicache(VM_PAGE_TO_PHYS(pg),
1795 PAGE_SIZE);
1796 PMAPCOUNT(exec_synced_pvo_remove);
1797 }
1798 }
1799
1800 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1801 }
1802 PMAPCOUNT(unmappings);
1803 } else {
1804 PMAPCOUNT(kernel_unmappings);
1805 }
1806
1807 /*
1808 * Remove the PVO from its lists and return it to the pool.
1809 */
1810 LIST_REMOVE(pvo, pvo_vlink);
1811 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1812 if (pvol) {
1813 LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1814 }
1815 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1816 pmap_pvo_remove_depth--;
1817 #endif
1818 }
1819
1820 void
1821 pmap_pvo_free(struct pvo_entry *pvo)
1822 {
1823
1824 pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1825 }
1826
1827 void
1828 pmap_pvo_free_list(struct pvo_head *pvol)
1829 {
1830 struct pvo_entry *pvo, *npvo;
1831
1832 for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
1833 npvo = LIST_NEXT(pvo, pvo_vlink);
1834 LIST_REMOVE(pvo, pvo_vlink);
1835 pmap_pvo_free(pvo);
1836 }
1837 }
1838
1839 /*
1840 * Mark a mapping as executable.
1841 * If this is the first executable mapping in the segment,
1842 * clear the noexec flag.
1843 */
1844 static void
1845 pvo_set_exec(struct pvo_entry *pvo)
1846 {
1847 struct pmap *pm = pvo->pvo_pmap;
1848
1849 if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1850 return;
1851 }
1852 pvo->pvo_vaddr |= PVO_EXECUTABLE;
1853 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1854 {
1855 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1856 if (pm->pm_exec[sr]++ == 0) {
1857 pm->pm_sr[sr] &= ~SR_NOEXEC;
1858 }
1859 }
1860 #endif
1861 }
1862
1863 /*
1864 * Mark a mapping as non-executable.
1865 * If this was the last executable mapping in the segment,
1866 * set the noexec flag.
1867 */
1868 static void
1869 pvo_clear_exec(struct pvo_entry *pvo)
1870 {
1871 struct pmap *pm = pvo->pvo_pmap;
1872
1873 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1874 return;
1875 }
1876 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1877 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1878 {
1879 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1880 if (--pm->pm_exec[sr] == 0) {
1881 pm->pm_sr[sr] |= SR_NOEXEC;
1882 }
1883 }
1884 #endif
1885 }
1886
1887 /*
1888 * Insert physical page at pa into the given pmap at virtual address va.
1889 */
1890 int
1891 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1892 {
1893 struct mem_region *mp;
1894 struct pvo_head *pvo_head;
1895 struct vm_page *pg;
1896 struct pool *pl;
1897 register_t pte_lo;
1898 int error;
1899 u_int pvo_flags;
1900 u_int was_exec = 0;
1901
1902 PMAP_LOCK();
1903
1904 if (__predict_false(!pmap_initialized)) {
1905 pvo_head = &pmap_pvo_kunmanaged;
1906 pl = &pmap_upvo_pool;
1907 pvo_flags = 0;
1908 pg = NULL;
1909 was_exec = PTE_EXEC;
1910 } else {
1911 pvo_head = pa_to_pvoh(pa, &pg);
1912 pl = &pmap_mpvo_pool;
1913 pvo_flags = PVO_MANAGED;
1914 }
1915
1916 DPRINTFN(ENTER,
1917 "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
1918 pm, va, pa, prot, flags);
1919
1920 /*
1921 * If this is a managed page, and it's the first reference to the
1922 * page clear the execness of the page. Otherwise fetch the execness.
1923 */
1924 if (pg != NULL)
1925 was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1926
1927 DPRINTFN(ENTER, " was_exec=%d", was_exec);
1928
1929 /*
1930 * Assume the page is cache inhibited and access is guarded unless
1931 * it's in our available memory array. If it is in the memory array,
1932 * asssume it's in memory coherent memory.
1933 */
	if (flags & PMAP_MD_PREFETCHABLE) {
		pte_lo = 0;
	} else {
		pte_lo = PTE_G;
	}
1938
1939 if ((flags & PMAP_NOCACHE) == 0) {
1940 for (mp = mem; mp->size; mp++) {
1941 if (pa >= mp->start && pa < mp->start + mp->size) {
1942 pte_lo = PTE_M;
1943 break;
1944 }
1945 }
1946 #ifdef MULTIPROCESSOR
1947 if (((mfpvr() >> 16) & 0xffff) == MPC603e)
1948 pte_lo = PTE_M;
1949 #endif
1950 } else {
1951 pte_lo |= PTE_I;
1952 }
1953
1954 if (prot & VM_PROT_WRITE)
1955 pte_lo |= PTE_BW;
1956 else
1957 pte_lo |= PTE_BR;
1958
1959 /*
1960 * If this was in response to a fault, "pre-fault" the PTE's
1961 * changed/referenced bit appropriately.
1962 */
1963 if (flags & VM_PROT_WRITE)
1964 pte_lo |= PTE_CHG;
1965 if (flags & VM_PROT_ALL)
1966 pte_lo |= PTE_REF;
1967
1968 /*
1969 * We need to know if this page can be executable
1970 */
1971 flags |= (prot & VM_PROT_EXECUTE);
1972
1973 /*
1974 * Record mapping for later back-translation and pte spilling.
1975 * This will overwrite any existing mapping.
1976 */
1977 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1978
1979 /*
1980 * Flush the real page from the instruction cache if this page is
1981 * mapped executable and cacheable and has not been flushed since
1982 * the last time it was modified.
1983 */
1984 if (error == 0 &&
1985 (flags & VM_PROT_EXECUTE) &&
1986 (pte_lo & PTE_I) == 0 &&
1987 was_exec == 0) {
1988 DPRINTFN(ENTER, " %s", "syncicache");
1989 PMAPCOUNT(exec_synced);
1990 pmap_syncicache(pa, PAGE_SIZE);
1991 if (pg != NULL) {
1992 pmap_attr_save(pg, PTE_EXEC);
1993 PMAPCOUNT(exec_cached);
1994 #if defined(DEBUG) || defined(PMAPDEBUG)
1995 if (pmapdebug & PMAPDEBUG_ENTER)
1996 printf(" marked-as-exec");
1997 else if (pmapdebug & PMAPDEBUG_EXEC)
1998 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
1999 VM_PAGE_TO_PHYS(pg));
2000
2001 #endif
2002 }
2003 }
2004
2005 DPRINTFN(ENTER, ": error=%d\n", error);
2006
2007 PMAP_UNLOCK();
2008
2009 return error;
2010 }
2011
2012 void
2013 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2014 {
2015 struct mem_region *mp;
2016 register_t pte_lo;
2017 int error;
2018
2019 #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA)
2020 if (va < VM_MIN_KERNEL_ADDRESS)
2021 panic("pmap_kenter_pa: attempt to enter "
2022 "non-kernel address %#" _PRIxva "!", va);
2023 #endif
2024
2025 DPRINTFN(KENTER,
2026 "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot);
2027
2028 PMAP_LOCK();
2029
2030 /*
2031 * Assume the page is cache inhibited and access is guarded unless
2032 * it's in our available memory array. If it is in the memory array,
2033 * asssume it's in memory coherent memory.
2034 */
2035 pte_lo = PTE_IG;
2036 if ((flags & PMAP_NOCACHE) == 0) {
2037 for (mp = mem; mp->size; mp++) {
2038 if (pa >= mp->start && pa < mp->start + mp->size) {
2039 pte_lo = PTE_M;
2040 break;
2041 }
2042 }
2043 #ifdef MULTIPROCESSOR
2044 if (((mfpvr() >> 16) & 0xffff) == MPC603e)
2045 pte_lo = PTE_M;
2046 #endif
2047 }
2048
2049 if (prot & VM_PROT_WRITE)
2050 pte_lo |= PTE_BW;
2051 else
2052 pte_lo |= PTE_BR;
2053
2054 /*
2055 * We don't care about REF/CHG on PVOs on the unmanaged list.
2056 */
2057 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
2058 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
2059
2060 if (error != 0)
2061 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
2062 va, pa, error);
2063
2064 PMAP_UNLOCK();
2065 }
2066
2067 void
2068 pmap_kremove(vaddr_t va, vsize_t len)
2069 {
2070 if (va < VM_MIN_KERNEL_ADDRESS)
2071 panic("pmap_kremove: attempt to remove "
2072 "non-kernel address %#" _PRIxva "!", va);
2073
2074 DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len);
2075 pmap_remove(pmap_kernel(), va, va + len);
2076 }
2077
2078 /*
2079 * Remove the given range of mapping entries.
2080 */
2081 void
2082 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
2083 {
2084 struct pvo_head pvol;
2085 struct pvo_entry *pvo;
2086 register_t msr;
2087 int pteidx;
2088
2089 PMAP_LOCK();
2090 LIST_INIT(&pvol);
2091 msr = pmap_interrupts_off();
2092 for (; va < endva; va += PAGE_SIZE) {
2093 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2094 if (pvo != NULL) {
2095 pmap_pvo_remove(pvo, pteidx, &pvol);
2096 }
2097 }
2098 pmap_interrupts_restore(msr);
2099 pmap_pvo_free_list(&pvol);
2100 PMAP_UNLOCK();
2101 }
2102
2103 /*
2104 * Get the physical page address for the given pmap/virtual address.
2105 */
2106 bool
2107 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
2108 {
2109 struct pvo_entry *pvo;
2110 register_t msr;
2111
2112 PMAP_LOCK();
2113
2114 /*
2115 * If this is a kernel pmap lookup, also check the battable
2116 * and if we get a hit, translate the VA to a PA using the
2117 * BAT entries. Don't check for VM_MAX_KERNEL_ADDRESS is
2118 * that will wrap back to 0.
2119 */
2120 if (pm == pmap_kernel() &&
2121 (va < VM_MIN_KERNEL_ADDRESS ||
2122 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
2123 KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
2124 #if defined (PMAP_OEA)
2125 #ifdef PPC_OEA601
2126 if ((MFPVR() >> 16) == MPC601) {
2127 register_t batu = battable[va >> 23].batu;
2128 register_t batl = battable[va >> 23].batl;
2129 register_t sr = iosrtable[va >> ADDR_SR_SHFT];
2130 if (BAT601_VALID_P(batl) &&
2131 BAT601_VA_MATCH_P(batu, batl, va)) {
2132 register_t mask =
2133 (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
2134 if (pap)
2135 *pap = (batl & mask) | (va & ~mask);
2136 PMAP_UNLOCK();
2137 return true;
2138 } else if (SR601_VALID_P(sr) &&
2139 SR601_PA_MATCH_P(sr, va)) {
2140 if (pap)
2141 *pap = va;
2142 PMAP_UNLOCK();
2143 return true;
2144 }
2145 } else
2146 #endif /* PPC_OEA601 */
2147 {
2148 register_t batu = battable[BAT_VA2IDX(va)].batu;
2149 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
2150 register_t batl = battable[BAT_VA2IDX(va)].batl;
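			/*
			 * Illustrative example: for a 256MB BAT (BL ==
			 * 0x7ff, i.e. batu & BAT_BL == 0x1ffc) the
			 * expression below yields mask == 0xf0000000,
			 * so the PA is the block's BRPN plus the low
			 * 28 bits of the VA.
			 */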
2151 register_t mask =
2152 (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL;
2153 if (pap)
2154 *pap = (batl & mask) | (va & ~mask);
2155 PMAP_UNLOCK();
2156 return true;
2157 }
2158 }
2159 return false;
2160 #elif defined (PMAP_OEA64_BRIDGE)
2161 if (va >= SEGMENT_LENGTH)
2162 panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
2163 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
2164 else {
2165 if (pap)
2166 *pap = va;
2167 PMAP_UNLOCK();
2168 return true;
2169 }
2170 #elif defined (PMAP_OEA64)
2171 #error PPC_OEA64 not supported
2172 #endif /* PPC_OEA */
2173 }
2174
2175 msr = pmap_interrupts_off();
2176 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2177 if (pvo != NULL) {
2178 PMAP_PVO_CHECK(pvo); /* sanity check */
2179 if (pap)
2180 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2181 | (va & ADDR_POFF);
2182 }
2183 pmap_interrupts_restore(msr);
2184 PMAP_UNLOCK();
2185 return pvo != NULL;
2186 }
2187
2188 /*
2189 * Lower the protection on the specified range of this pmap.
2190 */
2191 void
2192 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2193 {
2194 struct pvo_entry *pvo;
2195 volatile struct pte *pt;
2196 register_t msr;
2197 int pteidx;
2198
2199 /*
2200 * Since this routine only downgrades protection, we should
2201 * always be called with at least one bit not set.
2202 */
2203 KASSERT(prot != VM_PROT_ALL);
2204
2205 /*
2206 * If there is no protection, this is equivalent to
2207 * remove the pmap from the pmap.
2208 */
2209 if ((prot & VM_PROT_READ) == 0) {
2210 pmap_remove(pm, va, endva);
2211 return;
2212 }
2213
2214 PMAP_LOCK();
2215
2216 msr = pmap_interrupts_off();
2217 for (; va < endva; va += PAGE_SIZE) {
2218 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2219 if (pvo == NULL)
2220 continue;
2221 PMAP_PVO_CHECK(pvo); /* sanity check */
2222
2223 /*
2224 * Revoke executable if asked to do so.
2225 */
2226 if ((prot & VM_PROT_EXECUTE) == 0)
2227 pvo_clear_exec(pvo);
2228
2229 #if 0
2230 /*
2231 * If the page is already read-only, no change
2232 * needs to be made.
2233 */
2234 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2235 continue;
2236 #endif
2237 /*
2238 * Grab the PTE pointer before we diddle with
2239 * the cached PTE copy.
2240 */
2241 pt = pmap_pvo_to_pte(pvo, pteidx);
2242 /*
2243 * Change the protection of the page.
2244 */
2245 pvo->pvo_pte.pte_lo &= ~PTE_PP;
2246 pvo->pvo_pte.pte_lo |= PTE_BR;
2247
2248 /*
2249 * If the PVO is in the page table, update
2250 * that pte at well.
2251 */
2252 if (pt != NULL) {
2253 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2254 PVO_WHERE(pvo, PMAP_PROTECT);
2255 PMAPCOUNT(ptes_changed);
2256 }
2257
2258 PMAP_PVO_CHECK(pvo); /* sanity check */
2259 }
2260 pmap_interrupts_restore(msr);
2261 PMAP_UNLOCK();
2262 }
2263
2264 void
2265 pmap_unwire(pmap_t pm, vaddr_t va)
2266 {
2267 struct pvo_entry *pvo;
2268 register_t msr;
2269
2270 PMAP_LOCK();
2271 msr = pmap_interrupts_off();
2272 pvo = pmap_pvo_find_va(pm, va, NULL);
2273 if (pvo != NULL) {
2274 if (PVO_WIRED_P(pvo)) {
2275 pvo->pvo_vaddr &= ~PVO_WIRED;
2276 pm->pm_stats.wired_count--;
2277 }
2278 PMAP_PVO_CHECK(pvo); /* sanity check */
2279 }
2280 pmap_interrupts_restore(msr);
2281 PMAP_UNLOCK();
2282 }
2283
2284 /*
2285 * Lower the protection on the specified physical page.
2286 */
2287 void
2288 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2289 {
2290 struct pvo_head *pvo_head, pvol;
2291 struct pvo_entry *pvo, *next_pvo;
2292 volatile struct pte *pt;
2293 register_t msr;
2294
2295 PMAP_LOCK();
2296
2297 KASSERT(prot != VM_PROT_ALL);
2298 LIST_INIT(&pvol);
2299 msr = pmap_interrupts_off();
2300
2301 /*
2302 * When UVM reuses a page, it does a pmap_page_protect with
2303 * VM_PROT_NONE. At that point, we can clear the exec flag
2304 * since we know the page will have different contents.
2305 */
2306 if ((prot & VM_PROT_READ) == 0) {
2307 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
2308 VM_PAGE_TO_PHYS(pg));
2309 if (pmap_attr_fetch(pg) & PTE_EXEC) {
2310 PMAPCOUNT(exec_uncached_page_protect);
2311 pmap_attr_clear(pg, PTE_EXEC);
2312 }
2313 }
2314
2315 pvo_head = vm_page_to_pvoh(pg);
2316 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2317 next_pvo = LIST_NEXT(pvo, pvo_vlink);
2318 PMAP_PVO_CHECK(pvo); /* sanity check */
2319
2320 /*
2321 * Downgrading to no mapping at all, we just remove the entry.
2322 */
2323 if ((prot & VM_PROT_READ) == 0) {
2324 pmap_pvo_remove(pvo, -1, &pvol);
2325 continue;
2326 }
2327
2328 /*
2329 * If EXEC permission is being revoked, just clear the
2330 * flag in the PVO.
2331 */
2332 if ((prot & VM_PROT_EXECUTE) == 0)
2333 pvo_clear_exec(pvo);
2334
2335 /*
2336 * If this entry is already RO, don't diddle with the
2337 * page table.
2338 */
2339 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2340 PMAP_PVO_CHECK(pvo);
2341 continue;
2342 }
2343
2344 /*
2345 * Grab the PTE before the we diddle the bits so
2346 * pvo_to_pte can verify the pte contents are as
2347 * expected.
2348 */
2349 pt = pmap_pvo_to_pte(pvo, -1);
2350 pvo->pvo_pte.pte_lo &= ~PTE_PP;
2351 pvo->pvo_pte.pte_lo |= PTE_BR;
2352 if (pt != NULL) {
2353 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2354 PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
2355 PMAPCOUNT(ptes_changed);
2356 }
2357 PMAP_PVO_CHECK(pvo); /* sanity check */
2358 }
2359 pmap_interrupts_restore(msr);
2360 pmap_pvo_free_list(&pvol);
2361
2362 PMAP_UNLOCK();
2363 }
2364
2365 /*
2366 * Activate the address space for the specified process. If the process
2367 * is the current process, load the new MMU context.
2368 */
2369 void
2370 pmap_activate(struct lwp *l)
2371 {
2372 struct pcb *pcb = lwp_getpcb(l);
2373 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2374
2375 DPRINTFN(ACTIVATE,
2376 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp);
2377
2378 /*
2379 * XXX Normally performed in cpu_lwp_fork().
2380 */
2381 pcb->pcb_pm = pmap;
2382
2383 /*
2384 * In theory, the SR registers need only be valid on return
2385 * to user space wait to do them there.
2386 */
2387 if (l == curlwp) {
2388 /* Store pointer to new current pmap. */
2389 curpm = pmap;
2390 }
2391 }
2392
2393 /*
2394 * Deactivate the specified process's address space.
2395 */
2396 void
2397 pmap_deactivate(struct lwp *l)
2398 {
2399 }
2400
2401 bool
2402 pmap_query_bit(struct vm_page *pg, int ptebit)
2403 {
2404 struct pvo_entry *pvo;
2405 volatile struct pte *pt;
2406 register_t msr;
2407
2408 PMAP_LOCK();
2409
2410 if (pmap_attr_fetch(pg) & ptebit) {
2411 PMAP_UNLOCK();
2412 return true;
2413 }
2414
2415 msr = pmap_interrupts_off();
2416 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2417 PMAP_PVO_CHECK(pvo); /* sanity check */
2418 /*
2419 * See if we saved the bit off. If so cache, it and return
2420 * success.
2421 */
2422 if (pvo->pvo_pte.pte_lo & ptebit) {
2423 pmap_attr_save(pg, ptebit);
2424 PMAP_PVO_CHECK(pvo); /* sanity check */
2425 pmap_interrupts_restore(msr);
2426 PMAP_UNLOCK();
2427 return true;
2428 }
2429 }
2430 /*
2431 * No luck, now go thru the hard part of looking at the ptes
2432 * themselves. Sync so any pending REF/CHG bits are flushed
2433 * to the PTEs.
2434 */
2435 SYNC();
2436 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2437 PMAP_PVO_CHECK(pvo); /* sanity check */
2438 /*
2439 * See if this pvo have a valid PTE. If so, fetch the
2440 * REF/CHG bits from the valid PTE. If the appropriate
2441 * ptebit is set, cache, it and return success.
2442 */
2443 pt = pmap_pvo_to_pte(pvo, -1);
2444 if (pt != NULL) {
2445 pmap_pte_synch(pt, &pvo->pvo_pte);
2446 if (pvo->pvo_pte.pte_lo & ptebit) {
2447 pmap_attr_save(pg, ptebit);
2448 PMAP_PVO_CHECK(pvo); /* sanity check */
2449 pmap_interrupts_restore(msr);
2450 PMAP_UNLOCK();
2451 return true;
2452 }
2453 }
2454 }
2455 pmap_interrupts_restore(msr);
2456 PMAP_UNLOCK();
2457 return false;
2458 }
2459
2460 bool
2461 pmap_clear_bit(struct vm_page *pg, int ptebit)
2462 {
2463 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2464 struct pvo_entry *pvo;
2465 volatile struct pte *pt;
2466 register_t msr;
2467 int rv = 0;
2468
2469 PMAP_LOCK();
2470 msr = pmap_interrupts_off();
2471
2472 /*
2473 * Fetch the cache value
2474 */
2475 rv |= pmap_attr_fetch(pg);
2476
2477 /*
2478 * Clear the cached value.
2479 */
2480 pmap_attr_clear(pg, ptebit);
2481
2482 /*
2483 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2484 * can reset the right ones). Note that since the pvo entries and
2485 * list heads are accessed via BAT0 and are never placed in the
2486 * page table, we don't have to worry about further accesses setting
2487 * the REF/CHG bits.
2488 */
2489 SYNC();
2490
2491 /*
2492 * For each pvo entry, clear pvo's ptebit. If this pvo have a
2493 * valid PTE. If so, clear the ptebit from the valid PTE.
2494 */
2495 LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2496 PMAP_PVO_CHECK(pvo); /* sanity check */
2497 pt = pmap_pvo_to_pte(pvo, -1);
2498 if (pt != NULL) {
2499 /*
2500 * Only sync the PTE if the bit we are looking
2501 * for is not already set.
2502 */
2503 if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2504 pmap_pte_synch(pt, &pvo->pvo_pte);
2505 /*
2506 * If the bit we are looking for was already set,
2507 * clear that bit in the pte.
2508 */
2509 if (pvo->pvo_pte.pte_lo & ptebit)
2510 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2511 }
2512 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2513 pvo->pvo_pte.pte_lo &= ~ptebit;
2514 PMAP_PVO_CHECK(pvo); /* sanity check */
2515 }
2516 pmap_interrupts_restore(msr);
2517
2518 /*
2519 * If we are clearing the modify bit and this page was marked EXEC
2520 * and the user of the page thinks the page was modified, then we
2521 * need to clean it from the icache if it's mapped or clear the EXEC
2522 * bit if it's not mapped. The page itself might not have the CHG
2523 * bit set if the modification was done via DMA to the page.
2524 */
2525 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2526 if (LIST_EMPTY(pvoh)) {
2527 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
2528 VM_PAGE_TO_PHYS(pg));
2529 pmap_attr_clear(pg, PTE_EXEC);
2530 PMAPCOUNT(exec_uncached_clear_modify);
2531 } else {
2532 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
2533 VM_PAGE_TO_PHYS(pg));
2534 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
2535 PMAPCOUNT(exec_synced_clear_modify);
2536 }
2537 }
2538 PMAP_UNLOCK();
2539 return (rv & ptebit) != 0;
2540 }
2541
2542 void
2543 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2544 {
2545 struct pvo_entry *pvo;
2546 size_t offset = va & ADDR_POFF;
2547 int s;
2548
2549 PMAP_LOCK();
2550 s = splvm();
2551 while (len > 0) {
2552 size_t seglen = PAGE_SIZE - offset;
2553 if (seglen > len)
2554 seglen = len;
2555 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2556 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
2557 pmap_syncicache(
2558 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2559 PMAP_PVO_CHECK(pvo);
2560 }
2561 va += seglen;
2562 len -= seglen;
2563 offset = 0;
2564 }
2565 splx(s);
2566 PMAP_UNLOCK();
2567 }
2568
2569 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2570 void
2571 pmap_pte_print(volatile struct pte *pt)
2572 {
2573 printf("PTE %p: ", pt);
2574
	/* High word: */
	printf("%#" _PRIxpte ": [", pt->pte_hi);
2581
2582 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2583 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2584
2585 printf("%#" _PRIxpte " %#" _PRIxpte "",
2586 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2587 pt->pte_hi & PTE_API);
	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
2593
	/* Low word: */
	printf(" %#" _PRIxpte ": [", pt->pte_lo);
	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
2602 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2603 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2604 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2605 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2606 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2607 printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2608 switch (pt->pte_lo & PTE_PP) {
2609 case PTE_BR: printf("br]\n"); break;
2610 case PTE_BW: printf("bw]\n"); break;
2611 case PTE_SO: printf("so]\n"); break;
2612 case PTE_SW: printf("sw]\n"); break;
2613 }
2614 }
2615 #endif
2616
2617 #if defined(DDB)
2618 void
2619 pmap_pteg_check(void)
2620 {
2621 volatile struct pte *pt;
2622 int i;
2623 int ptegidx;
2624 u_int p_valid = 0;
2625 u_int s_valid = 0;
2626 u_int invalid = 0;
2627
2628 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2629 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2630 if (pt->pte_hi & PTE_VALID) {
				if (pt->pte_hi & PTE_HID)
					s_valid++;
				else
					p_valid++;
2637 } else
2638 invalid++;
2639 }
2640 }
2641 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2642 p_valid, p_valid, s_valid, s_valid,
2643 invalid, invalid);
2644 }
2645
2646 void
2647 pmap_print_mmuregs(void)
2648 {
2649 int i;
2650 u_int cpuvers;
2651 #ifndef PMAP_OEA64
2652 vaddr_t addr;
2653 register_t soft_sr[16];
2654 #endif
2655 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
2656 struct bat soft_ibat[4];
2657 struct bat soft_dbat[4];
2658 #endif
2659 paddr_t sdr1;
2660
2661 cpuvers = MFPVR() >> 16;
2662 __asm volatile ("mfsdr1 %0" : "=r"(sdr1));
2663 #ifndef PMAP_OEA64
2664 addr = 0;
2665 for (i = 0; i < 16; i++) {
2666 soft_sr[i] = MFSRIN(addr);
2667 addr += (1 << ADDR_SR_SHFT);
2668 }
2669 #endif
2670
2671 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
2672 /* read iBAT (601: uBAT) registers */
2673 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2674 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2675 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2676 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2677 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2678 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2679 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2680 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2681
2682
2683 if (cpuvers != MPC601) {
2684 /* read dBAT registers */
2685 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2686 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2687 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2688 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2689 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2690 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2691 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2692 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2693 }
2694 #endif
2695
2696 printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
2697 #ifndef PMAP_OEA64
2698 printf("SR[]:\t");
2699 for (i = 0; i < 4; i++)
2700 printf("0x%08lx, ", soft_sr[i]);
2701 printf("\n\t");
2702 for ( ; i < 8; i++)
2703 printf("0x%08lx, ", soft_sr[i]);
2704 printf("\n\t");
2705 for ( ; i < 12; i++)
2706 printf("0x%08lx, ", soft_sr[i]);
2707 printf("\n\t");
2708 for ( ; i < 16; i++)
2709 printf("0x%08lx, ", soft_sr[i]);
2710 printf("\n");
2711 #endif
2712
2713 #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE)
2714 printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2715 for (i = 0; i < 4; i++) {
2716 printf("0x%08lx 0x%08lx, ",
2717 soft_ibat[i].batu, soft_ibat[i].batl);
2718 if (i == 1)
2719 printf("\n\t");
2720 }
2721 if (cpuvers != MPC601) {
2722 printf("\ndBAT[]:\t");
2723 for (i = 0; i < 4; i++) {
2724 printf("0x%08lx 0x%08lx, ",
2725 soft_dbat[i].batu, soft_dbat[i].batl);
2726 if (i == 1)
2727 printf("\n\t");
2728 }
2729 }
2730 printf("\n");
2731 #endif /* PMAP_OEA... */
2732 }
2733
2734 void
2735 pmap_print_pte(pmap_t pm, vaddr_t va)
2736 {
2737 struct pvo_entry *pvo;
2738 volatile struct pte *pt;
2739 int pteidx;
2740
2741 pvo = pmap_pvo_find_va(pm, va, &pteidx);
2742 if (pvo != NULL) {
2743 pt = pmap_pvo_to_pte(pvo, pteidx);
2744 if (pt != NULL) {
2745 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
2746 va, pt,
2747 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2748 pt->pte_hi, pt->pte_lo);
2749 } else {
2750 printf("No valid PTE found\n");
2751 }
2752 } else {
2753 printf("Address not in pmap\n");
2754 }
2755 }
2756
2757 void
2758 pmap_pteg_dist(void)
2759 {
2760 struct pvo_entry *pvo;
2761 int ptegidx;
2762 int depth;
2763 int max_depth = 0;
2764 unsigned int depths[64];
2765
2766 memset(depths, 0, sizeof(depths));
2767 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2768 depth = 0;
2769 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2770 depth++;
2771 }
2772 if (depth > max_depth)
2773 max_depth = depth;
2774 if (depth > 63)
2775 depth = 63;
2776 depths[depth]++;
2777 }
2778
2779 for (depth = 0; depth < 64; depth++) {
2780 printf(" [%2d]: %8u", depth, depths[depth]);
2781 if ((depth & 3) == 3)
2782 printf("\n");
2783 if (depth == max_depth)
2784 break;
2785 }
2786 if ((depth & 3) != 3)
2787 printf("\n");
2788 printf("Max depth found was %d\n", max_depth);
2789 }
#endif /* DDB */
2791
2792 #if defined(PMAPCHECK) || defined(DEBUG)
2793 void
2794 pmap_pvo_verify(void)
2795 {
2796 int ptegidx;
2797 int s;
2798
2799 s = splvm();
2800 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2801 struct pvo_entry *pvo;
2802 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2803 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2804 panic("pmap_pvo_verify: invalid pvo %p "
2805 "on list %#x", pvo, ptegidx);
2806 pmap_pvo_check(pvo);
2807 }
2808 }
2809 splx(s);
2810 }
#endif /* PMAPCHECK || DEBUG */
2812
2813
2814 void *
2815 pmap_pool_ualloc(struct pool *pp, int flags)
2816 {
2817 struct pvo_page *pvop;
2818
2819 if (uvm.page_init_done != true) {
2820 return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2821 }
2822
2823 PMAP_LOCK();
2824 pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2825 if (pvop != NULL) {
2826 pmap_upvop_free--;
2827 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
2828 PMAP_UNLOCK();
2829 return pvop;
2830 }
2831 PMAP_UNLOCK();
2832 return pmap_pool_malloc(pp, flags);
2833 }
2834
2835 void *
2836 pmap_pool_malloc(struct pool *pp, int flags)
2837 {
2838 struct pvo_page *pvop;
2839 struct vm_page *pg;
2840
2841 PMAP_LOCK();
2842 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2843 if (pvop != NULL) {
2844 pmap_mpvop_free--;
2845 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
2846 PMAP_UNLOCK();
2847 return pvop;
2848 }
2849 PMAP_UNLOCK();
2850 again:
2851 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2852 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2853 if (__predict_false(pg == NULL)) {
2854 if (flags & PR_WAITOK) {
2855 uvm_wait("plpg");
2856 goto again;
2857 } else {
			return NULL;
2859 }
2860 }
2861 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
2862 return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
2863 }
2864
2865 void
2866 pmap_pool_ufree(struct pool *pp, void *va)
2867 {
2868 struct pvo_page *pvop;
2869 #if 0
2870 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2871 pmap_pool_mfree(va, size, tag);
2872 return;
2873 }
2874 #endif
2875 PMAP_LOCK();
2876 pvop = va;
2877 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2878 pmap_upvop_free++;
2879 if (pmap_upvop_free > pmap_upvop_maxfree)
2880 pmap_upvop_maxfree = pmap_upvop_free;
2881 PMAP_UNLOCK();
2882 }
2883
2884 void
2885 pmap_pool_mfree(struct pool *pp, void *va)
2886 {
2887 struct pvo_page *pvop;
2888
2889 PMAP_LOCK();
2890 pvop = va;
2891 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2892 pmap_mpvop_free++;
2893 if (pmap_mpvop_free > pmap_mpvop_maxfree)
2894 pmap_mpvop_maxfree = pmap_mpvop_free;
2895 PMAP_UNLOCK();
2896 #if 0
2897 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2898 #endif
2899 }
2900
2901 /*
2902 * This routine in bootstraping to steal to-be-managed memory (which will
2903 * then be unmanaged). We use it to grab from the first 256MB for our
2904 * pmap needs and above 256MB for other stuff.
2905 */
2906 vaddr_t
2907 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2908 {
2909 vsize_t size;
2910 vaddr_t va;
2911 paddr_t pa = 0;
2912 int npgs, bank;
2913 struct vm_physseg *ps;
2914
2915 if (uvm.page_init_done == true)
2916 panic("pmap_steal_memory: called _after_ bootstrap");
2917
2918 *vstartp = VM_MIN_KERNEL_ADDRESS;
2919 *vendp = VM_MAX_KERNEL_ADDRESS;
2920
2921 size = round_page(vsize);
2922 npgs = atop(size);
2923
2924 /*
2925 * PA 0 will never be among those given to UVM so we can use it
2926 * to indicate we couldn't steal any memory.
2927 */
2928 for (bank = 0; bank < vm_nphysseg; bank++) {
2929 ps = VM_PHYSMEM_PTR(bank);
2930 if (ps->free_list == VM_FREELIST_FIRST256 &&
2931 ps->avail_end - ps->avail_start >= npgs) {
2932 pa = ptoa(ps->avail_start);
2933 break;
2934 }
2935 }
2936
2937 if (pa == 0)
2938 panic("pmap_steal_memory: no approriate memory to steal!");
2939
2940 ps->avail_start += npgs;
2941 ps->start += npgs;
2942
2943 /*
2944 * If we've used up all the pages in the segment, remove it and
2945 * compact the list.
2946 */
2947 if (ps->avail_start == ps->end) {
2948 /*
2949 * If this was the last one, then a very bad thing has occurred
2950 */
2951 if (--vm_nphysseg == 0)
2952 panic("pmap_steal_memory: out of memory!");
2953
2954 printf("pmap_steal_memory: consumed bank %d\n", bank);
2955 for (; bank < vm_nphysseg; bank++, ps++) {
2956 ps[0] = ps[1];
2957 }
2958 }
2959
2960 va = (vaddr_t) pa;
2961 memset((void *) va, 0, size);
2962 pmap_pages_stolen += npgs;
2963 #ifdef DEBUG
2964 if (pmapdebug && npgs > 1) {
2965 u_int cnt = 0;
2966 for (bank = 0; bank < vm_nphysseg; bank++) {
2967 ps = VM_PHYSMEM_PTR(bank);
2968 cnt += ps->avail_end - ps->avail_start;
2969 }
2970 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2971 npgs, pmap_pages_stolen, cnt);
2972 }
2973 #endif
2974
2975 return va;
2976 }
2977
2978 /*
2979 * Find a chuck of memory with right size and alignment.
2980 */
2981 paddr_t
2982 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2983 {
2984 struct mem_region *mp;
2985 paddr_t s, e;
2986 int i, j;
2987
2988 size = round_page(size);
2989
2990 DPRINTFN(BOOT,
2991 "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
2992 size, alignment, at_end);
2993
2994 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2995 panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
2996 alignment);
2997
2998 if (at_end) {
2999 if (alignment != PAGE_SIZE)
3000 panic("pmap_boot_find_memory: invalid ending "
3001 "alignment %#" _PRIxpa, alignment);
3002
3003 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
3004 s = mp->start + mp->size - size;
3005 if (s >= mp->start && mp->size >= size) {
3006 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3007 DPRINTFN(BOOT,
3008 "pmap_boot_find_memory: b-avail[%d] start "
3009 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3010 mp->start, mp->size);
3011 mp->size -= size;
3012 DPRINTFN(BOOT,
3013 "pmap_boot_find_memory: a-avail[%d] start "
3014 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3015 mp->start, mp->size);
3016 return s;
3017 }
3018 }
3019 panic("pmap_boot_find_memory: no available memory");
3020 }
3021
3022 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3023 s = (mp->start + alignment - 1) & ~(alignment-1);
3024 e = s + size;
3025
3026 /*
3027 * Is the calculated region entirely within the region?
3028 */
3029 if (s < mp->start || e > mp->start + mp->size)
3030 continue;
3031
3032 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3033 if (s == mp->start) {
3034 /*
3035 * If the block starts at the beginning of region,
3036 * adjust the size & start. (the region may now be
3037 * zero in length)
3038 */
3039 DPRINTFN(BOOT,
3040 "pmap_boot_find_memory: b-avail[%d] start "
3041 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3042 mp->start += size;
3043 mp->size -= size;
3044 DPRINTFN(BOOT,
3045 "pmap_boot_find_memory: a-avail[%d] start "
3046 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3047 } else if (e == mp->start + mp->size) {
3048 /*
3049 * If the block starts at the beginning of region,
3050 * adjust only the size.
3051 */
3052 DPRINTFN(BOOT,
3053 "pmap_boot_find_memory: b-avail[%d] start "
3054 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3055 mp->size -= size;
3056 DPRINTFN(BOOT,
3057 "pmap_boot_find_memory: a-avail[%d] start "
3058 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3059 } else {
3060 /*
3061 * Block is in the middle of the region, so we
3062 * have to split it in two.
3063 */
3064 for (j = avail_cnt; j > i + 1; j--) {
3065 avail[j] = avail[j-1];
3066 }
3067 DPRINTFN(BOOT,
3068 "pmap_boot_find_memory: b-avail[%d] start "
3069 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3070 mp[1].start = e;
3071 mp[1].size = mp[0].start + mp[0].size - e;
3072 mp[0].size = s - mp[0].start;
3073 avail_cnt++;
3074 for (; i < avail_cnt; i++) {
3075 DPRINTFN(BOOT,
3076 "pmap_boot_find_memory: a-avail[%d] "
3077 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
3078 avail[i].start, avail[i].size);
3079 }
3080 }
3081 KASSERT(s == (uintptr_t) s);
3082 return s;
3083 }
3084 panic("pmap_boot_find_memory: not enough memory for "
3085 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
3086 }
3087
/* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
3089 #if defined (PMAP_OEA64_BRIDGE)
3090 int
3091 pmap_setup_segment0_map(int use_large_pages, ...)
3092 {
3093 vaddr_t va;
3094
3095 register_t pte_lo = 0x0;
3096 int ptegidx = 0, i = 0;
3097 struct pte pte;
3098 va_list ap;
3099
3100 /* Coherent + Supervisor RW, no user access */
3101 pte_lo = PTE_M;
3102
	/* XXXSL
	 * Map the first segment 1:1.  We'll be careful not to spill kernel
	 * entries later; those have to take priority.
	 */
3107 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3108 ptegidx = va_to_pteg(pmap_kernel(), va);
3109 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3110 i = pmap_pte_insert(ptegidx, &pte);
3111 }
3112
3113 va_start(ap, use_large_pages);
	while (1) {
		paddr_t pa;
		size_t size;
		vaddr_t va_end;

		va = va_arg(ap, vaddr_t);

		if (va == 0)
			break;

		pa = va_arg(ap, paddr_t);
		size = va_arg(ap, size_t);
		va_end = va + size;

		for (; va < va_end; va += 0x1000, pa += 0x1000) {
3127 #if 0
3128 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa);
3129 #endif
3130 ptegidx = va_to_pteg(pmap_kernel(), va);
3131 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3132 i = pmap_pte_insert(ptegidx, &pte);
3133 }
3134 }
3135
3136 TLBSYNC();
3137 SYNC();
3138 return (0);
3139 }
3140 #endif /* PMAP_OEA64_BRIDGE */
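
/*
 * Illustrative only (not compiled): pmap_setup_segment0_map() takes a
 * list of (va, pa, size) triples terminated by a zero va, mapped on
 * top of the 1:1 mapping of segment 0; "fb_va", "fb_pa" and "fb_size"
 * below are hypothetical names.
 */
#if 0
	pmap_setup_segment0_map(0,
	    (vaddr_t)fb_va, (paddr_t)fb_pa, (size_t)fb_size,
	    (vaddr_t)0);
#endif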
3141
3142 /*
3143 * This is not part of the defined PMAP interface and is specific to the
3144 * PowerPC architecture. This is called during initppc, before the system
3145 * is really initialized.
3146 */
3147 void
3148 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
3149 {
3150 struct mem_region *mp, tmp;
3151 paddr_t s, e;
3152 psize_t size;
3153 int i, j;
3154
3155 /*
3156 * Get memory.
3157 */
3158 mem_regions(&mem, &avail);
3159 #if defined(DEBUG)
3160 if (pmapdebug & PMAPDEBUG_BOOT) {
3161 printf("pmap_bootstrap: memory configuration:\n");
3162 for (mp = mem; mp->size; mp++) {
3163 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
3164 mp->start, mp->size);
3165 }
3166 for (mp = avail; mp->size; mp++) {
3167 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
3168 mp->start, mp->size);
3169 }
3170 }
3171 #endif
3172
3173 /*
3174 * Find out how much physical memory we have and in how many chunks.
3175 */
3176 for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3177 if (mp->start >= pmap_memlimit)
3178 continue;
3179 if (mp->start + mp->size > pmap_memlimit) {
3180 size = pmap_memlimit - mp->start;
3181 physmem += btoc(size);
3182 } else {
3183 physmem += btoc(mp->size);
3184 }
3185 mem_cnt++;
3186 }
3187
3188 /*
3189 * Count the number of available entries.
3190 */
3191 for (avail_cnt = 0, mp = avail; mp->size; mp++)
3192 avail_cnt++;
3193
3194 /*
3195 * Page align all regions.
3196 */
3197 kernelstart = trunc_page(kernelstart);
3198 kernelend = round_page(kernelend);
3199 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3200 s = round_page(mp->start);
3201 mp->size -= (s - mp->start);
3202 mp->size = trunc_page(mp->size);
3203 mp->start = s;
3204 e = mp->start + mp->size;
3205
3206 DPRINTFN(BOOT,
3207 "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3208 i, mp->start, mp->size);
3209
3210 /*
3211 * Don't allow the end to run beyond our artificial limit
3212 */
3213 if (e > pmap_memlimit)
3214 e = pmap_memlimit;
3215
3216 /*
3217 * Is this region empty or strange? skip it.
3218 */
3219 if (e <= s) {
3220 mp->start = 0;
3221 mp->size = 0;
3222 continue;
3223 }
3224
3225 /*
3226 * Does this overlap the beginning of kernel?
3227 * Does extend past the end of the kernel?
3228 */
3229 else if (s < kernelstart && e > kernelstart) {
3230 if (e > kernelend) {
3231 avail[avail_cnt].start = kernelend;
3232 avail[avail_cnt].size = e - kernelend;
3233 avail_cnt++;
3234 }
3235 mp->size = kernelstart - s;
3236 }
3237 /*
3238 * Check whether this region overlaps the end of the kernel.
3239 */
3240 else if (s < kernelend && e > kernelend) {
3241 mp->start = kernelend;
3242 mp->size = e - kernelend;
3243 }
3244 /*
3245 * Look whether this regions is completely inside the kernel.
3246 * Nuke it if it does.
3247 */
3248 else if (s >= kernelstart && e <= kernelend) {
3249 mp->start = 0;
3250 mp->size = 0;
3251 }
3252 /*
3253 * If the user imposed a memory limit, enforce it.
3254 */
3255 else if (s >= pmap_memlimit) {
			mp->start = -PAGE_SIZE;	/* so we know why */
3257 mp->size = 0;
3258 }
3259 else {
3260 mp->start = s;
3261 mp->size = e - s;
3262 }
3263 DPRINTFN(BOOT,
3264 "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3265 i, mp->start, mp->size);
3266 }
3267
3268 /*
3269 * Move (and uncount) all the null return to the end.
3270 */
3271 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3272 if (mp->size == 0) {
3273 tmp = avail[i];
3274 avail[i] = avail[--avail_cnt];
3275 avail[avail_cnt] = avail[i];
3276 }
3277 }
3278
3279 /*
3280 * (Bubble)sort them into ascending order.
3281 */
3282 for (i = 0; i < avail_cnt; i++) {
3283 for (j = i + 1; j < avail_cnt; j++) {
3284 if (avail[i].start > avail[j].start) {
3285 tmp = avail[i];
3286 avail[i] = avail[j];
3287 avail[j] = tmp;
3288 }
3289 }
3290 }
3291
3292 /*
3293 * Make sure they don't overlap.
3294 */
3295 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
3296 if (mp[0].start + mp[0].size > mp[1].start) {
3297 mp[0].size = mp[1].start - mp[0].start;
3298 }
3299 DPRINTFN(BOOT,
3300 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3301 i, mp->start, mp->size);
3302 }
3303 DPRINTFN(BOOT,
3304 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3305 i, mp->start, mp->size);
3306
3307 #ifdef PTEGCOUNT
3308 pmap_pteg_cnt = PTEGCOUNT;
3309 #else /* PTEGCOUNT */
3310
3311 pmap_pteg_cnt = 0x1000;
3312
3313 while (pmap_pteg_cnt < physmem)
3314 pmap_pteg_cnt <<= 1;
3315
3316 pmap_pteg_cnt >>= 1;
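
	/*
	 * Illustrative example: with physmem = 0x8000 pages (128MB of 4KB
	 * pages) the loop above doubles 0x1000 up to 0x8000 and the final
	 * shift brings it back to 0x4000 PTEGs, i.e. the largest power of
	 * two below physmem; at 64 bytes per OEA PTEG that is a 1MB hash
	 * table.
	 */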
3317 #endif /* PTEGCOUNT */
3318
3319 #ifdef DEBUG
3320 DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt);
3321 #endif
3322
3323 /*
3324 * Find suitably aligned memory for PTEG hash table.
3325 */
3326 size = pmap_pteg_cnt * sizeof(struct pteg);
3327 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
3328
3329 #ifdef DEBUG
3330 DPRINTFN(BOOT,
3331 "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table);
3332 #endif
3333
3334
3335 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3336 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
3337 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
3338 pmap_pteg_table, size);
3339 #endif
3340
3341 memset(__UNVOLATILE(pmap_pteg_table), 0,
3342 pmap_pteg_cnt * sizeof(struct pteg));
3343 pmap_pteg_mask = pmap_pteg_cnt - 1;
3344
3345 /*
3346 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
3347 * with pages. So we just steal them before giving them to UVM.
3348 */
3349 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
3350 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
3351 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3352 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
3353 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
3354 pmap_pvo_table, size);
3355 #endif
3356
3357 for (i = 0; i < pmap_pteg_cnt; i++)
3358 TAILQ_INIT(&pmap_pvo_table[i]);
3359
3360 #ifndef MSGBUFADDR
3361 /*
3362 * Allocate msgbuf in high memory.
3363 */
3364 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
3365 #endif
3366
3367 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
3368 paddr_t pfstart = atop(mp->start);
3369 paddr_t pfend = atop(mp->start + mp->size);
3370 if (mp->size == 0)
3371 continue;
3372 if (mp->start + mp->size <= SEGMENT_LENGTH) {
3373 uvm_page_physload(pfstart, pfend, pfstart, pfend,
3374 VM_FREELIST_FIRST256);
3375 } else if (mp->start >= SEGMENT_LENGTH) {
3376 uvm_page_physload(pfstart, pfend, pfstart, pfend,
3377 VM_FREELIST_DEFAULT);
3378 } else {
3379 pfend = atop(SEGMENT_LENGTH);
3380 uvm_page_physload(pfstart, pfend, pfstart, pfend,
3381 VM_FREELIST_FIRST256);
3382 pfstart = atop(SEGMENT_LENGTH);
3383 pfend = atop(mp->start + mp->size);
3384 uvm_page_physload(pfstart, pfend, pfstart, pfend,
3385 VM_FREELIST_DEFAULT);
3386 }
3387 }
3388
3389 /*
3390 * Make sure kernel vsid is allocated as well as VSID 0.
3391 */
3392 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3393 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
3394 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3395 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
3396 pmap_vsid_bitmap[0] |= 1;
3397
3398 /*
3399 * Initialize kernel pmap and hardware.
3400 */
3401
3402 /* PMAP_OEA64_BRIDGE does support these instructions */
3403 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
3404 for (i = 0; i < 16; i++) {
3405 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
3406 __asm volatile ("mtsrin %0,%1"
3407 :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
3408 }
3409
3410 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
3411 __asm volatile ("mtsr %0,%1"
3412 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
3413 #ifdef KERNEL2_SR
3414 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
3415 __asm volatile ("mtsr %0,%1"
3416 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
3417 #endif
3418 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
3419 #if defined (PMAP_OEA)
3420 for (i = 0; i < 16; i++) {
3421 if (iosrtable[i] & SR601_T) {
3422 pmap_kernel()->pm_sr[i] = iosrtable[i];
3423 __asm volatile ("mtsrin %0,%1"
3424 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
3425 }
3426 }
3427 __asm volatile ("sync; mtsdr1 %0; isync"
3428 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
3429 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
3430 __asm __volatile ("sync; mtsdr1 %0; isync"
3431 :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11))));
3432 #endif
3433 tlbia();
3434
3435 #ifdef ALTIVEC
3436 pmap_use_altivec = cpu_altivec;
3437 #endif
3438
3439 #ifdef DEBUG
3440 if (pmapdebug & PMAPDEBUG_BOOT) {
3441 u_int cnt;
3442 int bank;
3443 char pbuf[9];
3444 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
3445 cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start;
3446 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
3447 bank,
3448 ptoa(VM_PHYSMEM_PTR(bank)->avail_start),
3449 ptoa(VM_PHYSMEM_PTR(bank)->avail_end),
3450 ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start));
3451 }
3452 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3453 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3454 pbuf, cnt);
3455 }
3456 #endif
3457
3458 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3459 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
3460 &pmap_pool_uallocator, IPL_VM);
3461
3462 pool_setlowat(&pmap_upvo_pool, 252);
3463
3464 pool_init(&pmap_pool, sizeof(struct pmap),
3465 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
3466 IPL_NONE);
3467
3468 #if defined(PMAP_NEED_MAPKERNEL) || 1
3469 {
3470 struct pmap *pm = pmap_kernel();
3471 #if defined(PMAP_NEED_FULL_MAPKERNEL)
3472 extern int etext[], kernel_text[];
	vaddr_t va, va_etext = (vaddr_t) etext;
3474 #endif
3475 paddr_t pa, pa_end;
3476 register_t sr;
3477 struct pte pt;
3478 unsigned int ptegidx;
3479 int bank;
3480
3481 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
3482 pm->pm_sr[0] = sr;
3483
3484 for (bank = 0; bank < vm_nphysseg; bank++) {
3485 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
3486 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
3487 for (; pa < pa_end; pa += PAGE_SIZE) {
3488 ptegidx = va_to_pteg(pm, pa);
3489 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
3490 pmap_pte_insert(ptegidx, &pt);
3491 }
3492 }
3493
3494 #if defined(PMAP_NEED_FULL_MAPKERNEL)
3495 va = (vaddr_t) kernel_text;
3496
3497 for (pa = kernelstart; va < va_etext;
3498 pa += PAGE_SIZE, va += PAGE_SIZE) {
3499 ptegidx = va_to_pteg(pm, va);
3500 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3501 pmap_pte_insert(ptegidx, &pt);
3502 }
3503
3504 for (; pa < kernelend;
3505 pa += PAGE_SIZE, va += PAGE_SIZE) {
3506 ptegidx = va_to_pteg(pm, va);
3507 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3508 pmap_pte_insert(ptegidx, &pt);
3509 }
3510
3511 for (va = 0, pa = 0; va < kernelstart;
3512 pa += PAGE_SIZE, va += PAGE_SIZE) {
3513 ptegidx = va_to_pteg(pm, va);
3514 if (va < 0x3000)
3515 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3516 else
3517 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3518 pmap_pte_insert(ptegidx, &pt);
3519 }
3520 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
3521 pa += PAGE_SIZE, va += PAGE_SIZE) {
3522 ptegidx = va_to_pteg(pm, va);
3523 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3524 pmap_pte_insert(ptegidx, &pt);
3525 }
3526 #endif
3527
3528 __asm volatile ("mtsrin %0,%1"
3529 :: "r"(sr), "r"(kernelstart));
3530 }
3531 #endif
3532 }
3533