/*	$NetBSD: pmap.c,v 1.98 2020/07/06 09:34:17 rin Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.98 2020/07/06 09:34:17 rin Exp $");

#define	PMAP_NOOPNAMES

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_physseg.h>

#include <machine/powerpc.h>
#include <powerpc/bat.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

#ifdef ALTIVEC
extern int pmap_use_altivec;
#endif

#ifdef PMAP_MEMLIMIT
static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
static paddr_t pmap_memlimit = -PAGE_SIZE;	/* there is no limit */
#endif

extern struct pmap kernel_pmap_;
static unsigned int pmap_pages_stolen;
static u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
static u_long pmap_pvo_enter_depth;
static u_long pmap_pvo_remove_depth;
#endif

#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
# define	PMAP_OEA 1
#endif

#if defined(PMAP_OEA)
#define	_PRIxpte	"lx"
#else
#define	_PRIxpte	PRIx64
#endif
#define	_PRIxpa		"lx"
#define	_PRIxva		"lx"
#define	_PRIsr		"lx"

#ifdef PMAP_NEEDS_FIXUP
#if defined(PMAP_OEA)
#define	PMAPNAME(name)	pmap32_##name
#elif defined(PMAP_OEA64)
#define	PMAPNAME(name)	pmap64_##name
#elif defined(PMAP_OEA64_BRIDGE)
#define	PMAPNAME(name)	pmap64bridge_##name
#else
#error unknown variant for pmap
#endif
#endif /* PMAP_NEEDS_FIXUP */

#ifdef PMAPNAME
#define	STATIC			static
#define	pmap_pte_spill		PMAPNAME(pte_spill)
#define	pmap_real_memory	PMAPNAME(real_memory)
#define	pmap_init		PMAPNAME(init)
#define	pmap_virtual_space	PMAPNAME(virtual_space)
#define	pmap_create		PMAPNAME(create)
#define	pmap_reference		PMAPNAME(reference)
#define	pmap_destroy		PMAPNAME(destroy)
#define	pmap_copy		PMAPNAME(copy)
#define	pmap_update		PMAPNAME(update)
#define	pmap_enter		PMAPNAME(enter)
#define	pmap_remove		PMAPNAME(remove)
#define	pmap_kenter_pa		PMAPNAME(kenter_pa)
#define	pmap_kremove		PMAPNAME(kremove)
#define	pmap_extract		PMAPNAME(extract)
#define	pmap_protect		PMAPNAME(protect)
#define	pmap_unwire		PMAPNAME(unwire)
#define	pmap_page_protect	PMAPNAME(page_protect)
#define	pmap_query_bit		PMAPNAME(query_bit)
#define	pmap_clear_bit		PMAPNAME(clear_bit)

#define	pmap_activate		PMAPNAME(activate)
#define	pmap_deactivate		PMAPNAME(deactivate)

#define	pmap_pinit		PMAPNAME(pinit)
#define	pmap_procwr		PMAPNAME(procwr)

#define	pmap_pool		PMAPNAME(pool)
#define	pmap_upvo_pool		PMAPNAME(upvo_pool)
#define	pmap_mpvo_pool		PMAPNAME(mpvo_pool)
#define	pmap_pvo_table		PMAPNAME(pvo_table)
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
#define	pmap_pte_print		PMAPNAME(pte_print)
#define	pmap_pteg_check		PMAPNAME(pteg_check)
#define	pmap_print_mmuregs	PMAPNAME(print_mmuregs)
#define	pmap_print_pte		PMAPNAME(print_pte)
#define	pmap_pteg_dist		PMAPNAME(pteg_dist)
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define	pmap_pvo_verify		PMAPNAME(pvo_verify)
#define	pmapcheck		PMAPNAME(check)
#endif
#if defined(DEBUG) || defined(PMAPDEBUG)
#define	pmapdebug		PMAPNAME(debug)
#endif
#define	pmap_steal_memory	PMAPNAME(steal_memory)
#define	pmap_bootstrap		PMAPNAME(bootstrap)
#else
#define	STATIC			/* nothing */
#endif /* PMAPNAME */

STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
STATIC void pmap_real_memory(paddr_t *, psize_t *);
STATIC void pmap_init(void);
STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
STATIC pmap_t pmap_create(void);
STATIC void pmap_reference(pmap_t);
STATIC void pmap_destroy(pmap_t);
STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
STATIC void pmap_update(pmap_t);
STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_kremove(vaddr_t, vsize_t);
STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);

STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
STATIC void pmap_unwire(pmap_t, vaddr_t);
STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
STATIC bool pmap_query_bit(struct vm_page *, int);
STATIC bool pmap_clear_bit(struct vm_page *, int);

STATIC void pmap_activate(struct lwp *);
STATIC void pmap_deactivate(struct lwp *);

STATIC void pmap_pinit(pmap_t pm);
STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
STATIC void pmap_pte_print(volatile struct pte *);
STATIC void pmap_pteg_check(void);
STATIC void pmap_print_mmuregs(void);
STATIC void pmap_print_pte(pmap_t, vaddr_t);
STATIC void pmap_pteg_dist(void);
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
STATIC void pmap_pvo_verify(void);
#endif
STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
STATIC void pmap_bootstrap(paddr_t, paddr_t);

#ifdef PMAPNAME
const struct pmap_ops PMAPNAME(ops) = {
	.pmapop_pte_spill = pmap_pte_spill,
	.pmapop_real_memory = pmap_real_memory,
	.pmapop_init = pmap_init,
	.pmapop_virtual_space = pmap_virtual_space,
	.pmapop_create = pmap_create,
	.pmapop_reference = pmap_reference,
	.pmapop_destroy = pmap_destroy,
	.pmapop_copy = pmap_copy,
	.pmapop_update = pmap_update,
	.pmapop_enter = pmap_enter,
	.pmapop_remove = pmap_remove,
	.pmapop_kenter_pa = pmap_kenter_pa,
	.pmapop_kremove = pmap_kremove,
	.pmapop_extract = pmap_extract,
	.pmapop_protect = pmap_protect,
	.pmapop_unwire = pmap_unwire,
	.pmapop_page_protect = pmap_page_protect,
	.pmapop_query_bit = pmap_query_bit,
	.pmapop_clear_bit = pmap_clear_bit,
	.pmapop_activate = pmap_activate,
	.pmapop_deactivate = pmap_deactivate,
	.pmapop_pinit = pmap_pinit,
	.pmapop_procwr = pmap_procwr,
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
	.pmapop_pte_print = pmap_pte_print,
	.pmapop_pteg_check = pmap_pteg_check,
	.pmapop_print_mmuregs = pmap_print_mmuregs,
	.pmapop_print_pte = pmap_print_pte,
	.pmapop_pteg_dist = pmap_pteg_dist,
#else
	.pmapop_pte_print = NULL,
	.pmapop_pteg_check = NULL,
	.pmapop_print_mmuregs = NULL,
	.pmapop_print_pte = NULL,
	.pmapop_pteg_dist = NULL,
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
	.pmapop_pvo_verify = pmap_pvo_verify,
#else
	.pmapop_pvo_verify = NULL,
#endif
	.pmapop_steal_memory = pmap_steal_memory,
	.pmapop_bootstrap = pmap_bootstrap,
};
#endif /* !PMAPNAME */

/*
 * The following structure is aligned to 32 bytes.
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0	/* PVO has been inserted */
#define	PVO_SPILL_UNSET		1	/* PVO has been evicted */
#define	PVO_SPILL_SET		2	/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3	/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4	/* PVO has changed */
#define	PVO_PMAP_PROTECT	5	/* PVO has changed */
#define	PVO_REMOVE		6	/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
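
/*
 * An illustrative example of the flag encoding above (the values are
 * hypothetical): pvo_vaddr is page aligned, so its low bits are free
 * for bookkeeping.  For a PVO mapping va 0x2000 that landed in slot 5
 * of its PTEG:
 *
 *	pvo->pvo_vaddr = 0x2000 | 5 | PVO_PTEGIDX_VALID;
 *	PVO_VADDR(pvo)       == 0x2000	(the address bits alone)
 *	PVO_PTEGIDX_GET(pvo) == 5	(slot within the PTEG)
 *
 * PVO_WHERE(pvo, SPILL_INSERT) records the most recent event affecting
 * the PVO in bits 8..11, which is useful when inspecting a crash dump.
 */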

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
static u_long pmap_upvop_free;
static u_long pmap_upvop_maxfree;
static u_long pmap_mpvop_free;
static u_long pmap_mpvop_maxfree;

static void *pmap_pool_ualloc(struct pool *, int);
static void *pmap_pool_malloc(struct pool *, int);

static void pmap_pool_ufree(struct pool *, void *);
static void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
static void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)			\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
static int pmap_pte_insert(int, struct pte *);
static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
static void pmap_pvo_free(struct pvo_entry *);
static void pmap_pvo_free_list(struct pvo_head *);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
static void pvo_set_exec(struct pvo_entry *);
static void pvo_clear_exec(struct pvo_entry *);

static void tlbia(void);

static void pmap_release(pmap_t);
static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x, ...)	printf(x, __VA_ARGS__)
# define DPRINTFN(n, x, ...)	do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0)
#else
# define DPRINTF(x, ...)	do { } while (0)
# define DPRINTFN(n, x, ...)	do { } while (0)
#endif
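
/*
 * An illustrative use of the macros above (the call site shown is
 * hypothetical): the first argument is a PMAPDEBUG_* suffix, so
 *
 *	DPRINTFN(ENTER, "pmap_enter: pm %p va %#lx\n", pm, va);
 *
 * prints only while (pmapdebug & PMAPDEBUG_ENTER) is set, and compiles
 * away entirely when neither DEBUG nor PMAPDEBUG is defined.
 */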


#ifdef PMAPCOUNTERS
/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_mappings;
extern struct evcnt pmap_evcnt_unmappings;

extern struct evcnt pmap_evcnt_kernel_mappings;
extern struct evcnt pmap_evcnt_kernel_unmappings;

extern struct evcnt pmap_evcnt_mappings_replaced;

extern struct evcnt pmap_evcnt_exec_mappings;
extern struct evcnt pmap_evcnt_exec_cached;

extern struct evcnt pmap_evcnt_exec_synced;
extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;

extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;

extern struct evcnt pmap_evcnt_updates;
extern struct evcnt pmap_evcnt_collects;
extern struct evcnt pmap_evcnt_copies;

extern struct evcnt pmap_evcnt_ptes_spilled;
extern struct evcnt pmap_evcnt_ptes_unspilled;
extern struct evcnt pmap_evcnt_ptes_evicted;

extern struct evcnt pmap_evcnt_ptes_primary[8];
extern struct evcnt pmap_evcnt_ptes_secondary[8];
extern struct evcnt pmap_evcnt_ptes_removed;
extern struct evcnt pmap_evcnt_ptes_changed;
extern struct evcnt pmap_evcnt_pvos_reclaimed;
extern struct evcnt pmap_evcnt_pvos_failed;

extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#ifdef MD_TLBSYNC
#define TLBSYNC()	MD_TLBSYNC()
#else
#define	TLBSYNC()	__asm volatile("tlbsync")
#endif
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined(DDB) && !defined(PMAP_OEA64)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif /* DDB && !PMAP_OEA64 */

#if defined (PMAP_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PMAP_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}

static inline u_int32_t
mfrtcltbl(void)
{
#ifdef PPC_OEA601
	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
#endif
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}
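
/*
 * A note on the loops above: tlbie invalidates the TLB congruence class
 * selected by low-order EA bits, so stepping one page at a time through
 * 0x00040000 (64 pages) is enough to touch every class on 604-class
 * MMUs; the bounds here are processor specific, not architectural.
 */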

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}
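
/*
 * A worked example of the hash above, with made-up numbers: given
 * pmap_pteg_mask == 0x3ff, a VSID of 0x123, and va 0x0030f000, the
 * page-index field (va & ADDR_PIDX) >> ADDR_PIDX_SHFT is 0x30f, so
 * the PTEG index is (0x123 ^ 0x30f) & 0x3ff == 0x22c.
 */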

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PMAP_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PMAP_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
	struct vm_page *pg;
	struct vm_page_md *md;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	md = VM_PAGE_TO_MD(pg);
	return &md->mdpg_pvoh;
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return &md->mdpg_pvoh;
}


static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs &= ~ptebit;
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return md->mdpg_attrs;
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs |= ptebit;
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
		~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PMAP_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#endif /* PMAP_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

static int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
		ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo);
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}
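
/*
 * A minimal sketch of how the insert above is typically driven (the
 * caller shown is hypothetical):
 *
 *	int ptegidx = va_to_pteg(pm, va);
 *	int slot = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
 *	if (slot >= 0)
 *		PVO_PTEGIDX_SET(pvo, slot);
 *
 * A return of -1 means both PTEGs were full and the caller must fall
 * back to eviction (see pmap_pte_spill below).  On success, PTE_HID in
 * pvo_pte records whether the secondary hash won.
 */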

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
		    < PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs are always first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move
				 * this (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				PMAP_UNLOCK();
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		PMAP_UNLOCK();
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA of the PVO
	 * we are replacing even though it's valid; if we don't,
	 * we lose any ref/chg bit changes contained in the TLB
	 * entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	PMAP_UNLOCK();
	return 1;
}

/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}
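
/*
 * An example of the clamping above, with made-up numbers: for a single
 * region [0x00000000, 0x08000000) and a request of *start = 0x07ff0000,
 * *size = 0x20000, the tail runs past the region, so *size is trimmed
 * to 0x10000.  A range that overlaps no region comes back with
 * *size = 0.
 */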

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator, IPL_NONE);

	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;

}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS);
	memset((void *)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE, "pmap_create: pm %p:\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
	    pm,
	    pm->pm_sr[0], pm->pm_sr[1],
	    pm->pm_sr[2], pm->pm_sr[3],
	    pm->pm_sr[4], pm->pm_sr[5],
	    pm->pm_sr[6], pm->pm_sr[7],
	    pm->pm_sr[8], pm->pm_sr[9],
	    pm->pm_sr[10], pm->pm_sr[11],
	    pm->pm_sr[12], pm->pm_sr[13],
	    pm->pm_sr[14], pm->pm_sr[15]);
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	register_t entropy = MFTB();
	register_t mask;
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	PMAP_LOCK();
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		static register_t pmap_vsidcontext;
		register_t hash;
		unsigned int n;

		/* Create a new value by multiplying by a prime and adding
		 * in entropy from the timebase register.  This is to make
		 * the VSID more random so that the PT Hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0) {		/* 0 is special, avoid it */
			entropy += 0xbadf00d;
			continue;
		}
		n = hash >> 5;
		mask = 1L << (hash & (VSID_NBPW-1));
		hash = pmap_vsidcontext;
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (~pmap_vsid_bitmap[n] == 0) {
				entropy = hash ^ (hash >> 16);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1L << i;
			hash &= ~(VSID_NBPW-1);
			hash |= i;
		}
		hash &= PTE_VSID >> PTE_VSID_SHFT;
		pmap_vsid_bitmap[n] |= mask;
		pm->pm_vsid = hash;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
			    SR_NOEXEC;
#endif
		PMAP_UNLOCK();
		return;
	}
	PMAP_UNLOCK();
	panic("pmap_pinit: out of segments");
}
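
/*
 * A sketch of the VSID bookkeeping above: candidate hashes are tracked
 * one bit each in pmap_vsid_bitmap[], VSID_NBPW (32) bits per word, so
 * hash h lives at word h / 32, bit h % 32.  On a collision the code
 * falls back to the first clear bit in the same word, letting up to 32
 * nearby VSIDs be handed out per hash bucket before it must retry.
 */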

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	atomic_inc_uint(&pm->pm_refs);
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);

	PMAP_LOCK();
	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = pm->pm_vsid & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;

	KASSERT(pmap_vsid_bitmap[idx] & mask);
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_UNLOCK();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

static inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by
	 * grabbing the PTEG slot index kept in the low bits of
	 * pvo_vaddr (PVO_PTEGIDX) and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}
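
/*
 * An illustrative run of the index math above: a PVO in PTEG 0x22c,
 * slot 5, on the primary hash gives pteidx = 0x22c * 8 + 5 = 0x1165.
 * Had PTE_HID been set, xoring with pmap_pteg_mask * 8 would flip the
 * index into the corresponding slot of the secondary group.
 */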

volatile struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile struct pte *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
1384 1.12 matt 		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
1385 1.1 matt "pmap_pteg_table but valid in pvo", pvo, pt);
1386 1.1 matt }
1387 1.1 matt return NULL;
1388 1.1 matt #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1389 1.1 matt }
1390 1.1 matt
1391 1.1 matt struct pvo_entry *
1392 1.1 matt pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1393 1.1 matt {
1394 1.1 matt struct pvo_entry *pvo;
1395 1.1 matt int ptegidx;
1396 1.1 matt
1397 1.1 matt va &= ~ADDR_POFF;
1398 1.2 matt ptegidx = va_to_pteg(pm, va);
1399 1.1 matt
1400 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1401 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1402 1.1 matt if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1403 1.1 matt panic("pmap_pvo_find_va: invalid pvo %p on "
1404 1.1 matt "list %#x (%p)", pvo, ptegidx,
1405 1.1 matt &pmap_pvo_table[ptegidx]);
1406 1.1 matt #endif
1407 1.1 matt if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1408 1.1 matt if (pteidx_p)
1409 1.1 matt *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1410 1.1 matt return pvo;
1411 1.1 matt }
1412 1.1 matt }
1413 1.38 sanjayl if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
1414 1.54 mlelstv panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
1415 1.53 garbled __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
1416 1.1 matt return NULL;
1417 1.1 matt }
1418 1.1 matt
1419 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK)
1420 1.1 matt void
1421 1.1 matt pmap_pvo_check(const struct pvo_entry *pvo)
1422 1.1 matt {
1423 1.1 matt struct pvo_head *pvo_head;
1424 1.1 matt struct pvo_entry *pvo0;
1425 1.2 matt volatile struct pte *pt;
1426 1.1 matt int failed = 0;
1427 1.1 matt
1428 1.50 ad PMAP_LOCK();
1429 1.50 ad
1430 1.1 matt if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1431 1.1 matt panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1432 1.1 matt
1433 1.1 matt if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1434 1.1 matt printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1435 1.1 matt pvo, pvo->pvo_pmap);
1436 1.1 matt failed = 1;
1437 1.1 matt }
1438 1.1 matt
1439 1.1 matt if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1440 1.1 matt (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1441 1.1 matt 		printf("pmap_pvo_check: pvo %p: invalid olink address %p\n",
1442 1.1 matt pvo, TAILQ_NEXT(pvo, pvo_olink));
1443 1.1 matt failed = 1;
1444 1.1 matt }
1445 1.1 matt
1446 1.1 matt if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1447 1.1 matt (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1448 1.1 matt 		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
1449 1.1 matt pvo, LIST_NEXT(pvo, pvo_vlink));
1450 1.1 matt failed = 1;
1451 1.1 matt }
1452 1.1 matt
1453 1.39 matt if (PVO_MANAGED_P(pvo)) {
1454 1.1 matt pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1455 1.1 matt } else {
1456 1.1 matt if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1457 1.1 matt printf("pmap_pvo_check: pvo %p: non kernel address "
1458 1.1 matt "on kernel unmanaged list\n", pvo);
1459 1.1 matt failed = 1;
1460 1.1 matt }
1461 1.1 matt pvo_head = &pmap_pvo_kunmanaged;
1462 1.1 matt }
1463 1.1 matt LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1464 1.1 matt if (pvo0 == pvo)
1465 1.1 matt break;
1466 1.1 matt }
1467 1.1 matt if (pvo0 == NULL) {
1468 1.1 matt printf("pmap_pvo_check: pvo %p: not present "
1469 1.1 matt "on its vlist head %p\n", pvo, pvo_head);
1470 1.1 matt failed = 1;
1471 1.1 matt }
1472 1.1 matt if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1473 1.1 matt printf("pmap_pvo_check: pvo %p: not present "
1474 1.1 matt "on its olist head\n", pvo);
1475 1.1 matt failed = 1;
1476 1.1 matt }
1477 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
1478 1.1 matt if (pt == NULL) {
1479 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1480 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1481 1.1 matt "no PTE\n", pvo);
1482 1.1 matt failed = 1;
1483 1.1 matt }
1484 1.1 matt } else {
1485 1.1 matt if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1486 1.1 matt (uintptr_t) pt >=
1487 1.1 matt (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1488 1.1 matt printf("pmap_pvo_check: pvo %p: pte %p not in "
1489 1.1 matt "pteg table\n", pvo, pt);
1490 1.1 matt failed = 1;
1491 1.1 matt }
1492 1.1 matt if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1493 1.1 matt 			printf("pmap_pvo_check: pvo %p: PVO_PTEGIDX "
1494 1.1 matt 			    "does not match PTE slot\n", pvo);
1495 1.1 matt failed = 1;
1496 1.1 matt }
1497 1.1 matt if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1498 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1499 1.54 mlelstv "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1500 1.54 mlelstv pvo->pvo_pte.pte_hi,
1501 1.54 mlelstv pt->pte_hi);
1502 1.1 matt failed = 1;
1503 1.1 matt }
1504 1.1 matt if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1505 1.1 matt (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1506 1.1 matt printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1507 1.54 mlelstv "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1508 1.54 mlelstv (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
1509 1.54 mlelstv (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
1510 1.1 matt failed = 1;
1511 1.1 matt }
1512 1.1 matt if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1513 1.53 garbled printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
1514 1.53 garbled 			    " does not match PVO's VA %#" _PRIxva "\n",
1515 1.1 matt pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1516 1.1 matt failed = 1;
1517 1.1 matt }
1518 1.1 matt if (failed)
1519 1.1 matt pmap_pte_print(pt);
1520 1.1 matt }
1521 1.1 matt if (failed)
1522 1.1 matt panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1523 1.1 matt pvo->pvo_pmap);
1524 1.50 ad
1525 1.50 ad PMAP_UNLOCK();
1526 1.1 matt }
1527 1.1 matt #endif /* DEBUG || PMAPCHECK */
1528 1.1 matt
1529 1.1 matt /*
1530 1.25 chs * Search the PVO table looking for a non-wired entry.
1531 1.25 chs * If we find one, remove it and return it.
1532 1.25 chs */
1533 1.25 chs
1534 1.25 chs struct pvo_entry *
1535 1.25 chs pmap_pvo_reclaim(struct pmap *pm)
1536 1.25 chs {
1537 1.25 chs struct pvo_tqhead *pvoh;
1538 1.25 chs struct pvo_entry *pvo;
1539 1.25 chs uint32_t idx, endidx;
1540 1.25 chs
1541 1.25 chs endidx = pmap_pvo_reclaim_nextidx;
1542 1.25 chs for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
1543 1.25 chs idx = (idx + 1) & pmap_pteg_mask) {
1544 1.25 chs pvoh = &pmap_pvo_table[idx];
1545 1.25 chs TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
1546 1.39 matt if (!PVO_WIRED_P(pvo)) {
1547 1.33 chs pmap_pvo_remove(pvo, -1, NULL);
1548 1.25 chs pmap_pvo_reclaim_nextidx = idx;
1549 1.26 matt PMAPCOUNT(pvos_reclaimed);
1550 1.25 chs return pvo;
1551 1.25 chs }
1552 1.25 chs }
1553 1.25 chs }
1554 1.25 chs return NULL;
1555 1.25 chs }
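/*
 * Illustrative sketch of the scan order: assuming pmap_pteg_mask == 7
 * and a previous stop at index 5, the loop above visits the PVO lists
 * at indices 6, 7, 0, 1, 2, 3, 4 and gives up only after wrapping all
 * the way around without finding an unwired entry.
 */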
1556 1.25 chs
1557 1.95 chs static struct pool *
1558 1.95 chs pmap_pvo_pl(struct pvo_entry *pvo)
1559 1.95 chs {
1560 1.95 chs
1561 1.95 chs return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
1562 1.95 chs }
1563 1.95 chs
1564 1.25 chs /*
1565 1.1 matt  * Enter a mapping into the PVO table. Returns 0 on success, or ENOMEM if no PVO could be allocated (only possible with PMAP_CANFAIL).
1566 1.1 matt */
1567 1.1 matt int
1568 1.1 matt pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1569 1.2 matt vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1570 1.1 matt {
1571 1.1 matt struct pvo_entry *pvo;
1572 1.1 matt struct pvo_tqhead *pvoh;
1573 1.2 matt register_t msr;
1574 1.1 matt int ptegidx;
1575 1.1 matt int i;
1576 1.1 matt int poolflags = PR_NOWAIT;
1577 1.1 matt
1578 1.28 chs /*
1579 1.28 chs * Compute the PTE Group index.
1580 1.28 chs */
1581 1.28 chs va &= ~ADDR_POFF;
1582 1.28 chs ptegidx = va_to_pteg(pm, va);
1583 1.28 chs
1584 1.28 chs msr = pmap_interrupts_off();
1585 1.28 chs
1586 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1587 1.1 matt if (pmap_pvo_remove_depth > 0)
1588 1.1 matt panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1589 1.1 matt if (++pmap_pvo_enter_depth > 1)
1590 1.1 matt panic("pmap_pvo_enter: called recursively!");
1591 1.1 matt #endif
1592 1.1 matt
1593 1.1 matt /*
1594 1.1 matt * Remove any existing mapping for this page. Reuse the
1595 1.1 matt 	 * pvo entry if there is one.
1596 1.1 matt */
1597 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1598 1.1 matt if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1599 1.1 matt #ifdef DEBUG
1600 1.1 matt if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1601 1.1 matt ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1602 1.1 matt ~(PTE_REF|PTE_CHG)) == 0 &&
1603 1.1 matt va < VM_MIN_KERNEL_ADDRESS) {
1604 1.56 phx printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
1605 1.54 mlelstv pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1606 1.56 phx printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
1607 1.54 mlelstv pvo->pvo_pte.pte_hi,
1608 1.54 mlelstv pm->pm_sr[va >> ADDR_SR_SHFT]);
1609 1.1 matt pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1610 1.1 matt #ifdef DDBX
1611 1.1 matt Debugger();
1612 1.1 matt #endif
1613 1.1 matt }
1614 1.1 matt #endif
1615 1.1 matt PMAPCOUNT(mappings_replaced);
1616 1.33 chs pmap_pvo_remove(pvo, -1, NULL);
1617 1.1 matt break;
1618 1.1 matt }
1619 1.1 matt }
1620 1.1 matt
1621 1.1 matt /*
1622 1.1 matt 	 * If we aren't overwriting a mapping, try to allocate a new one.
1623 1.1 matt */
1624 1.26 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1625 1.26 matt --pmap_pvo_enter_depth;
1626 1.26 matt #endif
1627 1.1 matt pmap_interrupts_restore(msr);
1628 1.33 chs if (pvo) {
1629 1.95 chs KASSERT(pmap_pvo_pl(pvo) == pl);
1630 1.95 chs } else {
1631 1.95 chs pvo = pool_get(pl, poolflags);
1632 1.33 chs }
1633 1.84 matt KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
1634 1.25 chs
1635 1.25 chs #ifdef DEBUG
1636 1.25 chs /*
1637 1.25 chs * Exercise pmap_pvo_reclaim() a little.
1638 1.25 chs */
1639 1.25 chs if (pvo && (flags & PMAP_CANFAIL) != 0 &&
1640 1.25 chs pmap_pvo_reclaim_debugctr++ > 0x1000 &&
1641 1.25 chs (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
1642 1.25 chs pool_put(pl, pvo);
1643 1.25 chs pvo = NULL;
1644 1.25 chs }
1645 1.25 chs #endif
1646 1.25 chs
1647 1.1 matt msr = pmap_interrupts_off();
1648 1.26 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1649 1.26 matt ++pmap_pvo_enter_depth;
1650 1.26 matt #endif
1651 1.1 matt if (pvo == NULL) {
1652 1.1 matt pvo = pmap_pvo_reclaim(pm);
1653 1.1 matt if (pvo == NULL) {
1654 1.1 matt if ((flags & PMAP_CANFAIL) == 0)
1655 1.1 matt panic("pmap_pvo_enter: failed");
1656 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1657 1.1 matt pmap_pvo_enter_depth--;
1658 1.1 matt #endif
1659 1.26 matt PMAPCOUNT(pvos_failed);
1660 1.1 matt pmap_interrupts_restore(msr);
1661 1.1 matt return ENOMEM;
1662 1.1 matt }
1663 1.1 matt }
1664 1.25 chs
1665 1.1 matt pvo->pvo_vaddr = va;
1666 1.1 matt pvo->pvo_pmap = pm;
1667 1.1 matt pvo->pvo_vaddr &= ~ADDR_POFF;
1668 1.1 matt if (flags & VM_PROT_EXECUTE) {
1669 1.1 matt PMAPCOUNT(exec_mappings);
1670 1.14 chs pvo_set_exec(pvo);
1671 1.1 matt }
1672 1.1 matt if (flags & PMAP_WIRED)
1673 1.1 matt pvo->pvo_vaddr |= PVO_WIRED;
1674 1.1 matt if (pvo_head != &pmap_pvo_kunmanaged) {
1675 1.1 matt pvo->pvo_vaddr |= PVO_MANAGED;
1676 1.1 matt PMAPCOUNT(mappings);
1677 1.1 matt } else {
1678 1.1 matt PMAPCOUNT(kernel_mappings);
1679 1.1 matt }
1680 1.2 matt pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1681 1.1 matt
1682 1.1 matt LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1683 1.39 matt if (PVO_WIRED_P(pvo))
1684 1.1 matt pvo->pvo_pmap->pm_stats.wired_count++;
1685 1.1 matt pvo->pvo_pmap->pm_stats.resident_count++;
1686 1.1 matt #if defined(DEBUG)
1687 1.38 sanjayl /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1688 1.1 matt DPRINTFN(PVOENTER,
1689 1.85 matt "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
1690 1.85 matt pvo, pm, va, pa);
1691 1.1 matt #endif
1692 1.1 matt
1693 1.1 matt /*
1694 1.1 matt * We hope this succeeds but it isn't required.
1695 1.1 matt */
1696 1.1 matt pvoh = &pmap_pvo_table[ptegidx];
1697 1.1 matt i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1698 1.1 matt if (i >= 0) {
1699 1.1 matt PVO_PTEGIDX_SET(pvo, i);
1700 1.12 matt PVO_WHERE(pvo, ENTER_INSERT);
1701 1.1 matt PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1702 1.1 matt ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1703 1.1 matt TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1704 1.38 sanjayl
1705 1.1 matt } else {
1706 1.1 matt /*
1707 1.1 matt * Since we didn't have room for this entry (which makes it
1708 1.1 matt 		 * an evicted entry), place it at the head of the list.
1709 1.1 matt */
1710 1.1 matt TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1711 1.1 matt PMAPCOUNT(ptes_evicted);
1712 1.1 matt pm->pm_evictions++;
1713 1.12 matt /*
1714 1.12 matt * If this is a kernel page, make sure it's active.
1715 1.12 matt */
1716 1.12 matt if (pm == pmap_kernel()) {
1717 1.45 thorpej i = pmap_pte_spill(pm, va, false);
1718 1.12 matt KASSERT(i);
1719 1.12 matt }
1720 1.1 matt }
1721 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
1722 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1723 1.1 matt pmap_pvo_enter_depth--;
1724 1.1 matt #endif
1725 1.1 matt pmap_interrupts_restore(msr);
1726 1.1 matt return 0;
1727 1.1 matt }
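/*
 * Minimal call sketch (see pmap_enter() and pmap_kenter_pa() below for
 * the real callers): pick the pool and PVO head for the page, then
 *
 *	error = pmap_pvo_enter(pm, &pmap_mpvo_pool, pvo_head,
 *	    va, pa, pte_lo, flags);
 *
 * With PMAP_CANFAIL in flags this returns ENOMEM instead of panicking
 * when no PVO can be allocated or reclaimed; interrupt masking is
 * handled internally.
 */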
1728 1.1 matt
1729 1.53 garbled static void
1730 1.33 chs pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1731 1.1 matt {
1732 1.2 matt volatile struct pte *pt;
1733 1.1 matt int ptegidx;
1734 1.1 matt
1735 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1736 1.1 matt if (++pmap_pvo_remove_depth > 1)
1737 1.1 matt panic("pmap_pvo_remove: called recursively!");
1738 1.1 matt #endif
1739 1.1 matt
1740 1.1 matt /*
1741 1.1 matt 	 * If we haven't been supplied the pteidx, calculate it.
1742 1.1 matt */
1743 1.1 matt if (pteidx == -1) {
1744 1.2 matt ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1745 1.1 matt pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1746 1.1 matt } else {
1747 1.1 matt ptegidx = pteidx >> 3;
1748 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_HID)
1749 1.1 matt ptegidx ^= pmap_pteg_mask;
1750 1.1 matt }
1751 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
1752 1.1 matt
1753 1.1 matt /*
1754 1.1 matt * If there is an active pte entry, we need to deactivate it
1755 1.1 matt * (and save the ref & chg bits).
1756 1.1 matt */
1757 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx);
1758 1.1 matt if (pt != NULL) {
1759 1.1 matt pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1760 1.12 matt PVO_WHERE(pvo, REMOVE);
1761 1.1 matt PVO_PTEGIDX_CLR(pvo);
1762 1.1 matt PMAPCOUNT(ptes_removed);
1763 1.1 matt } else {
1764 1.1 matt KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1765 1.1 matt pvo->pvo_pmap->pm_evictions--;
1766 1.1 matt }
1767 1.1 matt
1768 1.1 matt /*
1769 1.14 chs * Account for executable mappings.
1770 1.14 chs */
1771 1.39 matt if (PVO_EXECUTABLE_P(pvo))
1772 1.14 chs pvo_clear_exec(pvo);
1773 1.14 chs
1774 1.14 chs /*
1775 1.14 chs * Update our statistics.
1776 1.1 matt */
1777 1.1 matt pvo->pvo_pmap->pm_stats.resident_count--;
1778 1.39 matt if (PVO_WIRED_P(pvo))
1779 1.1 matt pvo->pvo_pmap->pm_stats.wired_count--;
1780 1.1 matt
1781 1.1 matt /*
1782 1.1 matt * Save the REF/CHG bits into their cache if the page is managed.
1783 1.1 matt */
1784 1.39 matt if (PVO_MANAGED_P(pvo)) {
1785 1.2 matt register_t ptelo = pvo->pvo_pte.pte_lo;
1786 1.1 matt struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1787 1.1 matt
1788 1.1 matt if (pg != NULL) {
1789 1.37 matt /*
1790 1.37 matt * If this page was changed and it is mapped exec,
1791 1.37 matt * invalidate it.
1792 1.37 matt */
1793 1.37 matt if ((ptelo & PTE_CHG) &&
1794 1.37 matt (pmap_attr_fetch(pg) & PTE_EXEC)) {
1795 1.37 matt struct pvo_head *pvoh = vm_page_to_pvoh(pg);
1796 1.37 matt if (LIST_EMPTY(pvoh)) {
1797 1.85 matt DPRINTFN(EXEC, "[pmap_pvo_remove: "
1798 1.53 garbled "%#" _PRIxpa ": clear-exec]\n",
1799 1.85 matt VM_PAGE_TO_PHYS(pg));
1800 1.37 matt pmap_attr_clear(pg, PTE_EXEC);
1801 1.37 matt PMAPCOUNT(exec_uncached_pvo_remove);
1802 1.37 matt } else {
1803 1.85 matt DPRINTFN(EXEC, "[pmap_pvo_remove: "
1804 1.53 garbled "%#" _PRIxpa ": syncicache]\n",
1805 1.85 matt VM_PAGE_TO_PHYS(pg));
1806 1.37 matt pmap_syncicache(VM_PAGE_TO_PHYS(pg),
1807 1.37 matt PAGE_SIZE);
1808 1.37 matt PMAPCOUNT(exec_synced_pvo_remove);
1809 1.37 matt }
1810 1.37 matt }
1811 1.37 matt
1812 1.1 matt pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1813 1.1 matt }
1814 1.1 matt PMAPCOUNT(unmappings);
1815 1.1 matt } else {
1816 1.1 matt PMAPCOUNT(kernel_unmappings);
1817 1.1 matt }
1818 1.1 matt
1819 1.1 matt /*
1820 1.1 matt 	 * Remove the PVO from its lists and hand it to the caller's free list, if one was supplied.
1821 1.1 matt */
1822 1.1 matt LIST_REMOVE(pvo, pvo_vlink);
1823 1.1 matt TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1824 1.33 chs if (pvol) {
1825 1.33 chs LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1826 1.25 chs }
1827 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1828 1.1 matt pmap_pvo_remove_depth--;
1829 1.1 matt #endif
1830 1.1 matt }
1831 1.1 matt
1832 1.33 chs void
1833 1.33 chs pmap_pvo_free(struct pvo_entry *pvo)
1834 1.33 chs {
1835 1.33 chs
1836 1.95 chs pool_put(pmap_pvo_pl(pvo), pvo);
1837 1.33 chs }
1838 1.33 chs
1839 1.33 chs void
1840 1.33 chs pmap_pvo_free_list(struct pvo_head *pvol)
1841 1.33 chs {
1842 1.33 chs struct pvo_entry *pvo, *npvo;
1843 1.33 chs
1844 1.33 chs for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
1845 1.33 chs npvo = LIST_NEXT(pvo, pvo_vlink);
1846 1.33 chs LIST_REMOVE(pvo, pvo_vlink);
1847 1.33 chs pmap_pvo_free(pvo);
1848 1.33 chs }
1849 1.33 chs }
1850 1.33 chs
1851 1.1 matt /*
1852 1.14 chs * Mark a mapping as executable.
1853 1.14 chs * If this is the first executable mapping in the segment,
1854 1.14 chs * clear the noexec flag.
1855 1.14 chs */
1856 1.53 garbled static void
1857 1.14 chs pvo_set_exec(struct pvo_entry *pvo)
1858 1.14 chs {
1859 1.14 chs struct pmap *pm = pvo->pvo_pmap;
1860 1.14 chs
1861 1.39 matt if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1862 1.14 chs return;
1863 1.14 chs }
1864 1.14 chs pvo->pvo_vaddr |= PVO_EXECUTABLE;
1865 1.53 garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1866 1.18 matt {
1867 1.18 matt int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1868 1.18 matt if (pm->pm_exec[sr]++ == 0) {
1869 1.18 matt pm->pm_sr[sr] &= ~SR_NOEXEC;
1870 1.18 matt }
1871 1.14 chs }
1872 1.18 matt #endif
1873 1.14 chs }
1874 1.14 chs
1875 1.14 chs /*
1876 1.14 chs * Mark a mapping as non-executable.
1877 1.14 chs * If this was the last executable mapping in the segment,
1878 1.14 chs * set the noexec flag.
1879 1.14 chs */
1880 1.53 garbled static void
1881 1.14 chs pvo_clear_exec(struct pvo_entry *pvo)
1882 1.14 chs {
1883 1.14 chs struct pmap *pm = pvo->pvo_pmap;
1884 1.14 chs
1885 1.39 matt if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1886 1.14 chs return;
1887 1.14 chs }
1888 1.14 chs pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1889 1.53 garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1890 1.18 matt {
1891 1.18 matt int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1892 1.18 matt if (--pm->pm_exec[sr] == 0) {
1893 1.18 matt pm->pm_sr[sr] |= SR_NOEXEC;
1894 1.18 matt }
1895 1.14 chs }
1896 1.18 matt #endif
1897 1.14 chs }
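/*
 * Invariant sketch for the two routines above (PMAP_OEA and
 * PMAP_OEA64_BRIDGE only; the kernel pmap is skipped entirely):
 * pm_exec[sr] counts the executable PVOs in segment sr, and
 * SR_NOEXEC is set exactly while that count is zero, i.e.
 *
 *	KASSERT((pm->pm_exec[sr] == 0) ==
 *	    ((pm->pm_sr[sr] & SR_NOEXEC) != 0));
 *
 * (Illustrative only, not an assertion in the real code.)
 */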
1898 1.14 chs
1899 1.14 chs /*
1900 1.1 matt * Insert physical page at pa into the given pmap at virtual address va.
1901 1.1 matt */
1902 1.1 matt int
1903 1.65 cegger pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1904 1.1 matt {
1905 1.1 matt struct mem_region *mp;
1906 1.1 matt struct pvo_head *pvo_head;
1907 1.1 matt struct vm_page *pg;
1908 1.1 matt struct pool *pl;
1909 1.2 matt register_t pte_lo;
1910 1.1 matt int error;
1911 1.1 matt u_int was_exec = 0;
1912 1.1 matt
1913 1.50 ad PMAP_LOCK();
1914 1.50 ad
1915 1.1 matt if (__predict_false(!pmap_initialized)) {
1916 1.1 matt pvo_head = &pmap_pvo_kunmanaged;
1917 1.1 matt pl = &pmap_upvo_pool;
1918 1.1 matt pg = NULL;
1919 1.1 matt was_exec = PTE_EXEC;
1920 1.1 matt } else {
1921 1.1 matt pvo_head = pa_to_pvoh(pa, &pg);
1922 1.1 matt pl = &pmap_mpvo_pool;
1923 1.1 matt }
1924 1.1 matt
1925 1.1 matt DPRINTFN(ENTER,
1926 1.85 matt "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
1927 1.85 matt pm, va, pa, prot, flags);
1928 1.1 matt
1929 1.1 matt /*
1930 1.1 matt 	 * If this is a managed page and it's the first reference to the
1931 1.1 matt 	 * page, clear the execness of the page. Otherwise fetch the execness.
1932 1.1 matt */
1933 1.1 matt if (pg != NULL)
1934 1.1 matt was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1935 1.1 matt
1936 1.85 matt DPRINTFN(ENTER, " was_exec=%d", was_exec);
1937 1.1 matt
1938 1.1 matt /*
1939 1.1 matt * Assume the page is cache inhibited and access is guarded unless
1940 1.1 matt * it's in our available memory array. If it is in the memory array,
1941 1.1 matt 	 * assume it's in memory-coherent memory.
1942 1.1 matt */
1943 1.77 macallan if (flags & PMAP_MD_PREFETCHABLE) {
1944 1.77 macallan pte_lo = 0;
1945 1.77 macallan } else
1946 1.77 macallan pte_lo = PTE_G;
1947 1.77 macallan
1948 1.81 matt if ((flags & PMAP_NOCACHE) == 0) {
1949 1.1 matt for (mp = mem; mp->size; mp++) {
1950 1.1 matt if (pa >= mp->start && pa < mp->start + mp->size) {
1951 1.1 matt pte_lo = PTE_M;
1952 1.1 matt break;
1953 1.1 matt }
1954 1.1 matt }
1955 1.87 kiyohara #ifdef MULTIPROCESSOR
1956 1.87 kiyohara if (((mfpvr() >> 16) & 0xffff) == MPC603e)
1957 1.87 kiyohara pte_lo = PTE_M;
1958 1.87 kiyohara #endif
1959 1.77 macallan } else {
1960 1.77 macallan pte_lo |= PTE_I;
1961 1.1 matt }
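	/*
	 * For reference, the WIMG bits chosen above have the usual
	 * PowerPC meanings: PTE_W write-through, PTE_I cache-inhibited,
	 * PTE_M memory coherence required, PTE_G guarded (no speculative
	 * access).  Real memory ends up with PTE_M; anything else keeps
	 * PTE_G (unless PMAP_MD_PREFETCHABLE) and gains PTE_I when
	 * PMAP_NOCACHE is requested.
	 */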
1962 1.1 matt
1963 1.1 matt if (prot & VM_PROT_WRITE)
1964 1.1 matt pte_lo |= PTE_BW;
1965 1.1 matt else
1966 1.1 matt pte_lo |= PTE_BR;
1967 1.1 matt
1968 1.1 matt /*
1969 1.1 matt * If this was in response to a fault, "pre-fault" the PTE's
1970 1.1 matt * changed/referenced bit appropriately.
1971 1.1 matt */
1972 1.1 matt if (flags & VM_PROT_WRITE)
1973 1.1 matt pte_lo |= PTE_CHG;
1974 1.30 chs if (flags & VM_PROT_ALL)
1975 1.1 matt pte_lo |= PTE_REF;
1976 1.1 matt
1977 1.1 matt /*
1978 1.1 matt * We need to know if this page can be executable
1979 1.1 matt */
1980 1.1 matt flags |= (prot & VM_PROT_EXECUTE);
1981 1.1 matt
1982 1.1 matt /*
1983 1.1 matt * Record mapping for later back-translation and pte spilling.
1984 1.1 matt * This will overwrite any existing mapping.
1985 1.1 matt */
1986 1.1 matt error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1987 1.1 matt
1988 1.1 matt /*
1989 1.1 matt * Flush the real page from the instruction cache if this page is
1990 1.1 matt * mapped executable and cacheable and has not been flushed since
1991 1.1 matt * the last time it was modified.
1992 1.1 matt */
1993 1.1 matt if (error == 0 &&
1994 1.1 matt (flags & VM_PROT_EXECUTE) &&
1995 1.1 matt (pte_lo & PTE_I) == 0 &&
1996 1.1 matt was_exec == 0) {
1997 1.85 matt DPRINTFN(ENTER, " %s", "syncicache");
1998 1.1 matt PMAPCOUNT(exec_synced);
1999 1.6 thorpej pmap_syncicache(pa, PAGE_SIZE);
2000 1.1 matt if (pg != NULL) {
2001 1.1 matt pmap_attr_save(pg, PTE_EXEC);
2002 1.1 matt PMAPCOUNT(exec_cached);
2003 1.1 matt #if defined(DEBUG) || defined(PMAPDEBUG)
2004 1.1 matt if (pmapdebug & PMAPDEBUG_ENTER)
2005 1.1 matt printf(" marked-as-exec");
2006 1.1 matt else if (pmapdebug & PMAPDEBUG_EXEC)
2007 1.53 garbled printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
2008 1.34 yamt VM_PAGE_TO_PHYS(pg));
2009 1.1 matt
2010 1.1 matt #endif
2011 1.1 matt }
2012 1.1 matt }
2013 1.1 matt
2014 1.85 matt DPRINTFN(ENTER, ": error=%d\n", error);
2015 1.1 matt
2016 1.50 ad PMAP_UNLOCK();
2017 1.50 ad
2018 1.1 matt return error;
2019 1.1 matt }
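/*
 * Minimal usage sketch (hypothetical va/pa): map a page read/write
 * into the kernel pmap and commit the change:
 *
 *	if (pmap_enter(pmap_kernel(), va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_CANFAIL) == 0)
 *		pmap_update(pmap_kernel());
 *
 * On this pmap the PTE work is done synchronously, so pmap_update()
 * above only bumps a counter and issues a TLBSYNC.
 */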
2020 1.1 matt
2021 1.1 matt void
2022 1.68 cegger pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2023 1.1 matt {
2024 1.1 matt struct mem_region *mp;
2025 1.2 matt register_t pte_lo;
2026 1.1 matt int error;
2027 1.1 matt
2028 1.85 matt #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA)
2029 1.1 matt if (va < VM_MIN_KERNEL_ADDRESS)
2030 1.1 matt panic("pmap_kenter_pa: attempt to enter "
2031 1.53 garbled "non-kernel address %#" _PRIxva "!", va);
2032 1.38 sanjayl #endif
2033 1.1 matt
2034 1.1 matt DPRINTFN(KENTER,
2035 1.85 matt "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot);
2036 1.1 matt
2037 1.50 ad PMAP_LOCK();
2038 1.50 ad
2039 1.1 matt /*
2040 1.1 matt * Assume the page is cache inhibited and access is guarded unless
2041 1.1 matt * it's in our available memory array. If it is in the memory array,
2042 1.1 matt 	 * assume it's in memory-coherent memory.
2043 1.1 matt */
2044 1.1 matt pte_lo = PTE_IG;
2045 1.81 matt if ((flags & PMAP_NOCACHE) == 0) {
2046 1.4 matt for (mp = mem; mp->size; mp++) {
2047 1.4 matt if (pa >= mp->start && pa < mp->start + mp->size) {
2048 1.4 matt pte_lo = PTE_M;
2049 1.4 matt break;
2050 1.4 matt }
2051 1.1 matt }
2052 1.87 kiyohara #ifdef MULTIPROCESSOR
2053 1.87 kiyohara if (((mfpvr() >> 16) & 0xffff) == MPC603e)
2054 1.87 kiyohara pte_lo = PTE_M;
2055 1.87 kiyohara #endif
2056 1.1 matt }
2057 1.1 matt
2058 1.1 matt if (prot & VM_PROT_WRITE)
2059 1.1 matt pte_lo |= PTE_BW;
2060 1.1 matt else
2061 1.1 matt pte_lo |= PTE_BR;
2062 1.1 matt
2063 1.1 matt /*
2064 1.1 matt * We don't care about REF/CHG on PVOs on the unmanaged list.
2065 1.1 matt */
2066 1.1 matt error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
2067 1.1 matt &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
2068 1.1 matt
2069 1.1 matt if (error != 0)
2070 1.53 garbled panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
2071 1.1 matt va, pa, error);
2072 1.50 ad
2073 1.50 ad PMAP_UNLOCK();
2074 1.1 matt }
2075 1.1 matt
2076 1.1 matt void
2077 1.1 matt pmap_kremove(vaddr_t va, vsize_t len)
2078 1.1 matt {
2079 1.1 matt if (va < VM_MIN_KERNEL_ADDRESS)
2080 1.1 matt panic("pmap_kremove: attempt to remove "
2081 1.53 garbled "non-kernel address %#" _PRIxva "!", va);
2082 1.1 matt
2083 1.85 matt DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len);
2084 1.1 matt pmap_remove(pmap_kernel(), va, va + len);
2085 1.1 matt }
2086 1.1 matt
2087 1.1 matt /*
2088 1.1 matt * Remove the given range of mapping entries.
2089 1.1 matt */
2090 1.1 matt void
2091 1.1 matt pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
2092 1.1 matt {
2093 1.33 chs struct pvo_head pvol;
2094 1.1 matt struct pvo_entry *pvo;
2095 1.2 matt register_t msr;
2096 1.1 matt int pteidx;
2097 1.1 matt
2098 1.50 ad PMAP_LOCK();
2099 1.33 chs LIST_INIT(&pvol);
2100 1.14 chs msr = pmap_interrupts_off();
2101 1.1 matt for (; va < endva; va += PAGE_SIZE) {
2102 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx);
2103 1.1 matt if (pvo != NULL) {
2104 1.33 chs pmap_pvo_remove(pvo, pteidx, &pvol);
2105 1.1 matt }
2106 1.1 matt }
2107 1.14 chs pmap_interrupts_restore(msr);
2108 1.33 chs pmap_pvo_free_list(&pvol);
2109 1.50 ad PMAP_UNLOCK();
2110 1.1 matt }
2111 1.1 matt
2112 1.1 matt /*
2113 1.1 matt * Get the physical page address for the given pmap/virtual address.
2114 1.1 matt */
2115 1.44 thorpej bool
2116 1.1 matt pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
2117 1.1 matt {
2118 1.1 matt struct pvo_entry *pvo;
2119 1.2 matt register_t msr;
2120 1.7 matt
2121 1.50 ad PMAP_LOCK();
2122 1.38 sanjayl
2123 1.7 matt /*
2124 1.7 matt * If this is a kernel pmap lookup, also check the battable
2125 1.7 matt * and if we get a hit, translate the VA to a PA using the
2126 1.36 nathanw 	 * BAT entries. Don't check against VM_MAX_KERNEL_ADDRESS when
2127 1.7 matt 	 * it would wrap back to 0.
2128 1.7 matt */
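	/*
	 * Worked example of the BAT translation below (non-601 case,
	 * assuming the usual BL encoding where a 256MB block has all
	 * eleven BL bits set): (~(batu & (BAT_XBL|BAT_BL)) << 15),
	 * trimmed with ~0x1ffffL, yields mask = 0xf0000000, so
	 *
	 *	*pap = (batl & 0xf0000000) | (va & 0x0fffffff);
	 *
	 * i.e. the BAT supplies the block's physical base and the
	 * offset within the block comes straight from the VA.
	 */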
2129 1.7 matt if (pm == pmap_kernel() &&
2130 1.7 matt (va < VM_MIN_KERNEL_ADDRESS ||
2131 1.7 matt (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
2132 1.8 matt KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
2133 1.53 garbled #if defined (PMAP_OEA)
2134 1.55 garbled #ifdef PPC_OEA601
2135 1.55 garbled if ((MFPVR() >> 16) == MPC601) {
2136 1.24 kleink register_t batu = battable[va >> 23].batu;
2137 1.24 kleink register_t batl = battable[va >> 23].batl;
2138 1.24 kleink register_t sr = iosrtable[va >> ADDR_SR_SHFT];
2139 1.24 kleink if (BAT601_VALID_P(batl) &&
2140 1.24 kleink BAT601_VA_MATCH_P(batu, batl, va)) {
2141 1.24 kleink register_t mask =
2142 1.24 kleink (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
2143 1.29 briggs if (pap)
2144 1.29 briggs *pap = (batl & mask) | (va & ~mask);
2145 1.50 ad PMAP_UNLOCK();
2146 1.45 thorpej return true;
2147 1.24 kleink } else if (SR601_VALID_P(sr) &&
2148 1.24 kleink SR601_PA_MATCH_P(sr, va)) {
2149 1.29 briggs if (pap)
2150 1.29 briggs *pap = va;
2151 1.50 ad PMAP_UNLOCK();
2152 1.45 thorpej return true;
2153 1.24 kleink }
2154 1.55 garbled } else
2155 1.55 garbled #endif /* PPC_OEA601 */
2156 1.55 garbled {
2157 1.83 matt register_t batu = battable[BAT_VA2IDX(va)].batu;
2158 1.55 garbled if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
2159 1.83 matt register_t batl = battable[BAT_VA2IDX(va)].batl;
2160 1.55 garbled register_t mask =
2161 1.83 matt (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL;
2162 1.55 garbled if (pap)
2163 1.55 garbled *pap = (batl & mask) | (va & ~mask);
2164 1.55 garbled PMAP_UNLOCK();
2165 1.55 garbled return true;
2166 1.55 garbled }
2167 1.7 matt }
2168 1.96 rin PMAP_UNLOCK();
2169 1.45 thorpej return false;
2170 1.53 garbled #elif defined (PMAP_OEA64_BRIDGE)
2171 1.52 garbled if (va >= SEGMENT_LENGTH)
2172 1.52 garbled panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
2173 1.52 garbled __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
2174 1.52 garbled else {
2175 1.52 garbled if (pap)
2176 1.52 garbled *pap = va;
2177 1.52 garbled PMAP_UNLOCK();
2178 1.52 garbled return true;
2179 1.52 garbled }
2180 1.53 garbled #elif defined (PMAP_OEA64)
2181 1.38 sanjayl #error PPC_OEA64 not supported
2182 1.38 sanjayl #endif /* PPC_OEA */
2183 1.7 matt }
2184 1.1 matt
2185 1.1 matt msr = pmap_interrupts_off();
2186 1.1 matt pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2187 1.1 matt if (pvo != NULL) {
2188 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2189 1.29 briggs if (pap)
2190 1.29 briggs *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2191 1.29 briggs | (va & ADDR_POFF);
2192 1.1 matt }
2193 1.1 matt pmap_interrupts_restore(msr);
2194 1.50 ad PMAP_UNLOCK();
2195 1.1 matt return pvo != NULL;
2196 1.1 matt }
2197 1.1 matt
2198 1.1 matt /*
2199 1.1 matt * Lower the protection on the specified range of this pmap.
2200 1.1 matt */
2201 1.1 matt void
2202 1.1 matt pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2203 1.1 matt {
2204 1.1 matt struct pvo_entry *pvo;
2205 1.2 matt volatile struct pte *pt;
2206 1.2 matt register_t msr;
2207 1.1 matt int pteidx;
2208 1.1 matt
2209 1.1 matt /*
2210 1.1 matt * Since this routine only downgrades protection, we should
2211 1.14 chs * always be called with at least one bit not set.
2212 1.1 matt */
2213 1.14 chs KASSERT(prot != VM_PROT_ALL);
2214 1.1 matt
2215 1.1 matt /*
2216 1.1 matt * If there is no protection, this is equivalent to
2217 1.1 matt 	 * removing the range from the pmap.
2218 1.1 matt */
2219 1.1 matt if ((prot & VM_PROT_READ) == 0) {
2220 1.1 matt pmap_remove(pm, va, endva);
2221 1.1 matt return;
2222 1.1 matt }
2223 1.1 matt
2224 1.50 ad PMAP_LOCK();
2225 1.50 ad
2226 1.1 matt msr = pmap_interrupts_off();
2227 1.6 thorpej for (; va < endva; va += PAGE_SIZE) {
2228 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx);
2229 1.1 matt if (pvo == NULL)
2230 1.1 matt continue;
2231 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2232 1.1 matt
2233 1.1 matt /*
2234 1.1 matt * Revoke executable if asked to do so.
2235 1.1 matt */
2236 1.1 matt if ((prot & VM_PROT_EXECUTE) == 0)
2237 1.14 chs pvo_clear_exec(pvo);
2238 1.1 matt
2239 1.1 matt #if 0
2240 1.1 matt /*
2241 1.1 matt * If the page is already read-only, no change
2242 1.1 matt * needs to be made.
2243 1.1 matt */
2244 1.1 matt if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2245 1.1 matt continue;
2246 1.1 matt #endif
2247 1.1 matt /*
2248 1.1 matt * Grab the PTE pointer before we diddle with
2249 1.1 matt * the cached PTE copy.
2250 1.1 matt */
2251 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx);
2252 1.1 matt /*
2253 1.1 matt * Change the protection of the page.
2254 1.1 matt */
2255 1.1 matt pvo->pvo_pte.pte_lo &= ~PTE_PP;
2256 1.1 matt pvo->pvo_pte.pte_lo |= PTE_BR;
2257 1.1 matt
2258 1.1 matt /*
2259 1.1 matt * If the PVO is in the page table, update
2260 1.1 matt 		 * that pte as well.
2261 1.1 matt */
2262 1.1 matt if (pt != NULL) {
2263 1.1 matt pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2264 1.12 matt PVO_WHERE(pvo, PMAP_PROTECT);
2265 1.1 matt PMAPCOUNT(ptes_changed);
2266 1.1 matt }
2267 1.1 matt
2268 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2269 1.1 matt }
2270 1.1 matt pmap_interrupts_restore(msr);
2271 1.50 ad PMAP_UNLOCK();
2272 1.1 matt }
2273 1.1 matt
2274 1.1 matt void
2275 1.1 matt pmap_unwire(pmap_t pm, vaddr_t va)
2276 1.1 matt {
2277 1.1 matt struct pvo_entry *pvo;
2278 1.2 matt register_t msr;
2279 1.1 matt
2280 1.50 ad PMAP_LOCK();
2281 1.1 matt msr = pmap_interrupts_off();
2282 1.1 matt pvo = pmap_pvo_find_va(pm, va, NULL);
2283 1.1 matt if (pvo != NULL) {
2284 1.39 matt if (PVO_WIRED_P(pvo)) {
2285 1.1 matt pvo->pvo_vaddr &= ~PVO_WIRED;
2286 1.1 matt pm->pm_stats.wired_count--;
2287 1.1 matt }
2288 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2289 1.1 matt }
2290 1.1 matt pmap_interrupts_restore(msr);
2291 1.50 ad PMAP_UNLOCK();
2292 1.1 matt }
2293 1.1 matt
2294 1.1 matt /*
2295 1.1 matt * Lower the protection on the specified physical page.
2296 1.1 matt */
2297 1.1 matt void
2298 1.1 matt pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2299 1.1 matt {
2300 1.33 chs struct pvo_head *pvo_head, pvol;
2301 1.1 matt struct pvo_entry *pvo, *next_pvo;
2302 1.2 matt volatile struct pte *pt;
2303 1.2 matt register_t msr;
2304 1.1 matt
2305 1.50 ad PMAP_LOCK();
2306 1.50 ad
2307 1.14 chs KASSERT(prot != VM_PROT_ALL);
2308 1.33 chs LIST_INIT(&pvol);
2309 1.1 matt msr = pmap_interrupts_off();
2310 1.1 matt
2311 1.1 matt /*
2312 1.1 matt * When UVM reuses a page, it does a pmap_page_protect with
2313 1.1 matt * VM_PROT_NONE. At that point, we can clear the exec flag
2314 1.1 matt * since we know the page will have different contents.
2315 1.1 matt */
2316 1.1 matt if ((prot & VM_PROT_READ) == 0) {
2317 1.85 matt DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
2318 1.85 matt VM_PAGE_TO_PHYS(pg));
2319 1.1 matt if (pmap_attr_fetch(pg) & PTE_EXEC) {
2320 1.1 matt PMAPCOUNT(exec_uncached_page_protect);
2321 1.1 matt pmap_attr_clear(pg, PTE_EXEC);
2322 1.1 matt }
2323 1.1 matt }
2324 1.1 matt
2325 1.1 matt pvo_head = vm_page_to_pvoh(pg);
2326 1.1 matt for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2327 1.1 matt next_pvo = LIST_NEXT(pvo, pvo_vlink);
2328 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2329 1.1 matt
2330 1.1 matt /*
2331 1.1 matt * Downgrading to no mapping at all, we just remove the entry.
2332 1.1 matt */
2333 1.1 matt if ((prot & VM_PROT_READ) == 0) {
2334 1.33 chs pmap_pvo_remove(pvo, -1, &pvol);
2335 1.1 matt continue;
2336 1.1 matt }
2337 1.1 matt
2338 1.1 matt /*
2339 1.1 matt * If EXEC permission is being revoked, just clear the
2340 1.1 matt * flag in the PVO.
2341 1.1 matt */
2342 1.1 matt if ((prot & VM_PROT_EXECUTE) == 0)
2343 1.14 chs pvo_clear_exec(pvo);
2344 1.1 matt
2345 1.1 matt /*
2346 1.1 matt * If this entry is already RO, don't diddle with the
2347 1.1 matt * page table.
2348 1.1 matt */
2349 1.1 matt if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2350 1.1 matt PMAP_PVO_CHECK(pvo);
2351 1.1 matt continue;
2352 1.1 matt }
2353 1.1 matt
2354 1.1 matt /*
2355 1.1 matt 		 * Grab the PTE before we diddle the bits so
2356 1.1 matt * pvo_to_pte can verify the pte contents are as
2357 1.1 matt * expected.
2358 1.1 matt */
2359 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
2360 1.1 matt pvo->pvo_pte.pte_lo &= ~PTE_PP;
2361 1.1 matt pvo->pvo_pte.pte_lo |= PTE_BR;
2362 1.1 matt if (pt != NULL) {
2363 1.1 matt pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2364 1.12 matt PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
2365 1.1 matt PMAPCOUNT(ptes_changed);
2366 1.1 matt }
2367 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2368 1.1 matt }
2369 1.1 matt pmap_interrupts_restore(msr);
2370 1.33 chs pmap_pvo_free_list(&pvol);
2371 1.50 ad
2372 1.50 ad PMAP_UNLOCK();
2373 1.1 matt }
2374 1.1 matt
2375 1.1 matt /*
2376 1.1 matt * Activate the address space for the specified process. If the process
2377 1.1 matt * is the current process, load the new MMU context.
2378 1.1 matt */
2379 1.1 matt void
2380 1.1 matt pmap_activate(struct lwp *l)
2381 1.1 matt {
2382 1.69 rmind struct pcb *pcb = lwp_getpcb(l);
2383 1.1 matt pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2384 1.1 matt
2385 1.1 matt DPRINTFN(ACTIVATE,
2386 1.85 matt "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp);
2387 1.1 matt
2388 1.1 matt /*
2389 1.70 skrll * XXX Normally performed in cpu_lwp_fork().
2390 1.1 matt */
2391 1.13 matt pcb->pcb_pm = pmap;
2392 1.17 matt
2393 1.17 matt /*
2394 1.17 matt * In theory, the SR registers need only be valid on return
2395 1.17 matt 	 * to user space, so we could wait and load them there.
2396 1.17 matt */
2397 1.17 matt if (l == curlwp) {
2398 1.17 matt /* Store pointer to new current pmap. */
2399 1.17 matt curpm = pmap;
2400 1.17 matt }
2401 1.1 matt }
2402 1.1 matt
2403 1.1 matt /*
2404 1.1 matt * Deactivate the specified process's address space.
2405 1.1 matt */
2406 1.1 matt void
2407 1.1 matt pmap_deactivate(struct lwp *l)
2408 1.1 matt {
2409 1.1 matt }
2410 1.1 matt
2411 1.44 thorpej bool
2412 1.1 matt pmap_query_bit(struct vm_page *pg, int ptebit)
2413 1.1 matt {
2414 1.1 matt struct pvo_entry *pvo;
2415 1.2 matt volatile struct pte *pt;
2416 1.2 matt register_t msr;
2417 1.1 matt
2418 1.50 ad PMAP_LOCK();
2419 1.50 ad
2420 1.50 ad if (pmap_attr_fetch(pg) & ptebit) {
2421 1.50 ad PMAP_UNLOCK();
2422 1.45 thorpej return true;
2423 1.50 ad }
2424 1.14 chs
2425 1.1 matt msr = pmap_interrupts_off();
2426 1.1 matt LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2427 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2428 1.1 matt /*
2429 1.1 matt 		 * See if we saved the bit off. If so, cache it and return
2430 1.1 matt * success.
2431 1.1 matt */
2432 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) {
2433 1.1 matt pmap_attr_save(pg, ptebit);
2434 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2435 1.1 matt pmap_interrupts_restore(msr);
2436 1.50 ad PMAP_UNLOCK();
2437 1.45 thorpej return true;
2438 1.1 matt }
2439 1.1 matt }
2440 1.1 matt /*
2441 1.1 matt 	 * No luck, now go through the hard part of looking at the ptes
2442 1.1 matt * themselves. Sync so any pending REF/CHG bits are flushed
2443 1.1 matt * to the PTEs.
2444 1.1 matt */
2445 1.1 matt SYNC();
2446 1.1 matt LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2447 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2448 1.1 matt /*
2449 1.1 matt 		 * See if this pvo has a valid PTE. If so, fetch the
2450 1.1 matt 		 * REF/CHG bits from the valid PTE. If the appropriate
2451 1.1 matt 		 * ptebit is set, cache it and return success.
2452 1.1 matt */
2453 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
2454 1.1 matt if (pt != NULL) {
2455 1.1 matt pmap_pte_synch(pt, &pvo->pvo_pte);
2456 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) {
2457 1.1 matt pmap_attr_save(pg, ptebit);
2458 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2459 1.1 matt pmap_interrupts_restore(msr);
2460 1.50 ad PMAP_UNLOCK();
2461 1.45 thorpej return true;
2462 1.1 matt }
2463 1.1 matt }
2464 1.1 matt }
2465 1.1 matt pmap_interrupts_restore(msr);
2466 1.50 ad PMAP_UNLOCK();
2467 1.45 thorpej return false;
2468 1.1 matt }
2469 1.1 matt
2470 1.44 thorpej bool
2471 1.1 matt pmap_clear_bit(struct vm_page *pg, int ptebit)
2472 1.1 matt {
2473 1.1 matt struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2474 1.1 matt struct pvo_entry *pvo;
2475 1.2 matt volatile struct pte *pt;
2476 1.2 matt register_t msr;
2477 1.1 matt int rv = 0;
2478 1.1 matt
2479 1.50 ad PMAP_LOCK();
2480 1.1 matt msr = pmap_interrupts_off();
2481 1.1 matt
2482 1.1 matt /*
2483 1.1 matt * Fetch the cache value
2484 1.1 matt */
2485 1.1 matt rv |= pmap_attr_fetch(pg);
2486 1.1 matt
2487 1.1 matt /*
2488 1.1 matt * Clear the cached value.
2489 1.1 matt */
2490 1.1 matt pmap_attr_clear(pg, ptebit);
2491 1.1 matt
2492 1.1 matt /*
2493 1.1 matt * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2494 1.1 matt * can reset the right ones). Note that since the pvo entries and
2495 1.1 matt * list heads are accessed via BAT0 and are never placed in the
2496 1.1 matt * page table, we don't have to worry about further accesses setting
2497 1.1 matt * the REF/CHG bits.
2498 1.1 matt */
2499 1.1 matt SYNC();
2500 1.1 matt
2501 1.1 matt /*
2502 1.1 matt 	 * For each pvo entry, clear the cached ptebit. If the pvo has
2503 1.1 matt 	 * a valid PTE, clear the ptebit from the valid PTE as well.
2504 1.1 matt */
2505 1.1 matt LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2506 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2507 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
2508 1.1 matt if (pt != NULL) {
2509 1.1 matt /*
2510 1.1 matt * Only sync the PTE if the bit we are looking
2511 1.1 matt * for is not already set.
2512 1.1 matt */
2513 1.1 matt if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2514 1.1 matt pmap_pte_synch(pt, &pvo->pvo_pte);
2515 1.1 matt /*
2516 1.1 matt * If the bit we are looking for was already set,
2517 1.1 matt * clear that bit in the pte.
2518 1.1 matt */
2519 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit)
2520 1.1 matt pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2521 1.1 matt }
2522 1.1 matt rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2523 1.1 matt pvo->pvo_pte.pte_lo &= ~ptebit;
2524 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2525 1.1 matt }
2526 1.1 matt pmap_interrupts_restore(msr);
2527 1.14 chs
2528 1.1 matt /*
2529 1.1 matt * If we are clearing the modify bit and this page was marked EXEC
2530 1.1 matt * and the user of the page thinks the page was modified, then we
2531 1.1 matt * need to clean it from the icache if it's mapped or clear the EXEC
2532 1.1 matt * bit if it's not mapped. The page itself might not have the CHG
2533 1.1 matt * bit set if the modification was done via DMA to the page.
2534 1.1 matt */
2535 1.1 matt if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2536 1.1 matt if (LIST_EMPTY(pvoh)) {
2537 1.85 matt DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
2538 1.85 matt VM_PAGE_TO_PHYS(pg));
2539 1.1 matt pmap_attr_clear(pg, PTE_EXEC);
2540 1.1 matt PMAPCOUNT(exec_uncached_clear_modify);
2541 1.1 matt } else {
2542 1.85 matt DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
2543 1.85 matt VM_PAGE_TO_PHYS(pg));
2544 1.34 yamt pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
2545 1.1 matt PMAPCOUNT(exec_synced_clear_modify);
2546 1.1 matt }
2547 1.1 matt }
2548 1.50 ad PMAP_UNLOCK();
2549 1.1 matt return (rv & ptebit) != 0;
2550 1.1 matt }
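/*
 * pmap_query_bit() and pmap_clear_bit() back the machine-independent
 * page-attribute hooks; on NetBSD those are typically thin wrappers
 * along these lines (illustrative, not the literal definitions):
 *
 *	pmap_is_modified(pg)    ->  pmap_query_bit(pg, PTE_CHG)
 *	pmap_is_referenced(pg)  ->  pmap_query_bit(pg, PTE_REF)
 *	pmap_clear_modify(pg)   ->  pmap_clear_bit(pg, PTE_CHG)
 */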
2551 1.1 matt
2552 1.1 matt void
2553 1.1 matt pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2554 1.1 matt {
2555 1.1 matt struct pvo_entry *pvo;
2556 1.1 matt size_t offset = va & ADDR_POFF;
2557 1.1 matt int s;
2558 1.1 matt
2559 1.50 ad PMAP_LOCK();
2560 1.1 matt s = splvm();
2561 1.1 matt while (len > 0) {
2562 1.6 thorpej size_t seglen = PAGE_SIZE - offset;
2563 1.1 matt if (seglen > len)
2564 1.1 matt seglen = len;
2565 1.1 matt pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2566 1.39 matt if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
2567 1.1 matt pmap_syncicache(
2568 1.1 matt (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2569 1.1 matt PMAP_PVO_CHECK(pvo);
2570 1.1 matt }
2571 1.1 matt va += seglen;
2572 1.1 matt len -= seglen;
2573 1.1 matt offset = 0;
2574 1.1 matt }
2575 1.1 matt splx(s);
2576 1.50 ad PMAP_UNLOCK();
2577 1.1 matt }
2578 1.1 matt
2579 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2580 1.1 matt void
2581 1.2 matt pmap_pte_print(volatile struct pte *pt)
2582 1.1 matt {
2583 1.1 matt printf("PTE %p: ", pt);
2584 1.38 sanjayl
2586 1.1 matt 	/* High word: */
2587 1.54 mlelstv 	printf("%#" _PRIxpte ": [", pt->pte_hi);
2591 1.38 sanjayl
2592 1.1 matt printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2593 1.1 matt printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2594 1.38 sanjayl
2595 1.54 mlelstv printf("%#" _PRIxpte " %#" _PRIxpte "",
2596 1.38 sanjayl (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2597 1.38 sanjayl pt->pte_hi & PTE_API);
2599 1.54 mlelstv 	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
2603 1.38 sanjayl
2604 1.1 matt /* Low word: */
2606 1.54 mlelstv 	printf(" %#" _PRIxpte ": [", pt->pte_lo);
2607 1.54 mlelstv 	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
2612 1.1 matt printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2613 1.1 matt printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2614 1.1 matt printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2615 1.1 matt printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2616 1.1 matt printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2617 1.1 matt printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2618 1.1 matt switch (pt->pte_lo & PTE_PP) {
2619 1.1 matt case PTE_BR: printf("br]\n"); break;
2620 1.1 matt case PTE_BW: printf("bw]\n"); break;
2621 1.1 matt case PTE_SO: printf("so]\n"); break;
2622 1.1 matt case PTE_SW: printf("sw]\n"); break;
2623 1.1 matt }
2624 1.1 matt }
2625 1.1 matt #endif
2626 1.1 matt
2627 1.1 matt #if defined(DDB)
2628 1.1 matt void
2629 1.1 matt pmap_pteg_check(void)
2630 1.1 matt {
2631 1.2 matt volatile struct pte *pt;
2632 1.1 matt int i;
2633 1.1 matt int ptegidx;
2634 1.1 matt u_int p_valid = 0;
2635 1.1 matt u_int s_valid = 0;
2636 1.1 matt u_int invalid = 0;
2637 1.38 sanjayl
2638 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2639 1.1 matt for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2640 1.1 matt if (pt->pte_hi & PTE_VALID) {
2641 1.1 matt if (pt->pte_hi & PTE_HID)
2642 1.1 matt s_valid++;
2643 1.1 matt 				else
2644 1.1 matt 					p_valid++;
2647 1.1 matt } else
2648 1.1 matt invalid++;
2649 1.1 matt }
2650 1.1 matt }
2651 1.1 matt printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2652 1.1 matt p_valid, p_valid, s_valid, s_valid,
2653 1.1 matt invalid, invalid);
2654 1.1 matt }
2655 1.1 matt
2656 1.1 matt void
2657 1.1 matt pmap_print_mmuregs(void)
2658 1.1 matt {
2659 1.1 matt int i;
2660 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2661 1.1 matt u_int cpuvers;
2662 1.90 mrg #endif
2663 1.53 garbled #ifndef PMAP_OEA64
2664 1.1 matt vaddr_t addr;
2665 1.2 matt register_t soft_sr[16];
2666 1.18 matt #endif
2667 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2668 1.1 matt struct bat soft_ibat[4];
2669 1.1 matt struct bat soft_dbat[4];
2670 1.38 sanjayl #endif
2671 1.53 garbled paddr_t sdr1;
2672 1.1 matt
2673 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2674 1.1 matt cpuvers = MFPVR() >> 16;
2675 1.90 mrg #endif
2676 1.35 perry __asm volatile ("mfsdr1 %0" : "=r"(sdr1));
2677 1.53 garbled #ifndef PMAP_OEA64
2678 1.16 kleink addr = 0;
2679 1.27 chs for (i = 0; i < 16; i++) {
2680 1.1 matt soft_sr[i] = MFSRIN(addr);
2681 1.1 matt addr += (1 << ADDR_SR_SHFT);
2682 1.1 matt }
2683 1.18 matt #endif
2684 1.1 matt
2685 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
2686 1.1 matt /* read iBAT (601: uBAT) registers */
2687 1.35 perry __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2688 1.35 perry __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2689 1.35 perry __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2690 1.35 perry __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2691 1.35 perry __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2692 1.35 perry __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2693 1.35 perry __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2694 1.35 perry __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2695 1.1 matt
2696 1.1 matt
2697 1.1 matt if (cpuvers != MPC601) {
2698 1.1 matt /* read dBAT registers */
2699 1.35 perry __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2700 1.35 perry __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2701 1.35 perry __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2702 1.35 perry __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2703 1.35 perry __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2704 1.35 perry __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2705 1.35 perry __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2706 1.35 perry __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2707 1.1 matt }
2708 1.38 sanjayl #endif
2709 1.1 matt
2710 1.54 mlelstv printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
2711 1.53 garbled #ifndef PMAP_OEA64
2712 1.1 matt printf("SR[]:\t");
2713 1.27 chs for (i = 0; i < 4; i++)
2714 1.53 garbled printf("0x%08lx, ", soft_sr[i]);
2715 1.1 matt printf("\n\t");
2716 1.27 chs for ( ; i < 8; i++)
2717 1.53 garbled printf("0x%08lx, ", soft_sr[i]);
2718 1.1 matt printf("\n\t");
2719 1.27 chs for ( ; i < 12; i++)
2720 1.53 garbled printf("0x%08lx, ", soft_sr[i]);
2721 1.1 matt printf("\n\t");
2722 1.27 chs for ( ; i < 16; i++)
2723 1.53 garbled printf("0x%08lx, ", soft_sr[i]);
2724 1.1 matt printf("\n");
2725 1.18 matt #endif
2726 1.1 matt
2727 1.97 rin #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
2728 1.1 matt printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2729 1.27 chs for (i = 0; i < 4; i++) {
2730 1.2 matt printf("0x%08lx 0x%08lx, ",
2731 1.1 matt soft_ibat[i].batu, soft_ibat[i].batl);
2732 1.1 matt if (i == 1)
2733 1.1 matt printf("\n\t");
2734 1.1 matt }
2735 1.1 matt if (cpuvers != MPC601) {
2736 1.1 matt printf("\ndBAT[]:\t");
2737 1.27 chs for (i = 0; i < 4; i++) {
2738 1.2 matt printf("0x%08lx 0x%08lx, ",
2739 1.1 matt soft_dbat[i].batu, soft_dbat[i].batl);
2740 1.1 matt if (i == 1)
2741 1.1 matt printf("\n\t");
2742 1.1 matt }
2743 1.1 matt }
2744 1.1 matt printf("\n");
2745 1.53 garbled #endif /* PMAP_OEA... */
2746 1.1 matt }
2747 1.1 matt
2748 1.1 matt void
2749 1.1 matt pmap_print_pte(pmap_t pm, vaddr_t va)
2750 1.1 matt {
2751 1.1 matt struct pvo_entry *pvo;
2752 1.2 matt volatile struct pte *pt;
2753 1.1 matt int pteidx;
2754 1.1 matt
2755 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx);
2756 1.1 matt if (pvo != NULL) {
2757 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx);
2758 1.1 matt if (pt != NULL) {
2759 1.53 garbled printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
2760 1.38 sanjayl va, pt,
2761 1.38 sanjayl pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2762 1.38 sanjayl pt->pte_hi, pt->pte_lo);
2763 1.1 matt } else {
2764 1.1 matt printf("No valid PTE found\n");
2765 1.1 matt }
2766 1.1 matt } else {
2767 1.1 matt printf("Address not in pmap\n");
2768 1.1 matt }
2769 1.1 matt }
2770 1.1 matt
2771 1.1 matt void
2772 1.1 matt pmap_pteg_dist(void)
2773 1.1 matt {
2774 1.1 matt struct pvo_entry *pvo;
2775 1.1 matt int ptegidx;
2776 1.1 matt int depth;
2777 1.1 matt int max_depth = 0;
2778 1.1 matt unsigned int depths[64];
2779 1.1 matt
2780 1.1 matt memset(depths, 0, sizeof(depths));
2781 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2782 1.1 matt depth = 0;
2783 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2784 1.1 matt depth++;
2785 1.1 matt }
2786 1.1 matt if (depth > max_depth)
2787 1.1 matt max_depth = depth;
2788 1.1 matt if (depth > 63)
2789 1.1 matt depth = 63;
2790 1.1 matt depths[depth]++;
2791 1.1 matt }
2792 1.1 matt
2793 1.1 matt for (depth = 0; depth < 64; depth++) {
2794 1.1 matt printf(" [%2d]: %8u", depth, depths[depth]);
2795 1.1 matt if ((depth & 3) == 3)
2796 1.1 matt printf("\n");
2797 1.1 matt if (depth == max_depth)
2798 1.1 matt break;
2799 1.1 matt }
2800 1.1 matt if ((depth & 3) != 3)
2801 1.1 matt printf("\n");
2802 1.1 matt printf("Max depth found was %d\n", max_depth);
2803 1.1 matt }
2804 1.1 matt #endif /* DEBUG */
2805 1.1 matt
2806 1.1 matt #if defined(PMAPCHECK) || defined(DEBUG)
2807 1.1 matt void
2808 1.1 matt pmap_pvo_verify(void)
2809 1.1 matt {
2810 1.1 matt int ptegidx;
2811 1.1 matt int s;
2812 1.1 matt
2813 1.1 matt s = splvm();
2814 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2815 1.1 matt struct pvo_entry *pvo;
2816 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2817 1.1 matt if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2818 1.1 matt panic("pmap_pvo_verify: invalid pvo %p "
2819 1.1 matt "on list %#x", pvo, ptegidx);
2820 1.1 matt pmap_pvo_check(pvo);
2821 1.1 matt }
2822 1.1 matt }
2823 1.1 matt splx(s);
2824 1.1 matt }
2825 1.1 matt #endif /* PMAPCHECK */
2826 1.1 matt
2827 1.1 matt
2828 1.1 matt void *
2829 1.1 matt pmap_pool_ualloc(struct pool *pp, int flags)
2830 1.1 matt {
2831 1.1 matt struct pvo_page *pvop;
2832 1.1 matt
2833 1.50 ad if (uvm.page_init_done != true) {
2834 1.50 ad return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2835 1.50 ad }
2836 1.50 ad
2837 1.50 ad PMAP_LOCK();
2838 1.1 matt pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2839 1.1 matt if (pvop != NULL) {
2840 1.1 matt pmap_upvop_free--;
2841 1.1 matt SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
2842 1.50 ad PMAP_UNLOCK();
2843 1.1 matt return pvop;
2844 1.1 matt }
2845 1.50 ad PMAP_UNLOCK();
2846 1.1 matt return pmap_pool_malloc(pp, flags);
2847 1.1 matt }
2848 1.1 matt
2849 1.1 matt void *
2850 1.1 matt pmap_pool_malloc(struct pool *pp, int flags)
2851 1.1 matt {
2852 1.1 matt struct pvo_page *pvop;
2853 1.1 matt struct vm_page *pg;
2854 1.1 matt
2855 1.50 ad PMAP_LOCK();
2856 1.1 matt pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2857 1.1 matt if (pvop != NULL) {
2858 1.1 matt pmap_mpvop_free--;
2859 1.1 matt SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
2860 1.50 ad PMAP_UNLOCK();
2861 1.1 matt return pvop;
2862 1.1 matt }
2863 1.50 ad PMAP_UNLOCK();
2864 1.1 matt again:
2865 1.1 matt pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2866 1.1 matt UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2867 1.1 matt if (__predict_false(pg == NULL)) {
2868 1.1 matt if (flags & PR_WAITOK) {
2869 1.1 matt uvm_wait("plpg");
2870 1.1 matt goto again;
2871 1.1 matt } else {
2872 1.1 matt 			return NULL;
2873 1.1 matt }
2874 1.1 matt }
2875 1.53 garbled KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
2876 1.53 garbled return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
2877 1.1 matt }
2878 1.1 matt
2879 1.1 matt void
2880 1.1 matt pmap_pool_ufree(struct pool *pp, void *va)
2881 1.1 matt {
2882 1.1 matt struct pvo_page *pvop;
2883 1.1 matt #if 0
2884 1.1 matt if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2885 1.1 matt pmap_pool_mfree(va, size, tag);
2886 1.1 matt return;
2887 1.1 matt }
2888 1.1 matt #endif
2889 1.50 ad PMAP_LOCK();
2890 1.1 matt pvop = va;
2891 1.1 matt SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2892 1.1 matt pmap_upvop_free++;
2893 1.1 matt if (pmap_upvop_free > pmap_upvop_maxfree)
2894 1.1 matt pmap_upvop_maxfree = pmap_upvop_free;
2895 1.50 ad PMAP_UNLOCK();
2896 1.1 matt }
2897 1.1 matt
2898 1.1 matt void
2899 1.1 matt pmap_pool_mfree(struct pool *pp, void *va)
2900 1.1 matt {
2901 1.1 matt struct pvo_page *pvop;
2902 1.1 matt
2903 1.50 ad PMAP_LOCK();
2904 1.1 matt pvop = va;
2905 1.1 matt SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2906 1.1 matt pmap_mpvop_free++;
2907 1.1 matt if (pmap_mpvop_free > pmap_mpvop_maxfree)
2908 1.1 matt pmap_mpvop_maxfree = pmap_mpvop_free;
2909 1.50 ad PMAP_UNLOCK();
2910 1.1 matt #if 0
2911 1.1 matt uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2912 1.1 matt #endif
2913 1.1 matt }
2914 1.1 matt
2915 1.1 matt /*
2916 1.1 matt  * This routine is used during bootstrapping to steal to-be-managed
2917 1.1 matt  * memory (which will then be unmanaged). We use it to grab memory from
2918 1.1 matt  * the first 256MB for our pmap needs and above 256MB for other stuff.
2919 1.1 matt */
2920 1.1 matt vaddr_t
2921 1.10 thorpej pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2922 1.1 matt {
2923 1.1 matt vsize_t size;
2924 1.1 matt vaddr_t va;
2925 1.94 cherry paddr_t start, end, pa = 0;
2926 1.94 cherry int npgs, freelist;
2927 1.94 cherry uvm_physseg_t bank;
2928 1.1 matt
2929 1.45 thorpej if (uvm.page_init_done == true)
2930 1.1 matt panic("pmap_steal_memory: called _after_ bootstrap");
2931 1.1 matt
2932 1.10 thorpej *vstartp = VM_MIN_KERNEL_ADDRESS;
2933 1.10 thorpej *vendp = VM_MAX_KERNEL_ADDRESS;
2934 1.10 thorpej
2935 1.1 matt size = round_page(vsize);
2936 1.1 matt npgs = atop(size);
2937 1.1 matt
2938 1.1 matt /*
2939 1.1 matt * PA 0 will never be among those given to UVM so we can use it
2940 1.1 matt * to indicate we couldn't steal any memory.
2941 1.1 matt */
2942 1.94 cherry
2943 1.94 cherry for (bank = uvm_physseg_get_first();
2944 1.94 cherry uvm_physseg_valid_p(bank);
2945 1.94 cherry bank = uvm_physseg_get_next(bank)) {
2946 1.94 cherry
2947 1.94 cherry freelist = uvm_physseg_get_free_list(bank);
2948 1.94 cherry start = uvm_physseg_get_start(bank);
2949 1.94 cherry end = uvm_physseg_get_end(bank);
2950 1.94 cherry
2951 1.94 cherry if (freelist == VM_FREELIST_FIRST256 &&
2952 1.94 cherry (end - start) >= npgs) {
2953 1.94 cherry pa = ptoa(start);
2954 1.1 matt break;
2955 1.1 matt }
2956 1.1 matt }
2957 1.1 matt
2958 1.1 matt if (pa == 0)
2959 1.1 matt panic("pmap_steal_memory: no appropriate memory to steal!");
2960 1.1 matt
2961 1.94 cherry uvm_physseg_unplug(start, npgs);
2962 1.1 matt
2963 1.1 matt va = (vaddr_t) pa;
2964 1.46 christos memset((void *) va, 0, size);
2965 1.1 matt pmap_pages_stolen += npgs;
2966 1.1 matt #ifdef DEBUG
2967 1.1 matt if (pmapdebug && npgs > 1) {
2968 1.1 matt u_int cnt = 0;
2969 1.94 cherry for (bank = uvm_physseg_get_first();
2970 1.94 cherry uvm_physseg_valid_p(bank);
2971 1.94 cherry bank = uvm_physseg_get_next(bank)) {
2972 1.94 cherry cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
2973 1.73 uebayasi }
2974 1.1 matt printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2975 1.1 matt npgs, pmap_pages_stolen, cnt);
2976 1.1 matt }
2977 1.1 matt #endif
2978 1.1 matt
2979 1.1 matt return va;
2980 1.1 matt }
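/*
 * As an illustrative (hypothetical) boot-time use, stealing three
 * zeroed, wired pages looks like:
 *
 *	vaddr_t vstart, vend;
 *	void *p = (void *)pmap_steal_memory(3 * PAGE_SIZE, &vstart, &vend);
 *
 * round_page()/atop() turn the request into npgs = 3, the pages are
 * unplugged from a VM_FREELIST_FIRST256 physseg so UVM never sees
 * them, and the 1:1 mapping of the first 256MB is what lets the
 * physical address double as the returned virtual address.
 */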
2981 1.1 matt
2982 1.1 matt /*
2983 1.1 matt * Find a chunk of memory with the right size and alignment.
2984 1.1 matt */
2985 1.53 garbled paddr_t
2986 1.1 matt pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2987 1.1 matt {
2988 1.1 matt struct mem_region *mp;
2989 1.1 matt paddr_t s, e;
2990 1.1 matt int i, j;
2991 1.1 matt
2992 1.1 matt size = round_page(size);
2993 1.1 matt
2994 1.1 matt DPRINTFN(BOOT,
2995 1.85 matt "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
2996 1.85 matt size, alignment, at_end);
2997 1.1 matt
2998 1.6 thorpej if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2999 1.54 mlelstv panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
3000 1.1 matt alignment);
3001 1.1 matt
3002 1.1 matt if (at_end) {
3003 1.6 thorpej if (alignment != PAGE_SIZE)
3004 1.1 matt panic("pmap_boot_find_memory: invalid ending "
3005 1.53 garbled "alignment %#" _PRIxpa, alignment);
3006 1.1 matt
3007 1.1 matt for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
3008 1.1 matt s = mp->start + mp->size - size;
3009 1.1 matt if (s >= mp->start && mp->size >= size) {
3010 1.85 matt DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3011 1.1 matt DPRINTFN(BOOT,
3012 1.85 matt "pmap_boot_find_memory: b-avail[%d] start "
3013 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3014 1.85 matt mp->start, mp->size);
3015 1.1 matt mp->size -= size;
3016 1.1 matt DPRINTFN(BOOT,
3017 1.85 matt "pmap_boot_find_memory: a-avail[%d] start "
3018 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3019 1.85 matt mp->start, mp->size);
3020 1.53 garbled return s;
3021 1.1 matt }
3022 1.1 matt }
3023 1.1 matt panic("pmap_boot_find_memory: no available memory");
3024 1.1 matt }
3025 1.1 matt
3026 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3027 1.1 matt s = (mp->start + alignment - 1) & ~(alignment-1);
3028 1.1 matt e = s + size;
3029 1.1 matt
3030 1.1 matt /*
3031 1.1 matt * Is the calculated block entirely within this region?
3032 1.1 matt */
3033 1.1 matt if (s < mp->start || e > mp->start + mp->size)
3034 1.1 matt continue;
3035 1.1 matt
3036 1.85 matt DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3037 1.1 matt if (s == mp->start) {
3038 1.1 matt /*
3039 1.1 matt * If the block starts at the beginning of region,
3040 1.1 matt * adjust the size & start. (the region may now be
3041 1.1 matt * zero in length)
3042 1.1 matt */
3043 1.1 matt DPRINTFN(BOOT,
3044 1.85 matt "pmap_boot_find_memory: b-avail[%d] start "
3045 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3046 1.1 matt mp->start += size;
3047 1.1 matt mp->size -= size;
3048 1.1 matt DPRINTFN(BOOT,
3049 1.85 matt "pmap_boot_find_memory: a-avail[%d] start "
3050 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3051 1.1 matt } else if (e == mp->start + mp->size) {
3052 1.1 matt /*
3053 1.1 matt * If the block ends at the end of the region,
3054 1.1 matt * adjust only the size.
3055 1.1 matt */
3056 1.1 matt DPRINTFN(BOOT,
3057 1.85 matt "pmap_boot_find_memory: b-avail[%d] start "
3058 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3059 1.1 matt mp->size -= size;
3060 1.1 matt DPRINTFN(BOOT,
3061 1.85 matt "pmap_boot_find_memory: a-avail[%d] start "
3062 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3063 1.1 matt } else {
3064 1.1 matt /*
3065 1.1 matt * Block is in the middle of the region, so we
3066 1.1 matt * have to split it in two.
3067 1.1 matt */
3068 1.1 matt for (j = avail_cnt; j > i + 1; j--) {
3069 1.1 matt avail[j] = avail[j-1];
3070 1.1 matt }
3071 1.1 matt DPRINTFN(BOOT,
3072 1.85 matt "pmap_boot_find_memory: b-avail[%d] start "
3073 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3074 1.1 matt mp[1].start = e;
3075 1.1 matt mp[1].size = mp[0].start + mp[0].size - e;
3076 1.1 matt mp[0].size = s - mp[0].start;
3077 1.1 matt avail_cnt++;
3078 1.1 matt for (; i < avail_cnt; i++) {
3079 1.1 matt DPRINTFN(BOOT,
3080 1.85 matt "pmap_boot_find_memory: a-avail[%d] "
3081 1.85 matt "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
3082 1.85 matt avail[i].start, avail[i].size);
3083 1.1 matt }
3084 1.1 matt }
3085 1.53 garbled KASSERT(s == (uintptr_t) s);
3086 1.53 garbled return s;
3087 1.1 matt }
3088 1.1 matt panic("pmap_boot_find_memory: not enough memory for "
3089 1.54 mlelstv "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
3090 1.1 matt }
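/*
 * The alignment step above is the usual power-of-two round-up,
 * s = (start + alignment - 1) & ~(alignment - 1).  For example, with
 * start = 0x12345 and alignment = 0x1000:
 *
 *	s = (0x12345 + 0xfff) & ~0xfff = 0x13344 & ~0xfff = 0x13000
 *
 * i.e. the first 4KB boundary at or above start, which is why the
 * function insists that alignment be a power of two.
 */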
3091 1.1 matt
3092 1.38 sanjayl /* XXXSL: we don't have any BATs to do this, so map in Segment 0 1:1 using page tables */
3093 1.53 garbled #if defined (PMAP_OEA64_BRIDGE)
3094 1.38 sanjayl int
3095 1.38 sanjayl pmap_setup_segment0_map(int use_large_pages, ...)
3096 1.38 sanjayl {
3097 1.88 christos vaddr_t va, va_end;
3098 1.38 sanjayl
3099 1.38 sanjayl register_t pte_lo = 0x0;
3100 1.90 mrg int ptegidx = 0;
3101 1.38 sanjayl struct pte pte;
3102 1.38 sanjayl va_list ap;
3103 1.38 sanjayl
3104 1.38 sanjayl /* Coherent + Supervisor RW, no user access */
3105 1.38 sanjayl pte_lo = PTE_M;
3106 1.38 sanjayl
3107 1.38 sanjayl /* XXXSL
3108 1.38 sanjayl * Map the 1st segment 1:1. We'll be careful not to spill kernel entries
3109 1.38 sanjayl * later; those have to take priority.
3110 1.38 sanjayl */
3111 1.38 sanjayl for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3112 1.38 sanjayl ptegidx = va_to_pteg(pmap_kernel(), va);
3113 1.38 sanjayl pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3114 1.90 mrg (void)pmap_pte_insert(ptegidx, &pte);
3115 1.38 sanjayl }
3116 1.38 sanjayl
3117 1.38 sanjayl va_start(ap, use_large_pages);
3118 1.38 sanjayl while (1) {
3119 1.38 sanjayl paddr_t pa;
3120 1.38 sanjayl size_t size;
3121 1.38 sanjayl
3122 1.38 sanjayl va = va_arg(ap, vaddr_t);
3123 1.38 sanjayl
3124 1.38 sanjayl if (va == 0)
3125 1.38 sanjayl break;
3126 1.38 sanjayl
3127 1.38 sanjayl pa = va_arg(ap, paddr_t);
3128 1.38 sanjayl size = va_arg(ap, size_t);
3129 1.38 sanjayl
3130 1.88 christos for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
3131 1.38 sanjayl #if 0
3132 1.54 mlelstv printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa);
3133 1.38 sanjayl #endif
3134 1.38 sanjayl ptegidx = va_to_pteg(pmap_kernel(), va);
3135 1.38 sanjayl pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3136 1.90 mrg (void)pmap_pte_insert(ptegidx, &pte);
3137 1.38 sanjayl }
3138 1.38 sanjayl }
3139 1.93 dholland va_end(ap);
3140 1.38 sanjayl
3141 1.38 sanjayl TLBSYNC();
3142 1.38 sanjayl SYNC();
3143 1.38 sanjayl return (0);
3144 1.38 sanjayl }
3145 1.53 garbled #endif /* PMAP_OEA64_BRIDGE */
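/*
 * pmap_setup_segment0_map() consumes a zero-terminated list of
 * (va, pa, size) triples following the use_large_pages flag.  A
 * hypothetical caller mapping a 1MB device region 1:1 would look
 * like:
 *
 *	pmap_setup_segment0_map(0,
 *	    (vaddr_t)0x91000000, (paddr_t)0x91000000, (size_t)0x100000,
 *	    (vaddr_t)0);
 *
 * The trailing zero va terminates the list; as the loop above shows,
 * use_large_pages is currently ignored and every range is entered
 * 4KB at a time.
 */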
3146 1.38 sanjayl
3147 1.1 matt /*
3148 1.1 matt * This is not part of the defined PMAP interface and is specific to the
3149 1.1 matt * PowerPC architecture. This is called during initppc, before the system
3150 1.1 matt * is really initialized.
3151 1.1 matt */
3152 1.1 matt void
3153 1.1 matt pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
3154 1.1 matt {
3155 1.1 matt struct mem_region *mp, tmp;
3156 1.1 matt paddr_t s, e;
3157 1.1 matt psize_t size;
3158 1.1 matt int i, j;
3159 1.1 matt
3160 1.1 matt /*
3161 1.1 matt * Get memory.
3162 1.1 matt */
3163 1.1 matt mem_regions(&mem, &avail);
3164 1.1 matt #if defined(DEBUG)
3165 1.1 matt if (pmapdebug & PMAPDEBUG_BOOT) {
3166 1.1 matt printf("pmap_bootstrap: memory configuration:\n");
3167 1.1 matt for (mp = mem; mp->size; mp++) {
3168 1.54 mlelstv printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
3169 1.1 matt mp->start, mp->size);
3170 1.1 matt }
3171 1.1 matt for (mp = avail; mp->size; mp++) {
3172 1.54 mlelstv printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
3173 1.1 matt mp->start, mp->size);
3174 1.1 matt }
3175 1.1 matt }
3176 1.1 matt #endif
3177 1.1 matt
3178 1.1 matt /*
3179 1.1 matt * Find out how much physical memory we have and in how many chunks.
3180 1.1 matt */
3181 1.1 matt for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3182 1.1 matt if (mp->start >= pmap_memlimit)
3183 1.1 matt continue;
3184 1.1 matt if (mp->start + mp->size > pmap_memlimit) {
3185 1.1 matt size = pmap_memlimit - mp->start;
3186 1.1 matt physmem += btoc(size);
3187 1.1 matt } else {
3188 1.1 matt physmem += btoc(mp->size);
3189 1.1 matt }
3190 1.1 matt mem_cnt++;
3191 1.1 matt }
3192 1.1 matt
3193 1.1 matt /*
3194 1.1 matt * Count the number of available entries.
3195 1.1 matt */
3196 1.1 matt for (avail_cnt = 0, mp = avail; mp->size; mp++)
3197 1.1 matt avail_cnt++;
3198 1.1 matt
3199 1.1 matt /*
3200 1.1 matt * Page align all regions.
3201 1.1 matt */
3202 1.1 matt kernelstart = trunc_page(kernelstart);
3203 1.1 matt kernelend = round_page(kernelend);
3204 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3205 1.1 matt s = round_page(mp->start);
3206 1.1 matt mp->size -= (s - mp->start);
3207 1.1 matt mp->size = trunc_page(mp->size);
3208 1.1 matt mp->start = s;
3209 1.1 matt e = mp->start + mp->size;
3210 1.1 matt
3211 1.1 matt DPRINTFN(BOOT,
3212 1.85 matt "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3213 1.85 matt i, mp->start, mp->size);
3214 1.1 matt
3215 1.1 matt /*
3216 1.1 matt * Don't allow the end to run beyond our artificial limit
3217 1.1 matt */
3218 1.1 matt if (e > pmap_memlimit)
3219 1.1 matt e = pmap_memlimit;
3220 1.1 matt
3221 1.1 matt /*
3222 1.1 matt * Is this region empty or strange? If so, skip it.
3223 1.1 matt */
3224 1.1 matt if (e <= s) {
3225 1.1 matt mp->start = 0;
3226 1.1 matt mp->size = 0;
3227 1.1 matt continue;
3228 1.1 matt }
3229 1.1 matt
3230 1.1 matt /*
3231 1.1 matt * Does this overlap the beginning of kernel?
3232 1.1 matt * Does this overlap the beginning of the kernel?
3233 1.1 matt * Does it also extend past the end of the kernel?
3234 1.1 matt else if (s < kernelstart && e > kernelstart) {
3235 1.1 matt if (e > kernelend) {
3236 1.1 matt avail[avail_cnt].start = kernelend;
3237 1.1 matt avail[avail_cnt].size = e - kernelend;
3238 1.1 matt avail_cnt++;
3239 1.1 matt }
3240 1.1 matt mp->size = kernelstart - s;
3241 1.1 matt }
3242 1.1 matt /*
3243 1.1 matt * Check whether this region overlaps the end of the kernel.
3244 1.1 matt */
3245 1.1 matt else if (s < kernelend && e > kernelend) {
3246 1.1 matt mp->start = kernelend;
3247 1.1 matt mp->size = e - kernelend;
3248 1.1 matt }
3249 1.1 matt /*
3250 1.1 matt * Check whether this region is completely inside the kernel.
3251 1.1 matt * Nuke it if it is.
3252 1.1 matt */
3253 1.1 matt else if (s >= kernelstart && e <= kernelend) {
3254 1.1 matt mp->start = 0;
3255 1.1 matt mp->size = 0;
3256 1.1 matt }
3257 1.1 matt /*
3258 1.1 matt * If the user imposed a memory limit, enforce it.
3259 1.1 matt */
3260 1.1 matt else if (s >= pmap_memlimit) {
3261 1.6 thorpej mp->start = -PAGE_SIZE; /* flag why it was dropped */
3262 1.1 matt mp->size = 0;
3263 1.1 matt }
3264 1.1 matt else {
3265 1.1 matt mp->start = s;
3266 1.1 matt mp->size = e - s;
3267 1.1 matt }
3268 1.1 matt DPRINTFN(BOOT,
3269 1.85 matt "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3270 1.85 matt i, mp->start, mp->size);
3271 1.1 matt }
3272 1.1 matt
3273 1.1 matt /*
3274 1.1 matt * Move (and uncount) all the null regions to the end.
3275 1.1 matt */
3276 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3277 1.1 matt if (mp->size == 0) {
3278 1.1 matt tmp = avail[i];
3279 1.1 matt avail[i] = avail[--avail_cnt];
3280 1.1 matt avail[avail_cnt] = tmp; /* complete the swap */
3281 1.1 matt }
3282 1.1 matt }
3283 1.1 matt
3284 1.1 matt /*
3285 1.61 skrll * Sort them into ascending order (a simple exchange sort; avail_cnt is small).
3286 1.1 matt */
3287 1.1 matt for (i = 0; i < avail_cnt; i++) {
3288 1.1 matt for (j = i + 1; j < avail_cnt; j++) {
3289 1.1 matt if (avail[i].start > avail[j].start) {
3290 1.1 matt tmp = avail[i];
3291 1.1 matt avail[i] = avail[j];
3292 1.1 matt avail[j] = tmp;
3293 1.1 matt }
3294 1.1 matt }
3295 1.1 matt }
3296 1.1 matt
3297 1.1 matt /*
3298 1.1 matt * Make sure they don't overlap.
3299 1.1 matt */
3300 1.1 matt for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
3301 1.1 matt if (mp[0].start + mp[0].size > mp[1].start) {
3302 1.1 matt mp[0].size = mp[1].start - mp[0].start;
3303 1.1 matt }
3304 1.1 matt DPRINTFN(BOOT,
3305 1.85 matt "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3306 1.85 matt i, mp->start, mp->size);
3307 1.1 matt }
3308 1.1 matt DPRINTFN(BOOT,
3309 1.85 matt "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3310 1.85 matt i, mp->start, mp->size);
3311 1.1 matt
3312 1.1 matt #ifdef PTEGCOUNT
3313 1.1 matt pmap_pteg_cnt = PTEGCOUNT;
3314 1.1 matt #else /* PTEGCOUNT */
3315 1.38 sanjayl
3316 1.1 matt pmap_pteg_cnt = 0x1000;
3317 1.1 matt
3318 1.1 matt while (pmap_pteg_cnt < physmem)
3319 1.1 matt pmap_pteg_cnt <<= 1;
3320 1.1 matt
3321 1.1 matt pmap_pteg_cnt >>= 1;
3322 1.1 matt #endif /* PTEGCOUNT */
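/*
 * In the !PTEGCOUNT case, the loop above leaves pmap_pteg_cnt at the
 * largest power of two strictly below physmem (with a floor of 0x800).
 * E.g. for physmem = 0x18000 pages (384MB of 4KB pages):
 *
 *	0x1000 -> 0x2000 -> 0x4000 -> 0x8000 -> 0x10000 -> 0x20000
 *
 * stops the while loop, and the final shift yields 0x10000 PTEGs.
 */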
3323 1.1 matt
3324 1.38 sanjayl #ifdef DEBUG
3325 1.85 matt DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt);
3326 1.38 sanjayl #endif
3327 1.38 sanjayl
3328 1.1 matt /*
3329 1.1 matt * Find suitably aligned memory for PTEG hash table.
3330 1.1 matt */
3331 1.2 matt size = pmap_pteg_cnt * sizeof(struct pteg);
3332 1.53 garbled pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
3333 1.38 sanjayl
3334 1.38 sanjayl #ifdef DEBUG
3335 1.38 sanjayl DPRINTFN(BOOT,
3336 1.85 matt "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table);
3337 1.38 sanjayl #endif
3338 1.38 sanjayl
3339 1.38 sanjayl
3340 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3341 1.1 matt if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
3342 1.54 mlelstv panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
3343 1.1 matt pmap_pteg_table, size);
3344 1.1 matt #endif
3345 1.1 matt
3346 1.32 he memset(__UNVOLATILE(pmap_pteg_table), 0,
3347 1.32 he pmap_pteg_cnt * sizeof(struct pteg));
3348 1.1 matt pmap_pteg_mask = pmap_pteg_cnt - 1;
3349 1.1 matt
3350 1.1 matt /*
3351 1.1 matt * We cannot use pmap_steal_memory here since UVM hasn't been loaded
3352 1.1 matt * with pages yet, so we steal the memory directly before giving it to UVM.
3353 1.1 matt */
3354 1.1 matt size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
3355 1.53 garbled pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
3356 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3357 1.1 matt if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
3358 1.54 mlelstv panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
3359 1.1 matt pmap_pvo_table, size);
3360 1.1 matt #endif
3361 1.1 matt
3362 1.1 matt for (i = 0; i < pmap_pteg_cnt; i++)
3363 1.1 matt TAILQ_INIT(&pmap_pvo_table[i]);
3364 1.1 matt
3365 1.1 matt #ifndef MSGBUFADDR
3366 1.1 matt /*
3367 1.1 matt * Allocate msgbuf in high memory.
3368 1.1 matt */
3369 1.53 garbled msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
3370 1.1 matt #endif
3371 1.1 matt
3372 1.1 matt for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
3373 1.1 matt paddr_t pfstart = atop(mp->start);
3374 1.1 matt paddr_t pfend = atop(mp->start + mp->size);
3375 1.1 matt if (mp->size == 0)
3376 1.1 matt continue;
3377 1.1 matt if (mp->start + mp->size <= SEGMENT_LENGTH) {
3378 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3379 1.1 matt VM_FREELIST_FIRST256);
3380 1.1 matt } else if (mp->start >= SEGMENT_LENGTH) {
3381 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3382 1.1 matt VM_FREELIST_DEFAULT);
3383 1.1 matt } else {
3384 1.1 matt pfend = atop(SEGMENT_LENGTH);
3385 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3386 1.1 matt VM_FREELIST_FIRST256);
3387 1.1 matt pfstart = atop(SEGMENT_LENGTH);
3388 1.1 matt pfend = atop(mp->start + mp->size);
3389 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3390 1.1 matt VM_FREELIST_DEFAULT);
3391 1.1 matt }
3392 1.1 matt }
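/*
 * A region straddling SEGMENT_LENGTH (256MB) is loaded in two pieces.
 * For a hypothetical region { start 0x0f000000, size 0x02000000 },
 * page frames 0xf000..0x10000 go to VM_FREELIST_FIRST256 and frames
 * 0x10000..0x11000 to VM_FREELIST_DEFAULT, so pmap-internal
 * allocations can always be satisfied from the 1:1-mapped first 256MB.
 */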
3393 1.1 matt
3394 1.1 matt /*
3395 1.1 matt * Make sure kernel vsid is allocated as well as VSID 0.
3396 1.1 matt */
3397 1.1 matt pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3398 1.1 matt |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
3399 1.53 garbled pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3400 1.53 garbled |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
3401 1.1 matt pmap_vsid_bitmap[0] |= 1;
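/*
 * For a VSID value V, the word index is (V & (NPMAPS-1)) / VSID_NBPW
 * and the bit is V % VSID_NBPW.  As an illustration only (assuming
 * VSID_NBPW = 32 and NPMAPS >= 64), V = 40 would set bit 8 of
 * pmap_vsid_bitmap[1].
 */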
3402 1.1 matt
3403 1.1 matt /*
3404 1.1 matt * Initialize kernel pmap and hardware.
3405 1.1 matt */
3406 1.38 sanjayl
3407 1.53 garbled /* PMAP_OEA64_BRIDGE also supports these segment-register instructions */
3408 1.53 garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
3409 1.1 matt for (i = 0; i < 16; i++) {
3410 1.91 macallan #if defined(PPC_OEA601)
3411 1.91 macallan /* XXX wedges for segment register 0xf, so set it later */
3412 1.91 macallan if ((iosrtable[i] & SR601_T) && ((MFPVR() >> 16) == MPC601))
3413 1.91 macallan continue;
3414 1.91 macallan #endif
3415 1.38 sanjayl pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
3416 1.35 perry __asm volatile ("mtsrin %0,%1"
3417 1.38 sanjayl :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
3418 1.1 matt }
3419 1.1 matt
3420 1.1 matt pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
3421 1.35 perry __asm volatile ("mtsr %0,%1"
3422 1.1 matt :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
3423 1.1 matt #ifdef KERNEL2_SR
3424 1.1 matt pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
3425 1.35 perry __asm volatile ("mtsr %0,%1"
3426 1.1 matt :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
3427 1.1 matt #endif
3428 1.53 garbled #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
3429 1.53 garbled #if defined (PMAP_OEA)
3430 1.1 matt for (i = 0; i < 16; i++) {
3431 1.1 matt if (iosrtable[i] & SR601_T) {
3432 1.1 matt pmap_kernel()->pm_sr[i] = iosrtable[i];
3433 1.35 perry __asm volatile ("mtsrin %0,%1"
3434 1.1 matt :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
3435 1.1 matt }
3436 1.1 matt }
3437 1.35 perry __asm volatile ("sync; mtsdr1 %0; isync"
3438 1.2 matt :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
3439 1.53 garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
3440 1.38 sanjayl __asm __volatile ("sync; mtsdr1 %0; isync"
3441 1.80 matt :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11))));
3442 1.38 sanjayl #endif
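/*
 * On 32-bit OEA, SDR1 packs HTABORG (the physical base of the hash
 * table, hence the size-alignment requirement when it was allocated)
 * together with HTABMASK in the low bits.  Each PTEG is 64 bytes and
 * the minimum table is 64KB (0x400 PTEGs), so HTABMASK =
 * (pmap_pteg_cnt - 1) >> 10.  E.g. a 2MB table (pmap_pteg_cnt =
 * 0x8000) gives HTABMASK = 0x7fff >> 10 = 0x1f.
 */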
3443 1.1 matt tlbia();
3444 1.1 matt
3445 1.1 matt #ifdef ALTIVEC
3446 1.1 matt pmap_use_altivec = cpu_altivec;
3447 1.1 matt #endif
3448 1.1 matt
3449 1.1 matt #ifdef DEBUG
3450 1.1 matt if (pmapdebug & PMAPDEBUG_BOOT) {
3451 1.1 matt u_int cnt;
3452 1.94 cherry uvm_physseg_t bank;
3453 1.1 matt char pbuf[9];
3454 1.94 cherry for (cnt = 0, bank = uvm_physseg_get_first();
3455 1.94 cherry uvm_physseg_valid_p(bank);
3456 1.94 cherry bank = uvm_physseg_get_next(bank)) {
3457 1.94 cherry cnt += uvm_physseg_get_avail_end(bank) -
3458 1.94 cherry uvm_physseg_get_avail_start(bank);
3459 1.53 garbled printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
3460 1.1 matt bank,
3461 1.94 cherry ptoa(uvm_physseg_get_avail_start(bank)),
3462 1.94 cherry ptoa(uvm_physseg_get_avail_end(bank)),
3463 1.94 cherry ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)));
3464 1.1 matt }
3465 1.1 matt format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3466 1.1 matt printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3467 1.1 matt pbuf, cnt);
3468 1.1 matt }
3469 1.1 matt #endif
3470 1.1 matt
3471 1.1 matt pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3472 1.1 matt sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
3473 1.60 chs &pmap_pool_uallocator, IPL_VM);
3474 1.1 matt
3475 1.1 matt pool_setlowat(&pmap_upvo_pool, 252);
3476 1.1 matt
3477 1.1 matt pool_init(&pmap_pool, sizeof(struct pmap),
3478 1.48 ad sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
3479 1.48 ad IPL_NONE);
3480 1.41 matt
3481 1.89 macallan #if defined(PMAP_NEED_MAPKERNEL)
3482 1.41 matt {
3483 1.53 garbled struct pmap *pm = pmap_kernel();
3484 1.58 garbled #if defined(PMAP_NEED_FULL_MAPKERNEL)
3485 1.41 matt extern int etext[], kernel_text[];
3486 1.41 matt vaddr_t va, va_etext = (paddr_t) etext;
3487 1.53 garbled #endif
3488 1.53 garbled paddr_t pa, pa_end;
3489 1.42 matt register_t sr;
3490 1.53 garbled struct pte pt;
3491 1.53 garbled unsigned int ptegidx;
3492 1.53 garbled int bank;
3493 1.42 matt
3494 1.53 garbled sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
3495 1.53 garbled pm->pm_sr[0] = sr;
3496 1.41 matt
3497 1.53 garbled for (bank = 0; bank < vm_nphysseg; bank++) {
3498 1.73 uebayasi pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
3499 1.73 uebayasi pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
3500 1.53 garbled for (; pa < pa_end; pa += PAGE_SIZE) {
3501 1.53 garbled ptegidx = va_to_pteg(pm, pa);
3502 1.53 garbled pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
3503 1.53 garbled pmap_pte_insert(ptegidx, &pt);
3504 1.53 garbled }
3505 1.53 garbled }
3506 1.53 garbled
3507 1.58 garbled #if defined(PMAP_NEED_FULL_MAPKERNEL)
3508 1.41 matt va = (vaddr_t) kernel_text;
3509 1.41 matt
3510 1.41 matt for (pa = kernelstart; va < va_etext;
3511 1.53 garbled pa += PAGE_SIZE, va += PAGE_SIZE) {
3512 1.53 garbled ptegidx = va_to_pteg(pm, va);
3513 1.53 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3514 1.53 garbled pmap_pte_insert(ptegidx, &pt);
3515 1.53 garbled }
3516 1.41 matt
3517 1.41 matt for (; pa < kernelend;
3518 1.53 garbled pa += PAGE_SIZE, va += PAGE_SIZE) {
3519 1.53 garbled ptegidx = va_to_pteg(pm, va);
3520 1.53 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3521 1.53 garbled pmap_pte_insert(ptegidx, &pt);
3522 1.53 garbled }
3523 1.53 garbled
3524 1.58 garbled for (va = 0, pa = 0; va < kernelstart;
3525 1.53 garbled pa += PAGE_SIZE, va += PAGE_SIZE) {
3526 1.53 garbled ptegidx = va_to_pteg(pm, va);
3527 1.58 garbled if (va < 0x3000)
3528 1.58 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3529 1.58 garbled else
3530 1.58 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3531 1.58 garbled pmap_pte_insert(ptegidx, &pt);
3532 1.58 garbled }
3533 1.58 garbled for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
3534 1.58 garbled pa += PAGE_SIZE, va += PAGE_SIZE) {
3535 1.58 garbled ptegidx = va_to_pteg(pm, va);
3536 1.53 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3537 1.53 garbled pmap_pte_insert(ptegidx, &pt);
3538 1.53 garbled }
3539 1.53 garbled #endif
3540 1.42 matt
3541 1.42 matt __asm volatile ("mtsrin %0,%1"
3542 1.42 matt :: "r"(sr), "r"(kernelstart));
3543 1.41 matt }
3544 1.41 matt #endif
3545 1.91 macallan
3546 1.91 macallan #if defined(PMAPDEBUG)
3547 1.91 macallan if ( pmapdebug )
3548 1.91 macallan pmap_print_mmuregs();
3549 1.91 macallan #endif
3550 1.1 matt }
3551