/*	$NetBSD: pmap.c,v 1.41 2006/09/19 20:19:53 matt Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.41 2006/09/19 20:19:53 matt Exp $");

#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>

#if __NetBSD_Version__ < 105010000
#include <vm/vm.h>
#include <vm/vm_kern.h>
#define	splvm()		splimp()
#endif

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <powerpc/spr.h>
#include <powerpc/oea/sr_601.h>
#include <powerpc/bat.h>
#include <powerpc/stdarg.h>

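/*
 * When debugging, give otherwise-static functions external linkage so
 * they are visible to DDB and show up in stack tracebacks.
 */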
#if defined(DEBUG) || defined(PMAPCHECK)
#define	STATIC
#else
#define	STATIC	static
#endif

#ifdef ALTIVEC
int pmap_use_altivec;
#endif

volatile struct pteg *pmap_pteg_table;
unsigned int pmap_pteg_cnt;
unsigned int pmap_pteg_mask;
#ifdef PMAP_MEMLIMIT
paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
paddr_t pmap_memlimit = -PAGE_SIZE;		/* there is no limit */
#endif

struct pmap kernel_pmap_;
unsigned int pmap_pages_stolen;
u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
u_long pmap_pvo_enter_depth;
u_long pmap_pvo_remove_depth;
#endif

int physmem;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#ifdef __HAVE_PMAP_PHYSSEG
/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTR_SHFT.
 */
#define	ATTR_SHFT	4
struct pmap_physseg pmap_physseg;
#endif

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0	/* PVO inserted by pmap_pvo_enter */
#define	PVO_SPILL_UNSET		1	/* PVO has been evicted */
#define	PVO_SPILL_SET		2	/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3	/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4	/* PVO changed by pmap_page_protect */
#define	PVO_PMAP_PROTECT	5	/* PVO changed by pmap_protect */
#define	PVO_REMOVE		6	/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
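/*
 * Mappings are at least page-aligned, so the low bits of pvo_vaddr are
 * free to hold the flags above; PVO_VADDR() masks them back off.
 */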
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of kernel's unmanaged pages */
struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of free pages from which pvo entries for unmanaged
 * pages are allocated.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
u_long pmap_upvop_free;
u_long pmap_upvop_maxfree;
u_long pmap_mpvop_free;
u_long pmap_mpvop_maxfree;

STATIC void *pmap_pool_ualloc(struct pool *, int);
STATIC void *pmap_pool_malloc(struct pool *, int);

STATIC void pmap_pool_ufree(struct pool *, void *);
STATIC void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	pmap_pool_malloc, pmap_pool_mfree, 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	pmap_pool_ualloc, pmap_pool_ufree, 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
STATIC void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)			\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
STATIC int pmap_pte_insert(int, struct pte *);
STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
STATIC void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
STATIC void pmap_pvo_free(struct pvo_entry *);
STATIC void pmap_pvo_free_list(struct pvo_head *);
STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
STATIC struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
STATIC void pvo_set_exec(struct pvo_entry *);
STATIC void pvo_clear_exec(struct pvo_entry *);

STATIC void tlbia(void);

STATIC void pmap_release(pmap_t);
STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x)		printf x
# define DPRINTFN(n, x)		if (pmapdebug & PMAPDEBUG_ ## n) printf x
#else
# define DPRINTF(x)
# define DPRINTFN(n, x)
#endif


#ifdef PMAPCOUNTERS
#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)

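/*
 * Counters are chained to a parent event (the second argument to
 * EVCNT_INITIALIZER) to record which total they subdivide.
 */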
struct evcnt pmap_evcnt_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "pages mapped");
struct evcnt pmap_evcnt_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	"pmap", "pages unmapped");

struct evcnt pmap_evcnt_kernel_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "kernel pages mapped");
struct evcnt pmap_evcnt_kernel_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
	"pmap", "kernel pages unmapped");

struct evcnt pmap_evcnt_mappings_replaced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "page mappings replaced");

struct evcnt pmap_evcnt_exec_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	"pmap", "exec pages mapped");
struct evcnt pmap_evcnt_exec_cached =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	"pmap", "exec pages cached");

struct evcnt pmap_evcnt_exec_synced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages synced");
struct evcnt pmap_evcnt_exec_synced_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages synced (CM)");
struct evcnt pmap_evcnt_exec_synced_pvo_remove =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages synced (PR)");

struct evcnt pmap_evcnt_exec_uncached_page_protect =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages uncached (PP)");
struct evcnt pmap_evcnt_exec_uncached_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages uncached (CM)");
struct evcnt pmap_evcnt_exec_uncached_zero_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages uncached (ZP)");
struct evcnt pmap_evcnt_exec_uncached_copy_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages uncached (CP)");
struct evcnt pmap_evcnt_exec_uncached_pvo_remove =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	"pmap", "exec pages uncached (PR)");

struct evcnt pmap_evcnt_updates =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "updates");
struct evcnt pmap_evcnt_collects =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "collects");
struct evcnt pmap_evcnt_copies =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "copies");

struct evcnt pmap_evcnt_ptes_spilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes spilled from overflow");
struct evcnt pmap_evcnt_ptes_unspilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes not spilled");
struct evcnt pmap_evcnt_ptes_evicted =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes evicted");

struct evcnt pmap_evcnt_ptes_primary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at primary[7]"),
};
struct evcnt pmap_evcnt_ptes_secondary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes added at secondary[7]"),
};
struct evcnt pmap_evcnt_ptes_removed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes removed");
struct evcnt pmap_evcnt_ptes_changed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "ptes changed");
struct evcnt pmap_evcnt_pvos_reclaimed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "pvos reclaimed");
struct evcnt pmap_evcnt_pvos_failed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	"pmap", "pvo allocation failures");

/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

EVCNT_ATTACH_STATIC(pmap_evcnt_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_mappings_replaced);
EVCNT_ATTACH_STATIC(pmap_evcnt_unmappings);

EVCNT_ATTACH_STATIC(pmap_evcnt_kernel_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_kernel_unmappings);

EVCNT_ATTACH_STATIC(pmap_evcnt_exec_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_cached);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced_clear_modify);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced_pvo_remove);

EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_page_protect);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_clear_modify);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_zero_page);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_copy_page);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_pvo_remove);

EVCNT_ATTACH_STATIC(pmap_evcnt_zeroed_pages);
EVCNT_ATTACH_STATIC(pmap_evcnt_copied_pages);
EVCNT_ATTACH_STATIC(pmap_evcnt_idlezeroed_pages);

EVCNT_ATTACH_STATIC(pmap_evcnt_updates);
EVCNT_ATTACH_STATIC(pmap_evcnt_collects);
EVCNT_ATTACH_STATIC(pmap_evcnt_copies);

EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_spilled);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_unspilled);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_evicted);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_removed);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_changed);

EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 0);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 1);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 2);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 3);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 4);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 5);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 6);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 7);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 0);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 1);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 2);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 3);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 4);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 5);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 6);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 7);

EVCNT_ATTACH_STATIC(pmap_evcnt_pvos_reclaimed);
EVCNT_ATTACH_STATIC(pmap_evcnt_pvos_failed);
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#define	TLBSYNC()	__asm volatile("tlbsync")
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif	/* PPC_OEA || PPC_OEA64_BRIDGE */

#if defined (PPC_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PPC_OEA64_BRIDGE */


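/*
 * Disable external interrupts (clear PSL_EE) around page table and
 * PVO list manipulation; pmap_interrupts_restore() re-enables them
 * from the saved MSR value.
 */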
static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}

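/*
 * The MPC601 has a real-time clock instead of the timebase, so on
 * that CPU read RTCL (scaled to roughly match timebase granularity).
 */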
static inline u_int32_t
mfrtcltbl(void)
{

	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	caddr_t i;

	SYNC();
#if defined(PPC_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
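	/*
	 * Stepping the EA through 64 consecutive pages covers every TLB
	 * congruence class on typical OEA parts, invalidating all entries.
	 */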
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
	printf("Invalidating ALL TLB entries......\n");
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (caddr_t)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PPC_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PPC_OEA || PPC_OEA64_BRIDGE */
}

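/*
 * Primary PTEG index as defined by the OEA architecture: fold the
 * VSID with the page index bits of the EA, masked by the table size.
 */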
static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PPC_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PPC_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
#ifdef __HAVE_VM_PAGE_MD
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	return &pg->mdpage.mdpg_pvoh;
#endif
#ifdef __HAVE_PMAP_PHYSSEG
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (pg_p != NULL)
		*pg_p = pg;
	if (bank == -1)
		return &pmap_pvo_unmanaged;
	return &vm_physmem[bank].pmseg.pvoh[pg];
#endif
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
#ifdef __HAVE_VM_PAGE_MD
	return &pg->mdpage.mdpg_pvoh;
#endif
#ifdef __HAVE_PMAP_PHYSSEG
	return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
#endif
}


#ifdef __HAVE_PMAP_PHYSSEG
static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}
#endif

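/*
 * The referenced/modified attribute cache lives either in each
 * vm_page's machine-dependent data (__HAVE_VM_PAGE_MD) or in the
 * per-segment attrs array (__HAVE_PMAP_PHYSSEG); the helpers below
 * hide that difference.
 */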
static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
#ifdef __HAVE_PMAP_PHYSSEG
	*pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT);
#endif
#ifdef __HAVE_VM_PAGE_MD
	pg->mdpage.mdpg_attrs &= ~ptebit;
#endif
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
#ifdef __HAVE_PMAP_PHYSSEG
	return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT;
#endif
#ifdef __HAVE_VM_PAGE_MD
	return pg->mdpage.mdpg_attrs;
#endif
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
#ifdef __HAVE_PMAP_PHYSSEG
	*pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT);
#endif
#ifdef __HAVE_VM_PAGE_MD
	pg->mdpage.mdpg_attrs |= ptebit;
#endif
}

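/*
 * Two PTEs are considered to match when their pte_hi words (VSID,
 * API and hash-side bits) are equal; the pte_lo comparison, which
 * would ignore the REF/CHG bits, is currently disabled.
 */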
static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
		~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PPC_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PPC_OEA64_BRIDGE)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#elif defined (PPC_OEA64)
#error PPC_OEA64 not supported
#endif /* PPC_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

STATIC int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
#if defined (PPC_OEA)
	DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%x 0x%x\n",
	    ptegidx, (unsigned int) pvo_pt->pte_hi, (unsigned int) pvo_pt->pte_lo));
#elif defined (PPC_OEA64_BRIDGE)
	DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%016llx 0x%016llx\n",
	    ptegidx, (unsigned long long) pvo_pt->pte_hi,
	    (unsigned long long) pvo_pt->pte_lo));

#endif
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
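	 * The secondary PTEG index is the primary index XORed with the
	 * table mask; PTEs placed there must carry PTE_HID.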
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, boolean_t exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0 ||
		    VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
				!= KERNEL_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs always are first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];	/* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA we are replacing
	 * even though it's valid; if we don't, we lose any ref/chg bit
	 * changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);
	return 1;
}

/*
 * Restrict given range to physical memory
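 * by clamping [*start, *start + *size) to the first overlapping
 * region in the mem[] table; *size is set to 0 if nothing overlaps.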
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
#ifdef __HAVE_PMAP_PHYSSEG
	struct pvo_tqhead *pvoh;
	int bank;
	long sz;
	char *attr;

	pvoh = pmap_physseg.pvoh;
	attr = pmap_physseg.attrs;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvoh = pvoh;
		vm_physmem[bank].pmseg.attrs = attr;
		for (; sz > 0; sz--, pvoh++, attr++) {
			TAILQ_INIT(pvoh);
			*attr = 0;
		}
	}
#endif

	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator);

	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;

}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	memset((caddr_t)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE,("pmap_create: pm %p:\n"
	    "\t%06x %06x %06x %06x %06x %06x %06x %06x\n"
	    "\t%06x %06x %06x %06x %06x %06x %06x %06x\n", pm,
	    (unsigned int) pm->pm_sr[0], (unsigned int) pm->pm_sr[1],
	    (unsigned int) pm->pm_sr[2], (unsigned int) pm->pm_sr[3],
	    (unsigned int) pm->pm_sr[4], (unsigned int) pm->pm_sr[5],
	    (unsigned int) pm->pm_sr[6], (unsigned int) pm->pm_sr[7],
	    (unsigned int) pm->pm_sr[8], (unsigned int) pm->pm_sr[9],
	    (unsigned int) pm->pm_sr[10], (unsigned int) pm->pm_sr[11],
	    (unsigned int) pm->pm_sr[12], (unsigned int) pm->pm_sr[13],
	    (unsigned int) pm->pm_sr[14], (unsigned int) pm->pm_sr[15]));
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	register_t entropy = MFTB();
	register_t mask;
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
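	/*
	 * Each word of pmap_vsid_bitmap tracks VSID_NBPW VSIDs; the hash
	 * below selects a word (n) and a bit within it (mask).
	 */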
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		static register_t pmap_vsidcontext;
		register_t hash;
		unsigned int n;

		/* Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT Hash function collides
		 * less often.  (note that the prime causes gcc to do shifts
		 * instead of a multiply)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0) {		/* 0 is special, avoid it */
			entropy += 0xbadf00d;
			continue;
		}
		n = hash >> 5;
		mask = 1L << (hash & (VSID_NBPW-1));
		hash = pmap_vsidcontext;
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (~pmap_vsid_bitmap[n] == 0) {
				entropy = hash ^ (hash >> 16);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1L << i;
			hash &= ~(VSID_NBPW-1);
			hash |= i;
		}
		hash &= PTE_VSID >> PTE_VSID_SHFT;
		pmap_vsid_bitmap[n] |= mask;
		pm->pm_vsid = hash;
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
			    SR_NOEXEC;
#endif
		return;
	}
	panic("pmap_pinit: out of segments");
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);

	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = pm->pm_vsid & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;

	KASSERT(pmap_vsid_bitmap[idx] & mask);
	pmap_vsid_bitmap[idx] &= ~mask;
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap_t pm)
{
	PMAPCOUNT(collects);
}

static inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by reading
	 * the PTEG slot index from the low three bits of pvo_vaddr
	 * (PVO_PTEGIDX_GET) and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}

volatile struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile struct pte *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
1426 1.12 matt 		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
1427 1.1 matt "pmap_pteg_table but valid in pvo", pvo, pt);
1428 1.1 matt }
1429 1.1 matt return NULL;
1430 1.1 matt #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1431 1.1 matt }
1432 1.1 matt
1433 1.1 matt struct pvo_entry *
1434 1.1 matt pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1435 1.1 matt {
1436 1.1 matt struct pvo_entry *pvo;
1437 1.1 matt int ptegidx;
1438 1.1 matt
1439 1.1 matt va &= ~ADDR_POFF;
1440 1.2 matt ptegidx = va_to_pteg(pm, va);
1441 1.1 matt
1442 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1443 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1444 1.1 matt if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1445 1.1 matt panic("pmap_pvo_find_va: invalid pvo %p on "
1446 1.1 matt "list %#x (%p)", pvo, ptegidx,
1447 1.1 matt &pmap_pvo_table[ptegidx]);
1448 1.1 matt #endif
1449 1.1 matt if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1450 1.1 matt if (pteidx_p)
1451 1.1 matt *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1452 1.1 matt return pvo;
1453 1.1 matt }
1454 1.1 matt }
1455 1.38 sanjayl if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
1456 1.38 sanjayl panic("%s: returning NULL for %s pmap, va: 0x%08lx\n", __FUNCTION__,
1457 1.38 sanjayl (pm == pmap_kernel() ? "kernel" : "user"), va);
1458 1.1 matt return NULL;
1459 1.1 matt }
1460 1.1 matt
1461 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK)
1462 1.1 matt void
1463 1.1 matt pmap_pvo_check(const struct pvo_entry *pvo)
1464 1.1 matt {
1465 1.1 matt struct pvo_head *pvo_head;
1466 1.1 matt struct pvo_entry *pvo0;
1467 1.2 matt volatile struct pte *pt;
1468 1.1 matt int failed = 0;
1469 1.1 matt
1470 1.1 matt if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1471 1.1 matt panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1472 1.1 matt
1473 1.1 matt if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1474 1.1 matt printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1475 1.1 matt pvo, pvo->pvo_pmap);
1476 1.1 matt failed = 1;
1477 1.1 matt }
1478 1.1 matt
1479 1.1 matt if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1480 1.1 matt (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1481 1.1 matt printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1482 1.1 matt pvo, TAILQ_NEXT(pvo, pvo_olink));
1483 1.1 matt failed = 1;
1484 1.1 matt }
1485 1.1 matt
1486 1.1 matt if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1487 1.1 matt (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1488 1.1 matt printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1489 1.1 matt pvo, LIST_NEXT(pvo, pvo_vlink));
1490 1.1 matt failed = 1;
1491 1.1 matt }
1492 1.1 matt
1493 1.39 matt if (PVO_MANAGED_P(pvo)) {
1494 1.1 matt pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1495 1.1 matt } else {
1496 1.1 matt if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1497 1.1 matt printf("pmap_pvo_check: pvo %p: non kernel address "
1498 1.1 matt "on kernel unmanaged list\n", pvo);
1499 1.1 matt failed = 1;
1500 1.1 matt }
1501 1.1 matt pvo_head = &pmap_pvo_kunmanaged;
1502 1.1 matt }
1503 1.1 matt LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1504 1.1 matt if (pvo0 == pvo)
1505 1.1 matt break;
1506 1.1 matt }
1507 1.1 matt if (pvo0 == NULL) {
1508 1.1 matt printf("pmap_pvo_check: pvo %p: not present "
1509 1.1 matt "on its vlist head %p\n", pvo, pvo_head);
1510 1.1 matt failed = 1;
1511 1.1 matt }
1512 1.1 matt if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1513 1.1 matt printf("pmap_pvo_check: pvo %p: not present "
1514 1.1 matt "on its olist head\n", pvo);
1515 1.1 matt failed = 1;
1516 1.1 matt }
1517 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
1518 1.1 matt if (pt == NULL) {
1519 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1520 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1521 1.1 matt "no PTE\n", pvo);
1522 1.1 matt failed = 1;
1523 1.1 matt }
1524 1.1 matt } else {
1525 1.1 matt if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1526 1.1 matt (uintptr_t) pt >=
1527 1.1 matt (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1528 1.1 matt printf("pmap_pvo_check: pvo %p: pte %p not in "
1529 1.1 matt "pteg table\n", pvo, pt);
1530 1.1 matt failed = 1;
1531 1.1 matt }
1532 1.1 matt if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1533 1.1 matt 			printf("pmap_pvo_check: pvo %p: saved PTE index "
1534 1.1 matt 			    "does not match PTE address\n", pvo);
1535 1.1 matt failed = 1;
1536 1.1 matt }
1537 1.1 matt if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1538 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1539 1.19 mjl "%#x/%#x\n", pvo, (unsigned int) pvo->pvo_pte.pte_hi, (unsigned int) pt->pte_hi);
1540 1.1 matt failed = 1;
1541 1.1 matt }
1542 1.1 matt if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1543 1.1 matt (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1544 1.1 matt printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1545 1.18 matt "%#x/%#x\n", pvo,
1546 1.19 mjl (unsigned int) (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
1547 1.19 mjl (unsigned int) (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
1548 1.1 matt failed = 1;
1549 1.1 matt }
1550 1.1 matt if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1551 1.1 matt printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx"
1552 1.1 matt 			    " doesn't match PVO's VA %#lx\n",
1553 1.1 matt pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1554 1.1 matt failed = 1;
1555 1.1 matt }
1556 1.1 matt if (failed)
1557 1.1 matt pmap_pte_print(pt);
1558 1.1 matt }
1559 1.1 matt if (failed)
1560 1.1 matt panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1561 1.1 matt pvo->pvo_pmap);
1562 1.1 matt }
1563 1.1 matt #endif /* DEBUG || PMAPCHECK */
1564 1.1 matt
1565 1.1 matt /*
1566 1.25 chs * Search the PVO table looking for a non-wired entry.
1567 1.25 chs * If we find one, remove it and return it.
1568 1.25 chs */
1569 1.25 chs
1570 1.25 chs struct pvo_entry *
1571 1.25 chs pmap_pvo_reclaim(struct pmap *pm)
1572 1.25 chs {
1573 1.25 chs struct pvo_tqhead *pvoh;
1574 1.25 chs struct pvo_entry *pvo;
1575 1.25 chs uint32_t idx, endidx;
1576 1.25 chs
1577 1.25 chs endidx = pmap_pvo_reclaim_nextidx;
1578 1.25 chs for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
1579 1.25 chs idx = (idx + 1) & pmap_pteg_mask) {
1580 1.25 chs pvoh = &pmap_pvo_table[idx];
1581 1.25 chs TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
1582 1.39 matt if (!PVO_WIRED_P(pvo)) {
1583 1.33 chs pmap_pvo_remove(pvo, -1, NULL);
1584 1.25 chs pmap_pvo_reclaim_nextidx = idx;
1585 1.26 matt PMAPCOUNT(pvos_reclaimed);
1586 1.25 chs return pvo;
1587 1.25 chs }
1588 1.25 chs }
1589 1.25 chs }
1590 1.25 chs return NULL;
1591 1.25 chs }
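/*
 * Editorial note (not part of the original source): the scan above is
 * circular.  With pmap_pteg_mask == 0x3ff, starting from endidx == 0x3ff
 * the first PTEG examined is (0x3ff + 1) & 0x3ff == 0, and every PTEG is
 * visited exactly once before the loop gives up at idx == endidx.
 */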
1592 1.25 chs
1593 1.25 chs /*
1594 1.1 matt  * Enter a mapping into the PVO table; returns 0, or ENOMEM on failure.
1595 1.1 matt */
1596 1.1 matt int
1597 1.1 matt pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1598 1.2 matt vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1599 1.1 matt {
1600 1.1 matt struct pvo_entry *pvo;
1601 1.1 matt struct pvo_tqhead *pvoh;
1602 1.2 matt register_t msr;
1603 1.1 matt int ptegidx;
1604 1.1 matt int i;
1605 1.1 matt int poolflags = PR_NOWAIT;
1606 1.1 matt
1607 1.28 chs /*
1608 1.28 chs * Compute the PTE Group index.
1609 1.28 chs */
1610 1.28 chs va &= ~ADDR_POFF;
1611 1.28 chs ptegidx = va_to_pteg(pm, va);
1612 1.28 chs
1613 1.28 chs msr = pmap_interrupts_off();
1614 1.28 chs
1615 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1616 1.1 matt if (pmap_pvo_remove_depth > 0)
1617 1.1 matt panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1618 1.1 matt if (++pmap_pvo_enter_depth > 1)
1619 1.1 matt panic("pmap_pvo_enter: called recursively!");
1620 1.1 matt #endif
1621 1.1 matt
1622 1.1 matt 	/*
1623 1.1 matt 	 * Remove any existing mapping for this virtual address.
1624 1.1 matt 	 * The displaced pvo entry, if any, is freed further below.
1625 1.1 matt 	 */
1626 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1627 1.1 matt if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1628 1.1 matt #ifdef DEBUG
1629 1.1 matt if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1630 1.1 matt ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1631 1.1 matt ~(PTE_REF|PTE_CHG)) == 0 &&
1632 1.1 matt va < VM_MIN_KERNEL_ADDRESS) {
1633 1.18 matt printf("pmap_pvo_enter: pvo %p: dup %#x/%#lx\n",
1634 1.19 mjl pvo, (unsigned int) pvo->pvo_pte.pte_lo, (unsigned int) pte_lo|pa);
1635 1.18 matt printf("pmap_pvo_enter: pte_hi=%#x sr=%#x\n",
1636 1.19 mjl (unsigned int) pvo->pvo_pte.pte_hi,
1637 1.19 mjl (unsigned int) pm->pm_sr[va >> ADDR_SR_SHFT]);
1638 1.1 matt pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1639 1.1 matt #ifdef DDBX
1640 1.1 matt Debugger();
1641 1.1 matt #endif
1642 1.1 matt }
1643 1.1 matt #endif
1644 1.1 matt PMAPCOUNT(mappings_replaced);
1645 1.33 chs pmap_pvo_remove(pvo, -1, NULL);
1646 1.1 matt break;
1647 1.1 matt }
1648 1.1 matt }
1649 1.1 matt
1650 1.1 matt 	/*
1651 1.1 matt 	 * Free any pvo displaced above, then try to allocate a new one.
1652 1.1 matt 	 */
1653 1.26 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1654 1.26 matt --pmap_pvo_enter_depth;
1655 1.26 matt #endif
1656 1.1 matt pmap_interrupts_restore(msr);
1657 1.33 chs if (pvo) {
1658 1.33 chs pmap_pvo_free(pvo);
1659 1.33 chs }
1660 1.1 matt pvo = pool_get(pl, poolflags);
1661 1.25 chs
1662 1.25 chs #ifdef DEBUG
1663 1.25 chs /*
1664 1.25 chs * Exercise pmap_pvo_reclaim() a little.
1665 1.25 chs */
1666 1.25 chs if (pvo && (flags & PMAP_CANFAIL) != 0 &&
1667 1.25 chs pmap_pvo_reclaim_debugctr++ > 0x1000 &&
1668 1.25 chs (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
1669 1.25 chs pool_put(pl, pvo);
1670 1.25 chs pvo = NULL;
1671 1.25 chs }
1672 1.25 chs #endif
1673 1.25 chs
1674 1.1 matt msr = pmap_interrupts_off();
1675 1.26 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1676 1.26 matt ++pmap_pvo_enter_depth;
1677 1.26 matt #endif
1678 1.1 matt if (pvo == NULL) {
1679 1.1 matt pvo = pmap_pvo_reclaim(pm);
1680 1.1 matt if (pvo == NULL) {
1681 1.1 matt if ((flags & PMAP_CANFAIL) == 0)
1682 1.1 matt panic("pmap_pvo_enter: failed");
1683 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1684 1.1 matt pmap_pvo_enter_depth--;
1685 1.1 matt #endif
1686 1.26 matt PMAPCOUNT(pvos_failed);
1687 1.1 matt pmap_interrupts_restore(msr);
1688 1.1 matt return ENOMEM;
1689 1.1 matt }
1690 1.1 matt }
1691 1.25 chs
1692 1.1 matt pvo->pvo_vaddr = va;
1693 1.1 matt pvo->pvo_pmap = pm;
1694 1.1 matt pvo->pvo_vaddr &= ~ADDR_POFF;
1695 1.1 matt if (flags & VM_PROT_EXECUTE) {
1696 1.1 matt PMAPCOUNT(exec_mappings);
1697 1.14 chs pvo_set_exec(pvo);
1698 1.1 matt }
1699 1.1 matt if (flags & PMAP_WIRED)
1700 1.1 matt pvo->pvo_vaddr |= PVO_WIRED;
1701 1.1 matt if (pvo_head != &pmap_pvo_kunmanaged) {
1702 1.1 matt pvo->pvo_vaddr |= PVO_MANAGED;
1703 1.1 matt PMAPCOUNT(mappings);
1704 1.1 matt } else {
1705 1.1 matt PMAPCOUNT(kernel_mappings);
1706 1.1 matt }
1707 1.2 matt pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1708 1.1 matt
1709 1.1 matt LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1710 1.39 matt if (PVO_WIRED_P(pvo))
1711 1.1 matt pvo->pvo_pmap->pm_stats.wired_count++;
1712 1.1 matt pvo->pvo_pmap->pm_stats.resident_count++;
1713 1.1 matt #if defined(DEBUG)
1714 1.38 sanjayl /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1715 1.1 matt DPRINTFN(PVOENTER,
1716 1.1 matt ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n",
1717 1.1 matt pvo, pm, va, pa));
1718 1.1 matt #endif
1719 1.1 matt
1720 1.1 matt /*
1721 1.1 matt * We hope this succeeds but it isn't required.
1722 1.1 matt */
1723 1.1 matt pvoh = &pmap_pvo_table[ptegidx];
1724 1.1 matt i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1725 1.1 matt if (i >= 0) {
1726 1.1 matt PVO_PTEGIDX_SET(pvo, i);
1727 1.12 matt PVO_WHERE(pvo, ENTER_INSERT);
1728 1.1 matt PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1729 1.1 matt ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1730 1.1 matt TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1731 1.38 sanjayl
1732 1.1 matt } else {
1733 1.1 matt /*
1734 1.1 matt * Since we didn't have room for this entry (which makes it
1735 1.1 matt 		 * an evicted entry), place it at the head of the list.
1736 1.1 matt */
1737 1.1 matt TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1738 1.1 matt PMAPCOUNT(ptes_evicted);
1739 1.1 matt pm->pm_evictions++;
1740 1.12 matt /*
1741 1.12 matt * If this is a kernel page, make sure it's active.
1742 1.12 matt */
1743 1.12 matt if (pm == pmap_kernel()) {
1744 1.14 chs i = pmap_pte_spill(pm, va, FALSE);
1745 1.12 matt KASSERT(i);
1746 1.12 matt }
1747 1.1 matt }
1748 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
1749 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1750 1.1 matt pmap_pvo_enter_depth--;
1751 1.1 matt #endif
1752 1.1 matt pmap_interrupts_restore(msr);
1753 1.1 matt return 0;
1754 1.1 matt }
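/*
 * Editorial sketch (not part of the original source): how a caller is
 * expected to drive pmap_pvo_enter().  The argument values are
 * illustrative only.
 */
#if 0
	int error;

	/* With PMAP_CANFAIL set, an ENOMEM return must be handled: */
	error = pmap_pvo_enter(pm, &pmap_mpvo_pool, pvo_head, va, pa,
	    pte_lo, flags | PMAP_CANFAIL);
	if (error == ENOMEM)
		return error;	/* caller (e.g. uvm_fault) may retry */
	/* Without PMAP_CANFAIL, pmap_pvo_enter() panics rather than fail. */
#endif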
1755 1.1 matt
1756 1.1 matt void
1757 1.33 chs pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1758 1.1 matt {
1759 1.2 matt volatile struct pte *pt;
1760 1.1 matt int ptegidx;
1761 1.1 matt
1762 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1763 1.1 matt if (++pmap_pvo_remove_depth > 1)
1764 1.1 matt panic("pmap_pvo_remove: called recursively!");
1765 1.1 matt #endif
1766 1.1 matt
1767 1.1 matt /*
1768 1.1 matt 	 * If we haven't been supplied the pteidx, calculate it.
1769 1.1 matt */
1770 1.1 matt if (pteidx == -1) {
1771 1.2 matt ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1772 1.1 matt pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1773 1.1 matt } else {
1774 1.1 matt ptegidx = pteidx >> 3;
1775 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_HID)
1776 1.1 matt ptegidx ^= pmap_pteg_mask;
1777 1.1 matt }
1778 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
1779 1.1 matt
1780 1.1 matt /*
1781 1.1 matt * If there is an active pte entry, we need to deactivate it
1782 1.1 matt * (and save the ref & chg bits).
1783 1.1 matt */
1784 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx);
1785 1.1 matt if (pt != NULL) {
1786 1.1 matt pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1787 1.12 matt PVO_WHERE(pvo, REMOVE);
1788 1.1 matt PVO_PTEGIDX_CLR(pvo);
1789 1.1 matt PMAPCOUNT(ptes_removed);
1790 1.1 matt } else {
1791 1.1 matt KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1792 1.1 matt pvo->pvo_pmap->pm_evictions--;
1793 1.1 matt }
1794 1.1 matt
1795 1.1 matt /*
1796 1.14 chs * Account for executable mappings.
1797 1.14 chs */
1798 1.39 matt if (PVO_EXECUTABLE_P(pvo))
1799 1.14 chs pvo_clear_exec(pvo);
1800 1.14 chs
1801 1.14 chs /*
1802 1.14 chs * Update our statistics.
1803 1.1 matt */
1804 1.1 matt pvo->pvo_pmap->pm_stats.resident_count--;
1805 1.39 matt if (PVO_WIRED_P(pvo))
1806 1.1 matt pvo->pvo_pmap->pm_stats.wired_count--;
1807 1.1 matt
1808 1.1 matt /*
1809 1.1 matt * Save the REF/CHG bits into their cache if the page is managed.
1810 1.1 matt */
1811 1.39 matt if (PVO_MANAGED_P(pvo)) {
1812 1.2 matt register_t ptelo = pvo->pvo_pte.pte_lo;
1813 1.1 matt struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1814 1.1 matt
1815 1.1 matt if (pg != NULL) {
1816 1.37 matt /*
1817 1.37 matt * If this page was changed and it is mapped exec,
1818 1.37 matt * invalidate it.
1819 1.37 matt */
1820 1.37 matt if ((ptelo & PTE_CHG) &&
1821 1.37 matt (pmap_attr_fetch(pg) & PTE_EXEC)) {
1822 1.37 matt struct pvo_head *pvoh = vm_page_to_pvoh(pg);
1823 1.37 matt if (LIST_EMPTY(pvoh)) {
1824 1.37 matt DPRINTFN(EXEC, ("[pmap_pvo_remove: "
1825 1.37 matt "%#lx: clear-exec]\n",
1826 1.37 matt VM_PAGE_TO_PHYS(pg)));
1827 1.37 matt pmap_attr_clear(pg, PTE_EXEC);
1828 1.37 matt PMAPCOUNT(exec_uncached_pvo_remove);
1829 1.37 matt } else {
1830 1.37 matt DPRINTFN(EXEC, ("[pmap_pvo_remove: "
1831 1.37 matt "%#lx: syncicache]\n",
1832 1.37 matt VM_PAGE_TO_PHYS(pg)));
1833 1.37 matt pmap_syncicache(VM_PAGE_TO_PHYS(pg),
1834 1.37 matt PAGE_SIZE);
1835 1.37 matt PMAPCOUNT(exec_synced_pvo_remove);
1836 1.37 matt }
1837 1.37 matt }
1838 1.37 matt
1839 1.1 matt pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1840 1.1 matt }
1841 1.1 matt PMAPCOUNT(unmappings);
1842 1.1 matt } else {
1843 1.1 matt PMAPCOUNT(kernel_unmappings);
1844 1.1 matt }
1845 1.1 matt
1846 1.1 matt /*
1847 1.1 matt * Remove the PVO from its lists and return it to the pool.
1848 1.1 matt */
1849 1.1 matt LIST_REMOVE(pvo, pvo_vlink);
1850 1.1 matt TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1851 1.33 chs if (pvol) {
1852 1.33 chs LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1853 1.25 chs }
1854 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1855 1.1 matt pmap_pvo_remove_depth--;
1856 1.1 matt #endif
1857 1.1 matt }
1858 1.1 matt
1859 1.33 chs void
1860 1.33 chs pmap_pvo_free(struct pvo_entry *pvo)
1861 1.33 chs {
1862 1.33 chs
1863 1.39 matt pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1864 1.33 chs }
1865 1.33 chs
1866 1.33 chs void
1867 1.33 chs pmap_pvo_free_list(struct pvo_head *pvol)
1868 1.33 chs {
1869 1.33 chs struct pvo_entry *pvo, *npvo;
1870 1.33 chs
1871 1.33 chs for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
1872 1.33 chs npvo = LIST_NEXT(pvo, pvo_vlink);
1873 1.33 chs LIST_REMOVE(pvo, pvo_vlink);
1874 1.33 chs pmap_pvo_free(pvo);
1875 1.33 chs }
1876 1.33 chs }
1877 1.33 chs
1878 1.1 matt /*
1879 1.14 chs * Mark a mapping as executable.
1880 1.14 chs * If this is the first executable mapping in the segment,
1881 1.14 chs * clear the noexec flag.
1882 1.14 chs */
1883 1.14 chs STATIC void
1884 1.14 chs pvo_set_exec(struct pvo_entry *pvo)
1885 1.14 chs {
1886 1.14 chs struct pmap *pm = pvo->pvo_pmap;
1887 1.14 chs
1888 1.39 matt if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1889 1.14 chs return;
1890 1.14 chs }
1891 1.14 chs pvo->pvo_vaddr |= PVO_EXECUTABLE;
1892 1.38 sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
1893 1.18 matt {
1894 1.18 matt int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1895 1.18 matt if (pm->pm_exec[sr]++ == 0) {
1896 1.18 matt pm->pm_sr[sr] &= ~SR_NOEXEC;
1897 1.18 matt }
1898 1.14 chs }
1899 1.18 matt #endif
1900 1.14 chs }
1901 1.14 chs
1902 1.14 chs /*
1903 1.14 chs * Mark a mapping as non-executable.
1904 1.14 chs * If this was the last executable mapping in the segment,
1905 1.14 chs * set the noexec flag.
1906 1.14 chs */
1907 1.14 chs STATIC void
1908 1.14 chs pvo_clear_exec(struct pvo_entry *pvo)
1909 1.14 chs {
1910 1.14 chs struct pmap *pm = pvo->pvo_pmap;
1911 1.14 chs
1912 1.39 matt if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1913 1.14 chs return;
1914 1.14 chs }
1915 1.14 chs pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1916 1.38 sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
1917 1.18 matt {
1918 1.18 matt int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1919 1.18 matt if (--pm->pm_exec[sr] == 0) {
1920 1.18 matt pm->pm_sr[sr] |= SR_NOEXEC;
1921 1.18 matt }
1922 1.14 chs }
1923 1.18 matt #endif
1924 1.14 chs }
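/*
 * Editorial example (not part of the original source): with three
 * executable mappings in segment 2, pm_exec[2] steps 0->1->2->3 and
 * SR_NOEXEC is cleared only by the first pvo_set_exec().  Removing them
 * steps 3->2->1->0, and SR_NOEXEC is set again only when the count
 * returns to 0.
 */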
1925 1.14 chs
1926 1.14 chs /*
1927 1.1 matt * Insert physical page at pa into the given pmap at virtual address va.
1928 1.1 matt */
1929 1.1 matt int
1930 1.1 matt pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1931 1.1 matt {
1932 1.1 matt struct mem_region *mp;
1933 1.1 matt struct pvo_head *pvo_head;
1934 1.1 matt struct vm_page *pg;
1935 1.1 matt struct pool *pl;
1936 1.2 matt register_t pte_lo;
1937 1.1 matt int error;
1938 1.1 matt u_int pvo_flags;
1939 1.1 matt u_int was_exec = 0;
1940 1.1 matt
1941 1.1 matt if (__predict_false(!pmap_initialized)) {
1942 1.1 matt pvo_head = &pmap_pvo_kunmanaged;
1943 1.1 matt pl = &pmap_upvo_pool;
1944 1.1 matt pvo_flags = 0;
1945 1.1 matt pg = NULL;
1946 1.1 matt was_exec = PTE_EXEC;
1947 1.1 matt } else {
1948 1.1 matt pvo_head = pa_to_pvoh(pa, &pg);
1949 1.1 matt pl = &pmap_mpvo_pool;
1950 1.1 matt pvo_flags = PVO_MANAGED;
1951 1.1 matt }
1952 1.1 matt
1953 1.1 matt DPRINTFN(ENTER,
1954 1.1 matt ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):",
1955 1.1 matt pm, va, pa, prot, flags));
1956 1.1 matt
1957 1.1 matt /*
1958 1.1 matt * If this is a managed page, and it's the first reference to the
1959 1.1 matt 	 * page, clear its execness.  Otherwise fetch the execness.
1960 1.1 matt */
1961 1.1 matt if (pg != NULL)
1962 1.1 matt was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1963 1.1 matt
1964 1.1 matt DPRINTFN(ENTER, (" was_exec=%d", was_exec));
1965 1.1 matt
1966 1.1 matt /*
1967 1.1 matt * Assume the page is cache inhibited and access is guarded unless
1968 1.1 matt * it's in our available memory array. If it is in the memory array,
1969 1.1 matt 	 * assume it's memory coherent.
1970 1.1 matt */
1971 1.1 matt pte_lo = PTE_IG;
1972 1.1 matt if ((flags & PMAP_NC) == 0) {
1973 1.1 matt for (mp = mem; mp->size; mp++) {
1974 1.1 matt if (pa >= mp->start && pa < mp->start + mp->size) {
1975 1.1 matt pte_lo = PTE_M;
1976 1.1 matt break;
1977 1.1 matt }
1978 1.1 matt }
1979 1.1 matt }
1980 1.1 matt
1981 1.1 matt if (prot & VM_PROT_WRITE)
1982 1.1 matt pte_lo |= PTE_BW;
1983 1.1 matt else
1984 1.1 matt pte_lo |= PTE_BR;
1985 1.1 matt
1986 1.1 matt /*
1987 1.1 matt * If this was in response to a fault, "pre-fault" the PTE's
1988 1.1 matt * changed/referenced bit appropriately.
1989 1.1 matt */
1990 1.1 matt if (flags & VM_PROT_WRITE)
1991 1.1 matt pte_lo |= PTE_CHG;
1992 1.30 chs if (flags & VM_PROT_ALL)
1993 1.1 matt pte_lo |= PTE_REF;
1994 1.1 matt
1995 1.1 matt /*
1996 1.1 matt * We need to know if this page can be executable
1997 1.1 matt */
1998 1.1 matt flags |= (prot & VM_PROT_EXECUTE);
1999 1.1 matt
2000 1.1 matt /*
2001 1.1 matt * Record mapping for later back-translation and pte spilling.
2002 1.1 matt * This will overwrite any existing mapping.
2003 1.1 matt */
2004 1.1 matt error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
2005 1.1 matt
2006 1.1 matt /*
2007 1.1 matt * Flush the real page from the instruction cache if this page is
2008 1.1 matt * mapped executable and cacheable and has not been flushed since
2009 1.1 matt * the last time it was modified.
2010 1.1 matt */
2011 1.1 matt if (error == 0 &&
2012 1.1 matt (flags & VM_PROT_EXECUTE) &&
2013 1.1 matt (pte_lo & PTE_I) == 0 &&
2014 1.1 matt was_exec == 0) {
2015 1.1 matt DPRINTFN(ENTER, (" syncicache"));
2016 1.1 matt PMAPCOUNT(exec_synced);
2017 1.6 thorpej pmap_syncicache(pa, PAGE_SIZE);
2018 1.1 matt if (pg != NULL) {
2019 1.1 matt pmap_attr_save(pg, PTE_EXEC);
2020 1.1 matt PMAPCOUNT(exec_cached);
2021 1.1 matt #if defined(DEBUG) || defined(PMAPDEBUG)
2022 1.1 matt if (pmapdebug & PMAPDEBUG_ENTER)
2023 1.1 matt printf(" marked-as-exec");
2024 1.1 matt else if (pmapdebug & PMAPDEBUG_EXEC)
2025 1.1 matt printf("[pmap_enter: %#lx: marked-as-exec]\n",
2026 1.34 yamt VM_PAGE_TO_PHYS(pg));
2027 1.1 matt
2028 1.1 matt #endif
2029 1.1 matt }
2030 1.1 matt }
2031 1.1 matt
2032 1.1 matt DPRINTFN(ENTER, (": error=%d\n", error));
2033 1.1 matt
2034 1.1 matt return error;
2035 1.1 matt }
2036 1.1 matt
2037 1.1 matt void
2038 1.1 matt pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2039 1.1 matt {
2040 1.1 matt struct mem_region *mp;
2041 1.2 matt register_t pte_lo;
2042 1.1 matt int error;
2043 1.1 matt
2044 1.38 sanjayl #if defined (PPC_OEA64_BRIDGE)
2045 1.1 matt if (va < VM_MIN_KERNEL_ADDRESS)
2046 1.1 matt panic("pmap_kenter_pa: attempt to enter "
2047 1.1 matt "non-kernel address %#lx!", va);
2048 1.38 sanjayl #endif
2049 1.1 matt
2050 1.1 matt DPRINTFN(KENTER,
2051 1.1 matt ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
2052 1.1 matt
2053 1.1 matt /*
2054 1.1 matt * Assume the page is cache inhibited and access is guarded unless
2055 1.1 matt * it's in our available memory array. If it is in the memory array,
2056 1.1 matt 	 * assume it's memory coherent.
2057 1.1 matt */
2058 1.1 matt pte_lo = PTE_IG;
2059 1.4 matt if ((prot & PMAP_NC) == 0) {
2060 1.4 matt for (mp = mem; mp->size; mp++) {
2061 1.4 matt if (pa >= mp->start && pa < mp->start + mp->size) {
2062 1.4 matt pte_lo = PTE_M;
2063 1.4 matt break;
2064 1.4 matt }
2065 1.1 matt }
2066 1.1 matt }
2067 1.1 matt
2068 1.1 matt if (prot & VM_PROT_WRITE)
2069 1.1 matt pte_lo |= PTE_BW;
2070 1.1 matt else
2071 1.1 matt pte_lo |= PTE_BR;
2072 1.1 matt
2073 1.1 matt /*
2074 1.1 matt * We don't care about REF/CHG on PVOs on the unmanaged list.
2075 1.1 matt */
2076 1.1 matt error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
2077 1.1 matt &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
2078 1.1 matt
2079 1.1 matt if (error != 0)
2080 1.1 matt panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
2081 1.1 matt va, pa, error);
2082 1.1 matt }
2083 1.1 matt
2084 1.1 matt void
2085 1.1 matt pmap_kremove(vaddr_t va, vsize_t len)
2086 1.1 matt {
2087 1.1 matt if (va < VM_MIN_KERNEL_ADDRESS)
2088 1.1 matt panic("pmap_kremove: attempt to remove "
2089 1.1 matt "non-kernel address %#lx!", va);
2090 1.1 matt
2091 1.1 matt DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len));
2092 1.1 matt pmap_remove(pmap_kernel(), va, va + len);
2093 1.1 matt }
2094 1.1 matt
2095 1.1 matt /*
2096 1.1 matt * Remove the given range of mapping entries.
2097 1.1 matt */
2098 1.1 matt void
2099 1.1 matt pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
2100 1.1 matt {
2101 1.33 chs struct pvo_head pvol;
2102 1.1 matt struct pvo_entry *pvo;
2103 1.2 matt register_t msr;
2104 1.1 matt int pteidx;
2105 1.1 matt
2106 1.33 chs LIST_INIT(&pvol);
2107 1.14 chs msr = pmap_interrupts_off();
2108 1.1 matt for (; va < endva; va += PAGE_SIZE) {
2109 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx);
2110 1.1 matt if (pvo != NULL) {
2111 1.33 chs pmap_pvo_remove(pvo, pteidx, &pvol);
2112 1.1 matt }
2113 1.1 matt }
2114 1.14 chs pmap_interrupts_restore(msr);
2115 1.33 chs pmap_pvo_free_list(&pvol);
2116 1.1 matt }
2117 1.1 matt
2118 1.1 matt /*
2119 1.1 matt * Get the physical page address for the given pmap/virtual address.
2120 1.1 matt */
2121 1.1 matt boolean_t
2122 1.1 matt pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
2123 1.1 matt {
2124 1.1 matt struct pvo_entry *pvo;
2125 1.2 matt register_t msr;
2126 1.7 matt
2127 1.38 sanjayl
2128 1.7 matt /*
2129 1.7 matt * If this is a kernel pmap lookup, also check the battable
2130 1.7 matt * and if we get a hit, translate the VA to a PA using the
2131 1.36 nathanw 	 * BAT entries.  Don't check against VM_MAX_KERNEL_ADDRESS when
2132 1.7 matt 	 * it would wrap back to 0.
2133 1.7 matt */
2134 1.7 matt if (pm == pmap_kernel() &&
2135 1.7 matt (va < VM_MIN_KERNEL_ADDRESS ||
2136 1.7 matt (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
2137 1.8 matt KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
2138 1.38 sanjayl #if defined (PPC_OEA)
2139 1.24 kleink if ((MFPVR() >> 16) != MPC601) {
2140 1.24 kleink register_t batu = battable[va >> ADDR_SR_SHFT].batu;
2141 1.24 kleink if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
2142 1.24 kleink register_t batl =
2143 1.24 kleink battable[va >> ADDR_SR_SHFT].batl;
2144 1.24 kleink register_t mask =
2145 1.24 kleink (~(batu & BAT_BL) << 15) & ~0x1ffffL;
2146 1.29 briggs if (pap)
2147 1.29 briggs *pap = (batl & mask) | (va & ~mask);
2148 1.24 kleink return TRUE;
2149 1.24 kleink }
2150 1.24 kleink } else {
2151 1.24 kleink register_t batu = battable[va >> 23].batu;
2152 1.24 kleink register_t batl = battable[va >> 23].batl;
2153 1.24 kleink register_t sr = iosrtable[va >> ADDR_SR_SHFT];
2154 1.24 kleink if (BAT601_VALID_P(batl) &&
2155 1.24 kleink BAT601_VA_MATCH_P(batu, batl, va)) {
2156 1.24 kleink register_t mask =
2157 1.24 kleink (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
2158 1.29 briggs if (pap)
2159 1.29 briggs *pap = (batl & mask) | (va & ~mask);
2160 1.24 kleink return TRUE;
2161 1.24 kleink } else if (SR601_VALID_P(sr) &&
2162 1.24 kleink SR601_PA_MATCH_P(sr, va)) {
2163 1.29 briggs if (pap)
2164 1.29 briggs *pap = va;
2165 1.24 kleink return TRUE;
2166 1.24 kleink }
2167 1.7 matt }
2168 1.7 matt return FALSE;
2169 1.38 sanjayl #elif defined (PPC_OEA64_BRIDGE)
2170 1.38 sanjayl panic("%s: pm: %s, va: 0x%08lx\n", __FUNCTION__,
2171 1.38 sanjayl (pm == pmap_kernel() ? "kernel" : "user"), va);
2172 1.38 sanjayl #elif defined (PPC_OEA64)
2173 1.38 sanjayl #error PPC_OEA64 not supported
2174 1.38 sanjayl #endif /* PPC_OEA */
2175 1.7 matt }
2176 1.1 matt
2177 1.1 matt msr = pmap_interrupts_off();
2178 1.1 matt pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2179 1.1 matt if (pvo != NULL) {
2180 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2181 1.29 briggs if (pap)
2182 1.29 briggs *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2183 1.29 briggs | (va & ADDR_POFF);
2184 1.1 matt }
2185 1.1 matt pmap_interrupts_restore(msr);
2186 1.1 matt return pvo != NULL;
2187 1.1 matt }
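/*
 * Editorial example (not part of the original source): for a 256MB BAT
 * on a 604-class CPU the BL field of BATU is all ones (0x1ffc, assuming
 * the usual BAT_BL placement), so the mask computed above is
 *
 *	(~0x1ffc << 15) & ~0x1ffffL == 0xf0000000
 *
 * and the translation is pa = (batl & 0xf0000000) | (va & 0x0fffffff).
 */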
2188 1.1 matt
2189 1.1 matt /*
2190 1.1 matt * Lower the protection on the specified range of this pmap.
2191 1.1 matt */
2192 1.1 matt void
2193 1.1 matt pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2194 1.1 matt {
2195 1.1 matt struct pvo_entry *pvo;
2196 1.2 matt volatile struct pte *pt;
2197 1.2 matt register_t msr;
2198 1.1 matt int pteidx;
2199 1.1 matt
2200 1.1 matt /*
2201 1.1 matt * Since this routine only downgrades protection, we should
2202 1.14 chs * always be called with at least one bit not set.
2203 1.1 matt */
2204 1.14 chs KASSERT(prot != VM_PROT_ALL);
2205 1.1 matt
2206 1.1 matt /*
2207 1.1 matt * If there is no protection, this is equivalent to
2208 1.1 matt 	 * removing the range from the pmap.
2209 1.1 matt */
2210 1.1 matt if ((prot & VM_PROT_READ) == 0) {
2211 1.1 matt pmap_remove(pm, va, endva);
2212 1.1 matt return;
2213 1.1 matt }
2214 1.1 matt
2215 1.1 matt msr = pmap_interrupts_off();
2216 1.6 thorpej for (; va < endva; va += PAGE_SIZE) {
2217 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx);
2218 1.1 matt if (pvo == NULL)
2219 1.1 matt continue;
2220 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2221 1.1 matt
2222 1.1 matt /*
2223 1.1 matt * Revoke executable if asked to do so.
2224 1.1 matt */
2225 1.1 matt if ((prot & VM_PROT_EXECUTE) == 0)
2226 1.14 chs pvo_clear_exec(pvo);
2227 1.1 matt
2228 1.1 matt #if 0
2229 1.1 matt /*
2230 1.1 matt * If the page is already read-only, no change
2231 1.1 matt * needs to be made.
2232 1.1 matt */
2233 1.1 matt if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2234 1.1 matt continue;
2235 1.1 matt #endif
2236 1.1 matt /*
2237 1.1 matt * Grab the PTE pointer before we diddle with
2238 1.1 matt * the cached PTE copy.
2239 1.1 matt */
2240 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx);
2241 1.1 matt /*
2242 1.1 matt * Change the protection of the page.
2243 1.1 matt */
2244 1.1 matt pvo->pvo_pte.pte_lo &= ~PTE_PP;
2245 1.1 matt pvo->pvo_pte.pte_lo |= PTE_BR;
2246 1.1 matt
2247 1.1 matt /*
2248 1.1 matt * If the PVO is in the page table, update
2249 1.1 matt 		 * that pte as well.
2250 1.1 matt */
2251 1.1 matt if (pt != NULL) {
2252 1.1 matt pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2253 1.12 matt PVO_WHERE(pvo, PMAP_PROTECT);
2254 1.1 matt PMAPCOUNT(ptes_changed);
2255 1.1 matt }
2256 1.1 matt
2257 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2258 1.1 matt }
2259 1.1 matt pmap_interrupts_restore(msr);
2260 1.1 matt }
2261 1.1 matt
2262 1.1 matt void
2263 1.1 matt pmap_unwire(pmap_t pm, vaddr_t va)
2264 1.1 matt {
2265 1.1 matt struct pvo_entry *pvo;
2266 1.2 matt register_t msr;
2267 1.1 matt
2268 1.1 matt msr = pmap_interrupts_off();
2269 1.1 matt pvo = pmap_pvo_find_va(pm, va, NULL);
2270 1.1 matt if (pvo != NULL) {
2271 1.39 matt if (PVO_WIRED_P(pvo)) {
2272 1.1 matt pvo->pvo_vaddr &= ~PVO_WIRED;
2273 1.1 matt pm->pm_stats.wired_count--;
2274 1.1 matt }
2275 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2276 1.1 matt }
2277 1.1 matt pmap_interrupts_restore(msr);
2278 1.1 matt }
2279 1.1 matt
2280 1.1 matt /*
2281 1.1 matt * Lower the protection on the specified physical page.
2282 1.1 matt */
2283 1.1 matt void
2284 1.1 matt pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2285 1.1 matt {
2286 1.33 chs struct pvo_head *pvo_head, pvol;
2287 1.1 matt struct pvo_entry *pvo, *next_pvo;
2288 1.2 matt volatile struct pte *pt;
2289 1.2 matt register_t msr;
2290 1.1 matt
2291 1.14 chs KASSERT(prot != VM_PROT_ALL);
2292 1.33 chs LIST_INIT(&pvol);
2293 1.1 matt msr = pmap_interrupts_off();
2294 1.1 matt
2295 1.1 matt /*
2296 1.1 matt * When UVM reuses a page, it does a pmap_page_protect with
2297 1.1 matt * VM_PROT_NONE. At that point, we can clear the exec flag
2298 1.1 matt * since we know the page will have different contents.
2299 1.1 matt */
2300 1.1 matt if ((prot & VM_PROT_READ) == 0) {
2301 1.1 matt DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
2302 1.34 yamt VM_PAGE_TO_PHYS(pg)));
2303 1.1 matt if (pmap_attr_fetch(pg) & PTE_EXEC) {
2304 1.1 matt PMAPCOUNT(exec_uncached_page_protect);
2305 1.1 matt pmap_attr_clear(pg, PTE_EXEC);
2306 1.1 matt }
2307 1.1 matt }
2308 1.1 matt
2309 1.1 matt pvo_head = vm_page_to_pvoh(pg);
2310 1.1 matt for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2311 1.1 matt next_pvo = LIST_NEXT(pvo, pvo_vlink);
2312 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2313 1.1 matt
2314 1.1 matt /*
2315 1.1 matt * Downgrading to no mapping at all, we just remove the entry.
2316 1.1 matt */
2317 1.1 matt if ((prot & VM_PROT_READ) == 0) {
2318 1.33 chs pmap_pvo_remove(pvo, -1, &pvol);
2319 1.1 matt continue;
2320 1.1 matt }
2321 1.1 matt
2322 1.1 matt /*
2323 1.1 matt * If EXEC permission is being revoked, just clear the
2324 1.1 matt * flag in the PVO.
2325 1.1 matt */
2326 1.1 matt if ((prot & VM_PROT_EXECUTE) == 0)
2327 1.14 chs pvo_clear_exec(pvo);
2328 1.1 matt
2329 1.1 matt /*
2330 1.1 matt * If this entry is already RO, don't diddle with the
2331 1.1 matt * page table.
2332 1.1 matt */
2333 1.1 matt if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2334 1.1 matt PMAP_PVO_CHECK(pvo);
2335 1.1 matt continue;
2336 1.1 matt }
2337 1.1 matt
2338 1.1 matt /*
2339 1.1 matt 		 * Grab the PTE before we diddle the bits so
2340 1.1 matt * pvo_to_pte can verify the pte contents are as
2341 1.1 matt * expected.
2342 1.1 matt */
2343 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
2344 1.1 matt pvo->pvo_pte.pte_lo &= ~PTE_PP;
2345 1.1 matt pvo->pvo_pte.pte_lo |= PTE_BR;
2346 1.1 matt if (pt != NULL) {
2347 1.1 matt pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2348 1.12 matt PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
2349 1.1 matt PMAPCOUNT(ptes_changed);
2350 1.1 matt }
2351 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2352 1.1 matt }
2353 1.1 matt pmap_interrupts_restore(msr);
2354 1.33 chs pmap_pvo_free_list(&pvol);
2355 1.1 matt }
2356 1.1 matt
2357 1.1 matt /*
2358 1.1 matt * Activate the address space for the specified process. If the process
2359 1.1 matt * is the current process, load the new MMU context.
2360 1.1 matt */
2361 1.1 matt void
2362 1.1 matt pmap_activate(struct lwp *l)
2363 1.1 matt {
2364 1.1 matt struct pcb *pcb = &l->l_addr->u_pcb;
2365 1.1 matt pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2366 1.1 matt
2367 1.1 matt DPRINTFN(ACTIVATE,
2368 1.1 matt ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
2369 1.1 matt
2370 1.1 matt /*
2371 1.1 matt * XXX Normally performed in cpu_fork().
2372 1.1 matt */
2373 1.13 matt pcb->pcb_pm = pmap;
2374 1.17 matt
2375 1.17 matt /*
2376 1.17 matt * In theory, the SR registers need only be valid on return
2377 1.17 matt 	 * to user space, so we could wait and load them there.
2378 1.17 matt */
2379 1.17 matt if (l == curlwp) {
2380 1.17 matt /* Store pointer to new current pmap. */
2381 1.17 matt curpm = pmap;
2382 1.17 matt }
2383 1.1 matt }
2384 1.1 matt
2385 1.1 matt /*
2386 1.1 matt * Deactivate the specified process's address space.
2387 1.1 matt */
2388 1.1 matt void
2389 1.1 matt pmap_deactivate(struct lwp *l)
2390 1.1 matt {
2391 1.1 matt }
2392 1.1 matt
2393 1.1 matt boolean_t
2394 1.1 matt pmap_query_bit(struct vm_page *pg, int ptebit)
2395 1.1 matt {
2396 1.1 matt struct pvo_entry *pvo;
2397 1.2 matt volatile struct pte *pt;
2398 1.2 matt register_t msr;
2399 1.1 matt
2400 1.1 matt if (pmap_attr_fetch(pg) & ptebit)
2401 1.1 matt return TRUE;
2402 1.14 chs
2403 1.1 matt msr = pmap_interrupts_off();
2404 1.1 matt LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2405 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2406 1.1 matt /*
2407 1.1 matt 		 * See if we saved the bit off.  If so, cache it and return
2408 1.1 matt * success.
2409 1.1 matt */
2410 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) {
2411 1.1 matt pmap_attr_save(pg, ptebit);
2412 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2413 1.1 matt pmap_interrupts_restore(msr);
2414 1.1 matt return TRUE;
2415 1.1 matt }
2416 1.1 matt }
2417 1.1 matt /*
2418 1.1 matt * No luck, now go thru the hard part of looking at the ptes
2419 1.1 matt * themselves. Sync so any pending REF/CHG bits are flushed
2420 1.1 matt * to the PTEs.
2421 1.1 matt */
2422 1.1 matt SYNC();
2423 1.1 matt LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2424 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2425 1.1 matt /*
2426 1.1 matt 		 * See if this pvo has a valid PTE.  If so, fetch the
2427 1.1 matt 		 * REF/CHG bits from the valid PTE.  If the appropriate
2428 1.1 matt 		 * ptebit is set, cache it and return success.
2429 1.1 matt */
2430 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
2431 1.1 matt if (pt != NULL) {
2432 1.1 matt pmap_pte_synch(pt, &pvo->pvo_pte);
2433 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) {
2434 1.1 matt pmap_attr_save(pg, ptebit);
2435 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2436 1.1 matt pmap_interrupts_restore(msr);
2437 1.1 matt return TRUE;
2438 1.1 matt }
2439 1.1 matt }
2440 1.1 matt }
2441 1.1 matt pmap_interrupts_restore(msr);
2442 1.1 matt return FALSE;
2443 1.1 matt }
2444 1.1 matt
2445 1.1 matt boolean_t
2446 1.1 matt pmap_clear_bit(struct vm_page *pg, int ptebit)
2447 1.1 matt {
2448 1.1 matt struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2449 1.1 matt struct pvo_entry *pvo;
2450 1.2 matt volatile struct pte *pt;
2451 1.2 matt register_t msr;
2452 1.1 matt int rv = 0;
2453 1.1 matt
2454 1.1 matt msr = pmap_interrupts_off();
2455 1.1 matt
2456 1.1 matt /*
2457 1.1 matt * Fetch the cache value
2458 1.1 matt */
2459 1.1 matt rv |= pmap_attr_fetch(pg);
2460 1.1 matt
2461 1.1 matt /*
2462 1.1 matt * Clear the cached value.
2463 1.1 matt */
2464 1.1 matt pmap_attr_clear(pg, ptebit);
2465 1.1 matt
2466 1.1 matt /*
2467 1.1 matt * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2468 1.1 matt * can reset the right ones). Note that since the pvo entries and
2469 1.1 matt * list heads are accessed via BAT0 and are never placed in the
2470 1.1 matt * page table, we don't have to worry about further accesses setting
2471 1.1 matt * the REF/CHG bits.
2472 1.1 matt */
2473 1.1 matt SYNC();
2474 1.1 matt
2475 1.1 matt /*
2476 1.1 matt 	 * For each pvo entry, clear the ptebit in its cached copy.
2477 1.1 matt 	 * If the pvo has a valid PTE, clear the ptebit there as well.
2478 1.1 matt */
2479 1.1 matt LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2480 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2481 1.1 matt pt = pmap_pvo_to_pte(pvo, -1);
2482 1.1 matt if (pt != NULL) {
2483 1.1 matt /*
2484 1.1 matt * Only sync the PTE if the bit we are looking
2485 1.1 matt * for is not already set.
2486 1.1 matt */
2487 1.1 matt if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2488 1.1 matt pmap_pte_synch(pt, &pvo->pvo_pte);
2489 1.1 matt /*
2490 1.1 matt * If the bit we are looking for was already set,
2491 1.1 matt * clear that bit in the pte.
2492 1.1 matt */
2493 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit)
2494 1.1 matt pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2495 1.1 matt }
2496 1.1 matt rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2497 1.1 matt pvo->pvo_pte.pte_lo &= ~ptebit;
2498 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */
2499 1.1 matt }
2500 1.1 matt pmap_interrupts_restore(msr);
2501 1.14 chs
2502 1.1 matt /*
2503 1.1 matt * If we are clearing the modify bit and this page was marked EXEC
2504 1.1 matt * and the user of the page thinks the page was modified, then we
2505 1.1 matt * need to clean it from the icache if it's mapped or clear the EXEC
2506 1.1 matt * bit if it's not mapped. The page itself might not have the CHG
2507 1.1 matt * bit set if the modification was done via DMA to the page.
2508 1.1 matt */
2509 1.1 matt if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2510 1.1 matt if (LIST_EMPTY(pvoh)) {
2511 1.1 matt DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
2512 1.34 yamt VM_PAGE_TO_PHYS(pg)));
2513 1.1 matt pmap_attr_clear(pg, PTE_EXEC);
2514 1.1 matt PMAPCOUNT(exec_uncached_clear_modify);
2515 1.1 matt } else {
2516 1.1 matt DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
2517 1.34 yamt VM_PAGE_TO_PHYS(pg)));
2518 1.34 yamt pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
2519 1.1 matt PMAPCOUNT(exec_synced_clear_modify);
2520 1.1 matt }
2521 1.1 matt }
2522 1.1 matt return (rv & ptebit) != 0;
2523 1.1 matt }
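/*
 * Editorial sketch (not part of the original source): the MI
 * reference/modify hooks are thin wrappers over the two routines above.
 * The macro names below follow the usual NetBSD convention but are
 * assumed here, not taken from this file.
 */
#if 0
#define	pmap_is_referenced(pg)		pmap_query_bit(pg, PTE_REF)
#define	pmap_is_modified(pg)		pmap_query_bit(pg, PTE_CHG)
#define	pmap_clear_reference(pg)	pmap_clear_bit(pg, PTE_REF)
#define	pmap_clear_modify(pg)		pmap_clear_bit(pg, PTE_CHG)
#endif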
2524 1.1 matt
2525 1.1 matt void
2526 1.1 matt pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2527 1.1 matt {
2528 1.1 matt struct pvo_entry *pvo;
2529 1.1 matt size_t offset = va & ADDR_POFF;
2530 1.1 matt int s;
2531 1.1 matt
2532 1.1 matt s = splvm();
2533 1.1 matt while (len > 0) {
2534 1.6 thorpej size_t seglen = PAGE_SIZE - offset;
2535 1.1 matt if (seglen > len)
2536 1.1 matt seglen = len;
2537 1.1 matt pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
2538 1.39 matt if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
2539 1.1 matt pmap_syncicache(
2540 1.1 matt (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2541 1.1 matt PMAP_PVO_CHECK(pvo);
2542 1.1 matt }
2543 1.1 matt va += seglen;
2544 1.1 matt len -= seglen;
2545 1.1 matt offset = 0;
2546 1.1 matt }
2547 1.1 matt splx(s);
2548 1.1 matt }
2549 1.1 matt
2550 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2551 1.1 matt void
2552 1.2 matt pmap_pte_print(volatile struct pte *pt)
2553 1.1 matt {
2554 1.1 matt printf("PTE %p: ", pt);
2555 1.38 sanjayl
2556 1.38 sanjayl #if defined(PPC_OEA)
2557 1.1 matt /* High word: */
2558 1.2 matt printf("0x%08lx: [", pt->pte_hi);
2559 1.38 sanjayl #elif defined (PPC_OEA64_BRIDGE)
2560 1.38 sanjayl printf("0x%016llx: [", pt->pte_hi);
2561 1.38 sanjayl #else /* PPC_OEA64 */
2562 1.38 sanjayl printf("0x%016lx: [", pt->pte_hi);
2563 1.38 sanjayl #endif /* PPC_OEA */
2564 1.38 sanjayl
2565 1.1 matt printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2566 1.1 matt printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
2567 1.38 sanjayl
2568 1.38 sanjayl #if defined (PPC_OEA)
2569 1.2 matt printf("0x%06lx 0x%02lx",
2570 1.1 matt (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2571 1.1 matt pt->pte_hi & PTE_API);
2572 1.1 matt printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
2573 1.38 sanjayl #elif defined (PPC_OEA64)
2574 1.38 sanjayl printf("0x%06lx 0x%02lx",
2575 1.38 sanjayl (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2576 1.38 sanjayl pt->pte_hi & PTE_API);
2577 1.38 sanjayl printf(" (va 0x%016lx)] ", pmap_pte_to_va(pt));
2578 1.38 sanjayl #else
2579 1.38 sanjayl /* PPC_OEA64_BRIDGE */
2580 1.38 sanjayl printf("0x%06llx 0x%02llx",
2581 1.38 sanjayl (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2582 1.38 sanjayl pt->pte_hi & PTE_API);
2583 1.38 sanjayl printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
2584 1.38 sanjayl #endif /* PPC_OEA */
2585 1.38 sanjayl
2586 1.1 matt /* Low word: */
2587 1.38 sanjayl #if defined (PPC_OEA)
2588 1.2 matt printf(" 0x%08lx: [", pt->pte_lo);
2589 1.2 matt printf("0x%05lx... ", pt->pte_lo >> 12);
2590 1.38 sanjayl #elif defined (PPC_OEA64)
2591 1.38 sanjayl printf(" 0x%016lx: [", pt->pte_lo);
2592 1.38 sanjayl printf("0x%012lx... ", pt->pte_lo >> 12);
2593 1.38 sanjayl #else /* PPC_OEA64_BRIDGE */
2594 1.38 sanjayl printf(" 0x%016llx: [", pt->pte_lo);
2595 1.38 sanjayl printf("0x%012llx... ", pt->pte_lo >> 12);
2596 1.38 sanjayl #endif
2597 1.1 matt printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2598 1.1 matt printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2599 1.1 matt printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2600 1.1 matt printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2601 1.1 matt printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2602 1.1 matt printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2603 1.1 matt switch (pt->pte_lo & PTE_PP) {
2604 1.1 matt case PTE_BR: printf("br]\n"); break;
2605 1.1 matt case PTE_BW: printf("bw]\n"); break;
2606 1.1 matt case PTE_SO: printf("so]\n"); break;
2607 1.1 matt case PTE_SW: printf("sw]\n"); break;
2608 1.1 matt }
2609 1.1 matt }
2610 1.1 matt #endif
2611 1.1 matt
2612 1.1 matt #if defined(DDB)
2613 1.1 matt void
2614 1.1 matt pmap_pteg_check(void)
2615 1.1 matt {
2616 1.2 matt volatile struct pte *pt;
2617 1.1 matt int i;
2618 1.1 matt int ptegidx;
2619 1.1 matt u_int p_valid = 0;
2620 1.1 matt u_int s_valid = 0;
2621 1.1 matt u_int invalid = 0;
2622 1.38 sanjayl
2623 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2624 1.1 matt for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2625 1.1 matt if (pt->pte_hi & PTE_VALID) {
2626 1.1 matt if (pt->pte_hi & PTE_HID)
2627 1.1 matt s_valid++;
2628 1.1 matt 			else
2630 1.1 matt 				p_valid++;
2632 1.1 matt } else
2633 1.1 matt invalid++;
2634 1.1 matt }
2635 1.1 matt }
2636 1.1 matt printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2637 1.1 matt p_valid, p_valid, s_valid, s_valid,
2638 1.1 matt invalid, invalid);
2639 1.1 matt }
2640 1.1 matt
2641 1.1 matt void
2642 1.1 matt pmap_print_mmuregs(void)
2643 1.1 matt {
2644 1.1 matt int i;
2645 1.1 matt u_int cpuvers;
2646 1.18 matt #ifndef PPC_OEA64
2647 1.1 matt vaddr_t addr;
2648 1.2 matt register_t soft_sr[16];
2649 1.18 matt #endif
2650 1.38 sanjayl #if defined (PPC_OEA) && !defined (PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
2651 1.1 matt struct bat soft_ibat[4];
2652 1.1 matt struct bat soft_dbat[4];
2653 1.38 sanjayl #endif
2654 1.2 matt register_t sdr1;
2655 1.1 matt
2656 1.1 matt cpuvers = MFPVR() >> 16;
2657 1.35 perry __asm volatile ("mfsdr1 %0" : "=r"(sdr1));
2658 1.18 matt #ifndef PPC_OEA64
2659 1.16 kleink addr = 0;
2660 1.27 chs for (i = 0; i < 16; i++) {
2661 1.1 matt soft_sr[i] = MFSRIN(addr);
2662 1.1 matt addr += (1 << ADDR_SR_SHFT);
2663 1.1 matt }
2664 1.18 matt #endif
2665 1.1 matt
2666 1.38 sanjayl #if defined(PPC_OEA) && !defined (PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
2667 1.1 matt /* read iBAT (601: uBAT) registers */
2668 1.35 perry __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2669 1.35 perry __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2670 1.35 perry __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2671 1.35 perry __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2672 1.35 perry __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2673 1.35 perry __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2674 1.35 perry __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2675 1.35 perry __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
2676 1.1 matt
2677 1.1 matt
2678 1.1 matt if (cpuvers != MPC601) {
2679 1.1 matt /* read dBAT registers */
2680 1.35 perry __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2681 1.35 perry __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2682 1.35 perry __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2683 1.35 perry __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2684 1.35 perry __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2685 1.35 perry __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2686 1.35 perry __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2687 1.35 perry __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
2688 1.1 matt }
2689 1.38 sanjayl #endif
2690 1.1 matt
2691 1.18 matt printf("SDR1:\t0x%lx\n", (long) sdr1);
2692 1.18 matt #ifndef PPC_OEA64
2693 1.1 matt printf("SR[]:\t");
2694 1.27 chs for (i = 0; i < 4; i++)
2695 1.38 sanjayl printf("0x%08lx, ", (long) soft_sr[i]);
2696 1.1 matt printf("\n\t");
2697 1.27 chs for ( ; i < 8; i++)
2698 1.38 sanjayl printf("0x%08lx, ", (long) soft_sr[i]);
2699 1.1 matt printf("\n\t");
2700 1.27 chs for ( ; i < 12; i++)
2701 1.38 sanjayl printf("0x%08lx, ", (long) soft_sr[i]);
2702 1.1 matt printf("\n\t");
2703 1.27 chs for ( ; i < 16; i++)
2704 1.38 sanjayl printf("0x%08lx, ", (long) soft_sr[i]);
2705 1.1 matt printf("\n");
2706 1.18 matt #endif
2707 1.1 matt
2708 1.38 sanjayl #if defined(PPC_OEA) && !defined (PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
2709 1.1 matt printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
2710 1.27 chs for (i = 0; i < 4; i++) {
2711 1.2 matt printf("0x%08lx 0x%08lx, ",
2712 1.1 matt soft_ibat[i].batu, soft_ibat[i].batl);
2713 1.1 matt if (i == 1)
2714 1.1 matt printf("\n\t");
2715 1.1 matt }
2716 1.1 matt if (cpuvers != MPC601) {
2717 1.1 matt printf("\ndBAT[]:\t");
2718 1.27 chs for (i = 0; i < 4; i++) {
2719 1.2 matt printf("0x%08lx 0x%08lx, ",
2720 1.1 matt soft_dbat[i].batu, soft_dbat[i].batl);
2721 1.1 matt if (i == 1)
2722 1.1 matt printf("\n\t");
2723 1.1 matt }
2724 1.1 matt }
2725 1.1 matt printf("\n");
2726 1.38 sanjayl #endif /* PPC_OEA... */
2727 1.1 matt }
2728 1.1 matt
2729 1.1 matt void
2730 1.1 matt pmap_print_pte(pmap_t pm, vaddr_t va)
2731 1.1 matt {
2732 1.1 matt struct pvo_entry *pvo;
2733 1.2 matt volatile struct pte *pt;
2734 1.1 matt int pteidx;
2735 1.1 matt
2736 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx);
2737 1.1 matt if (pvo != NULL) {
2738 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx);
2739 1.1 matt if (pt != NULL) {
2740 1.38 sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64)
2741 1.2 matt printf("VA %#lx -> %p -> %s %#lx, %#lx\n",
2742 1.1 matt va, pt,
2743 1.1 matt pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2744 1.1 matt pt->pte_hi, pt->pte_lo);
2745 1.38 sanjayl #else /* PPC_OEA64_BRIDGE */
2746 1.38 sanjayl printf("VA %#lx -> %p -> %s %#llx, %#llx\n",
2747 1.38 sanjayl va, pt,
2748 1.38 sanjayl pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2749 1.38 sanjayl pt->pte_hi, pt->pte_lo);
2750 1.38 sanjayl #endif
2751 1.1 matt } else {
2752 1.1 matt printf("No valid PTE found\n");
2753 1.1 matt }
2754 1.1 matt } else {
2755 1.1 matt printf("Address not in pmap\n");
2756 1.1 matt }
2757 1.1 matt }
2758 1.1 matt
2759 1.1 matt void
2760 1.1 matt pmap_pteg_dist(void)
2761 1.1 matt {
2762 1.1 matt struct pvo_entry *pvo;
2763 1.1 matt int ptegidx;
2764 1.1 matt int depth;
2765 1.1 matt int max_depth = 0;
2766 1.1 matt unsigned int depths[64];
2767 1.1 matt
2768 1.1 matt memset(depths, 0, sizeof(depths));
2769 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2770 1.1 matt depth = 0;
2771 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2772 1.1 matt depth++;
2773 1.1 matt }
2774 1.1 matt if (depth > max_depth)
2775 1.1 matt max_depth = depth;
2776 1.1 matt if (depth > 63)
2777 1.1 matt depth = 63;
2778 1.1 matt depths[depth]++;
2779 1.1 matt }
2780 1.1 matt
2781 1.1 matt for (depth = 0; depth < 64; depth++) {
2782 1.1 matt printf(" [%2d]: %8u", depth, depths[depth]);
2783 1.1 matt if ((depth & 3) == 3)
2784 1.1 matt printf("\n");
2785 1.1 matt if (depth == max_depth)
2786 1.1 matt break;
2787 1.1 matt }
2788 1.1 matt if ((depth & 3) != 3)
2789 1.1 matt printf("\n");
2790 1.1 matt printf("Max depth found was %d\n", max_depth);
2791 1.1 matt }
2792 1.1 matt #endif /* DEBUG */
2793 1.1 matt
2794 1.1 matt #if defined(PMAPCHECK) || defined(DEBUG)
2795 1.1 matt void
2796 1.1 matt pmap_pvo_verify(void)
2797 1.1 matt {
2798 1.1 matt int ptegidx;
2799 1.1 matt int s;
2800 1.1 matt
2801 1.1 matt s = splvm();
2802 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2803 1.1 matt struct pvo_entry *pvo;
2804 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2805 1.1 matt if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2806 1.1 matt panic("pmap_pvo_verify: invalid pvo %p "
2807 1.1 matt "on list %#x", pvo, ptegidx);
2808 1.1 matt pmap_pvo_check(pvo);
2809 1.1 matt }
2810 1.1 matt }
2811 1.1 matt splx(s);
2812 1.1 matt }
2813 1.1 matt #endif /* PMAPCHECK */
2814 1.1 matt
2815 1.1 matt
2816 1.1 matt void *
2817 1.1 matt pmap_pool_ualloc(struct pool *pp, int flags)
2818 1.1 matt {
2819 1.1 matt struct pvo_page *pvop;
2820 1.1 matt
2821 1.1 matt pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2822 1.1 matt if (pvop != NULL) {
2823 1.1 matt pmap_upvop_free--;
2824 1.1 matt SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
2825 1.1 matt return pvop;
2826 1.1 matt }
2827 1.1 matt if (uvm.page_init_done != TRUE) {
2828 1.1 matt return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2829 1.1 matt }
2830 1.1 matt return pmap_pool_malloc(pp, flags);
2831 1.1 matt }
2832 1.1 matt
2833 1.1 matt void *
2834 1.1 matt pmap_pool_malloc(struct pool *pp, int flags)
2835 1.1 matt {
2836 1.1 matt struct pvo_page *pvop;
2837 1.1 matt struct vm_page *pg;
2838 1.1 matt
2839 1.1 matt pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2840 1.1 matt if (pvop != NULL) {
2841 1.1 matt pmap_mpvop_free--;
2842 1.1 matt SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
2843 1.1 matt return pvop;
2844 1.1 matt }
2845 1.1 matt again:
2846 1.1 matt pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2847 1.1 matt UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2848 1.1 matt if (__predict_false(pg == NULL)) {
2849 1.1 matt if (flags & PR_WAITOK) {
2850 1.1 matt uvm_wait("plpg");
2851 1.1 matt goto again;
2852 1.1 matt } else {
2853 1.1 matt return (0);
2854 1.1 matt }
2855 1.1 matt }
2856 1.1 matt return (void *) VM_PAGE_TO_PHYS(pg);
2857 1.1 matt }
2858 1.1 matt
2859 1.1 matt void
2860 1.1 matt pmap_pool_ufree(struct pool *pp, void *va)
2861 1.1 matt {
2862 1.1 matt struct pvo_page *pvop;
2863 1.1 matt #if 0
2864 1.1 matt if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2865 1.1 matt pmap_pool_mfree(va, size, tag);
2866 1.1 matt return;
2867 1.1 matt }
2868 1.1 matt #endif
2869 1.1 matt pvop = va;
2870 1.1 matt SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2871 1.1 matt pmap_upvop_free++;
2872 1.1 matt if (pmap_upvop_free > pmap_upvop_maxfree)
2873 1.1 matt pmap_upvop_maxfree = pmap_upvop_free;
2874 1.1 matt }
2875 1.1 matt
2876 1.1 matt void
2877 1.1 matt pmap_pool_mfree(struct pool *pp, void *va)
2878 1.1 matt {
2879 1.1 matt struct pvo_page *pvop;
2880 1.1 matt
2881 1.1 matt pvop = va;
2882 1.1 matt SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2883 1.1 matt pmap_mpvop_free++;
2884 1.1 matt if (pmap_mpvop_free > pmap_mpvop_maxfree)
2885 1.1 matt pmap_mpvop_maxfree = pmap_mpvop_free;
2886 1.1 matt #if 0
2887 1.1 matt uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2888 1.1 matt #endif
2889 1.1 matt }
2890 1.1 matt
2891 1.1 matt /*
2892 1.1 matt  * This routine is used while bootstrapping to steal to-be-managed memory (which will
2893 1.1 matt * then be unmanaged). We use it to grab from the first 256MB for our
2894 1.1 matt * pmap needs and above 256MB for other stuff.
2895 1.1 matt */
2896 1.1 matt vaddr_t
2897 1.10 thorpej pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2898 1.1 matt {
2899 1.1 matt vsize_t size;
2900 1.1 matt vaddr_t va;
2901 1.1 matt paddr_t pa = 0;
2902 1.1 matt int npgs, bank;
2903 1.1 matt struct vm_physseg *ps;
2904 1.1 matt
2905 1.1 matt if (uvm.page_init_done == TRUE)
2906 1.1 matt panic("pmap_steal_memory: called _after_ bootstrap");
2907 1.1 matt
2908 1.10 thorpej *vstartp = VM_MIN_KERNEL_ADDRESS;
2909 1.10 thorpej *vendp = VM_MAX_KERNEL_ADDRESS;
2910 1.10 thorpej
2911 1.1 matt size = round_page(vsize);
2912 1.1 matt npgs = atop(size);
2913 1.1 matt
2914 1.1 matt /*
2915 1.1 matt * PA 0 will never be among those given to UVM so we can use it
2916 1.1 matt * to indicate we couldn't steal any memory.
2917 1.1 matt */
2918 1.1 matt for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
2919 1.1 matt if (ps->free_list == VM_FREELIST_FIRST256 &&
2920 1.1 matt ps->avail_end - ps->avail_start >= npgs) {
2921 1.1 matt pa = ptoa(ps->avail_start);
2922 1.1 matt break;
2923 1.1 matt }
2924 1.1 matt }
2925 1.1 matt
2926 1.1 matt if (pa == 0)
2927 1.1 matt 		panic("pmap_steal_memory: no appropriate memory to steal!");
2928 1.1 matt
2929 1.1 matt ps->avail_start += npgs;
2930 1.1 matt ps->start += npgs;
2931 1.1 matt
2932 1.1 matt /*
2933 1.1 matt * If we've used up all the pages in the segment, remove it and
2934 1.1 matt * compact the list.
2935 1.1 matt */
2936 1.1 matt if (ps->avail_start == ps->end) {
2937 1.1 matt /*
2938 1.1 matt * If this was the last one, then a very bad thing has occurred
2939 1.1 matt */
2940 1.1 matt if (--vm_nphysseg == 0)
2941 1.1 matt panic("pmap_steal_memory: out of memory!");
2942 1.1 matt
2943 1.1 matt printf("pmap_steal_memory: consumed bank %d\n", bank);
2944 1.1 matt for (; bank < vm_nphysseg; bank++, ps++) {
2945 1.1 matt ps[0] = ps[1];
2946 1.1 matt }
2947 1.1 matt }
2948 1.1 matt
2949 1.1 matt va = (vaddr_t) pa;
2950 1.1 matt memset((caddr_t) va, 0, size);
2951 1.1 matt pmap_pages_stolen += npgs;
2952 1.1 matt #ifdef DEBUG
2953 1.1 matt if (pmapdebug && npgs > 1) {
2954 1.1 matt u_int cnt = 0;
2955 1.1 matt for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
2956 1.1 matt cnt += ps->avail_end - ps->avail_start;
2957 1.1 matt printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2958 1.1 matt npgs, pmap_pages_stolen, cnt);
2959 1.1 matt }
2960 1.1 matt #endif
2961 1.1 matt
2962 1.1 matt return va;
2963 1.1 matt }
2964 1.1 matt
2965 1.1 matt /*
2966 1.1 matt  * Find a chunk of memory with the right size and alignment.
2967 1.1 matt */
2968 1.1 matt void *
2969 1.1 matt pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2970 1.1 matt {
2971 1.1 matt struct mem_region *mp;
2972 1.1 matt paddr_t s, e;
2973 1.1 matt int i, j;
2974 1.1 matt
2975 1.1 matt size = round_page(size);
2976 1.1 matt
2977 1.1 matt DPRINTFN(BOOT,
2978 1.1 matt ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
2979 1.1 matt size, alignment, at_end));
2980 1.1 matt
2981 1.6 thorpej if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2982 1.1 matt panic("pmap_boot_find_memory: invalid alignment %lx",
2983 1.1 matt alignment);
2984 1.1 matt
2985 1.1 matt if (at_end) {
2986 1.6 thorpej if (alignment != PAGE_SIZE)
2987 1.1 matt panic("pmap_boot_find_memory: invalid ending "
2988 1.1 matt "alignment %lx", alignment);
2989 1.1 matt
2990 1.1 matt for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
2991 1.1 matt s = mp->start + mp->size - size;
2992 1.1 matt if (s >= mp->start && mp->size >= size) {
2993 1.1 matt DPRINTFN(BOOT,(": %lx\n", s));
2994 1.1 matt DPRINTFN(BOOT,
2995 1.1 matt ("pmap_boot_find_memory: b-avail[%d] start "
2996 1.1 matt "0x%lx size 0x%lx\n", mp - avail,
2997 1.1 matt mp->start, mp->size));
2998 1.1 matt mp->size -= size;
2999 1.1 matt DPRINTFN(BOOT,
3000 1.1 matt ("pmap_boot_find_memory: a-avail[%d] start "
3001 1.1 matt "0x%lx size 0x%lx\n", mp - avail,
3002 1.1 matt mp->start, mp->size));
3003 1.1 matt return (void *) s;
3004 1.1 matt }
3005 1.1 matt }
3006 1.1 matt panic("pmap_boot_find_memory: no available memory");
3007 1.1 matt }
3008 1.1 matt
3009 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3010 1.1 matt s = (mp->start + alignment - 1) & ~(alignment-1);
3011 1.1 matt e = s + size;
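		/*
		 * The mask trick above rounds s up to the next multiple
		 * of alignment (a power of two); e.g. with mp->start
		 * 0x12345 and alignment 0x1000, (0x12345 + 0xfff) &
		 * ~0xfff == 0x13000.
		 */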
3012 1.1 matt
3013 1.1 matt /*
3014 1.1 matt 		 * Does the calculated block fit entirely within this region?
3015 1.1 matt */
3016 1.1 matt if (s < mp->start || e > mp->start + mp->size)
3017 1.1 matt continue;
3018 1.1 matt
3019 1.1 matt DPRINTFN(BOOT,(": %lx\n", s));
3020 1.1 matt if (s == mp->start) {
3021 1.1 matt /*
3022 1.1 matt 			 * If the block starts at the beginning of the region,
3023 1.1 matt * adjust the size & start. (the region may now be
3024 1.1 matt * zero in length)
3025 1.1 matt */
3026 1.1 matt DPRINTFN(BOOT,
3027 1.1 matt ("pmap_boot_find_memory: b-avail[%d] start "
3028 1.1 matt "0x%lx size 0x%lx\n", i, mp->start, mp->size));
3029 1.1 matt mp->start += size;
3030 1.1 matt mp->size -= size;
3031 1.1 matt DPRINTFN(BOOT,
3032 1.1 matt ("pmap_boot_find_memory: a-avail[%d] start "
3033 1.1 matt "0x%lx size 0x%lx\n", i, mp->start, mp->size));
3034 1.1 matt } else if (e == mp->start + mp->size) {
3035 1.1 matt /*
3036 1.1 matt 			 * If the block ends at the end of the region,
3037 1.1 matt 			 * adjust only the size.
3038 1.1 matt */
3039 1.1 matt DPRINTFN(BOOT,
3040 1.1 matt ("pmap_boot_find_memory: b-avail[%d] start "
3041 1.1 matt "0x%lx size 0x%lx\n", i, mp->start, mp->size));
3042 1.1 matt mp->size -= size;
3043 1.1 matt DPRINTFN(BOOT,
3044 1.1 matt ("pmap_boot_find_memory: a-avail[%d] start "
3045 1.1 matt "0x%lx size 0x%lx\n", i, mp->start, mp->size));
3046 1.1 matt } else {
3047 1.1 matt /*
3048 1.1 matt * Block is in the middle of the region, so we
3049 1.1 matt * have to split it in two.
3050 1.1 matt */
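			/*
			 * For example (hypothetical numbers): carving
			 * s=0x200000..e=0x300000 out of a region spanning
			 * 0x100000..0x500000 leaves 0x100000..0x200000 in
			 * avail[i] and 0x300000..0x500000 in avail[i+1].
			 */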
3051 1.1 matt for (j = avail_cnt; j > i + 1; j--) {
3052 1.1 matt avail[j] = avail[j-1];
3053 1.1 matt }
3054 1.1 matt DPRINTFN(BOOT,
3055 1.1 matt ("pmap_boot_find_memory: b-avail[%d] start "
3056 1.1 matt "0x%lx size 0x%lx\n", i, mp->start, mp->size));
3057 1.1 matt mp[1].start = e;
3058 1.1 matt mp[1].size = mp[0].start + mp[0].size - e;
3059 1.1 matt mp[0].size = s - mp[0].start;
3060 1.1 matt avail_cnt++;
3061 1.1 matt for (; i < avail_cnt; i++) {
3062 1.1 matt DPRINTFN(BOOT,
3063 1.1 matt ("pmap_boot_find_memory: a-avail[%d] "
3064 1.1 matt "start 0x%lx size 0x%lx\n", i,
3065 1.1 matt avail[i].start, avail[i].size));
3066 1.1 matt }
3067 1.1 matt }
3068 1.1 matt return (void *) s;
3069 1.1 matt }
3070 1.1 matt panic("pmap_boot_find_memory: not enough memory for "
3071 1.1 matt "%lx/%lx allocation?", size, alignment);
3072 1.1 matt }
3073 1.1 matt
3074 1.38 sanjayl /* XXXSL: we don't have any BATs to do this, so map segment 0 1:1 using the page tables */
3075 1.38 sanjayl #if defined (PPC_OEA64_BRIDGE)
3076 1.38 sanjayl int
3077 1.38 sanjayl pmap_setup_segment0_map(int use_large_pages, ...)
3078 1.38 sanjayl {
3079 1.38 sanjayl vaddr_t va;
3080 1.38 sanjayl
3081 1.38 sanjayl register_t pte_lo = 0x0;
3082 1.38 sanjayl int ptegidx = 0, i = 0;
3083 1.38 sanjayl struct pte pte;
3084 1.38 sanjayl va_list ap;
3085 1.38 sanjayl
3086 1.38 sanjayl /* Coherent + Supervisor RW, no user access */
3087 1.38 sanjayl pte_lo = PTE_M;
3088 1.38 sanjayl
3089 1.38 sanjayl /* XXXSL
3090 1.38 sanjayl  * Map the first segment 1:1.  We must be careful not to spill kernel
3091 1.38 sanjayl  * entries later; those have to take priority.
3092 1.38 sanjayl */
3093 1.38 sanjayl for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3094 1.38 sanjayl ptegidx = va_to_pteg(pmap_kernel(), va);
3095 1.38 sanjayl pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3096 1.38 sanjayl i = pmap_pte_insert(ptegidx, &pte);
3097 1.38 sanjayl }
3098 1.38 sanjayl
3099 1.38 sanjayl va_start(ap, use_large_pages);
3100 1.38 sanjayl while (1) {
3101 1.38 sanjayl paddr_t pa;
3102 1.38 sanjayl 		size_t size;
		vaddr_t va_end;
3103 1.38 sanjayl
3104 1.38 sanjayl va = va_arg(ap, vaddr_t);
3105 1.38 sanjayl
3106 1.38 sanjayl if (va == 0)
3107 1.38 sanjayl break;
3108 1.38 sanjayl
3109 1.38 sanjayl pa = va_arg(ap, paddr_t);
3110 1.38 sanjayl size = va_arg(ap, size_t);
3111 1.38 sanjayl
3112 1.38 sanjayl 		for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
3113 1.38 sanjayl #if 0
3114 1.38 sanjayl printf("%s: Inserting: va: 0x%08lx, pa: 0x%08lx\n", __FUNCTION__, va, pa);
3115 1.38 sanjayl #endif
3116 1.38 sanjayl ptegidx = va_to_pteg(pmap_kernel(), va);
3117 1.38 sanjayl pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3118 1.38 sanjayl i = pmap_pte_insert(ptegidx, &pte);
3119 1.38 sanjayl }
3120 1.38 sanjayl }
3121 1.38 sanjayl
3122 1.38 sanjayl TLBSYNC();
3123 1.38 sanjayl SYNC();
3124 1.38 sanjayl return (0);
3125 1.38 sanjayl }
3126 1.38 sanjayl #endif /* PPC_OEA64_BRIDGE */
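/*
 * A sketch of the calling convention above (addresses hypothetical):
 * after the implicit 1:1 map of segment 0, each (va, pa, size) triple
 * is entered into the page table page by page, and a va of 0
 * terminates the list:
 *
 *	pmap_setup_segment0_map(0,
 *	    (vaddr_t)0xe0000000, (paddr_t)0x80000000, (size_t)0x1000,
 *	    (vaddr_t)0);
 *
 * Note that use_large_pages is accepted but not acted upon here.
 */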
3127 1.38 sanjayl
3128 1.1 matt /*
3129 1.1 matt * This is not part of the defined PMAP interface and is specific to the
3130 1.1 matt * PowerPC architecture. This is called during initppc, before the system
3131 1.1 matt * is really initialized.
3132 1.1 matt */
3133 1.1 matt void
3134 1.1 matt pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
3135 1.1 matt {
3136 1.1 matt struct mem_region *mp, tmp;
3137 1.1 matt paddr_t s, e;
3138 1.1 matt psize_t size;
3139 1.1 matt int i, j;
3140 1.1 matt
3141 1.1 matt /*
3142 1.1 matt * Get memory.
3143 1.1 matt */
3144 1.1 matt mem_regions(&mem, &avail);
3145 1.1 matt #if defined(DEBUG)
3146 1.1 matt if (pmapdebug & PMAPDEBUG_BOOT) {
3147 1.1 matt printf("pmap_bootstrap: memory configuration:\n");
3148 1.1 matt for (mp = mem; mp->size; mp++) {
3149 1.1 matt printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
3150 1.1 matt mp->start, mp->size);
3151 1.1 matt }
3152 1.1 matt for (mp = avail; mp->size; mp++) {
3153 1.1 matt printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
3154 1.1 matt mp->start, mp->size);
3155 1.1 matt }
3156 1.1 matt }
3157 1.1 matt #endif
3158 1.1 matt
3159 1.1 matt /*
3160 1.1 matt * Find out how much physical memory we have and in how many chunks.
3161 1.1 matt */
3162 1.1 matt for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3163 1.1 matt if (mp->start >= pmap_memlimit)
3164 1.1 matt continue;
3165 1.1 matt if (mp->start + mp->size > pmap_memlimit) {
3166 1.1 matt size = pmap_memlimit - mp->start;
3167 1.1 matt physmem += btoc(size);
3168 1.1 matt } else {
3169 1.1 matt physmem += btoc(mp->size);
3170 1.1 matt }
3171 1.1 matt mem_cnt++;
3172 1.1 matt }
3173 1.1 matt
3174 1.1 matt /*
3175 1.1 matt * Count the number of available entries.
3176 1.1 matt */
3177 1.1 matt for (avail_cnt = 0, mp = avail; mp->size; mp++)
3178 1.1 matt avail_cnt++;
3179 1.1 matt
3180 1.1 matt /*
3181 1.1 matt * Page align all regions.
3182 1.1 matt */
3183 1.1 matt kernelstart = trunc_page(kernelstart);
3184 1.1 matt kernelend = round_page(kernelend);
3185 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3186 1.1 matt s = round_page(mp->start);
3187 1.1 matt mp->size -= (s - mp->start);
3188 1.1 matt mp->size = trunc_page(mp->size);
3189 1.1 matt mp->start = s;
3190 1.1 matt e = mp->start + mp->size;
3191 1.1 matt
3192 1.1 matt DPRINTFN(BOOT,
3193 1.1 matt ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
3194 1.1 matt i, mp->start, mp->size));
3195 1.1 matt
3196 1.1 matt /*
3197 1.1 matt * Don't allow the end to run beyond our artificial limit
3198 1.1 matt */
3199 1.1 matt if (e > pmap_memlimit)
3200 1.1 matt e = pmap_memlimit;
3201 1.1 matt
3202 1.1 matt /*
3203 1.1 matt 		 * Is this region empty or strange?  Skip it.
3204 1.1 matt */
3205 1.1 matt if (e <= s) {
3206 1.1 matt mp->start = 0;
3207 1.1 matt mp->size = 0;
3208 1.1 matt continue;
3209 1.1 matt }
3210 1.1 matt
3211 1.1 matt /*
3212 1.1 matt 		 * Does this region overlap the beginning of the kernel?
3213 1.1 matt 		 * Does it extend past the end of the kernel?
3214 1.1 matt */
3215 1.1 matt else if (s < kernelstart && e > kernelstart) {
3216 1.1 matt if (e > kernelend) {
3217 1.1 matt avail[avail_cnt].start = kernelend;
3218 1.1 matt avail[avail_cnt].size = e - kernelend;
3219 1.1 matt avail_cnt++;
3220 1.1 matt }
3221 1.1 matt mp->size = kernelstart - s;
3222 1.1 matt }
3223 1.1 matt /*
3224 1.1 matt * Check whether this region overlaps the end of the kernel.
3225 1.1 matt */
3226 1.1 matt else if (s < kernelend && e > kernelend) {
3227 1.1 matt mp->start = kernelend;
3228 1.1 matt mp->size = e - kernelend;
3229 1.1 matt }
3230 1.1 matt /*
3231 1.1 matt 		 * Check whether this region is completely inside the kernel.
3232 1.1 matt 		 * Nuke it if it is.
3233 1.1 matt */
3234 1.1 matt else if (s >= kernelstart && e <= kernelend) {
3235 1.1 matt mp->start = 0;
3236 1.1 matt mp->size = 0;
3237 1.1 matt }
3238 1.1 matt /*
3239 1.1 matt * If the user imposed a memory limit, enforce it.
3240 1.1 matt */
3241 1.1 matt else if (s >= pmap_memlimit) {
3242 1.6 thorpej 			mp->start = -PAGE_SIZE;	/* mark it so we know why */
3243 1.1 matt mp->size = 0;
3244 1.1 matt }
3245 1.1 matt else {
3246 1.1 matt mp->start = s;
3247 1.1 matt mp->size = e - s;
3248 1.1 matt }
3249 1.1 matt DPRINTFN(BOOT,
3250 1.1 matt ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
3251 1.1 matt i, mp->start, mp->size));
3252 1.1 matt }
3253 1.1 matt
3254 1.1 matt /*
3255 1.1 matt 	 * Move (and uncount) all the null regions to the end.
3256 1.1 matt */
3257 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3258 1.1 matt if (mp->size == 0) {
3259 1.1 matt 			tmp = avail[i];
3260 1.1 matt 			avail[i] = avail[--avail_cnt];
3261 1.1 matt 			avail[avail_cnt] = tmp;
			i--, mp--;	/* recheck the entry just moved here */
3262 1.1 matt }
3263 1.1 matt }
3264 1.1 matt
3265 1.1 matt /*
3266 1.1 matt 	 * (Bubble)sort them into ascending order.
3267 1.1 matt */
3268 1.1 matt for (i = 0; i < avail_cnt; i++) {
3269 1.1 matt for (j = i + 1; j < avail_cnt; j++) {
3270 1.1 matt if (avail[i].start > avail[j].start) {
3271 1.1 matt tmp = avail[i];
3272 1.1 matt avail[i] = avail[j];
3273 1.1 matt avail[j] = tmp;
3274 1.1 matt }
3275 1.1 matt }
3276 1.1 matt }
3277 1.1 matt
3278 1.1 matt /*
3279 1.1 matt * Make sure they don't overlap.
3280 1.1 matt */
3281 1.1 matt for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
3282 1.1 matt if (mp[0].start + mp[0].size > mp[1].start) {
3283 1.1 matt mp[0].size = mp[1].start - mp[0].start;
3284 1.1 matt }
3285 1.1 matt DPRINTFN(BOOT,
3286 1.1 matt ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
3287 1.1 matt i, mp->start, mp->size));
3288 1.1 matt }
3289 1.1 matt DPRINTFN(BOOT,
3290 1.1 matt ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
3291 1.1 matt i, mp->start, mp->size));
3292 1.1 matt
3293 1.1 matt #ifdef PTEGCOUNT
3294 1.1 matt pmap_pteg_cnt = PTEGCOUNT;
3295 1.1 matt #else /* PTEGCOUNT */
3296 1.38 sanjayl
3297 1.1 matt pmap_pteg_cnt = 0x1000;
3298 1.1 matt
3299 1.1 matt while (pmap_pteg_cnt < physmem)
3300 1.1 matt pmap_pteg_cnt <<= 1;
3301 1.1 matt
3302 1.1 matt pmap_pteg_cnt >>= 1;
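	/*
	 * This sizes the hash table to roughly one PTEG per physical
	 * page: pmap_pteg_cnt ends up as a power of two no larger than
	 * physmem (but never below 0x800).  E.g. physmem == 0x9000
	 * pages grows the count 0x1000 -> 0x10000, and the final shift
	 * settles on 0x8000 PTEGs.
	 */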
3303 1.1 matt #endif /* PTEGCOUNT */
3304 1.1 matt
3305 1.38 sanjayl #ifdef DEBUG
3306 1.38 sanjayl DPRINTFN(BOOT,
3307 1.38 sanjayl ("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt));
3308 1.38 sanjayl #endif
3309 1.38 sanjayl
3310 1.1 matt /*
3311 1.1 matt * Find suitably aligned memory for PTEG hash table.
3312 1.1 matt */
3313 1.2 matt size = pmap_pteg_cnt * sizeof(struct pteg);
3314 1.1 matt pmap_pteg_table = pmap_boot_find_memory(size, size, 0);
3315 1.38 sanjayl
3316 1.38 sanjayl #ifdef DEBUG
3317 1.38 sanjayl DPRINTFN(BOOT,
3318 1.38 sanjayl 	    ("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n",
	    pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table));
3319 1.38 sanjayl #endif
3320 1.38 sanjayl
3321 1.38 sanjayl
3322 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3323 1.1 matt if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
3324 1.1 matt panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB",
3325 1.1 matt pmap_pteg_table, size);
3326 1.1 matt #endif
3327 1.1 matt
3328 1.32 he memset(__UNVOLATILE(pmap_pteg_table), 0,
3329 1.32 he pmap_pteg_cnt * sizeof(struct pteg));
3330 1.1 matt pmap_pteg_mask = pmap_pteg_cnt - 1;
3331 1.1 matt
3332 1.1 matt /*
3333 1.1 matt * We cannot do pmap_steal_memory here since UVM hasn't been loaded
3334 1.1 matt * with pages. So we just steal them before giving them to UVM.
3335 1.1 matt */
3336 1.1 matt size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
3337 1.6 thorpej pmap_pvo_table = pmap_boot_find_memory(size, PAGE_SIZE, 0);
3338 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3339 1.1 matt if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
3340 1.1 matt panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB",
3341 1.1 matt pmap_pvo_table, size);
3342 1.1 matt #endif
3343 1.1 matt
3344 1.1 matt for (i = 0; i < pmap_pteg_cnt; i++)
3345 1.1 matt TAILQ_INIT(&pmap_pvo_table[i]);
3346 1.1 matt
3347 1.1 matt #ifndef MSGBUFADDR
3348 1.1 matt /*
3349 1.1 matt * Allocate msgbuf in high memory.
3350 1.1 matt */
3351 1.6 thorpej msgbuf_paddr =
3352 1.6 thorpej (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
3353 1.1 matt #endif
3354 1.1 matt
3355 1.1 matt #ifdef __HAVE_PMAP_PHYSSEG
3356 1.1 matt {
3357 1.1 matt u_int npgs = 0;
3358 1.1 matt for (i = 0, mp = avail; i < avail_cnt; i++, mp++)
3359 1.1 matt npgs += btoc(mp->size);
3360 1.1 matt size = (sizeof(struct pvo_head) + 1) * npgs;
3361 1.6 thorpej pmap_physseg.pvoh = pmap_boot_find_memory(size, PAGE_SIZE, 0);
3362 1.1 matt pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs];
3363 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3364 1.1 matt if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH)
3365 1.1 matt panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB",
3366 1.1 matt pmap_physseg.pvoh, size);
3367 1.1 matt #endif
3368 1.1 matt }
3369 1.1 matt #endif
3370 1.1 matt
3371 1.38 sanjayl
3372 1.1 matt for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
3373 1.1 matt paddr_t pfstart = atop(mp->start);
3374 1.1 matt paddr_t pfend = atop(mp->start + mp->size);
3375 1.1 matt if (mp->size == 0)
3376 1.1 matt continue;
3377 1.1 matt if (mp->start + mp->size <= SEGMENT_LENGTH) {
3378 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3379 1.1 matt VM_FREELIST_FIRST256);
3380 1.1 matt } else if (mp->start >= SEGMENT_LENGTH) {
3381 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3382 1.1 matt VM_FREELIST_DEFAULT);
3383 1.1 matt } else {
3384 1.1 matt pfend = atop(SEGMENT_LENGTH);
3385 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3386 1.1 matt VM_FREELIST_FIRST256);
3387 1.1 matt pfstart = atop(SEGMENT_LENGTH);
3388 1.1 matt pfend = atop(mp->start + mp->size);
3389 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend,
3390 1.1 matt VM_FREELIST_DEFAULT);
3391 1.1 matt }
3392 1.1 matt }
3393 1.1 matt
3394 1.1 matt /*
3395 1.1 matt 	 * Make sure the kernel VSID is allocated, as well as VSID 0.
3396 1.1 matt */
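	/*
	 * The bitmap holds one bit per VSID: VSID n maps to bit
	 * n % VSID_NBPW of word n / VSID_NBPW.  Marking these bits
	 * keeps the VSID allocator from ever handing out the kernel's
	 * VSID or VSID 0.
	 */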
3397 1.1 matt pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3398 1.1 matt |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
3399 1.1 matt pmap_vsid_bitmap[0] |= 1;
3400 1.1 matt
3401 1.1 matt /*
3402 1.1 matt * Initialize kernel pmap and hardware.
3403 1.1 matt */
3404 1.38 sanjayl
3405 1.38 sanjayl /* PPC_OEA64_BRIDGE also supports the segment register instructions */
3406 1.38 sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
3407 1.1 matt for (i = 0; i < 16; i++) {
3408 1.38 sanjayl pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
3409 1.35 perry __asm volatile ("mtsrin %0,%1"
3410 1.38 sanjayl :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
3411 1.1 matt }
3412 1.1 matt
3413 1.1 matt pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
3414 1.35 perry __asm volatile ("mtsr %0,%1"
3415 1.1 matt :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
3416 1.1 matt #ifdef KERNEL2_SR
3417 1.1 matt pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
3418 1.35 perry __asm volatile ("mtsr %0,%1"
3419 1.1 matt :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
3420 1.1 matt #endif
3421 1.1 matt for (i = 0; i < 16; i++) {
3422 1.1 matt if (iosrtable[i] & SR601_T) {
3423 1.1 matt pmap_kernel()->pm_sr[i] = iosrtable[i];
3424 1.35 perry __asm volatile ("mtsrin %0,%1"
3425 1.1 matt :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
3426 1.1 matt }
3427 1.1 matt }
3428 1.38 sanjayl #endif /* PPC_OEA || PPC_OEA64_BRIDGE */
3429 1.38 sanjayl #if defined (PPC_OEA)
3430 1.35 perry __asm volatile ("sync; mtsdr1 %0; isync"
3431 1.2 matt :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
3432 1.38 sanjayl #elif defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
3433 1.38 sanjayl 	__asm volatile ("sync; mtsdr1 %0; isync"
3434 1.38 sanjayl :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11))));
3435 1.38 sanjayl #endif
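	/*
	 * SDR1 points the MMU at the hash table: the upper bits are the
	 * physical base (HTABORG, which is why the table was allocated
	 * aligned to its own size above) and the low bits form HTABMASK,
	 * the mask applied to the upper hash bits.  On 32-bit OEA,
	 * HTABMASK == pmap_pteg_cnt/1024 - 1, hence shifting
	 * pmap_pteg_mask right by 10; e.g. 0x4000 PTEGs (a 1MB table)
	 * give a mask of 0xf.  The 64-bit form instead encodes
	 * HTABSIZE == log2(pmap_pteg_cnt) - 11.
	 */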
3436 1.1 matt tlbia();
3437 1.1 matt
3438 1.1 matt #ifdef ALTIVEC
3439 1.1 matt pmap_use_altivec = cpu_altivec;
3440 1.1 matt #endif
3441 1.1 matt
3442 1.1 matt #ifdef DEBUG
3443 1.1 matt if (pmapdebug & PMAPDEBUG_BOOT) {
3444 1.1 matt u_int cnt;
3445 1.1 matt int bank;
3446 1.1 matt char pbuf[9];
3447 1.1 matt for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
3448 1.1 matt cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
3449 1.1 matt printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n",
3450 1.1 matt bank,
3451 1.1 matt ptoa(vm_physmem[bank].avail_start),
3452 1.1 matt ptoa(vm_physmem[bank].avail_end),
3453 1.1 matt ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
3454 1.1 matt }
3455 1.1 matt format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3456 1.1 matt printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3457 1.1 matt pbuf, cnt);
3458 1.1 matt }
3459 1.1 matt #endif
3460 1.1 matt
3461 1.1 matt pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3462 1.1 matt sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
3463 1.1 matt &pmap_pool_uallocator);
3464 1.1 matt
3465 1.1 matt pool_setlowat(&pmap_upvo_pool, 252);
3466 1.1 matt
3467 1.1 matt pool_init(&pmap_pool, sizeof(struct pmap),
3468 1.1 matt sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator);
3469 1.41 matt
3470 1.41 matt #if defined(PMAP_NEED_MAPKERNEL)
3471 1.41 matt {
3472 1.41 matt extern int etext[], kernel_text[];
3473 1.41 matt 		vaddr_t va, va_etext = (vaddr_t) etext;
3474 1.41 matt paddr_t pa;
3475 1.41 matt
3476 1.41 matt va = (vaddr_t) kernel_text;
3477 1.41 matt
3478 1.41 matt for (pa = kernelstart; va < va_etext;
3479 1.41 matt pa += PAGE_SIZE, va += PAGE_SIZE)
3480 1.41 matt pmap_enter(pmap_kernel(), va, pa,
3481 1.41 matt VM_PROT_READ|VM_PROT_EXECUTE, 0);
3482 1.41 matt
3483 1.41 matt for (; pa < kernelend;
3484 1.41 matt pa += PAGE_SIZE, va += PAGE_SIZE)
3485 1.41 matt pmap_enter(pmap_kernel(), va, pa,
3486 1.41 matt VM_PROT_READ|VM_PROT_WRITE, 0);
3487 1.41 matt }
3488 1.41 matt #endif
3489 1.1 matt }
3490