1 1.122 andvar /* $NetBSD: pmap.c,v 1.122 2025/02/28 09:07:11 andvar Exp $ */ 2 1.1 matt /*- 3 1.1 matt * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 1.1 matt * All rights reserved. 5 1.1 matt * 6 1.1 matt * This code is derived from software contributed to The NetBSD Foundation 7 1.1 matt * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc. 8 1.1 matt * 9 1.38 sanjayl * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl (at) kymasys.com> 10 1.38 sanjayl * of Kyma Systems LLC. 11 1.38 sanjayl * 12 1.1 matt * Redistribution and use in source and binary forms, with or without 13 1.1 matt * modification, are permitted provided that the following conditions 14 1.1 matt * are met: 15 1.1 matt * 1. Redistributions of source code must retain the above copyright 16 1.1 matt * notice, this list of conditions and the following disclaimer. 17 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright 18 1.1 matt * notice, this list of conditions and the following disclaimer in the 19 1.1 matt * documentation and/or other materials provided with the distribution. 20 1.1 matt * 21 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 1.1 matt * POSSIBILITY OF SUCH DAMAGE. 32 1.1 matt */ 33 1.1 matt 34 1.1 matt /* 35 1.1 matt * Copyright (C) 1995, 1996 Wolfgang Solfrank. 36 1.1 matt * Copyright (C) 1995, 1996 TooLs GmbH. 37 1.1 matt * All rights reserved. 38 1.1 matt * 39 1.1 matt * Redistribution and use in source and binary forms, with or without 40 1.1 matt * modification, are permitted provided that the following conditions 41 1.1 matt * are met: 42 1.1 matt * 1. Redistributions of source code must retain the above copyright 43 1.1 matt * notice, this list of conditions and the following disclaimer. 44 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright 45 1.1 matt * notice, this list of conditions and the following disclaimer in the 46 1.1 matt * documentation and/or other materials provided with the distribution. 47 1.1 matt * 3. All advertising materials mentioning features or use of this software 48 1.1 matt * must display the following acknowledgement: 49 1.1 matt * This product includes software developed by TooLs GmbH. 50 1.1 matt * 4. The name of TooLs GmbH may not be used to endorse or promote products 51 1.1 matt * derived from this software without specific prior written permission. 52 1.1 matt * 53 1.1 matt * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 54 1.1 matt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 1.1 matt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
56 1.1 matt * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 57 1.1 matt * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 58 1.1 matt * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 59 1.1 matt * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 60 1.1 matt * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 61 1.1 matt * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 62 1.1 matt * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 63 1.1 matt */ 64 1.11 lukem 65 1.11 lukem #include <sys/cdefs.h> 66 1.122 andvar __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.122 2025/02/28 09:07:11 andvar Exp $"); 67 1.53 garbled 68 1.53 garbled #define PMAP_NOOPNAMES 69 1.1 matt 70 1.98 rin #ifdef _KERNEL_OPT 71 1.1 matt #include "opt_altivec.h" 72 1.57 matt #include "opt_multiprocessor.h" 73 1.1 matt #include "opt_pmap.h" 74 1.98 rin #include "opt_ppcarch.h" 75 1.98 rin #endif 76 1.57 matt 77 1.1 matt #include <sys/param.h> 78 1.1 matt #include <sys/proc.h> 79 1.1 matt #include <sys/pool.h> 80 1.1 matt #include <sys/queue.h> 81 1.1 matt #include <sys/device.h> /* for evcnt */ 82 1.1 matt #include <sys/systm.h> 83 1.50 ad #include <sys/atomic.h> 84 1.1 matt 85 1.1 matt #include <uvm/uvm.h> 86 1.94 cherry #include <uvm/uvm_physseg.h> 87 1.1 matt 88 1.1 matt #include <machine/powerpc.h> 89 1.80 matt #include <powerpc/bat.h> 90 1.80 matt #include <powerpc/pcb.h> 91 1.80 matt #include <powerpc/psl.h> 92 1.1 matt #include <powerpc/spr.h> 93 1.71 matt #include <powerpc/oea/spr.h> 94 1.71 matt #include <powerpc/oea/sr_601.h> 95 1.1 matt 96 1.1 matt #ifdef ALTIVEC 97 1.86 matt extern int pmap_use_altivec; 98 1.1 matt #endif 99 1.1 matt 100 1.21 aymeric #ifdef PMAP_MEMLIMIT 101 1.53 garbled static paddr_t pmap_memlimit = PMAP_MEMLIMIT; 102 1.21 aymeric #else 103 1.53 garbled static paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */ 104 1.21 aymeric #endif 105 1.1 matt 106 1.86 matt extern struct pmap kernel_pmap_; 107 1.86 matt static unsigned int pmap_pages_stolen; 108 1.86 matt static u_long pmap_pte_valid; 109 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 110 1.86 matt static u_long pmap_pvo_enter_depth; 111 1.86 matt static u_long pmap_pvo_remove_depth; 112 1.1 matt #endif 113 1.1 matt 114 1.1 matt #ifndef MSGBUFADDR 115 1.1 matt extern paddr_t msgbuf_paddr; 116 1.1 matt #endif 117 1.1 matt 118 1.1 matt static struct mem_region *mem, *avail; 119 1.1 matt static u_int mem_cnt, avail_cnt; 120 1.1 matt 121 1.53 garbled #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE) 122 1.53 garbled # define PMAP_OEA 1 123 1.53 garbled #endif 124 1.53 garbled 125 1.53 garbled #if defined(PMAP_OEA) 126 1.53 garbled #define _PRIxpte "lx" 127 1.53 garbled #else 128 1.53 garbled #define _PRIxpte PRIx64 129 1.53 garbled #endif 130 1.53 garbled #define _PRIxpa "lx" 131 1.53 garbled #define _PRIxva "lx" 132 1.54 mlelstv #define _PRIsr "lx" 133 1.53 garbled 134 1.76 matt #ifdef PMAP_NEEDS_FIXUP 135 1.53 garbled #if defined(PMAP_OEA) 136 1.53 garbled #define PMAPNAME(name) pmap32_##name 137 1.53 garbled #elif defined(PMAP_OEA64) 138 1.53 garbled #define PMAPNAME(name) pmap64_##name 139 1.53 garbled #elif defined(PMAP_OEA64_BRIDGE) 140 1.53 garbled #define PMAPNAME(name) pmap64bridge_##name 141 1.53 garbled #else 142 1.53 garbled #error unknown variant for pmap 143 1.53 garbled #endif 144 1.76 matt #endif /* PMAP_NEEDS_FIXUP */ 145 1.53 garbled 146 1.76 
matt #ifdef PMAPNAME 147 1.53 garbled #define STATIC static 148 1.53 garbled #define pmap_pte_spill PMAPNAME(pte_spill) 149 1.53 garbled #define pmap_real_memory PMAPNAME(real_memory) 150 1.53 garbled #define pmap_init PMAPNAME(init) 151 1.53 garbled #define pmap_virtual_space PMAPNAME(virtual_space) 152 1.53 garbled #define pmap_create PMAPNAME(create) 153 1.53 garbled #define pmap_reference PMAPNAME(reference) 154 1.53 garbled #define pmap_destroy PMAPNAME(destroy) 155 1.53 garbled #define pmap_copy PMAPNAME(copy) 156 1.53 garbled #define pmap_update PMAPNAME(update) 157 1.53 garbled #define pmap_enter PMAPNAME(enter) 158 1.53 garbled #define pmap_remove PMAPNAME(remove) 159 1.53 garbled #define pmap_kenter_pa PMAPNAME(kenter_pa) 160 1.53 garbled #define pmap_kremove PMAPNAME(kremove) 161 1.53 garbled #define pmap_extract PMAPNAME(extract) 162 1.53 garbled #define pmap_protect PMAPNAME(protect) 163 1.53 garbled #define pmap_unwire PMAPNAME(unwire) 164 1.53 garbled #define pmap_page_protect PMAPNAME(page_protect) 165 1.111 martin #define pmap_pv_protect PMAPNAME(pv_protect) 166 1.53 garbled #define pmap_query_bit PMAPNAME(query_bit) 167 1.53 garbled #define pmap_clear_bit PMAPNAME(clear_bit) 168 1.53 garbled 169 1.53 garbled #define pmap_activate PMAPNAME(activate) 170 1.53 garbled #define pmap_deactivate PMAPNAME(deactivate) 171 1.53 garbled 172 1.53 garbled #define pmap_pinit PMAPNAME(pinit) 173 1.53 garbled #define pmap_procwr PMAPNAME(procwr) 174 1.53 garbled 175 1.86 matt #define pmap_pool PMAPNAME(pool) 176 1.106 martin #define pmap_pvo_pool PMAPNAME(pvo_pool) 177 1.86 matt #define pmap_pvo_table PMAPNAME(pvo_table) 178 1.53 garbled #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 179 1.53 garbled #define pmap_pte_print PMAPNAME(pte_print) 180 1.53 garbled #define pmap_pteg_check PMAPNAME(pteg_check) 181 1.53 garbled #define pmap_print_mmruregs PMAPNAME(print_mmuregs) 182 1.53 garbled #define pmap_print_pte PMAPNAME(print_pte) 183 1.53 garbled #define pmap_pteg_dist PMAPNAME(pteg_dist) 184 1.53 garbled #endif 185 1.53 garbled #if defined(DEBUG) || defined(PMAPCHECK) 186 1.53 garbled #define pmap_pvo_verify PMAPNAME(pvo_verify) 187 1.56 phx #define pmapcheck PMAPNAME(check) 188 1.56 phx #endif 189 1.56 phx #if defined(DEBUG) || defined(PMAPDEBUG) 190 1.56 phx #define pmapdebug PMAPNAME(debug) 191 1.53 garbled #endif 192 1.53 garbled #define pmap_steal_memory PMAPNAME(steal_memory) 193 1.53 garbled #define pmap_bootstrap PMAPNAME(bootstrap) 194 1.100 rin #define pmap_bootstrap1 PMAPNAME(bootstrap1) 195 1.100 rin #define pmap_bootstrap2 PMAPNAME(bootstrap2) 196 1.53 garbled #else 197 1.53 garbled #define STATIC /* nothing */ 198 1.53 garbled #endif /* PMAPNAME */ 199 1.53 garbled 200 1.53 garbled STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool); 201 1.53 garbled STATIC void pmap_real_memory(paddr_t *, psize_t *); 202 1.53 garbled STATIC void pmap_init(void); 203 1.53 garbled STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *); 204 1.53 garbled STATIC pmap_t pmap_create(void); 205 1.53 garbled STATIC void pmap_reference(pmap_t); 206 1.53 garbled STATIC void pmap_destroy(pmap_t); 207 1.53 garbled STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t); 208 1.53 garbled STATIC void pmap_update(pmap_t); 209 1.65 cegger STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int); 210 1.53 garbled STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t); 211 1.68 cegger STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int); 212 1.53 garbled STATIC void 
pmap_kremove(vaddr_t, vsize_t); 213 1.53 garbled STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *); 214 1.53 garbled 215 1.53 garbled STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 216 1.53 garbled STATIC void pmap_unwire(pmap_t, vaddr_t); 217 1.53 garbled STATIC void pmap_page_protect(struct vm_page *, vm_prot_t); 218 1.111 martin STATIC void pmap_pv_protect(paddr_t, vm_prot_t); 219 1.53 garbled STATIC bool pmap_query_bit(struct vm_page *, int); 220 1.53 garbled STATIC bool pmap_clear_bit(struct vm_page *, int); 221 1.53 garbled 222 1.53 garbled STATIC void pmap_activate(struct lwp *); 223 1.53 garbled STATIC void pmap_deactivate(struct lwp *); 224 1.53 garbled 225 1.53 garbled STATIC void pmap_pinit(pmap_t pm); 226 1.53 garbled STATIC void pmap_procwr(struct proc *, vaddr_t, size_t); 227 1.53 garbled 228 1.53 garbled #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 229 1.53 garbled STATIC void pmap_pte_print(volatile struct pte *); 230 1.53 garbled STATIC void pmap_pteg_check(void); 231 1.53 garbled STATIC void pmap_print_mmuregs(void); 232 1.53 garbled STATIC void pmap_print_pte(pmap_t, vaddr_t); 233 1.53 garbled STATIC void pmap_pteg_dist(void); 234 1.53 garbled #endif 235 1.53 garbled #if defined(DEBUG) || defined(PMAPCHECK) 236 1.53 garbled STATIC void pmap_pvo_verify(void); 237 1.53 garbled #endif 238 1.53 garbled STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *); 239 1.53 garbled STATIC void pmap_bootstrap(paddr_t, paddr_t); 240 1.100 rin STATIC void pmap_bootstrap1(paddr_t, paddr_t); 241 1.100 rin STATIC void pmap_bootstrap2(void); 242 1.53 garbled 243 1.53 garbled #ifdef PMAPNAME 244 1.53 garbled const struct pmap_ops PMAPNAME(ops) = { 245 1.53 garbled .pmapop_pte_spill = pmap_pte_spill, 246 1.53 garbled .pmapop_real_memory = pmap_real_memory, 247 1.53 garbled .pmapop_init = pmap_init, 248 1.53 garbled .pmapop_virtual_space = pmap_virtual_space, 249 1.53 garbled .pmapop_create = pmap_create, 250 1.53 garbled .pmapop_reference = pmap_reference, 251 1.53 garbled .pmapop_destroy = pmap_destroy, 252 1.53 garbled .pmapop_copy = pmap_copy, 253 1.53 garbled .pmapop_update = pmap_update, 254 1.53 garbled .pmapop_enter = pmap_enter, 255 1.53 garbled .pmapop_remove = pmap_remove, 256 1.53 garbled .pmapop_kenter_pa = pmap_kenter_pa, 257 1.53 garbled .pmapop_kremove = pmap_kremove, 258 1.53 garbled .pmapop_extract = pmap_extract, 259 1.53 garbled .pmapop_protect = pmap_protect, 260 1.53 garbled .pmapop_unwire = pmap_unwire, 261 1.53 garbled .pmapop_page_protect = pmap_page_protect, 262 1.111 martin .pmapop_pv_protect = pmap_pv_protect, 263 1.53 garbled .pmapop_query_bit = pmap_query_bit, 264 1.53 garbled .pmapop_clear_bit = pmap_clear_bit, 265 1.53 garbled .pmapop_activate = pmap_activate, 266 1.53 garbled .pmapop_deactivate = pmap_deactivate, 267 1.53 garbled .pmapop_pinit = pmap_pinit, 268 1.53 garbled .pmapop_procwr = pmap_procwr, 269 1.53 garbled #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 270 1.53 garbled .pmapop_pte_print = pmap_pte_print, 271 1.53 garbled .pmapop_pteg_check = pmap_pteg_check, 272 1.53 garbled .pmapop_print_mmuregs = pmap_print_mmuregs, 273 1.53 garbled .pmapop_print_pte = pmap_print_pte, 274 1.53 garbled .pmapop_pteg_dist = pmap_pteg_dist, 275 1.53 garbled #else 276 1.53 garbled .pmapop_pte_print = NULL, 277 1.53 garbled .pmapop_pteg_check = NULL, 278 1.53 garbled .pmapop_print_mmuregs = NULL, 279 1.53 garbled .pmapop_print_pte = NULL, 280 1.53 garbled .pmapop_pteg_dist = NULL, 281 1.53 garbled #endif 282 1.53 
garbled #if defined(DEBUG) || defined(PMAPCHECK) 283 1.53 garbled .pmapop_pvo_verify = pmap_pvo_verify, 284 1.53 garbled #else 285 1.53 garbled .pmapop_pvo_verify = NULL, 286 1.1 matt #endif 287 1.53 garbled .pmapop_steal_memory = pmap_steal_memory, 288 1.53 garbled .pmapop_bootstrap = pmap_bootstrap, 289 1.101 thorpej .pmapop_bootstrap1 = pmap_bootstrap1, 290 1.101 thorpej .pmapop_bootstrap2 = pmap_bootstrap2, 291 1.53 garbled }; 292 1.53 garbled #endif /* !PMAPNAME */ 293 1.1 matt 294 1.1 matt /* 295 1.119 rin * The following structure is aligned to 32 bytes, if reasonably possible. 296 1.1 matt */ 297 1.1 matt struct pvo_entry { 298 1.1 matt LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */ 299 1.1 matt TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */ 300 1.1 matt struct pte pvo_pte; /* Prebuilt PTE */ 301 1.1 matt pmap_t pvo_pmap; /* ptr to owning pmap */ 302 1.1 matt vaddr_t pvo_vaddr; /* VA of entry */ 303 1.1 matt #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 304 1.1 matt #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 305 1.1 matt #define PVO_WIRED 0x0010 /* PVO entry is wired */ 306 1.1 matt #define PVO_MANAGED 0x0020 /* PVO e. for managed page */ 307 1.1 matt #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */ 308 1.39 matt #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED) 309 1.39 matt #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED) 310 1.39 matt #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 311 1.12 matt #define PVO_ENTER_INSERT 0 /* PVO has been removed */ 312 1.12 matt #define PVO_SPILL_UNSET 1 /* PVO has been evicted */ 313 1.12 matt #define PVO_SPILL_SET 2 /* PVO has been spilled */ 314 1.12 matt #define PVO_SPILL_INSERT 3 /* PVO has been inserted */ 315 1.12 matt #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */ 316 1.12 matt #define PVO_PMAP_PROTECT 5 /* PVO has changed */ 317 1.12 matt #define PVO_REMOVE 6 /* PVO has been removed */ 318 1.12 matt #define PVO_WHERE_MASK 15 319 1.12 matt #define PVO_WHERE_SHFT 8 320 1.119 rin }; 321 1.119 rin 322 1.119 rin #if defined(PMAP_OEA) && !defined(DIAGNOSTIC) 323 1.119 rin #define PMAP_PVO_ENTRY_ALIGN 32 324 1.119 rin #else 325 1.119 rin #define PMAP_PVO_ENTRY_ALIGN __alignof(struct pvo_entry) 326 1.119 rin #endif 327 1.119 rin 328 1.1 matt #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 329 1.1 matt #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 330 1.1 matt #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 331 1.1 matt #define PVO_PTEGIDX_CLR(pvo) \ 332 1.1 matt ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 333 1.1 matt #define PVO_PTEGIDX_SET(pvo,i) \ 334 1.1 matt ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 335 1.12 matt #define PVO_WHERE(pvo,w) \ 336 1.12 matt ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \ 337 1.12 matt (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT)) 338 1.1 matt 339 1.1 matt TAILQ_HEAD(pvo_tqhead, pvo_entry); 340 1.1 matt struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */ 341 1.1 matt 342 1.1 matt struct pool pmap_pool; /* pool for pmap structures */ 343 1.106 martin struct pool pmap_pvo_pool; /* pool for pvo entries */ 344 1.1 matt 345 1.106 martin static void *pmap_pool_alloc(struct pool *, int); 346 1.106 martin static void pmap_pool_free(struct pool *, void *); 347 1.106 martin 348 1.106 martin static struct pool_allocator pmap_pool_allocator = { 349 1.106 martin .pa_alloc = pmap_pool_alloc, 350 
1.106 martin .pa_free = pmap_pool_free, 351 1.43 garbled .pa_pagesz = 0, 352 1.1 matt }; 353 1.1 matt 354 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 355 1.2 matt void pmap_pte_print(volatile struct pte *); 356 1.1 matt void pmap_pteg_check(void); 357 1.1 matt void pmap_pteg_dist(void); 358 1.1 matt void pmap_print_pte(pmap_t, vaddr_t); 359 1.1 matt void pmap_print_mmuregs(void); 360 1.1 matt #endif 361 1.1 matt 362 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) 363 1.1 matt #ifdef PMAPCHECK 364 1.1 matt int pmapcheck = 1; 365 1.1 matt #else 366 1.1 matt int pmapcheck = 0; 367 1.1 matt #endif 368 1.1 matt void pmap_pvo_verify(void); 369 1.53 garbled static void pmap_pvo_check(const struct pvo_entry *); 370 1.1 matt #define PMAP_PVO_CHECK(pvo) \ 371 1.1 matt do { \ 372 1.1 matt if (pmapcheck) \ 373 1.1 matt pmap_pvo_check(pvo); \ 374 1.1 matt } while (0) 375 1.1 matt #else 376 1.1 matt #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0) 377 1.1 matt #endif 378 1.53 garbled static int pmap_pte_insert(int, struct pte *); 379 1.53 garbled static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *, 380 1.2 matt vaddr_t, paddr_t, register_t, int); 381 1.53 garbled static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *); 382 1.53 garbled static void pmap_pvo_free(struct pvo_entry *); 383 1.53 garbled static void pmap_pvo_free_list(struct pvo_head *); 384 1.53 garbled static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *); 385 1.53 garbled static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 386 1.117 rin static struct pvo_entry *pmap_pvo_reclaim(void); 387 1.53 garbled static void pvo_set_exec(struct pvo_entry *); 388 1.53 garbled static void pvo_clear_exec(struct pvo_entry *); 389 1.1 matt 390 1.53 garbled static void tlbia(void); 391 1.1 matt 392 1.53 garbled static void pmap_release(pmap_t); 393 1.53 garbled static paddr_t pmap_boot_find_memory(psize_t, psize_t, int); 394 1.1 matt 395 1.25 chs static uint32_t pmap_pvo_reclaim_nextidx; 396 1.25 chs #ifdef DEBUG 397 1.25 chs static int pmap_pvo_reclaim_debugctr; 398 1.25 chs #endif 399 1.25 chs 400 1.1 matt #define VSID_NBPW (sizeof(uint32_t) * 8) 401 1.1 matt static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 402 1.1 matt 403 1.1 matt static int pmap_initialized; 404 1.1 matt 405 1.1 matt #if defined(DEBUG) || defined(PMAPDEBUG) 406 1.1 matt #define PMAPDEBUG_BOOT 0x0001 407 1.1 matt #define PMAPDEBUG_PTE 0x0002 408 1.1 matt #define PMAPDEBUG_EXEC 0x0008 409 1.1 matt #define PMAPDEBUG_PVOENTER 0x0010 410 1.1 matt #define PMAPDEBUG_PVOREMOVE 0x0020 411 1.1 matt #define PMAPDEBUG_ACTIVATE 0x0100 412 1.1 matt #define PMAPDEBUG_CREATE 0x0200 413 1.1 matt #define PMAPDEBUG_ENTER 0x1000 414 1.1 matt #define PMAPDEBUG_KENTER 0x2000 415 1.1 matt #define PMAPDEBUG_KREMOVE 0x4000 416 1.1 matt #define PMAPDEBUG_REMOVE 0x8000 417 1.38 sanjayl 418 1.1 matt unsigned int pmapdebug = 0; 419 1.38 sanjayl 420 1.85 matt # define DPRINTF(x, ...) printf(x, __VA_ARGS__) 421 1.85 matt # define DPRINTFN(n, x, ...) do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0) 422 1.1 matt #else 423 1.85 matt # define DPRINTF(x, ...) do { } while (0) 424 1.85 matt # define DPRINTFN(n, x, ...) 
do { } while (0) 425 1.1 matt #endif 426 1.1 matt 427 1.1 matt 428 1.1 matt #ifdef PMAPCOUNTERS 429 1.1 matt /* 430 1.1 matt * From pmap_subr.c 431 1.1 matt */ 432 1.53 garbled extern struct evcnt pmap_evcnt_mappings; 433 1.53 garbled extern struct evcnt pmap_evcnt_unmappings; 434 1.53 garbled 435 1.53 garbled extern struct evcnt pmap_evcnt_kernel_mappings; 436 1.53 garbled extern struct evcnt pmap_evcnt_kernel_unmappings; 437 1.53 garbled 438 1.53 garbled extern struct evcnt pmap_evcnt_mappings_replaced; 439 1.53 garbled 440 1.53 garbled extern struct evcnt pmap_evcnt_exec_mappings; 441 1.53 garbled extern struct evcnt pmap_evcnt_exec_cached; 442 1.53 garbled 443 1.53 garbled extern struct evcnt pmap_evcnt_exec_synced; 444 1.53 garbled extern struct evcnt pmap_evcnt_exec_synced_clear_modify; 445 1.53 garbled extern struct evcnt pmap_evcnt_exec_synced_pvo_remove; 446 1.53 garbled 447 1.53 garbled extern struct evcnt pmap_evcnt_exec_uncached_page_protect; 448 1.53 garbled extern struct evcnt pmap_evcnt_exec_uncached_clear_modify; 449 1.53 garbled extern struct evcnt pmap_evcnt_exec_uncached_zero_page; 450 1.53 garbled extern struct evcnt pmap_evcnt_exec_uncached_copy_page; 451 1.53 garbled extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove; 452 1.53 garbled 453 1.53 garbled extern struct evcnt pmap_evcnt_updates; 454 1.53 garbled extern struct evcnt pmap_evcnt_collects; 455 1.53 garbled extern struct evcnt pmap_evcnt_copies; 456 1.53 garbled 457 1.53 garbled extern struct evcnt pmap_evcnt_ptes_spilled; 458 1.53 garbled extern struct evcnt pmap_evcnt_ptes_unspilled; 459 1.53 garbled extern struct evcnt pmap_evcnt_ptes_evicted; 460 1.53 garbled 461 1.53 garbled extern struct evcnt pmap_evcnt_ptes_primary[8]; 462 1.53 garbled extern struct evcnt pmap_evcnt_ptes_secondary[8]; 463 1.53 garbled extern struct evcnt pmap_evcnt_ptes_removed; 464 1.53 garbled extern struct evcnt pmap_evcnt_ptes_changed; 465 1.53 garbled extern struct evcnt pmap_evcnt_pvos_reclaimed; 466 1.53 garbled extern struct evcnt pmap_evcnt_pvos_failed; 467 1.53 garbled 468 1.1 matt extern struct evcnt pmap_evcnt_zeroed_pages; 469 1.1 matt extern struct evcnt pmap_evcnt_copied_pages; 470 1.1 matt extern struct evcnt pmap_evcnt_idlezeroed_pages; 471 1.26 matt 472 1.53 garbled #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++) 473 1.53 garbled #define PMAPCOUNT2(ev) ((ev).ev_count++) 474 1.1 matt #else 475 1.1 matt #define PMAPCOUNT(ev) ((void) 0) 476 1.1 matt #define PMAPCOUNT2(ev) ((void) 0) 477 1.1 matt #endif 478 1.1 matt 479 1.109 riastrad #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va) : "memory") 480 1.38 sanjayl 481 1.38 sanjayl /* XXXSL: this needs to be moved to assembler */ 482 1.109 riastrad #define TLBIEL(va) __asm volatile("tlbie %0" :: "r"(va) : "memory") 483 1.38 sanjayl 484 1.87 kiyohara #ifdef MD_TLBSYNC 485 1.87 kiyohara #define TLBSYNC() MD_TLBSYNC() 486 1.87 kiyohara #else 487 1.109 riastrad #define TLBSYNC() __asm volatile("tlbsync" ::: "memory") 488 1.87 kiyohara #endif 489 1.109 riastrad #define SYNC() __asm volatile("sync" ::: "memory") 490 1.109 riastrad #define EIEIO() __asm volatile("eieio" ::: "memory") 491 1.109 riastrad #define DCBST(va) __asm volatile("dcbst 0,%0" :: "r"(va) : "memory") 492 1.1 matt #define MFMSR() mfmsr() 493 1.1 matt #define MTMSR(psl) mtmsr(psl) 494 1.1 matt #define MFPVR() mfpvr() 495 1.1 matt #define MFSRIN(va) mfsrin(va) 496 1.1 matt #define MFTB() mfrtcltbl() 497 1.1 matt 498 1.92 joerg #if defined(DDB) && !defined(PMAP_OEA64) 499 1.35 perry static inline 
register_t 500 1.1 matt mfsrin(vaddr_t va) 501 1.1 matt { 502 1.2 matt register_t sr; 503 1.35 perry __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va)); 504 1.1 matt return sr; 505 1.1 matt } 506 1.92 joerg #endif /* DDB && !PMAP_OEA64 */ 507 1.38 sanjayl 508 1.53 garbled #if defined (PMAP_OEA64_BRIDGE) 509 1.38 sanjayl extern void mfmsr64 (register64_t *result); 510 1.53 garbled #endif /* PMAP_OEA64_BRIDGE */ 511 1.38 sanjayl 512 1.50 ad #define PMAP_LOCK() KERNEL_LOCK(1, NULL) 513 1.50 ad #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL) 514 1.1 matt 515 1.35 perry static inline register_t 516 1.1 matt pmap_interrupts_off(void) 517 1.1 matt { 518 1.2 matt register_t msr = MFMSR(); 519 1.1 matt if (msr & PSL_EE) 520 1.1 matt MTMSR(msr & ~PSL_EE); 521 1.1 matt return msr; 522 1.1 matt } 523 1.1 matt 524 1.1 matt static void 525 1.2 matt pmap_interrupts_restore(register_t msr) 526 1.1 matt { 527 1.1 matt if (msr & PSL_EE) 528 1.1 matt MTMSR(msr); 529 1.1 matt } 530 1.1 matt 531 1.35 perry static inline u_int32_t 532 1.1 matt mfrtcltbl(void) 533 1.1 matt { 534 1.55 garbled #ifdef PPC_OEA601 535 1.1 matt if ((MFPVR() >> 16) == MPC601) 536 1.1 matt return (mfrtcl() >> 7); 537 1.1 matt else 538 1.55 garbled #endif 539 1.1 matt return (mftbl()); 540 1.1 matt } 541 1.1 matt 542 1.1 matt /* 543 1.1 matt * These small routines may have to be replaced, 544 1.1 matt * if/when we support processors other than the 604. 545 1.1 matt */ 546 1.1 matt 547 1.1 matt void 548 1.1 matt tlbia(void) 549 1.1 matt { 550 1.47 macallan char *i; 551 1.1 matt 552 1.1 matt SYNC(); 553 1.53 garbled #if defined(PMAP_OEA) 554 1.1 matt /* 555 1.1 matt * Why not use "tlbia"? Because not all processors implement it. 556 1.1 matt * 557 1.20 wiz * This needs to be a per-CPU callback to do the appropriate thing 558 1.1 matt * for the CPU. XXX 559 1.1 matt */ 560 1.47 macallan for (i = 0; i < (char *)0x00040000; i += 0x00001000) { 561 1.1 matt TLBIE(i); 562 1.1 matt EIEIO(); 563 1.1 matt SYNC(); 564 1.1 matt } 565 1.53 garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 566 1.38 sanjayl /* This is specifically for the 970, 970UM v1.6 pp. 140. */ 567 1.51 garbled for (i = 0; i <= (char *)0xFF000; i += 0x00001000) { 568 1.38 sanjayl TLBIEL(i); 569 1.38 sanjayl EIEIO(); 570 1.38 sanjayl SYNC(); 571 1.38 sanjayl } 572 1.38 sanjayl #endif 573 1.1 matt TLBSYNC(); 574 1.1 matt SYNC(); 575 1.1 matt } 576 1.1 matt 577 1.35 perry static inline register_t 578 1.2 matt va_to_vsid(const struct pmap *pm, vaddr_t addr) 579 1.1 matt { 580 1.18 matt /* 581 1.102 thorpej * Rather than searching the STE groups for the VSID or extracting 582 1.102 thorpej * it from the SR, we know how we generate that from the ESID and 583 1.102 thorpej * so do that. 584 1.102 thorpej * 585 1.102 thorpej * This makes the code the same for OEA and OEA64, and also allows 586 1.102 thorpej * us to generate a correct-for-that-address-space VSID even if the 587 1.102 thorpej * pmap contains a different SR value at any given moment (e.g. 588 1.102 thorpej * kernel pmap on a 601 that is using I/O segments). 
589 1.18 matt */ 590 1.18 matt return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT; 591 1.1 matt } 592 1.1 matt 593 1.35 perry static inline register_t 594 1.2 matt va_to_pteg(const struct pmap *pm, vaddr_t addr) 595 1.1 matt { 596 1.2 matt register_t hash; 597 1.2 matt 598 1.2 matt hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); 599 1.1 matt return hash & pmap_pteg_mask; 600 1.1 matt } 601 1.1 matt 602 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 603 1.1 matt /* 604 1.1 matt * Given a PTE in the page table, calculate the VADDR that hashes to it. 605 1.1 matt * The only bit of magic is that the top 4 bits of the address don't 606 1.1 matt * technically exist in the PTE. But we know we reserved 4 bits of the 607 1.1 matt * VSID for it so that's how we get it. 608 1.1 matt */ 609 1.1 matt static vaddr_t 610 1.2 matt pmap_pte_to_va(volatile const struct pte *pt) 611 1.1 matt { 612 1.1 matt vaddr_t va; 613 1.1 matt uintptr_t ptaddr = (uintptr_t) pt; 614 1.1 matt 615 1.1 matt if (pt->pte_hi & PTE_HID) 616 1.2 matt ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg)); 617 1.1 matt 618 1.18 matt /* PPC Bits 10-19 PPC64 Bits 42-51 */ 619 1.53 garbled #if defined(PMAP_OEA) 620 1.4 matt va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff; 621 1.53 garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 622 1.38 sanjayl va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff; 623 1.38 sanjayl #endif 624 1.1 matt va <<= ADDR_PIDX_SHFT; 625 1.1 matt 626 1.18 matt /* PPC Bits 4-9 PPC64 Bits 36-41 */ 627 1.1 matt va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT; 628 1.1 matt 629 1.53 garbled #if defined(PMAP_OEA64) 630 1.18 matt /* PPC64 Bits 0-35 */ 631 1.18 matt /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */ 632 1.53 garbled #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 633 1.1 matt /* PPC Bits 0-3 */ 634 1.1 matt va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; 635 1.18 matt #endif 636 1.1 matt 637 1.1 matt return va; 638 1.1 matt } 639 1.1 matt #endif 640 1.1 matt 641 1.35 perry static inline struct pvo_head * 642 1.1 matt pa_to_pvoh(paddr_t pa, struct vm_page **pg_p) 643 1.1 matt { 644 1.1 matt struct vm_page *pg; 645 1.72 uebayasi struct vm_page_md *md; 646 1.108 riastrad struct pmap_page *pp; 647 1.1 matt 648 1.1 matt pg = PHYS_TO_VM_PAGE(pa); 649 1.1 matt if (pg_p != NULL) 650 1.1 matt *pg_p = pg; 651 1.108 riastrad if (pg == NULL) { 652 1.108 riastrad if ((pp = pmap_pv_tracked(pa)) != NULL) 653 1.108 riastrad return &pp->pp_pvoh; 654 1.107 chs return NULL; 655 1.108 riastrad } 656 1.72 uebayasi md = VM_PAGE_TO_MD(pg); 657 1.72 uebayasi return &md->mdpg_pvoh; 658 1.1 matt } 659 1.1 matt 660 1.35 perry static inline struct pvo_head * 661 1.1 matt vm_page_to_pvoh(struct vm_page *pg) 662 1.1 matt { 663 1.72 uebayasi struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 664 1.72 uebayasi 665 1.72 uebayasi return &md->mdpg_pvoh; 666 1.1 matt } 667 1.1 matt 668 1.108 riastrad static inline void 669 1.108 riastrad pmap_pp_attr_clear(struct pmap_page *pp, int ptebit) 670 1.108 riastrad { 671 1.108 riastrad 672 1.114 rin pp->pp_attrs &= ~ptebit; 673 1.108 riastrad } 674 1.1 matt 675 1.35 perry static inline void 676 1.1 matt pmap_attr_clear(struct vm_page *pg, int ptebit) 677 1.1 matt { 678 1.72 uebayasi struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 679 1.72 uebayasi 680 1.108 riastrad pmap_pp_attr_clear(&md->mdpg_pp, ptebit); 681 1.108 riastrad } 682
1.108 riastrad 683 1.108 riastrad static inline int 684 1.108 riastrad pmap_pp_attr_fetch(struct pmap_page *pp) 685 1.108 riastrad { 686 1.108 riastrad 687 1.108 riastrad return pp->pp_attrs; 688 1.1 matt } 689 1.1 matt 690 1.35 perry static inline int 691 1.1 matt pmap_attr_fetch(struct vm_page *pg) 692 1.1 matt { 693 1.72 uebayasi struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 694 1.72 uebayasi 695 1.108 riastrad return pmap_pp_attr_fetch(&md->mdpg_pp); 696 1.1 matt } 697 1.1 matt 698 1.35 perry static inline void 699 1.1 matt pmap_attr_save(struct vm_page *pg, int ptebit) 700 1.1 matt { 701 1.72 uebayasi struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 702 1.72 uebayasi 703 1.72 uebayasi md->mdpg_attrs |= ptebit; 704 1.1 matt } 705 1.1 matt 706 1.35 perry static inline int 707 1.2 matt pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt) 708 1.1 matt { 709 1.1 matt if (pt->pte_hi == pvo_pt->pte_hi 710 1.1 matt #if 0 711 1.1 matt && ((pt->pte_lo ^ pvo_pt->pte_lo) & 712 1.1 matt ~(PTE_REF|PTE_CHG)) == 0 713 1.1 matt #endif 714 1.1 matt ) 715 1.1 matt return 1; 716 1.1 matt return 0; 717 1.1 matt } 718 1.1 matt 719 1.35 perry static inline void 720 1.2 matt pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo) 721 1.1 matt { 722 1.1 matt /* 723 1.1 matt * Construct the PTE. Default to IMB initially. Valid bit 724 1.1 matt * only gets set when the real pte is set in memory. 725 1.1 matt * 726 1.1 matt * Note: Don't set the valid bit for correct operation of tlb update. 727 1.1 matt */ 728 1.53 garbled #if defined(PMAP_OEA) 729 1.2 matt pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT) 730 1.2 matt | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 731 1.1 matt pt->pte_lo = pte_lo; 732 1.79 matt #elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64) 733 1.38 sanjayl pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT) 734 1.38 sanjayl | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 735 1.38 sanjayl pt->pte_lo = (u_int64_t) pte_lo; 736 1.53 garbled #endif /* PMAP_OEA */ 737 1.1 matt } 738 1.1 matt 739 1.35 perry static inline void 740 1.2 matt pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt) 741 1.1 matt { 742 1.1 matt pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG); 743 1.1 matt } 744 1.1 matt 745 1.35 perry static inline void 746 1.2 matt pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit) 747 1.1 matt { 748 1.1 matt /* 749 1.1 matt * As shown in Section 7.6.3.2.3 750 1.1 matt */ 751 1.1 matt pt->pte_lo &= ~ptebit; 752 1.1 matt TLBIE(va); 753 1.1 matt SYNC(); 754 1.1 matt EIEIO(); 755 1.1 matt TLBSYNC(); 756 1.1 matt SYNC(); 757 1.57 matt #ifdef MULTIPROCESSOR 758 1.57 matt DCBST(pt); 759 1.57 matt #endif 760 1.1 matt } 761 1.1 matt 762 1.35 perry static inline void 763 1.2 matt pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt) 764 1.1 matt { 765 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 766 1.1 matt if (pvo_pt->pte_hi & PTE_VALID) 767 1.1 matt panic("pte_set: setting an already valid pte %p", pvo_pt); 768 1.1 matt #endif 769 1.1 matt pvo_pt->pte_hi |= PTE_VALID; 770 1.38 sanjayl 771 1.1 matt /* 772 1.1 matt * Update the PTE as defined in section 7.6.3.1 773 1.1 matt * Note that the REF/CHG bits are from pvo_pt and thus should 774 1.1 matt * have been saved so this routine can restore them (if desired). 
775 1.1 matt */ 776 1.1 matt pt->pte_lo = pvo_pt->pte_lo; 777 1.1 matt EIEIO(); 778 1.1 matt pt->pte_hi = pvo_pt->pte_hi; 779 1.38 sanjayl TLBSYNC(); 780 1.1 matt SYNC(); 781 1.57 matt #ifdef MULTIPROCESSOR 782 1.57 matt DCBST(pt); 783 1.57 matt #endif 784 1.1 matt pmap_pte_valid++; 785 1.1 matt } 786 1.1 matt 787 1.35 perry static inline void 788 1.2 matt pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 789 1.1 matt { 790 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 791 1.1 matt if ((pvo_pt->pte_hi & PTE_VALID) == 0) 792 1.1 matt panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt); 793 1.1 matt if ((pt->pte_hi & PTE_VALID) == 0) 794 1.1 matt panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt); 795 1.1 matt #endif 796 1.1 matt 797 1.1 matt pvo_pt->pte_hi &= ~PTE_VALID; 798 1.1 matt /* 799 1.1 matt * Force the ref & chg bits back into the PTEs. 800 1.1 matt */ 801 1.1 matt SYNC(); 802 1.1 matt /* 803 1.1 matt * Invalidate the pte ... (Section 7.6.3.3) 804 1.1 matt */ 805 1.1 matt pt->pte_hi &= ~PTE_VALID; 806 1.1 matt SYNC(); 807 1.1 matt TLBIE(va); 808 1.1 matt SYNC(); 809 1.1 matt EIEIO(); 810 1.1 matt TLBSYNC(); 811 1.1 matt SYNC(); 812 1.1 matt /* 813 1.1 matt * Save the ref & chg bits ... 814 1.1 matt */ 815 1.1 matt pmap_pte_synch(pt, pvo_pt); 816 1.1 matt pmap_pte_valid--; 817 1.1 matt } 818 1.1 matt 819 1.35 perry static inline void 820 1.2 matt pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 821 1.1 matt { 822 1.1 matt /* 823 1.1 matt * Invalidate the PTE 824 1.1 matt */ 825 1.1 matt pmap_pte_unset(pt, pvo_pt, va); 826 1.1 matt pmap_pte_set(pt, pvo_pt); 827 1.1 matt } 828 1.1 matt 829 1.1 matt /* 830 1.1 matt * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx 831 1.1 matt * (either primary or secondary location). 832 1.1 matt * 833 1.1 matt * Note: both the destination and source PTEs must not have PTE_VALID set. 834 1.1 matt */ 835 1.1 matt 836 1.53 garbled static int 837 1.2 matt pmap_pte_insert(int ptegidx, struct pte *pvo_pt) 838 1.1 matt { 839 1.2 matt volatile struct pte *pt; 840 1.1 matt int i; 841 1.1 matt 842 1.1 matt #if defined(DEBUG) 843 1.85 matt DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n", 844 1.85 matt ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo); 845 1.1 matt #endif 846 1.1 matt /* 847 1.1 matt * First try primary hash. 848 1.1 matt */ 849 1.1 matt for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 850 1.1 matt if ((pt->pte_hi & PTE_VALID) == 0) { 851 1.1 matt pvo_pt->pte_hi &= ~PTE_HID; 852 1.1 matt pmap_pte_set(pt, pvo_pt); 853 1.1 matt return i; 854 1.1 matt } 855 1.1 matt } 856 1.1 matt 857 1.1 matt /* 858 1.1 matt * Now try secondary hash. 859 1.1 matt */ 860 1.1 matt ptegidx ^= pmap_pteg_mask; 861 1.1 matt for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 862 1.1 matt if ((pt->pte_hi & PTE_VALID) == 0) { 863 1.1 matt pvo_pt->pte_hi |= PTE_HID; 864 1.1 matt pmap_pte_set(pt, pvo_pt); 865 1.1 matt return i; 866 1.1 matt } 867 1.1 matt } 868 1.1 matt return -1; 869 1.1 matt } 870 1.1 matt 871 1.1 matt /* 872 1.1 matt * Spill handler. 873 1.1 matt * 874 1.1 matt * Tries to spill a page table entry from the overflow area. 875 1.1 matt * This runs in either real mode (if dealing with a exception spill) 876 1.1 matt * or virtual mode when dealing with manually spilling one of the 877 1.118 rin * kernel's pte entries. 
878 1.1 matt */ 879 1.14 chs 880 1.1 matt int 881 1.118 rin pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool isi_p) 882 1.1 matt { 883 1.118 rin struct pvo_tqhead *spvoh, *vpvoh; 884 1.118 rin struct pvo_entry *pvo, *source_pvo, *victim_pvo; 885 1.2 matt volatile struct pteg *pteg; 886 1.2 matt volatile struct pte *pt; 887 1.118 rin register_t msr, vsid, hash; 888 1.118 rin int ptegidx, hid, i, j; 889 1.118 rin int done = 0; 890 1.1 matt 891 1.50 ad PMAP_LOCK(); 892 1.118 rin msr = pmap_interrupts_off(); 893 1.118 rin 894 1.118 rin /* XXXRO paranoid? */ 895 1.118 rin if (pm->pm_evictions == 0) 896 1.118 rin goto out; 897 1.50 ad 898 1.2 matt ptegidx = va_to_pteg(pm, addr); 899 1.1 matt 900 1.1 matt /* 901 1.118 rin * Find source pvo. 902 1.1 matt */ 903 1.118 rin spvoh = &pmap_pvo_table[ptegidx]; 904 1.1 matt source_pvo = NULL; 905 1.118 rin TAILQ_FOREACH(pvo, spvoh, pvo_olink) { 906 1.1 matt /* 907 1.1 matt * We need to find pvo entry for this address... 908 1.1 matt */ 909 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 910 1.1 matt 911 1.1 matt /* 912 1.1 matt * If we haven't found the source and we come to a PVO with 913 1.1 matt * a valid PTE, then we know we can't find it because all 914 1.1 matt * evicted PVOs always are first in the list. 915 1.1 matt */ 916 1.118 rin if ((pvo->pvo_pte.pte_hi & PTE_VALID) != 0) 917 1.1 matt break; 918 1.1 matt 919 1.118 rin if (pm == pvo->pvo_pmap && addr == PVO_VADDR(pvo)) { 920 1.118 rin if (isi_p) { 921 1.118 rin if (!PVO_EXECUTABLE_P(pvo)) 922 1.118 rin goto out; 923 1.118 rin #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 924 1.118 rin int sr __diagused = 925 1.118 rin PVO_VADDR(pvo) >> ADDR_SR_SHFT; 926 1.118 rin KASSERT((pm->pm_sr[sr] & SR_NOEXEC) == 0); 927 1.118 rin #endif 928 1.1 matt } 929 1.118 rin KASSERT(!PVO_PTEGIDX_ISSET(pvo)); 930 1.118 rin /* XXXRO where check */ 931 1.1 matt source_pvo = pvo; 932 1.118 rin break; 933 1.1 matt } 934 1.118 rin } 935 1.118 rin if (source_pvo == NULL) { 936 1.118 rin PMAPCOUNT(ptes_unspilled); 937 1.118 rin goto out; 938 1.118 rin } 939 1.118 rin 940 1.118 rin /* 941 1.118 rin * Now we have found the entry to be spilled into the 942 1.118 rin * pteg. Attempt to insert it into the page table. 943 1.118 rin */ 944 1.118 rin i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 945 1.118 rin if (i >= 0) { 946 1.118 rin PVO_PTEGIDX_SET(pvo, i); 947 1.118 rin PMAP_PVO_CHECK(pvo); /* sanity check */ 948 1.118 rin PVO_WHERE(pvo, SPILL_INSERT); 949 1.118 rin pvo->pvo_pmap->pm_evictions--; 950 1.118 rin PMAPCOUNT(ptes_spilled); 951 1.118 rin PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) != 0 952 1.118 rin ? pmap_evcnt_ptes_secondary 953 1.118 rin : pmap_evcnt_ptes_primary)[i]); 954 1.118 rin 955 1.118 rin TAILQ_REMOVE(spvoh, pvo, pvo_olink); 956 1.118 rin TAILQ_INSERT_TAIL(spvoh, pvo, pvo_olink); 957 1.1 matt 958 1.118 rin done = 1; 959 1.118 rin goto out; 960 1.1 matt } 961 1.1 matt 962 1.118 rin /* 963 1.118 rin * Have to substitute some entry. Use the primary hash for this. 964 1.118 rin * Use low bits of timebase as random generator. 965 1.118 rin * 966 1.118 rin * XXX: 967 1.118 rin * Make sure we are not picking a kernel pte for replacement. 
968 1.118 rin */ 969 1.118 rin hid = 0; 970 1.118 rin i = MFTB() & 7; 971 1.118 rin pteg = &pmap_pteg_table[ptegidx]; 972 1.118 rin retry: 973 1.118 rin for (j = 0; j < 8; j++, i = (i + 1) & 7) { 974 1.118 rin pt = &pteg->pt[i]; 975 1.118 rin 976 1.118 rin if ((pt->pte_hi & PTE_VALID) == 0) 977 1.118 rin break; 978 1.118 rin 979 1.118 rin vsid = (pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT; 980 1.118 rin hash = VSID_TO_HASH(vsid); 981 1.118 rin if (hash < PHYSMAP_VSIDBITS) 982 1.118 rin break; 983 1.118 rin } 984 1.118 rin if (j == 8) { 985 1.118 rin if (hid != 0) 986 1.118 rin panic("%s: no victim\n", __func__); 987 1.118 rin hid = PTE_HID; 988 1.118 rin pteg = &pmap_pteg_table[ptegidx ^ pmap_pteg_mask]; 989 1.118 rin goto retry; 990 1.1 matt } 991 1.1 matt 992 1.118 rin /* 993 1.118 rin * We also need the pvo entry of the victim we are replacing 994 1.118 rin * so save the R & C bits of the PTE. 995 1.118 rin */ 996 1.118 rin if ((pt->pte_hi & PTE_HID) == hid) 997 1.118 rin vpvoh = spvoh; 998 1.118 rin else 999 1.118 rin vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; 1000 1.118 rin victim_pvo = NULL; 1001 1.118 rin TAILQ_FOREACH(pvo, vpvoh, pvo_olink) { 1002 1.118 rin PMAP_PVO_CHECK(pvo); /* sanity check */ 1003 1.1 matt 1004 1.118 rin if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) 1005 1.118 rin continue; 1006 1.1 matt 1007 1.118 rin if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 1008 1.118 rin victim_pvo = pvo; 1009 1.118 rin break; 1010 1.1 matt } 1011 1.118 rin } 1012 1.118 rin if (victim_pvo == NULL) { 1013 1.118 rin panic("%s: victim p-pte (%p) has no pvo entry!", 1014 1.118 rin __func__, pt); 1015 1.1 matt } 1016 1.1 matt 1017 1.1 matt /* 1018 1.12 matt * The victim should not be a kernel PVO/PTE entry. 1019 1.12 matt */ 1020 1.12 matt KASSERT(victim_pvo->pvo_pmap != pmap_kernel()); 1021 1.12 matt KASSERT(PVO_PTEGIDX_ISSET(victim_pvo)); 1022 1.12 matt KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i); 1023 1.12 matt 1024 1.12 matt /* 1025 1.1 matt * We are invalidating the TLB entry for the EA of the PTE 1026 1.1 matt * we are replacing even though it's valid; if we don't, 1027 1.1 matt * we lose any ref/chg bit changes contained in the TLB 1028 1.1 matt * entry. 1029 1.1 matt */ 1030 1.118 rin if (hid == 0) 1031 1.118 rin source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 1032 1.118 rin else 1033 1.118 rin source_pvo->pvo_pte.pte_hi |= PTE_HID; 1034 1.1 matt 1035 1.1 matt /* 1036 1.1 matt * To enforce the PVO list ordering constraint that all 1037 1.1 matt * evicted entries should come before all valid entries, 1038 1.1 matt * move the source PVO to the tail of its list and the 1039 1.1 matt * victim PVO to the head of its list (which might not be 1040 1.1 matt * the same list, if the victim was using the secondary hash). 
1041 1.1 matt */ 1042 1.118 rin TAILQ_REMOVE(spvoh, source_pvo, pvo_olink); 1043 1.118 rin TAILQ_INSERT_TAIL(spvoh, source_pvo, pvo_olink); 1044 1.1 matt TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink); 1045 1.1 matt TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink); 1046 1.1 matt pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 1047 1.1 matt pmap_pte_set(pt, &source_pvo->pvo_pte); 1048 1.1 matt victim_pvo->pvo_pmap->pm_evictions++; 1049 1.1 matt source_pvo->pvo_pmap->pm_evictions--; 1050 1.12 matt PVO_WHERE(victim_pvo, SPILL_UNSET); 1051 1.12 matt PVO_WHERE(source_pvo, SPILL_SET); 1052 1.1 matt 1053 1.1 matt PVO_PTEGIDX_CLR(victim_pvo); 1054 1.1 matt PVO_PTEGIDX_SET(source_pvo, i); 1055 1.1 matt PMAPCOUNT2(pmap_evcnt_ptes_primary[i]); 1056 1.1 matt PMAPCOUNT(ptes_spilled); 1057 1.1 matt PMAPCOUNT(ptes_evicted); 1058 1.1 matt PMAPCOUNT(ptes_removed); 1059 1.1 matt 1060 1.1 matt PMAP_PVO_CHECK(victim_pvo); 1061 1.1 matt PMAP_PVO_CHECK(source_pvo); 1062 1.50 ad 1063 1.118 rin done = 1; 1064 1.118 rin 1065 1.118 rin out: 1066 1.118 rin pmap_interrupts_restore(msr); 1067 1.50 ad PMAP_UNLOCK(); 1068 1.118 rin return done; 1069 1.1 matt } 1070 1.1 matt 1071 1.1 matt /* 1072 1.1 matt * Restrict given range to physical memory 1073 1.1 matt */ 1074 1.1 matt void 1075 1.1 matt pmap_real_memory(paddr_t *start, psize_t *size) 1076 1.1 matt { 1077 1.1 matt struct mem_region *mp; 1078 1.1 matt 1079 1.1 matt for (mp = mem; mp->size; mp++) { 1080 1.1 matt if (*start + *size > mp->start 1081 1.1 matt && *start < mp->start + mp->size) { 1082 1.1 matt if (*start < mp->start) { 1083 1.1 matt *size -= mp->start - *start; 1084 1.1 matt *start = mp->start; 1085 1.1 matt } 1086 1.1 matt if (*start + *size > mp->start + mp->size) 1087 1.1 matt *size = mp->start + mp->size - *start; 1088 1.1 matt return; 1089 1.1 matt } 1090 1.1 matt } 1091 1.1 matt *size = 0; 1092 1.1 matt } 1093 1.1 matt 1094 1.1 matt /* 1095 1.1 matt * Initialize anything else for pmap handling. 1096 1.1 matt * Called during vm_init(). 1097 1.1 matt */ 1098 1.1 matt void 1099 1.1 matt pmap_init(void) 1100 1.1 matt { 1101 1.1 matt 1102 1.1 matt pmap_initialized = 1; 1103 1.1 matt } 1104 1.1 matt 1105 1.1 matt /* 1106 1.10 thorpej * How much virtual space does the kernel get? 1107 1.10 thorpej */ 1108 1.10 thorpej void 1109 1.10 thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end) 1110 1.10 thorpej { 1111 1.10 thorpej /* 1112 1.10 thorpej * For now, reserve one segment (minus some overhead) for kernel 1113 1.10 thorpej * virtual memory 1114 1.10 thorpej */ 1115 1.10 thorpej *start = VM_MIN_KERNEL_ADDRESS; 1116 1.10 thorpej *end = VM_MAX_KERNEL_ADDRESS; 1117 1.10 thorpej } 1118 1.10 thorpej 1119 1.10 thorpej /* 1120 1.1 matt * Allocate, initialize, and return a new physical map. 
1121 1.1 matt */ 1122 1.1 matt pmap_t 1123 1.1 matt pmap_create(void) 1124 1.1 matt { 1125 1.1 matt pmap_t pm; 1126 1.38 sanjayl 1127 1.120 rin pm = pool_get(&pmap_pool, PR_WAITOK | PR_ZERO); 1128 1.121 rin KASSERT((vaddr_t)pm < PMAP_DIRECT_MAPPED_LEN); 1129 1.1 matt pmap_pinit(pm); 1130 1.1 matt 1131 1.85 matt DPRINTFN(CREATE, "pmap_create: pm %p:\n" 1132 1.54 mlelstv "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1133 1.54 mlelstv " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n" 1134 1.54 mlelstv "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1135 1.54 mlelstv " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n", 1136 1.54 mlelstv pm, 1137 1.54 mlelstv pm->pm_sr[0], pm->pm_sr[1], 1138 1.54 mlelstv pm->pm_sr[2], pm->pm_sr[3], 1139 1.54 mlelstv pm->pm_sr[4], pm->pm_sr[5], 1140 1.54 mlelstv pm->pm_sr[6], pm->pm_sr[7], 1141 1.54 mlelstv pm->pm_sr[8], pm->pm_sr[9], 1142 1.54 mlelstv pm->pm_sr[10], pm->pm_sr[11], 1143 1.54 mlelstv pm->pm_sr[12], pm->pm_sr[13], 1144 1.85 matt pm->pm_sr[14], pm->pm_sr[15]); 1145 1.1 matt return pm; 1146 1.1 matt } 1147 1.1 matt 1148 1.1 matt /* 1149 1.1 matt * Initialize a preallocated and zeroed pmap structure. 1150 1.1 matt */ 1151 1.1 matt void 1152 1.1 matt pmap_pinit(pmap_t pm) 1153 1.1 matt { 1154 1.2 matt register_t entropy = MFTB(); 1155 1.2 matt register_t mask; 1156 1.2 matt int i; 1157 1.1 matt 1158 1.1 matt /* 1159 1.1 matt * Allocate some segment registers for this pmap. 1160 1.1 matt */ 1161 1.1 matt pm->pm_refs = 1; 1162 1.50 ad PMAP_LOCK(); 1163 1.2 matt for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1164 1.2 matt static register_t pmap_vsidcontext; 1165 1.2 matt register_t hash; 1166 1.2 matt unsigned int n; 1167 1.1 matt 1168 1.1 matt /* Create a new value by multiplying by a prime adding in 1169 1.1 matt * entropy from the timebase register. This is to make the 1170 1.1 matt * VSID more random so that the PT Hash function collides 1171 1.1 matt * less often. (note that the prime causes gcc to do shifts 1172 1.1 matt * instead of a multiply) 1173 1.1 matt */ 1174 1.1 matt pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 1175 1.1 matt hash = pmap_vsidcontext & (NPMAPS - 1); 1176 1.23 aymeric if (hash == 0) { /* 0 is special, avoid it */ 1177 1.23 aymeric entropy += 0xbadf00d; 1178 1.1 matt continue; 1179 1.23 aymeric } 1180 1.1 matt n = hash >> 5; 1181 1.2 matt mask = 1L << (hash & (VSID_NBPW-1)); 1182 1.2 matt hash = pmap_vsidcontext; 1183 1.1 matt if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 1184 1.1 matt /* anything free in this bucket? */ 1185 1.2 matt if (~pmap_vsid_bitmap[n] == 0) { 1186 1.23 aymeric entropy = hash ^ (hash >> 16); 1187 1.1 matt continue; 1188 1.1 matt } 1189 1.1 matt i = ffs(~pmap_vsid_bitmap[n]) - 1; 1190 1.2 matt mask = 1L << i; 1191 1.2 matt hash &= ~(VSID_NBPW-1); 1192 1.1 matt hash |= i; 1193 1.1 matt } 1194 1.18 matt hash &= PTE_VSID >> PTE_VSID_SHFT; 1195 1.1 matt pmap_vsid_bitmap[n] |= mask; 1196 1.18 matt pm->pm_vsid = hash; 1197 1.53 garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1198 1.1 matt for (i = 0; i < 16; i++) 1199 1.14 chs pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY | 1200 1.14 chs SR_NOEXEC; 1201 1.18 matt #endif 1202 1.50 ad PMAP_UNLOCK(); 1203 1.1 matt return; 1204 1.1 matt } 1205 1.50 ad PMAP_UNLOCK(); 1206 1.1 matt panic("pmap_pinit: out of segments"); 1207 1.1 matt } 1208 1.1 matt 1209 1.1 matt /* 1210 1.1 matt * Add a reference to the given pmap. 
1211 1.1 matt */ 1212 1.1 matt void 1213 1.1 matt pmap_reference(pmap_t pm) 1214 1.1 matt { 1215 1.50 ad atomic_inc_uint(&pm->pm_refs); 1216 1.1 matt } 1217 1.1 matt 1218 1.1 matt /* 1219 1.1 matt * Retire the given pmap from service. 1220 1.1 matt * Should only be called if the map contains no valid mappings. 1221 1.1 matt */ 1222 1.1 matt void 1223 1.1 matt pmap_destroy(pmap_t pm) 1224 1.1 matt { 1225 1.113 riastrad membar_release(); 1226 1.50 ad if (atomic_dec_uint_nv(&pm->pm_refs) == 0) { 1227 1.113 riastrad membar_acquire(); 1228 1.1 matt pmap_release(pm); 1229 1.1 matt pool_put(&pmap_pool, pm); 1230 1.1 matt } 1231 1.1 matt } 1232 1.1 matt 1233 1.1 matt /* 1234 1.1 matt * Release any resources held by the given physical map. 1235 1.1 matt * Called when a pmap initialized by pmap_pinit is being released. 1236 1.1 matt */ 1237 1.1 matt void 1238 1.1 matt pmap_release(pmap_t pm) 1239 1.1 matt { 1240 1.1 matt int idx, mask; 1241 1.39 matt 1242 1.39 matt KASSERT(pm->pm_stats.resident_count == 0); 1243 1.39 matt KASSERT(pm->pm_stats.wired_count == 0); 1244 1.1 matt 1245 1.50 ad PMAP_LOCK(); 1246 1.1 matt if (pm->pm_sr[0] == 0) 1247 1.1 matt panic("pmap_release"); 1248 1.22 aymeric idx = pm->pm_vsid & (NPMAPS-1); 1249 1.1 matt mask = 1 << (idx % VSID_NBPW); 1250 1.1 matt idx /= VSID_NBPW; 1251 1.22 aymeric 1252 1.22 aymeric KASSERT(pmap_vsid_bitmap[idx] & mask); 1253 1.1 matt pmap_vsid_bitmap[idx] &= ~mask; 1254 1.50 ad PMAP_UNLOCK(); 1255 1.1 matt } 1256 1.1 matt 1257 1.1 matt /* 1258 1.1 matt * Copy the range specified by src_addr/len 1259 1.1 matt * from the source map to the range dst_addr/len 1260 1.1 matt * in the destination map. 1261 1.1 matt * 1262 1.1 matt * This routine is only advisory and need not do anything. 1263 1.1 matt */ 1264 1.1 matt void 1265 1.1 matt pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, 1266 1.1 matt vsize_t len, vaddr_t src_addr) 1267 1.1 matt { 1268 1.1 matt PMAPCOUNT(copies); 1269 1.1 matt } 1270 1.1 matt 1271 1.1 matt /* 1272 1.1 matt * Require that all active physical maps contain no 1273 1.1 matt * incorrect entries NOW. 1274 1.1 matt */ 1275 1.1 matt void 1276 1.1 matt pmap_update(struct pmap *pmap) 1277 1.1 matt { 1278 1.1 matt PMAPCOUNT(updates); 1279 1.1 matt TLBSYNC(); 1280 1.1 matt } 1281 1.1 matt 1282 1.35 perry static inline int 1283 1.1 matt pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1284 1.1 matt { 1285 1.1 matt int pteidx; 1286 1.1 matt /* 1287 1.1 matt * We can find the actual pte entry without searching by 1288 1.1 matt * grabbing the PTEG index from 3 unused bits in pte_lo[11:9] 1289 1.1 matt * and by noticing the HID bit. 1290 1.1 matt */ 1291 1.1 matt pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1292 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_HID) 1293 1.1 matt pteidx ^= pmap_pteg_mask * 8; 1294 1.1 matt return pteidx; 1295 1.1 matt } 1296 1.1 matt 1297 1.2 matt volatile struct pte * 1298 1.1 matt pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1299 1.1 matt { 1300 1.2 matt volatile struct pte *pt; 1301 1.1 matt 1302 1.1 matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1303 1.1 matt if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) 1304 1.1 matt return NULL; 1305 1.1 matt #endif 1306 1.1 matt 1307 1.1 matt /* 1308 1.1 matt * If we haven't been supplied the ptegidx, calculate it. 
1309 1.1 matt */ 1310 1.1 matt if (pteidx == -1) { 1311 1.1 matt int ptegidx; 1312 1.2 matt ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1313 1.1 matt pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1314 1.1 matt } 1315 1.1 matt 1316 1.1 matt pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1317 1.1 matt 1318 1.1 matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1319 1.1 matt return pt; 1320 1.1 matt #else 1321 1.1 matt if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1322 1.1 matt panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1323 1.1 matt "pvo but no valid pte index", pvo); 1324 1.1 matt } 1325 1.1 matt if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1326 1.1 matt panic("pmap_pvo_to_pte: pvo %p: has valid pte index in " 1327 1.1 matt "pvo but no valid pte", pvo); 1328 1.1 matt } 1329 1.1 matt 1330 1.1 matt if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1331 1.1 matt if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1332 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) 1333 1.1 matt pmap_pte_print(pt); 1334 1.1 matt #endif 1335 1.1 matt panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1336 1.1 matt "pmap_pteg_table %p but invalid in pvo", 1337 1.1 matt pvo, pt); 1338 1.1 matt } 1339 1.1 matt if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { 1340 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) 1341 1.1 matt pmap_pte_print(pt); 1342 1.1 matt #endif 1343 1.1 matt panic("pmap_pvo_to_pte: pvo %p: pvo pte does " 1344 1.1 matt "not match pte %p in pmap_pteg_table", 1345 1.1 matt pvo, pt); 1346 1.1 matt } 1347 1.1 matt return pt; 1348 1.1 matt } 1349 1.1 matt 1350 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1351 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) 1352 1.1 matt pmap_pte_print(pt); 1353 1.1 matt #endif 1354 1.12 matt panic("pmap_pvo_to_pte: pvo %p: has nomatching pte %p in " 1355 1.1 matt "pmap_pteg_table but valid in pvo", pvo, pt); 1356 1.1 matt } 1357 1.1 matt return NULL; 1358 1.1 matt #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */ 1359 1.1 matt } 1360 1.1 matt 1361 1.1 matt struct pvo_entry * 1362 1.1 matt pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p) 1363 1.1 matt { 1364 1.1 matt struct pvo_entry *pvo; 1365 1.1 matt int ptegidx; 1366 1.1 matt 1367 1.1 matt va &= ~ADDR_POFF; 1368 1.2 matt ptegidx = va_to_pteg(pm, va); 1369 1.1 matt 1370 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1371 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1372 1.121 rin if ((uintptr_t) pvo >= PMAP_DIRECT_MAPPED_LEN) 1373 1.1 matt panic("pmap_pvo_find_va: invalid pvo %p on " 1374 1.1 matt "list %#x (%p)", pvo, ptegidx, 1375 1.1 matt &pmap_pvo_table[ptegidx]); 1376 1.1 matt #endif 1377 1.1 matt if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1378 1.1 matt if (pteidx_p) 1379 1.1 matt *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1380 1.1 matt return pvo; 1381 1.1 matt } 1382 1.1 matt } 1383 1.121 rin if ((pm == pmap_kernel()) && (va < PMAP_DIRECT_MAPPED_LEN)) 1384 1.54 mlelstv panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n", 1385 1.53 garbled __func__, (pm == pmap_kernel() ? 
"kernel" : "user"), va); 1386 1.1 matt return NULL; 1387 1.1 matt } 1388 1.1 matt 1389 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) 1390 1.1 matt void 1391 1.1 matt pmap_pvo_check(const struct pvo_entry *pvo) 1392 1.1 matt { 1393 1.1 matt struct pvo_head *pvo_head; 1394 1.1 matt struct pvo_entry *pvo0; 1395 1.2 matt volatile struct pte *pt; 1396 1.1 matt int failed = 0; 1397 1.1 matt 1398 1.50 ad PMAP_LOCK(); 1399 1.50 ad 1400 1.121 rin if ((uintptr_t)(pvo+1) >= PMAP_DIRECT_MAPPED_LEN) 1401 1.1 matt panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1402 1.1 matt 1403 1.121 rin if ((uintptr_t)(pvo->pvo_pmap+1) >= PMAP_DIRECT_MAPPED_LEN) { 1404 1.1 matt printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1405 1.1 matt pvo, pvo->pvo_pmap); 1406 1.1 matt failed = 1; 1407 1.1 matt } 1408 1.1 matt 1409 1.121 rin if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= PMAP_DIRECT_MAPPED_LEN || 1410 1.1 matt (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1411 1.1 matt printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1412 1.1 matt pvo, TAILQ_NEXT(pvo, pvo_olink)); 1413 1.1 matt failed = 1; 1414 1.1 matt } 1415 1.1 matt 1416 1.121 rin if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= PMAP_DIRECT_MAPPED_LEN || 1417 1.1 matt (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1418 1.1 matt printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1419 1.1 matt pvo, LIST_NEXT(pvo, pvo_vlink)); 1420 1.1 matt failed = 1; 1421 1.1 matt } 1422 1.1 matt 1423 1.39 matt if (PVO_MANAGED_P(pvo)) { 1424 1.1 matt pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1425 1.107 chs LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1426 1.107 chs if (pvo0 == pvo) 1427 1.107 chs break; 1428 1.107 chs } 1429 1.107 chs if (pvo0 == NULL) { 1430 1.107 chs printf("pmap_pvo_check: pvo %p: not present " 1431 1.107 chs "on its vlist head %p\n", pvo, pvo_head); 1432 1.107 chs failed = 1; 1433 1.107 chs } 1434 1.1 matt } else { 1435 1.107 chs KASSERT(pvo->pvo_vaddr >= VM_MIN_KERNEL_ADDRESS); 1436 1.107 chs if (__predict_false(pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS)) 1437 1.1 matt failed = 1; 1438 1.1 matt } 1439 1.1 matt if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1440 1.1 matt printf("pmap_pvo_check: pvo %p: not present " 1441 1.1 matt "on its olist head\n", pvo); 1442 1.1 matt failed = 1; 1443 1.1 matt } 1444 1.1 matt pt = pmap_pvo_to_pte(pvo, -1); 1445 1.1 matt if (pt == NULL) { 1446 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1447 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1448 1.1 matt "no PTE\n", pvo); 1449 1.1 matt failed = 1; 1450 1.1 matt } 1451 1.1 matt } else { 1452 1.1 matt if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1453 1.1 matt (uintptr_t) pt >= 1454 1.1 matt (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1455 1.1 matt printf("pmap_pvo_check: pvo %p: pte %p not in " 1456 1.1 matt "pteg table\n", pvo, pt); 1457 1.1 matt failed = 1; 1458 1.1 matt } 1459 1.1 matt if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1460 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1461 1.1 matt "no PTE\n", pvo); 1462 1.1 matt failed = 1; 1463 1.1 matt } 1464 1.1 matt if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1465 1.1 matt printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1466 1.54 mlelstv "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1467 1.54 mlelstv pvo->pvo_pte.pte_hi, 1468 1.54 mlelstv pt->pte_hi); 1469 1.1 matt failed = 1; 1470 1.1 matt } 1471 1.1 matt if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1472 1.1 matt 
(PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1473 1.1 matt printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1474 1.54 mlelstv "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1475 1.54 mlelstv (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1476 1.54 mlelstv (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1477 1.1 matt failed = 1; 1478 1.1 matt } 1479 1.1 matt if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1480 1.53 garbled printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1481 1.53 garbled " doesn't not match PVO's VA %#" _PRIxva "\n", 1482 1.1 matt pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1483 1.1 matt failed = 1; 1484 1.1 matt } 1485 1.1 matt if (failed) 1486 1.1 matt pmap_pte_print(pt); 1487 1.1 matt } 1488 1.1 matt if (failed) 1489 1.1 matt panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1490 1.1 matt pvo->pvo_pmap); 1491 1.50 ad 1492 1.50 ad PMAP_UNLOCK(); 1493 1.1 matt } 1494 1.1 matt #endif /* DEBUG || PMAPCHECK */ 1495 1.1 matt 1496 1.1 matt /* 1497 1.25 chs * Search the PVO table looking for a non-wired entry. 1498 1.25 chs * If we find one, remove it and return it. 1499 1.25 chs */ 1500 1.25 chs 1501 1.25 chs struct pvo_entry * 1502 1.117 rin pmap_pvo_reclaim(void) 1503 1.25 chs { 1504 1.25 chs struct pvo_tqhead *pvoh; 1505 1.25 chs struct pvo_entry *pvo; 1506 1.25 chs uint32_t idx, endidx; 1507 1.25 chs 1508 1.25 chs endidx = pmap_pvo_reclaim_nextidx; 1509 1.25 chs for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx; 1510 1.25 chs idx = (idx + 1) & pmap_pteg_mask) { 1511 1.25 chs pvoh = &pmap_pvo_table[idx]; 1512 1.25 chs TAILQ_FOREACH(pvo, pvoh, pvo_olink) { 1513 1.39 matt if (!PVO_WIRED_P(pvo)) { 1514 1.33 chs pmap_pvo_remove(pvo, -1, NULL); 1515 1.25 chs pmap_pvo_reclaim_nextidx = idx; 1516 1.26 matt PMAPCOUNT(pvos_reclaimed); 1517 1.25 chs return pvo; 1518 1.25 chs } 1519 1.25 chs } 1520 1.25 chs } 1521 1.25 chs return NULL; 1522 1.25 chs } 1523 1.25 chs 1524 1.25 chs /* 1525 1.1 matt * This returns whether this is the first mapping of a page. 1526 1.1 matt */ 1527 1.1 matt int 1528 1.1 matt pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head, 1529 1.2 matt vaddr_t va, paddr_t pa, register_t pte_lo, int flags) 1530 1.1 matt { 1531 1.1 matt struct pvo_entry *pvo; 1532 1.1 matt struct pvo_tqhead *pvoh; 1533 1.2 matt register_t msr; 1534 1.1 matt int ptegidx; 1535 1.1 matt int i; 1536 1.1 matt int poolflags = PR_NOWAIT; 1537 1.1 matt 1538 1.28 chs /* 1539 1.28 chs * Compute the PTE Group index. 1540 1.28 chs */ 1541 1.28 chs va &= ~ADDR_POFF; 1542 1.28 chs ptegidx = va_to_pteg(pm, va); 1543 1.28 chs 1544 1.28 chs msr = pmap_interrupts_off(); 1545 1.28 chs 1546 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1547 1.1 matt if (pmap_pvo_remove_depth > 0) 1548 1.1 matt panic("pmap_pvo_enter: called while pmap_pvo_remove active!"); 1549 1.1 matt if (++pmap_pvo_enter_depth > 1) 1550 1.1 matt panic("pmap_pvo_enter: called recursively!"); 1551 1.1 matt #endif 1552 1.1 matt 1553 1.1 matt /* 1554 1.1 matt * Remove any existing mapping for this page. Reuse the 1555 1.1 matt * pvo entry if there a mapping. 
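 * An existing entry is torn down with pmap_pvo_remove(), but its
 * pvo_entry structure is kept and reused below instead of
 * allocating a fresh one from the pool.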
1556 1.1 matt */ 1557 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1558 1.1 matt if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1559 1.1 matt #ifdef DEBUG 1560 1.1 matt if ((pmapdebug & PMAPDEBUG_PVOENTER) && 1561 1.1 matt ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) & 1562 1.1 matt ~(PTE_REF|PTE_CHG)) == 0 && 1563 1.1 matt va < VM_MIN_KERNEL_ADDRESS) { 1564 1.56 phx printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n", 1565 1.54 mlelstv pvo, pvo->pvo_pte.pte_lo, pte_lo|pa); 1566 1.56 phx printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n", 1567 1.54 mlelstv pvo->pvo_pte.pte_hi, 1568 1.54 mlelstv pm->pm_sr[va >> ADDR_SR_SHFT]); 1569 1.1 matt pmap_pte_print(pmap_pvo_to_pte(pvo, -1)); 1570 1.1 matt #ifdef DDBX 1571 1.1 matt Debugger(); 1572 1.1 matt #endif 1573 1.1 matt } 1574 1.1 matt #endif 1575 1.1 matt PMAPCOUNT(mappings_replaced); 1576 1.33 chs pmap_pvo_remove(pvo, -1, NULL); 1577 1.1 matt break; 1578 1.1 matt } 1579 1.1 matt } 1580 1.1 matt 1581 1.1 matt /* 1582 1.1 matt * If we aren't overwriting an mapping, try to allocate 1583 1.1 matt */ 1584 1.26 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1585 1.26 matt --pmap_pvo_enter_depth; 1586 1.26 matt #endif 1587 1.1 matt pmap_interrupts_restore(msr); 1588 1.106 martin if (pvo == NULL) { 1589 1.95 chs pvo = pool_get(pl, poolflags); 1590 1.33 chs } 1591 1.84 matt KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS); 1592 1.25 chs 1593 1.25 chs #ifdef DEBUG 1594 1.25 chs /* 1595 1.25 chs * Exercise pmap_pvo_reclaim() a little. 1596 1.25 chs */ 1597 1.25 chs if (pvo && (flags & PMAP_CANFAIL) != 0 && 1598 1.25 chs pmap_pvo_reclaim_debugctr++ > 0x1000 && 1599 1.25 chs (pmap_pvo_reclaim_debugctr & 0xff) == 0) { 1600 1.25 chs pool_put(pl, pvo); 1601 1.25 chs pvo = NULL; 1602 1.25 chs } 1603 1.25 chs #endif 1604 1.25 chs 1605 1.1 matt msr = pmap_interrupts_off(); 1606 1.26 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1607 1.26 matt ++pmap_pvo_enter_depth; 1608 1.26 matt #endif 1609 1.1 matt if (pvo == NULL) { 1610 1.117 rin pvo = pmap_pvo_reclaim(); 1611 1.1 matt if (pvo == NULL) { 1612 1.1 matt if ((flags & PMAP_CANFAIL) == 0) 1613 1.1 matt panic("pmap_pvo_enter: failed"); 1614 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1615 1.1 matt pmap_pvo_enter_depth--; 1616 1.1 matt #endif 1617 1.26 matt PMAPCOUNT(pvos_failed); 1618 1.1 matt pmap_interrupts_restore(msr); 1619 1.1 matt return ENOMEM; 1620 1.1 matt } 1621 1.1 matt } 1622 1.25 chs 1623 1.1 matt pvo->pvo_vaddr = va; 1624 1.1 matt pvo->pvo_pmap = pm; 1625 1.1 matt pvo->pvo_vaddr &= ~ADDR_POFF; 1626 1.1 matt if (flags & VM_PROT_EXECUTE) { 1627 1.1 matt PMAPCOUNT(exec_mappings); 1628 1.14 chs pvo_set_exec(pvo); 1629 1.1 matt } 1630 1.1 matt if (flags & PMAP_WIRED) 1631 1.1 matt pvo->pvo_vaddr |= PVO_WIRED; 1632 1.107 chs if (pvo_head != NULL) { 1633 1.1 matt pvo->pvo_vaddr |= PVO_MANAGED; 1634 1.1 matt PMAPCOUNT(mappings); 1635 1.1 matt } else { 1636 1.1 matt PMAPCOUNT(kernel_mappings); 1637 1.1 matt } 1638 1.2 matt pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo); 1639 1.1 matt 1640 1.107 chs if (pvo_head != NULL) 1641 1.107 chs LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1642 1.39 matt if (PVO_WIRED_P(pvo)) 1643 1.1 matt pvo->pvo_pmap->pm_stats.wired_count++; 1644 1.1 matt pvo->pvo_pmap->pm_stats.resident_count++; 1645 1.1 matt #if defined(DEBUG) 1646 1.38 sanjayl /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */ 1647 1.1 matt DPRINTFN(PVOENTER, 1648 1.85 matt 
"pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n", 1649 1.85 matt pvo, pm, va, pa); 1650 1.1 matt #endif 1651 1.1 matt 1652 1.1 matt /* 1653 1.1 matt * We hope this succeeds but it isn't required. 1654 1.1 matt */ 1655 1.1 matt pvoh = &pmap_pvo_table[ptegidx]; 1656 1.1 matt i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1657 1.1 matt if (i >= 0) { 1658 1.1 matt PVO_PTEGIDX_SET(pvo, i); 1659 1.12 matt PVO_WHERE(pvo, ENTER_INSERT); 1660 1.1 matt PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 1661 1.1 matt ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]); 1662 1.1 matt TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 1663 1.38 sanjayl 1664 1.1 matt } else { 1665 1.1 matt /* 1666 1.1 matt * Since we didn't have room for this entry (which makes it 1667 1.1 matt * and evicted entry), place it at the head of the list. 1668 1.1 matt */ 1669 1.1 matt TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink); 1670 1.1 matt PMAPCOUNT(ptes_evicted); 1671 1.1 matt pm->pm_evictions++; 1672 1.12 matt /* 1673 1.12 matt * If this is a kernel page, make sure it's active. 1674 1.12 matt */ 1675 1.12 matt if (pm == pmap_kernel()) { 1676 1.45 thorpej i = pmap_pte_spill(pm, va, false); 1677 1.12 matt KASSERT(i); 1678 1.12 matt } 1679 1.1 matt } 1680 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 1681 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1682 1.1 matt pmap_pvo_enter_depth--; 1683 1.1 matt #endif 1684 1.1 matt pmap_interrupts_restore(msr); 1685 1.1 matt return 0; 1686 1.1 matt } 1687 1.1 matt 1688 1.53 garbled static void 1689 1.33 chs pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol) 1690 1.1 matt { 1691 1.2 matt volatile struct pte *pt; 1692 1.1 matt int ptegidx; 1693 1.1 matt 1694 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1695 1.1 matt if (++pmap_pvo_remove_depth > 1) 1696 1.1 matt panic("pmap_pvo_remove: called recursively!"); 1697 1.1 matt #endif 1698 1.1 matt 1699 1.1 matt /* 1700 1.1 matt * If we haven't been supplied the ptegidx, calculate it. 1701 1.1 matt */ 1702 1.1 matt if (pteidx == -1) { 1703 1.2 matt ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1704 1.1 matt pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1705 1.1 matt } else { 1706 1.1 matt ptegidx = pteidx >> 3; 1707 1.1 matt if (pvo->pvo_pte.pte_hi & PTE_HID) 1708 1.1 matt ptegidx ^= pmap_pteg_mask; 1709 1.1 matt } 1710 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 1711 1.1 matt 1712 1.1 matt /* 1713 1.1 matt * If there is an active pte entry, we need to deactivate it 1714 1.1 matt * (and save the ref & chg bits). 1715 1.1 matt */ 1716 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx); 1717 1.1 matt if (pt != NULL) { 1718 1.1 matt pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1719 1.12 matt PVO_WHERE(pvo, REMOVE); 1720 1.1 matt PVO_PTEGIDX_CLR(pvo); 1721 1.1 matt PMAPCOUNT(ptes_removed); 1722 1.1 matt } else { 1723 1.1 matt KASSERT(pvo->pvo_pmap->pm_evictions > 0); 1724 1.1 matt pvo->pvo_pmap->pm_evictions--; 1725 1.1 matt } 1726 1.1 matt 1727 1.1 matt /* 1728 1.14 chs * Account for executable mappings. 1729 1.14 chs */ 1730 1.39 matt if (PVO_EXECUTABLE_P(pvo)) 1731 1.14 chs pvo_clear_exec(pvo); 1732 1.14 chs 1733 1.14 chs /* 1734 1.14 chs * Update our statistics. 1735 1.1 matt */ 1736 1.1 matt pvo->pvo_pmap->pm_stats.resident_count--; 1737 1.39 matt if (PVO_WIRED_P(pvo)) 1738 1.1 matt pvo->pvo_pmap->pm_stats.wired_count--; 1739 1.1 matt 1740 1.1 matt /* 1741 1.107 chs * If the page is managed: 1742 1.107 chs * Save the REF/CHG bits into their cache. 
1743 1.107 chs * Remove the PVO from the P/V list. 1744 1.1 matt */ 1745 1.39 matt if (PVO_MANAGED_P(pvo)) { 1746 1.2 matt register_t ptelo = pvo->pvo_pte.pte_lo; 1747 1.1 matt struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN); 1748 1.1 matt 1749 1.1 matt if (pg != NULL) { 1750 1.37 matt /* 1751 1.37 matt * If this page was changed and it is mapped exec, 1752 1.37 matt * invalidate it. 1753 1.37 matt */ 1754 1.37 matt if ((ptelo & PTE_CHG) && 1755 1.37 matt (pmap_attr_fetch(pg) & PTE_EXEC)) { 1756 1.37 matt struct pvo_head *pvoh = vm_page_to_pvoh(pg); 1757 1.37 matt if (LIST_EMPTY(pvoh)) { 1758 1.85 matt DPRINTFN(EXEC, "[pmap_pvo_remove: " 1759 1.53 garbled "%#" _PRIxpa ": clear-exec]\n", 1760 1.85 matt VM_PAGE_TO_PHYS(pg)); 1761 1.37 matt pmap_attr_clear(pg, PTE_EXEC); 1762 1.37 matt PMAPCOUNT(exec_uncached_pvo_remove); 1763 1.37 matt } else { 1764 1.85 matt DPRINTFN(EXEC, "[pmap_pvo_remove: " 1765 1.53 garbled "%#" _PRIxpa ": syncicache]\n", 1766 1.85 matt VM_PAGE_TO_PHYS(pg)); 1767 1.37 matt pmap_syncicache(VM_PAGE_TO_PHYS(pg), 1768 1.37 matt PAGE_SIZE); 1769 1.37 matt PMAPCOUNT(exec_synced_pvo_remove); 1770 1.37 matt } 1771 1.37 matt } 1772 1.37 matt 1773 1.1 matt pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG)); 1774 1.1 matt } 1775 1.107 chs LIST_REMOVE(pvo, pvo_vlink); 1776 1.1 matt PMAPCOUNT(unmappings); 1777 1.1 matt } else { 1778 1.1 matt PMAPCOUNT(kernel_unmappings); 1779 1.1 matt } 1780 1.1 matt 1781 1.1 matt /* 1782 1.107 chs * Remove the PVO from its list and return it to the pool. 1783 1.1 matt */ 1784 1.1 matt TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1785 1.33 chs if (pvol) { 1786 1.33 chs LIST_INSERT_HEAD(pvol, pvo, pvo_vlink); 1787 1.25 chs } 1788 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1789 1.1 matt pmap_pvo_remove_depth--; 1790 1.1 matt #endif 1791 1.1 matt } 1792 1.1 matt 1793 1.33 chs void 1794 1.33 chs pmap_pvo_free(struct pvo_entry *pvo) 1795 1.33 chs { 1796 1.33 chs 1797 1.106 martin pool_put(&pmap_pvo_pool, pvo); 1798 1.33 chs } 1799 1.33 chs 1800 1.33 chs void 1801 1.33 chs pmap_pvo_free_list(struct pvo_head *pvol) 1802 1.33 chs { 1803 1.33 chs struct pvo_entry *pvo, *npvo; 1804 1.33 chs 1805 1.33 chs for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) { 1806 1.33 chs npvo = LIST_NEXT(pvo, pvo_vlink); 1807 1.33 chs LIST_REMOVE(pvo, pvo_vlink); 1808 1.33 chs pmap_pvo_free(pvo); 1809 1.33 chs } 1810 1.33 chs } 1811 1.33 chs 1812 1.1 matt /* 1813 1.14 chs * Mark a mapping as executable. 1814 1.14 chs * If this is the first executable mapping in the segment, 1815 1.14 chs * clear the noexec flag. 1816 1.14 chs */ 1817 1.53 garbled static void 1818 1.14 chs pvo_set_exec(struct pvo_entry *pvo) 1819 1.14 chs { 1820 1.14 chs struct pmap *pm = pvo->pvo_pmap; 1821 1.14 chs 1822 1.39 matt if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) { 1823 1.14 chs return; 1824 1.14 chs } 1825 1.14 chs pvo->pvo_vaddr |= PVO_EXECUTABLE; 1826 1.53 garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1827 1.18 matt { 1828 1.18 matt int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1829 1.18 matt if (pm->pm_exec[sr]++ == 0) { 1830 1.18 matt pm->pm_sr[sr] &= ~SR_NOEXEC; 1831 1.18 matt } 1832 1.14 chs } 1833 1.18 matt #endif 1834 1.14 chs } 1835 1.14 chs 1836 1.14 chs /* 1837 1.14 chs * Mark a mapping as non-executable. 1838 1.14 chs * If this was the last executable mapping in the segment, 1839 1.14 chs * set the noexec flag. 
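 * The pm_exec[] array counts executable mappings per segment, so
 * SR_NOEXEC is only toggled when that count drops to zero here or
 * rises from zero in pvo_set_exec().  The kernel pmap is never
 * touched by either routine.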
1840 1.14 chs */ 1841 1.53 garbled static void 1842 1.14 chs pvo_clear_exec(struct pvo_entry *pvo) 1843 1.14 chs { 1844 1.14 chs struct pmap *pm = pvo->pvo_pmap; 1845 1.14 chs 1846 1.39 matt if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) { 1847 1.14 chs return; 1848 1.14 chs } 1849 1.14 chs pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1850 1.53 garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1851 1.18 matt { 1852 1.18 matt int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1853 1.18 matt if (--pm->pm_exec[sr] == 0) { 1854 1.18 matt pm->pm_sr[sr] |= SR_NOEXEC; 1855 1.18 matt } 1856 1.14 chs } 1857 1.18 matt #endif 1858 1.14 chs } 1859 1.14 chs 1860 1.14 chs /* 1861 1.1 matt * Insert physical page at pa into the given pmap at virtual address va. 1862 1.1 matt */ 1863 1.1 matt int 1864 1.65 cegger pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1865 1.1 matt { 1866 1.1 matt struct mem_region *mp; 1867 1.1 matt struct pvo_head *pvo_head; 1868 1.1 matt struct vm_page *pg; 1869 1.2 matt register_t pte_lo; 1870 1.1 matt int error; 1871 1.1 matt u_int was_exec = 0; 1872 1.1 matt 1873 1.50 ad PMAP_LOCK(); 1874 1.50 ad 1875 1.1 matt if (__predict_false(!pmap_initialized)) { 1876 1.107 chs pvo_head = NULL; 1877 1.1 matt pg = NULL; 1878 1.1 matt was_exec = PTE_EXEC; 1879 1.107 chs 1880 1.1 matt } else { 1881 1.1 matt pvo_head = pa_to_pvoh(pa, &pg); 1882 1.1 matt } 1883 1.1 matt 1884 1.1 matt DPRINTFN(ENTER, 1885 1.85 matt "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):", 1886 1.85 matt pm, va, pa, prot, flags); 1887 1.1 matt 1888 1.1 matt /* 1889 1.1 matt * If this is a managed page, and it's the first reference to the 1890 1.1 matt * page clear the execness of the page. Otherwise fetch the execness. 1891 1.1 matt */ 1892 1.1 matt if (pg != NULL) 1893 1.1 matt was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1894 1.1 matt 1895 1.85 matt DPRINTFN(ENTER, " was_exec=%d", was_exec); 1896 1.1 matt 1897 1.1 matt /* 1898 1.1 matt * Assume the page is cache inhibited and access is guarded unless 1899 1.1 matt * it's in our available memory array. If it is in the memory array, 1900 1.116 andvar * assume it's in memory coherent memory. 1901 1.1 matt */ 1902 1.77 macallan if (flags & PMAP_MD_PREFETCHABLE) { 1903 1.77 macallan pte_lo = 0; 1904 1.77 macallan } else 1905 1.77 macallan pte_lo = PTE_G; 1906 1.77 macallan 1907 1.81 matt if ((flags & PMAP_NOCACHE) == 0) { 1908 1.1 matt for (mp = mem; mp->size; mp++) { 1909 1.1 matt if (pa >= mp->start && pa < mp->start + mp->size) { 1910 1.1 matt pte_lo = PTE_M; 1911 1.1 matt break; 1912 1.1 matt } 1913 1.1 matt } 1914 1.87 kiyohara #ifdef MULTIPROCESSOR 1915 1.87 kiyohara if (((mfpvr() >> 16) & 0xffff) == MPC603e) 1916 1.87 kiyohara pte_lo = PTE_M; 1917 1.87 kiyohara #endif 1918 1.77 macallan } else { 1919 1.77 macallan pte_lo |= PTE_I; 1920 1.1 matt } 1921 1.1 matt 1922 1.1 matt if (prot & VM_PROT_WRITE) 1923 1.1 matt pte_lo |= PTE_BW; 1924 1.1 matt else 1925 1.1 matt pte_lo |= PTE_BR; 1926 1.1 matt 1927 1.1 matt /* 1928 1.1 matt * If this was in response to a fault, "pre-fault" the PTE's 1929 1.1 matt * changed/referenced bit appropriately. 
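 * For example, a write access enters the page with PTE_CHG already
 * set, and any access type sets PTE_REF, so the bits already
 * reflect the access that triggered this call.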
1930 1.1 matt */ 1931 1.1 matt if (flags & VM_PROT_WRITE) 1932 1.1 matt pte_lo |= PTE_CHG; 1933 1.30 chs if (flags & VM_PROT_ALL) 1934 1.1 matt pte_lo |= PTE_REF; 1935 1.1 matt 1936 1.1 matt /* 1937 1.1 matt * We need to know if this page can be executable 1938 1.1 matt */ 1939 1.1 matt flags |= (prot & VM_PROT_EXECUTE); 1940 1.1 matt 1941 1.1 matt /* 1942 1.1 matt * Record mapping for later back-translation and pte spilling. 1943 1.1 matt * This will overwrite any existing mapping. 1944 1.1 matt */ 1945 1.106 martin error = pmap_pvo_enter(pm, &pmap_pvo_pool, pvo_head, va, pa, pte_lo, flags); 1946 1.1 matt 1947 1.1 matt /* 1948 1.1 matt * Flush the real page from the instruction cache if this page is 1949 1.1 matt * mapped executable and cacheable and has not been flushed since 1950 1.1 matt * the last time it was modified. 1951 1.1 matt */ 1952 1.1 matt if (error == 0 && 1953 1.1 matt (flags & VM_PROT_EXECUTE) && 1954 1.1 matt (pte_lo & PTE_I) == 0 && 1955 1.1 matt was_exec == 0) { 1956 1.85 matt DPRINTFN(ENTER, " %s", "syncicache"); 1957 1.1 matt PMAPCOUNT(exec_synced); 1958 1.6 thorpej pmap_syncicache(pa, PAGE_SIZE); 1959 1.1 matt if (pg != NULL) { 1960 1.1 matt pmap_attr_save(pg, PTE_EXEC); 1961 1.1 matt PMAPCOUNT(exec_cached); 1962 1.1 matt #if defined(DEBUG) || defined(PMAPDEBUG) 1963 1.1 matt if (pmapdebug & PMAPDEBUG_ENTER) 1964 1.1 matt printf(" marked-as-exec"); 1965 1.1 matt else if (pmapdebug & PMAPDEBUG_EXEC) 1966 1.53 garbled printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 1967 1.34 yamt VM_PAGE_TO_PHYS(pg)); 1968 1.1 matt #endif 1969 1.1 matt } 1970 1.1 matt } 1971 1.1 matt 1972 1.85 matt DPRINTFN(ENTER, ": error=%d\n", error); 1973 1.1 matt 1974 1.50 ad PMAP_UNLOCK(); 1975 1.50 ad 1976 1.1 matt return error; 1977 1.1 matt } 1978 1.1 matt 1979 1.1 matt void 1980 1.68 cegger pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1981 1.1 matt { 1982 1.1 matt struct mem_region *mp; 1983 1.2 matt register_t pte_lo; 1984 1.1 matt int error; 1985 1.1 matt 1986 1.85 matt #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA) 1987 1.1 matt if (va < VM_MIN_KERNEL_ADDRESS) 1988 1.1 matt panic("pmap_kenter_pa: attempt to enter " 1989 1.53 garbled "non-kernel address %#" _PRIxva "!", va); 1990 1.38 sanjayl #endif 1991 1.1 matt 1992 1.1 matt DPRINTFN(KENTER, 1993 1.85 matt "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot); 1994 1.1 matt 1995 1.50 ad PMAP_LOCK(); 1996 1.50 ad 1997 1.1 matt /* 1998 1.1 matt * Assume the page is cache inhibited and access is guarded unless 1999 1.1 matt * it's in our available memory array. If it is in the memory array, 2000 1.116 andvar * assume it's in memory coherent memory. 2001 1.1 matt */ 2002 1.1 matt pte_lo = PTE_IG; 2003 1.81 matt if ((flags & PMAP_NOCACHE) == 0) { 2004 1.4 matt for (mp = mem; mp->size; mp++) { 2005 1.4 matt if (pa >= mp->start && pa < mp->start + mp->size) { 2006 1.4 matt pte_lo = PTE_M; 2007 1.4 matt break; 2008 1.4 matt } 2009 1.1 matt } 2010 1.87 kiyohara #ifdef MULTIPROCESSOR 2011 1.87 kiyohara if (((mfpvr() >> 16) & 0xffff) == MPC603e) 2012 1.87 kiyohara pte_lo = PTE_M; 2013 1.87 kiyohara #endif 2014 1.1 matt } 2015 1.1 matt 2016 1.1 matt if (prot & VM_PROT_WRITE) 2017 1.1 matt pte_lo |= PTE_BW; 2018 1.1 matt else 2019 1.1 matt pte_lo |= PTE_BR; 2020 1.1 matt 2021 1.1 matt /* 2022 1.1 matt * We don't care about REF/CHG on PVOs on the unmanaged list. 
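 * A NULL pvo_head keeps the new entry off any page's P/V list, so
 * it is never marked PVO_MANAGED and its REF/CHG bits are never
 * copied back into the page attributes on removal.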
2023 1.1 matt */ 2024 1.106 martin error = pmap_pvo_enter(pmap_kernel(), &pmap_pvo_pool, 2025 1.107 chs NULL, va, pa, pte_lo, prot|PMAP_WIRED); 2026 1.1 matt 2027 1.1 matt if (error != 0) 2028 1.53 garbled panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2029 1.1 matt va, pa, error); 2030 1.50 ad 2031 1.50 ad PMAP_UNLOCK(); 2032 1.1 matt } 2033 1.1 matt 2034 1.1 matt void 2035 1.1 matt pmap_kremove(vaddr_t va, vsize_t len) 2036 1.1 matt { 2037 1.1 matt if (va < VM_MIN_KERNEL_ADDRESS) 2038 1.1 matt panic("pmap_kremove: attempt to remove " 2039 1.53 garbled "non-kernel address %#" _PRIxva "!", va); 2040 1.1 matt 2041 1.85 matt DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len); 2042 1.1 matt pmap_remove(pmap_kernel(), va, va + len); 2043 1.1 matt } 2044 1.1 matt 2045 1.1 matt /* 2046 1.1 matt * Remove the given range of mapping entries. 2047 1.1 matt */ 2048 1.1 matt void 2049 1.1 matt pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2050 1.1 matt { 2051 1.33 chs struct pvo_head pvol; 2052 1.1 matt struct pvo_entry *pvo; 2053 1.2 matt register_t msr; 2054 1.1 matt int pteidx; 2055 1.1 matt 2056 1.50 ad PMAP_LOCK(); 2057 1.33 chs LIST_INIT(&pvol); 2058 1.14 chs msr = pmap_interrupts_off(); 2059 1.1 matt for (; va < endva; va += PAGE_SIZE) { 2060 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx); 2061 1.1 matt if (pvo != NULL) { 2062 1.33 chs pmap_pvo_remove(pvo, pteidx, &pvol); 2063 1.1 matt } 2064 1.1 matt } 2065 1.14 chs pmap_interrupts_restore(msr); 2066 1.33 chs pmap_pvo_free_list(&pvol); 2067 1.50 ad PMAP_UNLOCK(); 2068 1.1 matt } 2069 1.1 matt 2070 1.104 thorpej #if defined(PMAP_OEA) 2071 1.104 thorpej #ifdef PPC_OEA601 2072 1.104 thorpej bool 2073 1.104 thorpej pmap_extract_ioseg601(vaddr_t va, paddr_t *pap) 2074 1.104 thorpej { 2075 1.104 thorpej if ((MFPVR() >> 16) != MPC601) 2076 1.104 thorpej return false; 2077 1.104 thorpej 2078 1.104 thorpej const register_t sr = iosrtable[va >> ADDR_SR_SHFT]; 2079 1.104 thorpej 2080 1.104 thorpej if (SR601_VALID_P(sr) && SR601_PA_MATCH_P(sr, va)) { 2081 1.104 thorpej if (pap) 2082 1.104 thorpej *pap = va; 2083 1.104 thorpej return true; 2084 1.104 thorpej } 2085 1.104 thorpej return false; 2086 1.104 thorpej } 2087 1.104 thorpej 2088 1.104 thorpej static bool 2089 1.104 thorpej pmap_extract_battable601(vaddr_t va, paddr_t *pap) 2090 1.104 thorpej { 2091 1.104 thorpej const register_t batu = battable[va >> 23].batu; 2092 1.104 thorpej const register_t batl = battable[va >> 23].batl; 2093 1.104 thorpej 2094 1.104 thorpej if (BAT601_VALID_P(batl) && BAT601_VA_MATCH_P(batu, batl, va)) { 2095 1.104 thorpej const register_t mask = 2096 1.104 thorpej (~(batl & BAT601_BSM) << 17) & ~0x1ffffL; 2097 1.104 thorpej if (pap) 2098 1.104 thorpej *pap = (batl & mask) | (va & ~mask); 2099 1.104 thorpej return true; 2100 1.104 thorpej } 2101 1.104 thorpej return false; 2102 1.104 thorpej } 2103 1.104 thorpej #endif /* PPC_OEA601 */ 2104 1.104 thorpej 2105 1.104 thorpej bool 2106 1.104 thorpej pmap_extract_battable(vaddr_t va, paddr_t *pap) 2107 1.104 thorpej { 2108 1.104 thorpej #ifdef PPC_OEA601 2109 1.104 thorpej if ((MFPVR() >> 16) == MPC601) 2110 1.104 thorpej return pmap_extract_battable601(va, pap); 2111 1.104 thorpej #endif /* PPC_OEA601 */ 2112 1.104 thorpej 2113 1.104 thorpej if (oeacpufeat & OEACPU_NOBAT) 2114 1.104 thorpej return false; 2115 1.104 thorpej 2116 1.104 thorpej const register_t batu = battable[BAT_VA2IDX(va)].batu; 2117 1.104 thorpej 2118 1.104 thorpej if (BAT_VALID_P(batu, 0) && 
BAT_VA_MATCH_P(batu, va)) { 2119 1.104 thorpej const register_t batl = battable[BAT_VA2IDX(va)].batl; 2120 1.104 thorpej const register_t mask = 2121 1.104 thorpej (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL; 2122 1.104 thorpej if (pap) 2123 1.104 thorpej *pap = (batl & mask) | (va & ~mask); 2124 1.104 thorpej return true; 2125 1.104 thorpej } 2126 1.104 thorpej return false; 2127 1.104 thorpej } 2128 1.104 thorpej #endif /* PMAP_OEA */ 2129 1.104 thorpej 2130 1.1 matt /* 2131 1.1 matt * Get the physical page address for the given pmap/virtual address. 2132 1.1 matt */ 2133 1.44 thorpej bool 2134 1.1 matt pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2135 1.1 matt { 2136 1.1 matt struct pvo_entry *pvo; 2137 1.2 matt register_t msr; 2138 1.7 matt 2139 1.50 ad PMAP_LOCK(); 2140 1.38 sanjayl 2141 1.7 matt /* 2142 1.104 thorpej * If this is the kernel pmap, check the battable and I/O 2143 1.104 thorpej * segments for a hit. This is done only for regions outside 2144 1.104 thorpej * VM_MIN_KERNEL_ADDRESS-VM_MAX_KERNEL_ADDRESS. 2145 1.104 thorpej * 2146 1.104 thorpej * Be careful when checking VM_MAX_KERNEL_ADDRESS; you don't 2147 1.104 thorpej * want to wrap around to 0. 2148 1.7 matt */ 2149 1.7 matt if (pm == pmap_kernel() && 2150 1.7 matt (va < VM_MIN_KERNEL_ADDRESS || 2151 1.7 matt (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 2152 1.8 matt KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 2153 1.104 thorpej #if defined(PMAP_OEA) 2154 1.55 garbled #ifdef PPC_OEA601 2155 1.104 thorpej if (pmap_extract_ioseg601(va, pap)) { 2156 1.104 thorpej PMAP_UNLOCK(); 2157 1.104 thorpej return true; 2158 1.104 thorpej } 2159 1.55 garbled #endif /* PPC_OEA601 */ 2160 1.104 thorpej if (pmap_extract_battable(va, pap)) { 2161 1.104 thorpej PMAP_UNLOCK(); 2162 1.104 thorpej return true; 2163 1.7 matt } 2164 1.104 thorpej /* 2165 1.104 thorpej * We still check the HTAB... 2166 1.104 thorpej */ 2167 1.104 thorpej #elif defined(PMAP_OEA64_BRIDGE) 2168 1.121 rin if (va < PMAP_DIRECT_MAPPED_LEN) { 2169 1.104 thorpej if (pap) 2170 1.104 thorpej *pap = va; 2171 1.52 garbled PMAP_UNLOCK(); 2172 1.52 garbled return true; 2173 1.104 thorpej } 2174 1.104 thorpej /* 2175 1.104 thorpej * We still check the HTAB... 2176 1.104 thorpej */ 2177 1.104 thorpej #elif defined(PMAP_OEA64) 2178 1.38 sanjayl #error PPC_OEA64 not supported 2179 1.38 sanjayl #endif /* PPC_OEA */ 2180 1.7 matt } 2181 1.1 matt 2182 1.1 matt msr = pmap_interrupts_off(); 2183 1.1 matt pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2184 1.1 matt if (pvo != NULL) { 2185 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2186 1.29 briggs if (pap) 2187 1.29 briggs *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) 2188 1.29 briggs | (va & ADDR_POFF); 2189 1.1 matt } 2190 1.1 matt pmap_interrupts_restore(msr); 2191 1.50 ad PMAP_UNLOCK(); 2192 1.1 matt return pvo != NULL; 2193 1.1 matt } 2194 1.1 matt 2195 1.1 matt /* 2196 1.1 matt * Lower the protection on the specified range of this pmap. 2197 1.1 matt */ 2198 1.1 matt void 2199 1.1 matt pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot) 2200 1.1 matt { 2201 1.1 matt struct pvo_entry *pvo; 2202 1.2 matt volatile struct pte *pt; 2203 1.2 matt register_t msr; 2204 1.1 matt int pteidx; 2205 1.1 matt 2206 1.1 matt /* 2207 1.1 matt * Since this routine only downgrades protection, we should 2208 1.14 chs * always be called with at least one bit not set. 
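 * For example, pmap_protect(pm, va, va + PAGE_SIZE, VM_PROT_READ)
 * makes the page read-only and revokes execute permission, while a
 * request without VM_PROT_READ is handled as a removal below.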
2209 1.1 matt */ 2210 1.14 chs KASSERT(prot != VM_PROT_ALL); 2211 1.1 matt 2212 1.1 matt /* 2213 1.1 matt * If there is no protection, this is equivalent to 2214 1.1 matt * remove the pmap from the pmap. 2215 1.1 matt */ 2216 1.1 matt if ((prot & VM_PROT_READ) == 0) { 2217 1.1 matt pmap_remove(pm, va, endva); 2218 1.1 matt return; 2219 1.1 matt } 2220 1.1 matt 2221 1.50 ad PMAP_LOCK(); 2222 1.50 ad 2223 1.1 matt msr = pmap_interrupts_off(); 2224 1.6 thorpej for (; va < endva; va += PAGE_SIZE) { 2225 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx); 2226 1.1 matt if (pvo == NULL) 2227 1.1 matt continue; 2228 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2229 1.1 matt 2230 1.1 matt /* 2231 1.1 matt * Revoke executable if asked to do so. 2232 1.1 matt */ 2233 1.1 matt if ((prot & VM_PROT_EXECUTE) == 0) 2234 1.14 chs pvo_clear_exec(pvo); 2235 1.1 matt 2236 1.1 matt #if 0 2237 1.1 matt /* 2238 1.1 matt * If the page is already read-only, no change 2239 1.1 matt * needs to be made. 2240 1.1 matt */ 2241 1.1 matt if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) 2242 1.1 matt continue; 2243 1.1 matt #endif 2244 1.1 matt /* 2245 1.1 matt * Grab the PTE pointer before we diddle with 2246 1.1 matt * the cached PTE copy. 2247 1.1 matt */ 2248 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx); 2249 1.1 matt /* 2250 1.1 matt * Change the protection of the page. 2251 1.1 matt */ 2252 1.1 matt pvo->pvo_pte.pte_lo &= ~PTE_PP; 2253 1.1 matt pvo->pvo_pte.pte_lo |= PTE_BR; 2254 1.1 matt 2255 1.1 matt /* 2256 1.1 matt * If the PVO is in the page table, update 2257 1.1 matt * that pte at well. 2258 1.1 matt */ 2259 1.1 matt if (pt != NULL) { 2260 1.1 matt pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2261 1.12 matt PVO_WHERE(pvo, PMAP_PROTECT); 2262 1.1 matt PMAPCOUNT(ptes_changed); 2263 1.1 matt } 2264 1.1 matt 2265 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2266 1.1 matt } 2267 1.1 matt pmap_interrupts_restore(msr); 2268 1.50 ad PMAP_UNLOCK(); 2269 1.1 matt } 2270 1.1 matt 2271 1.1 matt void 2272 1.1 matt pmap_unwire(pmap_t pm, vaddr_t va) 2273 1.1 matt { 2274 1.1 matt struct pvo_entry *pvo; 2275 1.2 matt register_t msr; 2276 1.1 matt 2277 1.50 ad PMAP_LOCK(); 2278 1.1 matt msr = pmap_interrupts_off(); 2279 1.1 matt pvo = pmap_pvo_find_va(pm, va, NULL); 2280 1.1 matt if (pvo != NULL) { 2281 1.39 matt if (PVO_WIRED_P(pvo)) { 2282 1.1 matt pvo->pvo_vaddr &= ~PVO_WIRED; 2283 1.1 matt pm->pm_stats.wired_count--; 2284 1.1 matt } 2285 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2286 1.1 matt } 2287 1.1 matt pmap_interrupts_restore(msr); 2288 1.50 ad PMAP_UNLOCK(); 2289 1.1 matt } 2290 1.1 matt 2291 1.108 riastrad static void 2292 1.108 riastrad pmap_pp_protect(struct pmap_page *pp, paddr_t pa, vm_prot_t prot) 2293 1.1 matt { 2294 1.33 chs struct pvo_head *pvo_head, pvol; 2295 1.1 matt struct pvo_entry *pvo, *next_pvo; 2296 1.2 matt volatile struct pte *pt; 2297 1.2 matt register_t msr; 2298 1.1 matt 2299 1.50 ad PMAP_LOCK(); 2300 1.50 ad 2301 1.14 chs KASSERT(prot != VM_PROT_ALL); 2302 1.33 chs LIST_INIT(&pvol); 2303 1.1 matt msr = pmap_interrupts_off(); 2304 1.1 matt 2305 1.1 matt /* 2306 1.1 matt * When UVM reuses a page, it does a pmap_page_protect with 2307 1.1 matt * VM_PROT_NONE. At that point, we can clear the exec flag 2308 1.1 matt * since we know the page will have different contents. 
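 * Clearing PTE_EXEC only discards the "icache is in sync" hint, so
 * the next executable mapping of the page will go through
 * pmap_syncicache() again.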
2309 1.1 matt */ 2310 1.1 matt if ((prot & VM_PROT_READ) == 0) { 2311 1.85 matt DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2312 1.108 riastrad pa); 2313 1.108 riastrad if (pmap_pp_attr_fetch(pp) & PTE_EXEC) { 2314 1.1 matt PMAPCOUNT(exec_uncached_page_protect); 2315 1.108 riastrad pmap_pp_attr_clear(pp, PTE_EXEC); 2316 1.1 matt } 2317 1.1 matt } 2318 1.1 matt 2319 1.108 riastrad pvo_head = &pp->pp_pvoh; 2320 1.1 matt for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2321 1.1 matt next_pvo = LIST_NEXT(pvo, pvo_vlink); 2322 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2323 1.1 matt 2324 1.1 matt /* 2325 1.1 matt * Downgrading to no mapping at all, we just remove the entry. 2326 1.1 matt */ 2327 1.1 matt if ((prot & VM_PROT_READ) == 0) { 2328 1.33 chs pmap_pvo_remove(pvo, -1, &pvol); 2329 1.1 matt continue; 2330 1.1 matt } 2331 1.1 matt 2332 1.1 matt /* 2333 1.1 matt * If EXEC permission is being revoked, just clear the 2334 1.1 matt * flag in the PVO. 2335 1.1 matt */ 2336 1.1 matt if ((prot & VM_PROT_EXECUTE) == 0) 2337 1.14 chs pvo_clear_exec(pvo); 2338 1.1 matt 2339 1.1 matt /* 2340 1.1 matt * If this entry is already RO, don't diddle with the 2341 1.1 matt * page table. 2342 1.1 matt */ 2343 1.1 matt if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2344 1.1 matt PMAP_PVO_CHECK(pvo); 2345 1.1 matt continue; 2346 1.1 matt } 2347 1.1 matt 2348 1.1 matt /* 2349 1.1 matt * Grab the PTE before the we diddle the bits so 2350 1.1 matt * pvo_to_pte can verify the pte contents are as 2351 1.1 matt * expected. 2352 1.1 matt */ 2353 1.1 matt pt = pmap_pvo_to_pte(pvo, -1); 2354 1.1 matt pvo->pvo_pte.pte_lo &= ~PTE_PP; 2355 1.1 matt pvo->pvo_pte.pte_lo |= PTE_BR; 2356 1.1 matt if (pt != NULL) { 2357 1.1 matt pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2358 1.12 matt PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2359 1.1 matt PMAPCOUNT(ptes_changed); 2360 1.1 matt } 2361 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2362 1.1 matt } 2363 1.1 matt pmap_interrupts_restore(msr); 2364 1.33 chs pmap_pvo_free_list(&pvol); 2365 1.50 ad 2366 1.50 ad PMAP_UNLOCK(); 2367 1.1 matt } 2368 1.1 matt 2369 1.1 matt /* 2370 1.108 riastrad * Lower the protection on the specified physical page. 2371 1.108 riastrad */ 2372 1.108 riastrad void 2373 1.108 riastrad pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2374 1.108 riastrad { 2375 1.108 riastrad struct vm_page_md *md = VM_PAGE_TO_MD(pg); 2376 1.108 riastrad 2377 1.108 riastrad pmap_pp_protect(&md->mdpg_pp, VM_PAGE_TO_PHYS(pg), prot); 2378 1.108 riastrad } 2379 1.108 riastrad 2380 1.108 riastrad /* 2381 1.108 riastrad * Lower the protection on the physical page at the specified physical 2382 1.108 riastrad * address, which may not be managed and so may not have a struct 2383 1.108 riastrad * vm_page. 2384 1.108 riastrad */ 2385 1.108 riastrad void 2386 1.108 riastrad pmap_pv_protect(paddr_t pa, vm_prot_t prot) 2387 1.108 riastrad { 2388 1.108 riastrad struct pmap_page *pp; 2389 1.108 riastrad 2390 1.108 riastrad if ((pp = pmap_pv_tracked(pa)) == NULL) 2391 1.108 riastrad return; 2392 1.108 riastrad pmap_pp_protect(pp, pa, prot); 2393 1.108 riastrad } 2394 1.108 riastrad 2395 1.108 riastrad /* 2396 1.1 matt * Activate the address space for the specified process. If the process 2397 1.1 matt * is the current process, load the new MMU context. 
2398 1.1 matt */ 2399 1.1 matt void 2400 1.1 matt pmap_activate(struct lwp *l) 2401 1.1 matt { 2402 1.69 rmind struct pcb *pcb = lwp_getpcb(l); 2403 1.1 matt pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2404 1.1 matt 2405 1.1 matt DPRINTFN(ACTIVATE, 2406 1.85 matt "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp); 2407 1.1 matt 2408 1.1 matt /* 2409 1.70 skrll * XXX Normally performed in cpu_lwp_fork(). 2410 1.1 matt */ 2411 1.13 matt pcb->pcb_pm = pmap; 2412 1.17 matt 2413 1.17 matt /* 2414 1.17 matt * In theory, the SR registers need only be valid on return 2415 1.17 matt * to user space, so we could wait and set them there. 2416 1.17 matt */ 2417 1.17 matt if (l == curlwp) { 2418 1.17 matt /* Store pointer to new current pmap. */ 2419 1.17 matt curpm = pmap; 2420 1.17 matt } 2421 1.1 matt } 2422 1.1 matt 2423 1.1 matt /* 2424 1.1 matt * Deactivate the specified process's address space. 2425 1.1 matt */ 2426 1.1 matt void 2427 1.1 matt pmap_deactivate(struct lwp *l) 2428 1.1 matt { 2429 1.1 matt } 2430 1.1 matt 2431 1.44 thorpej bool 2432 1.1 matt pmap_query_bit(struct vm_page *pg, int ptebit) 2433 1.1 matt { 2434 1.1 matt struct pvo_entry *pvo; 2435 1.2 matt volatile struct pte *pt; 2436 1.2 matt register_t msr; 2437 1.1 matt 2438 1.50 ad PMAP_LOCK(); 2439 1.50 ad 2440 1.50 ad if (pmap_attr_fetch(pg) & ptebit) { 2441 1.50 ad PMAP_UNLOCK(); 2442 1.45 thorpej return true; 2443 1.50 ad } 2444 1.14 chs 2445 1.1 matt msr = pmap_interrupts_off(); 2446 1.1 matt LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2447 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2448 1.1 matt /* 2449 1.1 matt * See if we saved the bit off. If so, cache it and return 2450 1.1 matt * success. 2451 1.1 matt */ 2452 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) { 2453 1.1 matt pmap_attr_save(pg, ptebit); 2454 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2455 1.1 matt pmap_interrupts_restore(msr); 2456 1.50 ad PMAP_UNLOCK(); 2457 1.45 thorpej return true; 2458 1.1 matt } 2459 1.1 matt } 2460 1.1 matt /* 2461 1.1 matt * No luck, now go thru the hard part of looking at the ptes 2462 1.1 matt * themselves. Sync so any pending REF/CHG bits are flushed 2463 1.1 matt * to the PTEs. 2464 1.1 matt */ 2465 1.1 matt SYNC(); 2466 1.1 matt LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2467 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2468 1.1 matt /* 2469 1.1 matt * See if this pvo has a valid PTE. If so, fetch the 2470 1.1 matt * REF/CHG bits from the valid PTE. If the appropriate 2471 1.1 matt * ptebit is set, cache it and return success.
2472 1.1 matt */ 2473 1.1 matt pt = pmap_pvo_to_pte(pvo, -1); 2474 1.1 matt if (pt != NULL) { 2475 1.1 matt pmap_pte_synch(pt, &pvo->pvo_pte); 2476 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) { 2477 1.1 matt pmap_attr_save(pg, ptebit); 2478 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2479 1.1 matt pmap_interrupts_restore(msr); 2480 1.50 ad PMAP_UNLOCK(); 2481 1.45 thorpej return true; 2482 1.1 matt } 2483 1.1 matt } 2484 1.1 matt } 2485 1.1 matt pmap_interrupts_restore(msr); 2486 1.50 ad PMAP_UNLOCK(); 2487 1.45 thorpej return false; 2488 1.1 matt } 2489 1.1 matt 2490 1.44 thorpej bool 2491 1.1 matt pmap_clear_bit(struct vm_page *pg, int ptebit) 2492 1.1 matt { 2493 1.1 matt struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2494 1.1 matt struct pvo_entry *pvo; 2495 1.2 matt volatile struct pte *pt; 2496 1.2 matt register_t msr; 2497 1.1 matt int rv = 0; 2498 1.1 matt 2499 1.50 ad PMAP_LOCK(); 2500 1.1 matt msr = pmap_interrupts_off(); 2501 1.1 matt 2502 1.1 matt /* 2503 1.1 matt * Fetch the cache value 2504 1.1 matt */ 2505 1.1 matt rv |= pmap_attr_fetch(pg); 2506 1.1 matt 2507 1.1 matt /* 2508 1.1 matt * Clear the cached value. 2509 1.1 matt */ 2510 1.1 matt pmap_attr_clear(pg, ptebit); 2511 1.1 matt 2512 1.1 matt /* 2513 1.1 matt * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2514 1.1 matt * can reset the right ones). Note that since the pvo entries and 2515 1.1 matt * list heads are accessed via BAT0 and are never placed in the 2516 1.1 matt * page table, we don't have to worry about further accesses setting 2517 1.1 matt * the REF/CHG bits. 2518 1.1 matt */ 2519 1.1 matt SYNC(); 2520 1.1 matt 2521 1.1 matt /* 2522 1.1 matt * For each pvo entry, clear pvo's ptebit. If this pvo have a 2523 1.1 matt * valid PTE. If so, clear the ptebit from the valid PTE. 2524 1.1 matt */ 2525 1.1 matt LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2526 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2527 1.1 matt pt = pmap_pvo_to_pte(pvo, -1); 2528 1.1 matt if (pt != NULL) { 2529 1.1 matt /* 2530 1.1 matt * Only sync the PTE if the bit we are looking 2531 1.1 matt * for is not already set. 2532 1.1 matt */ 2533 1.1 matt if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2534 1.1 matt pmap_pte_synch(pt, &pvo->pvo_pte); 2535 1.1 matt /* 2536 1.1 matt * If the bit we are looking for was already set, 2537 1.1 matt * clear that bit in the pte. 2538 1.1 matt */ 2539 1.1 matt if (pvo->pvo_pte.pte_lo & ptebit) 2540 1.1 matt pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2541 1.1 matt } 2542 1.1 matt rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2543 1.1 matt pvo->pvo_pte.pte_lo &= ~ptebit; 2544 1.1 matt PMAP_PVO_CHECK(pvo); /* sanity check */ 2545 1.1 matt } 2546 1.1 matt pmap_interrupts_restore(msr); 2547 1.14 chs 2548 1.1 matt /* 2549 1.1 matt * If we are clearing the modify bit and this page was marked EXEC 2550 1.1 matt * and the user of the page thinks the page was modified, then we 2551 1.1 matt * need to clean it from the icache if it's mapped or clear the EXEC 2552 1.1 matt * bit if it's not mapped. The page itself might not have the CHG 2553 1.1 matt * bit set if the modification was done via DMA to the page. 
2554 1.1 matt */ 2555 1.1 matt if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2556 1.1 matt if (LIST_EMPTY(pvoh)) { 2557 1.85 matt DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2558 1.85 matt VM_PAGE_TO_PHYS(pg)); 2559 1.1 matt pmap_attr_clear(pg, PTE_EXEC); 2560 1.1 matt PMAPCOUNT(exec_uncached_clear_modify); 2561 1.1 matt } else { 2562 1.85 matt DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2563 1.85 matt VM_PAGE_TO_PHYS(pg)); 2564 1.34 yamt pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2565 1.1 matt PMAPCOUNT(exec_synced_clear_modify); 2566 1.1 matt } 2567 1.1 matt } 2568 1.50 ad PMAP_UNLOCK(); 2569 1.1 matt return (rv & ptebit) != 0; 2570 1.1 matt } 2571 1.1 matt 2572 1.1 matt void 2573 1.1 matt pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2574 1.1 matt { 2575 1.1 matt struct pvo_entry *pvo; 2576 1.1 matt size_t offset = va & ADDR_POFF; 2577 1.1 matt int s; 2578 1.1 matt 2579 1.50 ad PMAP_LOCK(); 2580 1.1 matt s = splvm(); 2581 1.1 matt while (len > 0) { 2582 1.6 thorpej size_t seglen = PAGE_SIZE - offset; 2583 1.1 matt if (seglen > len) 2584 1.1 matt seglen = len; 2585 1.1 matt pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2586 1.39 matt if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2587 1.1 matt pmap_syncicache( 2588 1.1 matt (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2589 1.1 matt PMAP_PVO_CHECK(pvo); 2590 1.1 matt } 2591 1.1 matt va += seglen; 2592 1.1 matt len -= seglen; 2593 1.1 matt offset = 0; 2594 1.1 matt } 2595 1.1 matt splx(s); 2596 1.50 ad PMAP_UNLOCK(); 2597 1.1 matt } 2598 1.1 matt 2599 1.1 matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2600 1.1 matt void 2601 1.2 matt pmap_pte_print(volatile struct pte *pt) 2602 1.1 matt { 2603 1.1 matt printf("PTE %p: ", pt); 2604 1.38 sanjayl 2605 1.53 garbled #if defined(PMAP_OEA) 2606 1.1 matt /* High word: */ 2607 1.54 mlelstv printf("%#" _PRIxpte ": [", pt->pte_hi); 2608 1.53 garbled #else 2609 1.54 mlelstv printf("%#" _PRIxpte ": [", pt->pte_hi); 2610 1.53 garbled #endif /* PMAP_OEA */ 2611 1.38 sanjayl 2612 1.1 matt printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2613 1.1 matt printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2614 1.38 sanjayl 2615 1.54 mlelstv printf("%#" _PRIxpte " %#" _PRIxpte "", 2616 1.38 sanjayl (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2617 1.38 sanjayl pt->pte_hi & PTE_API); 2618 1.53 garbled #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2619 1.54 mlelstv printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2620 1.38 sanjayl #else 2621 1.54 mlelstv printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2622 1.53 garbled #endif /* PMAP_OEA */ 2623 1.38 sanjayl 2624 1.1 matt /* Low word: */ 2625 1.53 garbled #if defined (PMAP_OEA) 2626 1.54 mlelstv printf(" %#" _PRIxpte ": [", pt->pte_lo); 2627 1.54 mlelstv printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2628 1.53 garbled #else 2629 1.54 mlelstv printf(" %#" _PRIxpte ": [", pt->pte_lo); 2630 1.54 mlelstv printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2631 1.38 sanjayl #endif 2632 1.1 matt printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2633 1.1 matt printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2634 1.1 matt printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2635 1.1 matt printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2636 1.1 matt printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2637 1.1 matt printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2638 1.1 matt switch (pt->pte_lo & PTE_PP) { 2639 1.1 matt case PTE_BR: printf("br]\n"); break; 2640 1.1 matt case PTE_BW: printf("bw]\n"); break; 2641 1.1 matt case PTE_SO: printf("so]\n"); break; 2642 1.1 matt case PTE_SW: printf("sw]\n"); break; 2643 1.1 matt } 2644 1.1 matt } 2645 1.1 matt #endif 2646 1.1 matt 2647 1.1 matt #if defined(DDB) 2648 1.1 matt void 2649 1.1 matt pmap_pteg_check(void) 2650 1.1 matt { 2651 1.2 matt volatile struct pte *pt; 2652 1.1 matt int i; 2653 1.1 matt int ptegidx; 2654 1.1 matt u_int p_valid = 0; 2655 1.1 matt u_int s_valid = 0; 2656 1.1 matt u_int invalid = 0; 2657 1.38 sanjayl 2658 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2659 1.1 matt for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2660 1.1 matt if (pt->pte_hi & PTE_VALID) { 2661 1.1 matt if (pt->pte_hi & PTE_HID) 2662 1.1 matt s_valid++; 2663 1.1 matt else 2664 1.38 sanjayl { 2665 1.1 matt p_valid++; 2666 1.38 sanjayl } 2667 1.1 matt } else 2668 1.1 matt invalid++; 2669 1.1 matt } 2670 1.1 matt } 2671 1.1 matt printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2672 1.1 matt p_valid, p_valid, s_valid, s_valid, 2673 1.1 matt invalid, invalid); 2674 1.1 matt } 2675 1.1 matt 2676 1.1 matt void 2677 1.1 matt pmap_print_mmuregs(void) 2678 1.1 matt { 2679 1.1 matt int i; 2680 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2681 1.1 matt u_int cpuvers; 2682 1.90 mrg #endif 2683 1.53 garbled #ifndef PMAP_OEA64 2684 1.1 matt vaddr_t addr; 2685 1.2 matt register_t soft_sr[16]; 2686 1.18 matt #endif 2687 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2688 1.1 matt struct bat soft_ibat[4]; 2689 1.1 matt struct bat soft_dbat[4]; 2690 1.38 sanjayl #endif 2691 1.53 garbled paddr_t sdr1; 2692 1.1 matt 2693 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2694 1.1 matt cpuvers = MFPVR() >> 16; 2695 1.90 mrg #endif 2696 1.35 perry __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2697 1.53 garbled #ifndef PMAP_OEA64 2698 1.16 kleink addr = 0; 2699 1.27 chs for (i = 0; i < 16; i++) { 2700 1.1 matt soft_sr[i] = MFSRIN(addr); 2701 1.1 matt addr += (1 << ADDR_SR_SHFT); 2702 1.1 matt } 2703 1.18 matt #endif 2704 1.1 matt 2705 1.97 rin #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2706 1.1 matt /* read iBAT (601: uBAT) registers */ 2707 1.35 perry __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2708 1.35 perry __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2709 1.35 perry __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2710 1.35 perry __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2711 1.35 perry __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2712 1.35 perry __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2713 1.35 perry __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2714 1.35 perry __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2715 1.1 matt 2716 1.1 matt 2717 1.1 matt if (cpuvers != MPC601) { 2718 1.1 matt /* read dBAT registers */ 2719 1.35 perry __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2720 1.35 perry __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2721 1.35 perry __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2722 1.35 perry __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2723 1.35 perry __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2724 1.35 perry __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2725 1.35 perry __asm volatile ("mfdbatu %0,3" : 
"=r"(soft_dbat[3].batu)); 2726 1.35 perry __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2727 1.1 matt } 2728 1.38 sanjayl #endif 2729 1.1 matt 2730 1.54 mlelstv printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2731 1.53 garbled #ifndef PMAP_OEA64 2732 1.1 matt printf("SR[]:\t"); 2733 1.27 chs for (i = 0; i < 4; i++) 2734 1.53 garbled printf("0x%08lx, ", soft_sr[i]); 2735 1.1 matt printf("\n\t"); 2736 1.27 chs for ( ; i < 8; i++) 2737 1.53 garbled printf("0x%08lx, ", soft_sr[i]); 2738 1.1 matt printf("\n\t"); 2739 1.27 chs for ( ; i < 12; i++) 2740 1.53 garbled printf("0x%08lx, ", soft_sr[i]); 2741 1.1 matt printf("\n\t"); 2742 1.27 chs for ( ; i < 16; i++) 2743 1.53 garbled printf("0x%08lx, ", soft_sr[i]); 2744 1.1 matt printf("\n"); 2745 1.18 matt #endif 2746 1.1 matt 2747 1.97 rin #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2748 1.1 matt printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i'); 2749 1.27 chs for (i = 0; i < 4; i++) { 2750 1.2 matt printf("0x%08lx 0x%08lx, ", 2751 1.1 matt soft_ibat[i].batu, soft_ibat[i].batl); 2752 1.1 matt if (i == 1) 2753 1.1 matt printf("\n\t"); 2754 1.1 matt } 2755 1.1 matt if (cpuvers != MPC601) { 2756 1.1 matt printf("\ndBAT[]:\t"); 2757 1.27 chs for (i = 0; i < 4; i++) { 2758 1.2 matt printf("0x%08lx 0x%08lx, ", 2759 1.1 matt soft_dbat[i].batu, soft_dbat[i].batl); 2760 1.1 matt if (i == 1) 2761 1.1 matt printf("\n\t"); 2762 1.1 matt } 2763 1.1 matt } 2764 1.1 matt printf("\n"); 2765 1.53 garbled #endif /* PMAP_OEA... */ 2766 1.1 matt } 2767 1.1 matt 2768 1.1 matt void 2769 1.1 matt pmap_print_pte(pmap_t pm, vaddr_t va) 2770 1.1 matt { 2771 1.1 matt struct pvo_entry *pvo; 2772 1.2 matt volatile struct pte *pt; 2773 1.1 matt int pteidx; 2774 1.1 matt 2775 1.1 matt pvo = pmap_pvo_find_va(pm, va, &pteidx); 2776 1.1 matt if (pvo != NULL) { 2777 1.1 matt pt = pmap_pvo_to_pte(pvo, pteidx); 2778 1.1 matt if (pt != NULL) { 2779 1.53 garbled printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2780 1.38 sanjayl va, pt, 2781 1.38 sanjayl pt->pte_hi & PTE_HID ? 
"(sec)" : "(pri)", 2782 1.38 sanjayl pt->pte_hi, pt->pte_lo); 2783 1.1 matt } else { 2784 1.1 matt printf("No valid PTE found\n"); 2785 1.1 matt } 2786 1.1 matt } else { 2787 1.1 matt printf("Address not in pmap\n"); 2788 1.1 matt } 2789 1.1 matt } 2790 1.1 matt 2791 1.1 matt void 2792 1.1 matt pmap_pteg_dist(void) 2793 1.1 matt { 2794 1.1 matt struct pvo_entry *pvo; 2795 1.1 matt int ptegidx; 2796 1.1 matt int depth; 2797 1.1 matt int max_depth = 0; 2798 1.1 matt unsigned int depths[64]; 2799 1.1 matt 2800 1.1 matt memset(depths, 0, sizeof(depths)); 2801 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2802 1.1 matt depth = 0; 2803 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2804 1.1 matt depth++; 2805 1.1 matt } 2806 1.1 matt if (depth > max_depth) 2807 1.1 matt max_depth = depth; 2808 1.1 matt if (depth > 63) 2809 1.1 matt depth = 63; 2810 1.1 matt depths[depth]++; 2811 1.1 matt } 2812 1.1 matt 2813 1.1 matt for (depth = 0; depth < 64; depth++) { 2814 1.1 matt printf(" [%2d]: %8u", depth, depths[depth]); 2815 1.1 matt if ((depth & 3) == 3) 2816 1.1 matt printf("\n"); 2817 1.1 matt if (depth == max_depth) 2818 1.1 matt break; 2819 1.1 matt } 2820 1.1 matt if ((depth & 3) != 3) 2821 1.1 matt printf("\n"); 2822 1.1 matt printf("Max depth found was %d\n", max_depth); 2823 1.1 matt } 2824 1.1 matt #endif /* DEBUG */ 2825 1.1 matt 2826 1.1 matt #if defined(PMAPCHECK) || defined(DEBUG) 2827 1.1 matt void 2828 1.1 matt pmap_pvo_verify(void) 2829 1.1 matt { 2830 1.1 matt int ptegidx; 2831 1.1 matt int s; 2832 1.1 matt 2833 1.1 matt s = splvm(); 2834 1.1 matt for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2835 1.1 matt struct pvo_entry *pvo; 2836 1.1 matt TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2837 1.121 rin if ((uintptr_t) pvo >= PMAP_DIRECT_MAPPED_LEN) 2838 1.1 matt panic("pmap_pvo_verify: invalid pvo %p " 2839 1.1 matt "on list %#x", pvo, ptegidx); 2840 1.1 matt pmap_pvo_check(pvo); 2841 1.1 matt } 2842 1.1 matt } 2843 1.1 matt splx(s); 2844 1.1 matt } 2845 1.1 matt #endif /* PMAPCHECK */ 2846 1.1 matt 2847 1.121 rin /* 2848 1.121 rin * Queue for unmanaged pages, used before uvm.page_init_done. 2849 1.121 rin * Reuse when pool shortage; see pmap_pool_alloc() below. 
2850 1.121 rin */ 2851 1.121 rin struct pup { 2852 1.121 rin SIMPLEQ_ENTRY(pup) pup_link; 2853 1.121 rin }; 2854 1.121 rin SIMPLEQ_HEAD(pup_head, pup); 2855 1.121 rin static struct pup_head pup_head = SIMPLEQ_HEAD_INITIALIZER(pup_head); 2856 1.121 rin 2857 1.121 rin static struct pup * 2858 1.121 rin pmap_alloc_unmanaged(void) 2859 1.121 rin { 2860 1.121 rin struct pup *pup; 2861 1.121 rin register_t msr; 2862 1.121 rin 2863 1.121 rin PMAP_LOCK(); 2864 1.121 rin msr = pmap_interrupts_off(); 2865 1.121 rin pup = SIMPLEQ_FIRST(&pup_head); 2866 1.121 rin if (pup != NULL) 2867 1.121 rin SIMPLEQ_REMOVE_HEAD(&pup_head, pup_link); 2868 1.121 rin pmap_interrupts_restore(msr); 2869 1.121 rin PMAP_UNLOCK(); 2870 1.121 rin return pup; 2871 1.121 rin } 2872 1.121 rin 2873 1.121 rin static void 2874 1.121 rin pmap_free_unmanaged(struct pup *pup) 2875 1.121 rin { 2876 1.121 rin register_t msr; 2877 1.121 rin 2878 1.121 rin PMAP_LOCK(); 2879 1.121 rin msr = pmap_interrupts_off(); 2880 1.121 rin SIMPLEQ_INSERT_HEAD(&pup_head, pup, pup_link); 2881 1.121 rin pmap_interrupts_restore(msr); 2882 1.121 rin PMAP_UNLOCK(); 2883 1.121 rin } 2884 1.121 rin 2885 1.1 matt void * 2886 1.106 martin pmap_pool_alloc(struct pool *pp, int flags) 2887 1.1 matt { 2888 1.106 martin struct vm_page *pg; 2889 1.121 rin paddr_t pa; 2890 1.1 matt 2891 1.121 rin if (__predict_false(!uvm.page_init_done)) 2892 1.121 rin return (void *)uvm_pageboot_alloc(PAGE_SIZE); 2893 1.50 ad 2894 1.121 rin retry: 2895 1.121 rin pg = uvm_pagealloc_strat(NULL /*obj*/, 0 /*off*/, NULL /*anon*/, 2896 1.121 rin UVM_PGA_USERESERVE /*flags*/, UVM_PGA_STRAT_ONLY /*strat*/, 2897 1.121 rin VM_FREELIST_DIRECT_MAPPED /*free_list*/); 2898 1.1 matt if (__predict_false(pg == NULL)) { 2899 1.121 rin void *va = pmap_alloc_unmanaged(); 2900 1.121 rin if (va != NULL) 2901 1.121 rin return va; 2902 1.121 rin 2903 1.121 rin if ((flags & PR_WAITOK) == 0) 2904 1.121 rin return NULL; 2905 1.121 rin uvm_wait("plpg"); 2906 1.121 rin goto retry; 2907 1.1 matt } 2908 1.53 garbled KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2909 1.121 rin pa = VM_PAGE_TO_PHYS(pg); 2910 1.121 rin return (void *)(uintptr_t)pa; 2911 1.1 matt } 2912 1.1 matt 2913 1.1 matt void 2914 1.106 martin pmap_pool_free(struct pool *pp, void *va) 2915 1.1 matt { 2916 1.121 rin struct vm_page *pg; 2917 1.1 matt 2918 1.121 rin pg = PHYS_TO_VM_PAGE((paddr_t)va); 2919 1.121 rin if (__predict_false(pg == NULL)) { 2920 1.121 rin pmap_free_unmanaged(va); 2921 1.121 rin return; 2922 1.121 rin } 2923 1.121 rin uvm_pagefree(pg); 2924 1.1 matt } 2925 1.1 matt 2926 1.1 matt /* 2927 1.1 matt * This routine in bootstraping to steal to-be-managed memory (which will 2928 1.121 rin * then be unmanaged). We use it to grab from the first PMAP_DIRECT_MAPPED_LEN 2929 1.121 rin * for our pmap needs and above it for other stuff. 
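 * Only physsegs on VM_FREELIST_DIRECT_MAPPED are considered, and the
 * stolen pages are unplugged from UVM so they are never added to the
 * managed page pool.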
2930 1.1 matt */ 2931 1.1 matt vaddr_t 2932 1.10 thorpej pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp) 2933 1.1 matt { 2934 1.1 matt vsize_t size; 2935 1.1 matt vaddr_t va; 2936 1.94 cherry paddr_t start, end, pa = 0; 2937 1.94 cherry int npgs, freelist; 2938 1.94 cherry uvm_physseg_t bank; 2939 1.1 matt 2940 1.45 thorpej if (uvm.page_init_done == true) 2941 1.1 matt panic("pmap_steal_memory: called _after_ bootstrap"); 2942 1.1 matt 2943 1.10 thorpej *vstartp = VM_MIN_KERNEL_ADDRESS; 2944 1.10 thorpej *vendp = VM_MAX_KERNEL_ADDRESS; 2945 1.10 thorpej 2946 1.1 matt size = round_page(vsize); 2947 1.1 matt npgs = atop(size); 2948 1.1 matt 2949 1.1 matt /* 2950 1.1 matt * PA 0 will never be among those given to UVM so we can use it 2951 1.1 matt * to indicate we couldn't steal any memory. 2952 1.1 matt */ 2953 1.94 cherry 2954 1.94 cherry for (bank = uvm_physseg_get_first(); 2955 1.94 cherry uvm_physseg_valid_p(bank); 2956 1.94 cherry bank = uvm_physseg_get_next(bank)) { 2957 1.94 cherry 2958 1.94 cherry freelist = uvm_physseg_get_free_list(bank); 2959 1.94 cherry start = uvm_physseg_get_start(bank); 2960 1.94 cherry end = uvm_physseg_get_end(bank); 2961 1.94 cherry 2962 1.121 rin if (freelist == VM_FREELIST_DIRECT_MAPPED && 2963 1.94 cherry (end - start) >= npgs) { 2964 1.94 cherry pa = ptoa(start); 2965 1.1 matt break; 2966 1.1 matt } 2967 1.1 matt } 2968 1.1 matt 2969 1.1 matt if (pa == 0) 2970 1.1 matt panic("pmap_steal_memory: no approriate memory to steal!"); 2971 1.1 matt 2972 1.94 cherry uvm_physseg_unplug(start, npgs); 2973 1.1 matt 2974 1.1 matt va = (vaddr_t) pa; 2975 1.46 christos memset((void *) va, 0, size); 2976 1.1 matt pmap_pages_stolen += npgs; 2977 1.1 matt #ifdef DEBUG 2978 1.1 matt if (pmapdebug && npgs > 1) { 2979 1.1 matt u_int cnt = 0; 2980 1.94 cherry for (bank = uvm_physseg_get_first(); 2981 1.94 cherry uvm_physseg_valid_p(bank); 2982 1.94 cherry bank = uvm_physseg_get_next(bank)) { 2983 1.94 cherry cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank); 2984 1.73 uebayasi } 2985 1.1 matt printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n", 2986 1.1 matt npgs, pmap_pages_stolen, cnt); 2987 1.1 matt } 2988 1.1 matt #endif 2989 1.1 matt 2990 1.1 matt return va; 2991 1.1 matt } 2992 1.1 matt 2993 1.1 matt /* 2994 1.122 andvar * Find a chunk of memory with right size and alignment. 
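 * The chunk is carved out of the avail[] region list: the matching
 * region is trimmed at its start or end, or split into two entries
 * when the chunk falls in its middle.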
2995 1.1 matt */ 2996 1.53 garbled paddr_t 2997 1.1 matt pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end) 2998 1.1 matt { 2999 1.1 matt struct mem_region *mp; 3000 1.1 matt paddr_t s, e; 3001 1.1 matt int i, j; 3002 1.1 matt 3003 1.1 matt size = round_page(size); 3004 1.1 matt 3005 1.1 matt DPRINTFN(BOOT, 3006 1.85 matt "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d", 3007 1.85 matt size, alignment, at_end); 3008 1.1 matt 3009 1.6 thorpej if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0) 3010 1.54 mlelstv panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa, 3011 1.1 matt alignment); 3012 1.1 matt 3013 1.1 matt if (at_end) { 3014 1.6 thorpej if (alignment != PAGE_SIZE) 3015 1.1 matt panic("pmap_boot_find_memory: invalid ending " 3016 1.53 garbled "alignment %#" _PRIxpa, alignment); 3017 1.1 matt 3018 1.1 matt for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) { 3019 1.1 matt s = mp->start + mp->size - size; 3020 1.1 matt if (s >= mp->start && mp->size >= size) { 3021 1.85 matt DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s); 3022 1.1 matt DPRINTFN(BOOT, 3023 1.85 matt "pmap_boot_find_memory: b-avail[%d] start " 3024 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3025 1.85 matt mp->start, mp->size); 3026 1.1 matt mp->size -= size; 3027 1.1 matt DPRINTFN(BOOT, 3028 1.85 matt "pmap_boot_find_memory: a-avail[%d] start " 3029 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3030 1.85 matt mp->start, mp->size); 3031 1.53 garbled return s; 3032 1.1 matt } 3033 1.1 matt } 3034 1.1 matt panic("pmap_boot_find_memory: no available memory"); 3035 1.1 matt } 3036 1.1 matt 3037 1.1 matt for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3038 1.1 matt s = (mp->start + alignment - 1) & ~(alignment-1); 3039 1.1 matt e = s + size; 3040 1.1 matt 3041 1.1 matt /* 3042 1.1 matt * Is the calculated region entirely within the region? 3043 1.1 matt */ 3044 1.1 matt if (s < mp->start || e > mp->start + mp->size) 3045 1.1 matt continue; 3046 1.1 matt 3047 1.85 matt DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s); 3048 1.1 matt if (s == mp->start) { 3049 1.1 matt /* 3050 1.1 matt * If the block starts at the beginning of region, 3051 1.1 matt * adjust the size & start. (the region may now be 3052 1.1 matt * zero in length) 3053 1.1 matt */ 3054 1.1 matt DPRINTFN(BOOT, 3055 1.85 matt "pmap_boot_find_memory: b-avail[%d] start " 3056 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3057 1.1 matt mp->start += size; 3058 1.1 matt mp->size -= size; 3059 1.1 matt DPRINTFN(BOOT, 3060 1.85 matt "pmap_boot_find_memory: a-avail[%d] start " 3061 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3062 1.1 matt } else if (e == mp->start + mp->size) { 3063 1.1 matt /* 3064 1.1 matt * If the block starts at the beginning of region, 3065 1.1 matt * adjust only the size. 3066 1.1 matt */ 3067 1.1 matt DPRINTFN(BOOT, 3068 1.85 matt "pmap_boot_find_memory: b-avail[%d] start " 3069 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3070 1.1 matt mp->size -= size; 3071 1.1 matt DPRINTFN(BOOT, 3072 1.85 matt "pmap_boot_find_memory: a-avail[%d] start " 3073 1.85 matt "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3074 1.1 matt } else { 3075 1.1 matt /* 3076 1.1 matt * Block is in the middle of the region, so we 3077 1.1 matt * have to split it in two. 
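 * The avail[] entries above this one are shifted up by one slot to
 * make room for a new entry describing the tail of the region.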
3078 1.1 matt 			 */
3079 1.1 matt 			for (j = avail_cnt; j > i + 1; j--) {
3080 1.1 matt 				avail[j] = avail[j-1];
3081 1.1 matt 			}
3082 1.1 matt 			DPRINTFN(BOOT,
3083 1.85 matt 			    "pmap_boot_find_memory: b-avail[%d] start "
3084 1.85 matt 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3085 1.1 matt 			mp[1].start = e;
3086 1.1 matt 			mp[1].size = mp[0].start + mp[0].size - e;
3087 1.1 matt 			mp[0].size = s - mp[0].start;
3088 1.1 matt 			avail_cnt++;
3089 1.1 matt 			for (; i < avail_cnt; i++) {
3090 1.1 matt 				DPRINTFN(BOOT,
3091 1.85 matt 				    "pmap_boot_find_memory: a-avail[%d] "
3092 1.85 matt 				    "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
3093 1.85 matt 				    avail[i].start, avail[i].size);
3094 1.1 matt 			}
3095 1.1 matt 		}
3096 1.53 garbled 		KASSERT(s == (uintptr_t) s);
3097 1.53 garbled 		return s;
3098 1.1 matt 	}
3099 1.1 matt 	panic("pmap_boot_find_memory: not enough memory for "
3100 1.54 mlelstv 	    "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
3101 1.1 matt }
3102 1.1 matt 
3103 1.38 sanjayl /* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
3104 1.53 garbled #if defined (PMAP_OEA64_BRIDGE)
3105 1.38 sanjayl int
3106 1.38 sanjayl pmap_setup_segment0_map(int use_large_pages, ...)
3107 1.38 sanjayl {
3108 1.88 christos 	vaddr_t va, va_end;
3109 1.38 sanjayl 
3110 1.38 sanjayl 	register_t pte_lo = 0x0;
3111 1.90 mrg 	int ptegidx = 0;
3112 1.38 sanjayl 	struct pte pte;
3113 1.38 sanjayl 	va_list ap;
3114 1.38 sanjayl 
3115 1.38 sanjayl 	/* Coherent + Supervisor RW, no user access */
3116 1.38 sanjayl 	pte_lo = PTE_M;
3117 1.38 sanjayl 
3118 1.38 sanjayl 	/* XXXSL
3119 1.38 sanjayl 	 * Map in the 1st segment 1:1; we'll be careful not to spill kernel
3120 1.38 sanjayl 	 * entries later, since those have to take priority.
3121 1.38 sanjayl 	 */
3122 1.38 sanjayl 	for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3123 1.38 sanjayl 		ptegidx = va_to_pteg(pmap_kernel(), va);
3124 1.38 sanjayl 		pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3125 1.90 mrg 		(void)pmap_pte_insert(ptegidx, &pte);
3126 1.38 sanjayl 	}
3127 1.38 sanjayl 
3128 1.38 sanjayl 	va_start(ap, use_large_pages);
3129 1.38 sanjayl 	while (1) {
3130 1.38 sanjayl 		paddr_t pa;
3131 1.38 sanjayl 		size_t size;
3132 1.38 sanjayl 
3133 1.38 sanjayl 		va = va_arg(ap, vaddr_t);
3134 1.38 sanjayl 
3135 1.38 sanjayl 		if (va == 0)
3136 1.38 sanjayl 			break;
3137 1.38 sanjayl 
3138 1.38 sanjayl 		pa = va_arg(ap, paddr_t);
3139 1.38 sanjayl 		size = va_arg(ap, size_t);
3140 1.38 sanjayl 
3141 1.88 christos 		for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
3142 1.38 sanjayl #if 0
3143 1.54 mlelstv 			printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa);
3144 1.38 sanjayl #endif
3145 1.38 sanjayl 			ptegidx = va_to_pteg(pmap_kernel(), va);
3146 1.38 sanjayl 			pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3147 1.90 mrg 			(void)pmap_pte_insert(ptegidx, &pte);
3148 1.38 sanjayl 		}
3149 1.38 sanjayl 	}
3150 1.93 dholland 	va_end(ap);
3151 1.38 sanjayl 
3152 1.38 sanjayl 	TLBSYNC();
3153 1.38 sanjayl 	SYNC();
3154 1.38 sanjayl 	return (0);
3155 1.38 sanjayl }
3156 1.53 garbled #endif /* PMAP_OEA64_BRIDGE */
3157 1.38 sanjayl 
3158 1.1 matt /*
3159 1.99 thorpej  * Set up the bottom level of the data structures necessary for the kernel
3160 1.99 thorpej  * to manage memory.  The MMU hardware is programmed in pmap_bootstrap2().
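 *
 * Nothing is loaded into the MMU here: this step only builds the PTEG
 * hash table, the PVO lists and the kernel pmap's segment values.
 * pmap_bootstrap() below simply runs this and pmap_bootstrap2() back
 * to back.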
3161 1.1 matt  */
3162 1.1 matt void
3163 1.99 thorpej pmap_bootstrap1(paddr_t kernelstart, paddr_t kernelend)
3164 1.1 matt {
3165 1.1 matt 	struct mem_region *mp, tmp;
3166 1.1 matt 	paddr_t s, e;
3167 1.1 matt 	psize_t size;
3168 1.1 matt 	int i, j;
3169 1.1 matt 
3170 1.1 matt 	/*
3171 1.1 matt 	 * Get memory.
3172 1.1 matt 	 */
3173 1.1 matt 	mem_regions(&mem, &avail);
3174 1.1 matt #if defined(DEBUG)
3175 1.1 matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
3176 1.1 matt 		printf("pmap_bootstrap: memory configuration:\n");
3177 1.1 matt 		for (mp = mem; mp->size; mp++) {
3178 1.54 mlelstv 			printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
3179 1.1 matt 			    mp->start, mp->size);
3180 1.1 matt 		}
3181 1.1 matt 		for (mp = avail; mp->size; mp++) {
3182 1.54 mlelstv 			printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
3183 1.1 matt 			    mp->start, mp->size);
3184 1.1 matt 		}
3185 1.1 matt 	}
3186 1.1 matt #endif
3187 1.1 matt 
3188 1.1 matt 	/*
3189 1.1 matt 	 * Find out how much physical memory we have and in how many chunks.
3190 1.1 matt 	 */
3191 1.1 matt 	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3192 1.1 matt 		if (mp->start >= pmap_memlimit)
3193 1.1 matt 			continue;
3194 1.1 matt 		if (mp->start + mp->size > pmap_memlimit) {
3195 1.1 matt 			size = pmap_memlimit - mp->start;
3196 1.1 matt 			physmem += btoc(size);
3197 1.1 matt 		} else {
3198 1.1 matt 			physmem += btoc(mp->size);
3199 1.1 matt 		}
3200 1.1 matt 		mem_cnt++;
3201 1.1 matt 	}
3202 1.1 matt 
3203 1.1 matt 	/*
3204 1.1 matt 	 * Count the number of available entries.
3205 1.1 matt 	 */
3206 1.1 matt 	for (avail_cnt = 0, mp = avail; mp->size; mp++)
3207 1.1 matt 		avail_cnt++;
3208 1.1 matt 
3209 1.1 matt 	/*
3210 1.1 matt 	 * Page align all regions.
3211 1.1 matt 	 */
3212 1.1 matt 	kernelstart = trunc_page(kernelstart);
3213 1.1 matt 	kernelend = round_page(kernelend);
3214 1.1 matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3215 1.1 matt 		s = round_page(mp->start);
3216 1.1 matt 		mp->size -= (s - mp->start);
3217 1.1 matt 		mp->size = trunc_page(mp->size);
3218 1.1 matt 		mp->start = s;
3219 1.1 matt 		e = mp->start + mp->size;
3220 1.1 matt 
3221 1.1 matt 		DPRINTFN(BOOT,
3222 1.85 matt 		    "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3223 1.85 matt 		    i, mp->start, mp->size);
3224 1.1 matt 
3225 1.1 matt 		/*
3226 1.1 matt 		 * Don't allow the end to run beyond our artificial limit.
3227 1.1 matt 		 */
3228 1.1 matt 		if (e > pmap_memlimit)
3229 1.1 matt 			e = pmap_memlimit;
3230 1.1 matt 
3231 1.1 matt 		/*
3232 1.1 matt 		 * Is this region empty or strange?  Skip it.
3233 1.1 matt 		 */
3234 1.1 matt 		if (e <= s) {
3235 1.1 matt 			mp->start = 0;
3236 1.1 matt 			mp->size = 0;
3237 1.1 matt 			continue;
3238 1.1 matt 		}
3239 1.1 matt 
3240 1.1 matt 		/*
3241 1.1 matt 		 * Does this region overlap the beginning of the kernel?
3242 1.1 matt 		 * Does it extend past the end of the kernel?
3243 1.1 matt 		 */
3244 1.1 matt 		else if (s < kernelstart && e > kernelstart) {
3245 1.1 matt 			if (e > kernelend) {
3246 1.1 matt 				avail[avail_cnt].start = kernelend;
3247 1.1 matt 				avail[avail_cnt].size = e - kernelend;
3248 1.1 matt 				avail_cnt++;
3249 1.1 matt 			}
3250 1.1 matt 			mp->size = kernelstart - s;
3251 1.1 matt 		}
3252 1.1 matt 		/*
3253 1.1 matt 		 * Check whether this region overlaps the end of the kernel.
3254 1.1 matt 		 */
3255 1.1 matt 		else if (s < kernelend && e > kernelend) {
3256 1.1 matt 			mp->start = kernelend;
3257 1.1 matt 			mp->size = e - kernelend;
3258 1.1 matt 		}
3259 1.1 matt 		/*
3260 1.1 matt 		 * Check whether this region is completely inside the kernel.
3261 1.1 matt 		 * Nuke it if it is.
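		 *
		 * (To recap this whole if/else chain: each avail region is
		 * clipped against [kernelstart, kernelend) and against
		 * pmap_memlimit; a region straddling the kernel may grow a
		 * new avail[] entry for the part above kernelend, and
		 * regions swallowed entirely are zeroed so the compaction
		 * pass below can drop them.)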
3262 1.1 matt 		 */
3263 1.1 matt 		else if (s >= kernelstart && e <= kernelend) {
3264 1.1 matt 			mp->start = 0;
3265 1.1 matt 			mp->size = 0;
3266 1.1 matt 		}
3267 1.1 matt 		/*
3268 1.1 matt 		 * If the user imposed a memory limit, enforce it.
3269 1.1 matt 		 */
3270 1.1 matt 		else if (s >= pmap_memlimit) {
3271 1.6 thorpej 			mp->start = -PAGE_SIZE;	/* mark it so we know why */
3272 1.1 matt 			mp->size = 0;
3273 1.1 matt 		}
3274 1.1 matt 		else {
3275 1.1 matt 			mp->start = s;
3276 1.1 matt 			mp->size = e - s;
3277 1.1 matt 		}
3278 1.1 matt 		DPRINTFN(BOOT,
3279 1.85 matt 		    "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3280 1.85 matt 		    i, mp->start, mp->size);
3281 1.1 matt 	}
3282 1.1 matt 
3283 1.1 matt 	/*
3284 1.1 matt 	 * Move (and uncount) all the null regions to the end.
3285 1.1 matt 	 */
3286 1.1 matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3287 1.1 matt 		if (mp->size == 0) {
3288 1.1 matt 			tmp = avail[i];
3289 1.1 matt 			avail[i] = avail[--avail_cnt];
3290 1.1 matt 			avail[avail_cnt] = avail[i];
3291 1.1 matt 		}
3292 1.1 matt 	}
3293 1.1 matt 
3294 1.1 matt 	/*
3295 1.61 skrll 	 * (Bubble)sort them into ascending order.
3296 1.1 matt 	 */
3297 1.1 matt 	for (i = 0; i < avail_cnt; i++) {
3298 1.1 matt 		for (j = i + 1; j < avail_cnt; j++) {
3299 1.1 matt 			if (avail[i].start > avail[j].start) {
3300 1.1 matt 				tmp = avail[i];
3301 1.1 matt 				avail[i] = avail[j];
3302 1.1 matt 				avail[j] = tmp;
3303 1.1 matt 			}
3304 1.1 matt 		}
3305 1.1 matt 	}
3306 1.1 matt 
3307 1.1 matt 	/*
3308 1.1 matt 	 * Make sure they don't overlap.
3309 1.1 matt 	 */
3310 1.1 matt 	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
3311 1.1 matt 		if (mp[0].start + mp[0].size > mp[1].start) {
3312 1.1 matt 			mp[0].size = mp[1].start - mp[0].start;
3313 1.1 matt 		}
3314 1.1 matt 		DPRINTFN(BOOT,
3315 1.85 matt 		    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3316 1.85 matt 		    i, mp->start, mp->size);
3317 1.1 matt 	}
3318 1.1 matt 	DPRINTFN(BOOT,
3319 1.85 matt 	    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
3320 1.85 matt 	    i, mp->start, mp->size);
3321 1.1 matt 
3322 1.1 matt #ifdef PTEGCOUNT
3323 1.1 matt 	pmap_pteg_cnt = PTEGCOUNT;
3324 1.1 matt #else /* PTEGCOUNT */
3325 1.38 sanjayl 
3326 1.1 matt 	pmap_pteg_cnt = 0x1000;
3327 1.1 matt 
3328 1.1 matt 	while (pmap_pteg_cnt < physmem)
3329 1.1 matt 		pmap_pteg_cnt <<= 1;
3330 1.1 matt 
3331 1.1 matt 	pmap_pteg_cnt >>= 1;
3332 1.1 matt #endif /* PTEGCOUNT */
3333 1.1 matt 
3334 1.38 sanjayl #ifdef DEBUG
3335 1.85 matt 	DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt);
3336 1.38 sanjayl #endif
3337 1.38 sanjayl 
3338 1.1 matt 	/*
3339 1.1 matt 	 * Find suitably aligned memory for PTEG hash table.
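	 *
	 * The hash table must be aligned to its own size, hence the
	 * identical size/alignment arguments below.  With 32-bit OEA PTEGs
	 * of 64 bytes (8 PTEs of 8 bytes each), for example, the 0x10000
	 * PTEGs selected above on a large machine need a 4 MB table on a
	 * 4 MB boundary.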
3340 1.1 matt */ 3341 1.2 matt size = pmap_pteg_cnt * sizeof(struct pteg); 3342 1.53 garbled pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3343 1.38 sanjayl 3344 1.38 sanjayl #ifdef DEBUG 3345 1.38 sanjayl DPRINTFN(BOOT, 3346 1.85 matt "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table); 3347 1.38 sanjayl #endif 3348 1.38 sanjayl 3349 1.38 sanjayl 3350 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3351 1.121 rin if ( (uintptr_t) pmap_pteg_table + size > PMAP_DIRECT_MAPPED_LEN) 3352 1.121 rin panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > PMAP_DIRECT_MAPPED_LEN", 3353 1.1 matt pmap_pteg_table, size); 3354 1.1 matt #endif 3355 1.1 matt 3356 1.32 he memset(__UNVOLATILE(pmap_pteg_table), 0, 3357 1.32 he pmap_pteg_cnt * sizeof(struct pteg)); 3358 1.1 matt pmap_pteg_mask = pmap_pteg_cnt - 1; 3359 1.1 matt 3360 1.1 matt /* 3361 1.1 matt * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3362 1.1 matt * with pages. So we just steal them before giving them to UVM. 3363 1.1 matt */ 3364 1.1 matt size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3365 1.53 garbled pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3366 1.1 matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3367 1.121 rin if ( (uintptr_t) pmap_pvo_table + size > PMAP_DIRECT_MAPPED_LEN) 3368 1.121 rin panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > PMAP_DIRECT_MAPPED_LEN", 3369 1.1 matt pmap_pvo_table, size); 3370 1.1 matt #endif 3371 1.1 matt 3372 1.1 matt for (i = 0; i < pmap_pteg_cnt; i++) 3373 1.1 matt TAILQ_INIT(&pmap_pvo_table[i]); 3374 1.1 matt 3375 1.1 matt #ifndef MSGBUFADDR 3376 1.1 matt /* 3377 1.1 matt * Allocate msgbuf in high memory. 3378 1.1 matt */ 3379 1.53 garbled msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3380 1.1 matt #endif 3381 1.1 matt 3382 1.1 matt for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3383 1.1 matt paddr_t pfstart = atop(mp->start); 3384 1.1 matt paddr_t pfend = atop(mp->start + mp->size); 3385 1.1 matt if (mp->size == 0) 3386 1.1 matt continue; 3387 1.121 rin if (mp->start + mp->size <= PMAP_DIRECT_MAPPED_LEN) { 3388 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend, 3389 1.121 rin VM_FREELIST_DIRECT_MAPPED); 3390 1.121 rin } else if (mp->start >= PMAP_DIRECT_MAPPED_LEN) { 3391 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend, 3392 1.1 matt VM_FREELIST_DEFAULT); 3393 1.1 matt } else { 3394 1.121 rin pfend = atop(PMAP_DIRECT_MAPPED_LEN); 3395 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend, 3396 1.121 rin VM_FREELIST_DIRECT_MAPPED); 3397 1.121 rin pfstart = atop(PMAP_DIRECT_MAPPED_LEN); 3398 1.1 matt pfend = atop(mp->start + mp->size); 3399 1.1 matt uvm_page_physload(pfstart, pfend, pfstart, pfend, 3400 1.1 matt VM_FREELIST_DEFAULT); 3401 1.1 matt } 3402 1.1 matt } 3403 1.1 matt 3404 1.1 matt /* 3405 1.1 matt * Make sure kernel vsid is allocated as well as VSID 0. 3406 1.1 matt */ 3407 1.1 matt pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3408 1.1 matt |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3409 1.53 garbled pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3410 1.53 garbled |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3411 1.1 matt pmap_vsid_bitmap[0] |= 1; 3412 1.1 matt 3413 1.1 matt /* 3414 1.103 thorpej * Initialize kernel pmap. 
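	 *
	 * Each of the 16 segment registers gets its own kernel VSID with
	 * the user-state protection key set (SR_PRKEY); the dedicated
	 * kernel segment(s) also set SR_SUKEY.  The values are only
	 * recorded in the kernel pmap here; pmap_bootstrap2() loads them
	 * into the hardware.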
3415 1.1 matt */ 3416 1.103 thorpej #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 3417 1.1 matt for (i = 0; i < 16; i++) { 3418 1.38 sanjayl pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3419 1.1 matt } 3420 1.102 thorpej pmap_kernel()->pm_vsid = KERNEL_VSIDBITS; 3421 1.1 matt 3422 1.1 matt pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3423 1.1 matt #ifdef KERNEL2_SR 3424 1.1 matt pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3425 1.1 matt #endif 3426 1.53 garbled #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3427 1.103 thorpej 3428 1.103 thorpej #if defined(PMAP_OEA) && defined(PPC_OEA601) 3429 1.105 thorpej if ((MFPVR() >> 16) == MPC601) { 3430 1.103 thorpej for (i = 0; i < 16; i++) { 3431 1.103 thorpej if (iosrtable[i] & SR601_T) { 3432 1.103 thorpej pmap_kernel()->pm_sr[i] = iosrtable[i]; 3433 1.103 thorpej } 3434 1.1 matt } 3435 1.1 matt } 3436 1.103 thorpej #endif /* PMAP_OEA && PPC_OEA601 */ 3437 1.1 matt 3438 1.1 matt #ifdef ALTIVEC 3439 1.1 matt pmap_use_altivec = cpu_altivec; 3440 1.1 matt #endif 3441 1.1 matt 3442 1.1 matt #ifdef DEBUG 3443 1.1 matt if (pmapdebug & PMAPDEBUG_BOOT) { 3444 1.1 matt u_int cnt; 3445 1.94 cherry uvm_physseg_t bank; 3446 1.1 matt char pbuf[9]; 3447 1.94 cherry for (cnt = 0, bank = uvm_physseg_get_first(); 3448 1.94 cherry uvm_physseg_valid_p(bank); 3449 1.94 cherry bank = uvm_physseg_get_next(bank)) { 3450 1.94 cherry cnt += uvm_physseg_get_avail_end(bank) - 3451 1.94 cherry uvm_physseg_get_avail_start(bank); 3452 1.53 garbled printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3453 1.1 matt bank, 3454 1.94 cherry ptoa(uvm_physseg_get_avail_start(bank)), 3455 1.94 cherry ptoa(uvm_physseg_get_avail_end(bank)), 3456 1.94 cherry ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank))); 3457 1.1 matt } 3458 1.1 matt format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3459 1.1 matt printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3460 1.1 matt pbuf, cnt); 3461 1.1 matt } 3462 1.1 matt #endif 3463 1.1 matt 3464 1.106 martin pool_init(&pmap_pvo_pool, sizeof(struct pvo_entry), 3465 1.119 rin PMAP_PVO_ENTRY_ALIGN, 0, 0, "pmap_pvopl", 3466 1.106 martin &pmap_pool_allocator, IPL_VM); 3467 1.1 matt 3468 1.106 martin pool_setlowat(&pmap_pvo_pool, 1008); 3469 1.1 matt 3470 1.1 matt pool_init(&pmap_pool, sizeof(struct pmap), 3471 1.121 rin __alignof(struct pmap), 0, 0, "pmap_pl", 3472 1.121 rin &pmap_pool_allocator, IPL_NONE); 3473 1.121 rin 3474 1.121 rin #if defined(PMAP_OEA64_BRIDGE) 3475 1.121 rin { 3476 1.121 rin struct pmap *pm = pmap_kernel(); 3477 1.121 rin uvm_physseg_t bank; 3478 1.121 rin paddr_t pa; 3479 1.121 rin struct pte pt; 3480 1.121 rin unsigned int ptegidx; 3481 1.121 rin 3482 1.121 rin for (bank = uvm_physseg_get_first(); 3483 1.121 rin uvm_physseg_valid_p(bank); 3484 1.121 rin bank = uvm_physseg_get_next(bank)) { 3485 1.121 rin if (uvm_physseg_get_free_list(bank) != 3486 1.121 rin VM_FREELIST_DIRECT_MAPPED) 3487 1.121 rin continue; 3488 1.121 rin for (pa = uimax(ptoa(uvm_physseg_get_avail_start(bank)), 3489 1.121 rin SEGMENT_LENGTH); 3490 1.121 rin pa < ptoa(uvm_physseg_get_avail_end(bank)); 3491 1.121 rin pa += PAGE_SIZE) { 3492 1.121 rin ptegidx = va_to_pteg(pm, pa); 3493 1.121 rin pmap_pte_create(&pt, pm, pa, pa | PTE_M); 3494 1.121 rin pmap_pte_insert(ptegidx, &pt); 3495 1.121 rin } 3496 1.121 rin } 3497 1.121 rin } 3498 1.121 rin #endif 3499 1.41 matt 3500 1.89 macallan #if defined(PMAP_NEED_MAPKERNEL) 3501 1.41 matt { 
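	/*
	 * PMAP_NEED_MAPKERNEL: for ports that cannot rely on a BAT mapping
	 * to cover the kernel, pre-enter 1:1 page table entries for all
	 * managed physical memory (and, with PMAP_NEED_FULL_MAPKERNEL, for
	 * the kernel text/data and the rest of the first segment) so the
	 * kernel stays mapped once translation is switched on in
	 * pmap_bootstrap2().
	 */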
3502 1.53 garbled struct pmap *pm = pmap_kernel(); 3503 1.58 garbled #if defined(PMAP_NEED_FULL_MAPKERNEL) 3504 1.41 matt extern int etext[], kernel_text[]; 3505 1.41 matt vaddr_t va, va_etext = (paddr_t) etext; 3506 1.53 garbled #endif 3507 1.53 garbled paddr_t pa, pa_end; 3508 1.42 matt register_t sr; 3509 1.53 garbled struct pte pt; 3510 1.53 garbled unsigned int ptegidx; 3511 1.53 garbled int bank; 3512 1.42 matt 3513 1.53 garbled sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3514 1.53 garbled pm->pm_sr[0] = sr; 3515 1.41 matt 3516 1.53 garbled for (bank = 0; bank < vm_nphysseg; bank++) { 3517 1.73 uebayasi pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3518 1.73 uebayasi pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3519 1.53 garbled for (; pa < pa_end; pa += PAGE_SIZE) { 3520 1.53 garbled ptegidx = va_to_pteg(pm, pa); 3521 1.53 garbled pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3522 1.53 garbled pmap_pte_insert(ptegidx, &pt); 3523 1.53 garbled } 3524 1.53 garbled } 3525 1.53 garbled 3526 1.58 garbled #if defined(PMAP_NEED_FULL_MAPKERNEL) 3527 1.41 matt va = (vaddr_t) kernel_text; 3528 1.41 matt 3529 1.41 matt for (pa = kernelstart; va < va_etext; 3530 1.53 garbled pa += PAGE_SIZE, va += PAGE_SIZE) { 3531 1.53 garbled ptegidx = va_to_pteg(pm, va); 3532 1.53 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3533 1.53 garbled pmap_pte_insert(ptegidx, &pt); 3534 1.53 garbled } 3535 1.41 matt 3536 1.41 matt for (; pa < kernelend; 3537 1.53 garbled pa += PAGE_SIZE, va += PAGE_SIZE) { 3538 1.53 garbled ptegidx = va_to_pteg(pm, va); 3539 1.53 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3540 1.53 garbled pmap_pte_insert(ptegidx, &pt); 3541 1.53 garbled } 3542 1.53 garbled 3543 1.58 garbled for (va = 0, pa = 0; va < kernelstart; 3544 1.53 garbled pa += PAGE_SIZE, va += PAGE_SIZE) { 3545 1.53 garbled ptegidx = va_to_pteg(pm, va); 3546 1.58 garbled if (va < 0x3000) 3547 1.58 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3548 1.58 garbled else 3549 1.58 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3550 1.58 garbled pmap_pte_insert(ptegidx, &pt); 3551 1.58 garbled } 3552 1.58 garbled for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3553 1.58 garbled pa += PAGE_SIZE, va += PAGE_SIZE) { 3554 1.58 garbled ptegidx = va_to_pteg(pm, va); 3555 1.53 garbled pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3556 1.53 garbled pmap_pte_insert(ptegidx, &pt); 3557 1.53 garbled } 3558 1.103 thorpej #endif /* PMAP_NEED_FULL_MAPKERNEL */ 3559 1.99 thorpej } 3560 1.103 thorpej #endif /* PMAP_NEED_MAPKERNEL */ 3561 1.99 thorpej } 3562 1.42 matt 3563 1.99 thorpej /* 3564 1.99 thorpej * Using the data structures prepared in pmap_bootstrap1(), program 3565 1.99 thorpej * the MMU hardware. 
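 *
 * Concretely: load the sixteen kernel segment registers with mtsrin,
 * point SDR1 at the PTEG hash table (with the table size encoded in its
 * low-order bits), and flush stale translations with tlbia().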
3566 1.99 thorpej */ 3567 1.99 thorpej void 3568 1.99 thorpej pmap_bootstrap2(void) 3569 1.99 thorpej { 3570 1.103 thorpej #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 3571 1.99 thorpej for (int i = 0; i < 16; i++) { 3572 1.99 thorpej __asm volatile("mtsrin %0,%1" 3573 1.99 thorpej :: "r"(pmap_kernel()->pm_sr[i]), 3574 1.99 thorpej "r"(i << ADDR_SR_SHFT)); 3575 1.41 matt } 3576 1.99 thorpej #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3577 1.103 thorpej 3578 1.103 thorpej #if defined(PMAP_OEA) 3579 1.109 riastrad __asm volatile("sync; mtsdr1 %0; isync" 3580 1.109 riastrad : 3581 1.109 riastrad : "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)) 3582 1.109 riastrad : "memory"); 3583 1.103 thorpej #elif defined(PMAP_OEA64) || defined(PMAP_OEA64_BRIDGE) 3584 1.109 riastrad __asm volatile("sync; mtsdr1 %0; isync" 3585 1.109 riastrad : 3586 1.109 riastrad : "r"((uintptr_t)pmap_pteg_table | 3587 1.109 riastrad (32 - __builtin_clz(pmap_pteg_mask >> 11))) 3588 1.109 riastrad : "memory"); 3589 1.41 matt #endif 3590 1.99 thorpej tlbia(); 3591 1.91 macallan 3592 1.91 macallan #if defined(PMAPDEBUG) 3593 1.103 thorpej if (pmapdebug) 3594 1.91 macallan pmap_print_mmuregs(); 3595 1.91 macallan #endif 3596 1.1 matt } 3597 1.99 thorpej 3598 1.99 thorpej /* 3599 1.99 thorpej * This is not part of the defined PMAP interface and is specific to the 3600 1.99 thorpej * PowerPC architecture. This is called during initppc, before the system 3601 1.99 thorpej * is really initialized. 3602 1.99 thorpej */ 3603 1.99 thorpej void 3604 1.99 thorpej pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3605 1.99 thorpej { 3606 1.99 thorpej pmap_bootstrap1(kernelstart, kernelend); 3607 1.99 thorpej pmap_bootstrap2(); 3608 1.99 thorpej } 3609
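
/*
 * Illustrative sketch only: md_early_init() and md_fixup_before_mmu()
 * below are hypothetical names, not functions provided by this file.
 * A port that has machine-dependent work to do between building the
 * pmap data structures and turning the MMU on would call the two
 * halves separately instead of using pmap_bootstrap():
 *
 *	void
 *	md_early_init(paddr_t startkernel, paddr_t endkernel)
 *	{
 *		pmap_bootstrap1(startkernel, endkernel);
 *		md_fixup_before_mmu();	// hypothetical MD hook
 *		pmap_bootstrap2();	// program SRs/SDR1, flush the TLB
 *	}
 */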