1 1.443 skrll /* $NetBSD: pmap.c,v 1.443 2024/04/13 12:28:01 skrll Exp $ */ 2 1.12 chris 3 1.12 chris /* 4 1.134 thorpej * Copyright 2003 Wasabi Systems, Inc. 5 1.134 thorpej * All rights reserved. 6 1.134 thorpej * 7 1.134 thorpej * Written by Steve C. Woodford for Wasabi Systems, Inc. 8 1.134 thorpej * 9 1.134 thorpej * Redistribution and use in source and binary forms, with or without 10 1.134 thorpej * modification, are permitted provided that the following conditions 11 1.134 thorpej * are met: 12 1.134 thorpej * 1. Redistributions of source code must retain the above copyright 13 1.134 thorpej * notice, this list of conditions and the following disclaimer. 14 1.134 thorpej * 2. Redistributions in binary form must reproduce the above copyright 15 1.134 thorpej * notice, this list of conditions and the following disclaimer in the 16 1.134 thorpej * documentation and/or other materials provided with the distribution. 17 1.134 thorpej * 3. All advertising materials mentioning features or use of this software 18 1.134 thorpej * must display the following acknowledgement: 19 1.134 thorpej * This product includes software developed for the NetBSD Project by 20 1.134 thorpej * Wasabi Systems, Inc. 21 1.134 thorpej * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 1.134 thorpej * or promote products derived from this software without specific prior 23 1.134 thorpej * written permission. 24 1.134 thorpej * 25 1.134 thorpej * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 1.134 thorpej * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 1.134 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 1.134 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 1.134 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 1.134 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 1.134 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 1.134 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 1.134 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 1.134 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 1.134 thorpej * POSSIBILITY OF SUCH DAMAGE. 36 1.134 thorpej */ 37 1.134 thorpej 38 1.134 thorpej /* 39 1.134 thorpej * Copyright (c) 2002-2003 Wasabi Systems, Inc. 40 1.12 chris * Copyright (c) 2001 Richard Earnshaw 41 1.119 chris * Copyright (c) 2001-2002 Christopher Gilbert 42 1.12 chris * All rights reserved. 43 1.12 chris * 44 1.12 chris * 1. Redistributions of source code must retain the above copyright 45 1.12 chris * notice, this list of conditions and the following disclaimer. 46 1.12 chris * 2. Redistributions in binary form must reproduce the above copyright 47 1.12 chris * notice, this list of conditions and the following disclaimer in the 48 1.12 chris * documentation and/or other materials provided with the distribution. 49 1.12 chris * 3. The name of the company nor the name of the author may be used to 50 1.442 skrll * endorse or promote products derived from this software without specific 51 1.12 chris * prior written permission. 
52 1.12 chris * 53 1.12 chris * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 54 1.12 chris * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 55 1.12 chris * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 56 1.12 chris * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 57 1.12 chris * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 58 1.12 chris * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 59 1.12 chris * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 1.12 chris * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 1.12 chris * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 1.12 chris * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 1.12 chris * SUCH DAMAGE. 64 1.12 chris */ 65 1.1 matt 66 1.1 matt /*- 67 1.405 ad * Copyright (c) 1999, 2020 The NetBSD Foundation, Inc. 68 1.1 matt * All rights reserved. 69 1.1 matt * 70 1.1 matt * This code is derived from software contributed to The NetBSD Foundation 71 1.1 matt * by Charles M. Hannum. 72 1.1 matt * 73 1.1 matt * Redistribution and use in source and binary forms, with or without 74 1.1 matt * modification, are permitted provided that the following conditions 75 1.1 matt * are met: 76 1.1 matt * 1. Redistributions of source code must retain the above copyright 77 1.1 matt * notice, this list of conditions and the following disclaimer. 78 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright 79 1.1 matt * notice, this list of conditions and the following disclaimer in the 80 1.1 matt * documentation and/or other materials provided with the distribution. 81 1.1 matt * 82 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 83 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 84 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 85 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 86 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 87 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 88 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 89 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 90 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 91 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 92 1.1 matt * POSSIBILITY OF SUCH DAMAGE. 93 1.1 matt */ 94 1.1 matt 95 1.1 matt /* 96 1.1 matt * Copyright (c) 1994-1998 Mark Brinicombe. 97 1.1 matt * Copyright (c) 1994 Brini. 98 1.1 matt * All rights reserved. 99 1.1 matt * 100 1.1 matt * This code is derived from software written for Brini by Mark Brinicombe 101 1.1 matt * 102 1.1 matt * Redistribution and use in source and binary forms, with or without 103 1.1 matt * modification, are permitted provided that the following conditions 104 1.1 matt * are met: 105 1.1 matt * 1. Redistributions of source code must retain the above copyright 106 1.1 matt * notice, this list of conditions and the following disclaimer. 107 1.1 matt * 2. 
Redistributions in binary form must reproduce the above copyright 108 1.1 matt * notice, this list of conditions and the following disclaimer in the 109 1.1 matt * documentation and/or other materials provided with the distribution. 110 1.1 matt * 3. All advertising materials mentioning features or use of this software 111 1.1 matt * must display the following acknowledgement: 112 1.1 matt * This product includes software developed by Mark Brinicombe. 113 1.1 matt * 4. The name of the author may not be used to endorse or promote products 114 1.1 matt * derived from this software without specific prior written permission. 115 1.1 matt * 116 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 117 1.1 matt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 118 1.1 matt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 119 1.1 matt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 120 1.1 matt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 121 1.1 matt * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 122 1.1 matt * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 123 1.1 matt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 124 1.1 matt * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 125 1.1 matt * 126 1.1 matt * RiscBSD kernel project 127 1.1 matt * 128 1.1 matt * pmap.c 129 1.1 matt * 130 1.223 wiz * Machine dependent vm stuff 131 1.1 matt * 132 1.1 matt * Created : 20/09/94 133 1.1 matt */ 134 1.1 matt 135 1.1 matt /* 136 1.174 matt * armv6 and VIPT cache support by 3am Software Foundry, 137 1.174 matt * Copyright (c) 2007 Microsoft 138 1.174 matt */ 139 1.174 matt 140 1.174 matt /* 141 1.1 matt * Performance improvements, UVM changes, overhauls and part-rewrites 142 1.1 matt * were contributed by Neil A. Carson <neil (at) causality.com>. 143 1.1 matt */ 144 1.1 matt 145 1.1 matt /* 146 1.134 thorpej * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables 147 1.134 thorpej * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi 148 1.134 thorpej * Systems, Inc. 149 1.134 thorpej * 150 1.134 thorpej * There are still a few things outstanding at this time: 151 1.134 thorpej * 152 1.134 thorpej * - There are some unresolved issues for MP systems: 153 1.134 thorpej * 154 1.134 thorpej * o The L1 metadata needs a lock, or more specifically, some places 155 1.134 thorpej * need to acquire an exclusive lock when modifying L1 translation 156 1.134 thorpej * table entries. 157 1.134 thorpej * 158 1.134 thorpej * o When one cpu modifies an L1 entry, and that L1 table is also 159 1.134 thorpej * being used by another cpu, then the latter will need to be told 160 1.134 thorpej * that a tlb invalidation may be necessary. (But only if the old 161 1.134 thorpej * domain number in the L1 entry being over-written is currently 162 1.134 thorpej * the active domain on that cpu). I guess there are lots more tlb 163 1.134 thorpej * shootdown issues too... 164 1.134 thorpej * 165 1.256 matt * o If the vector_page is at 0x00000000 instead of in kernel VA space, 166 1.256 matt * then MP systems will lose big-time because of the MMU domain hack. 167 1.134 thorpej * The only way this can be solved (apart from moving the vector 168 1.134 thorpej * page to 0xffff0000) is to reserve the first 1MB of user address 169 1.134 thorpej * space for kernel use only. 
This would require re-linking all 170 1.134 thorpej * applications so that the text section starts above this 1MB 171 1.134 thorpej * boundary. 172 1.134 thorpej * 173 1.134 thorpej * o Tracking which VM space is resident in the cache/tlb has not yet 174 1.134 thorpej * been implemented for MP systems. 175 1.134 thorpej * 176 1.134 thorpej * o Finally, there is a pathological condition where two cpus running 177 1.134 thorpej * two separate processes (not lwps) which happen to share an L1 178 1.134 thorpej * can get into a fight over one or more L1 entries. This will result 179 1.134 thorpej * in a significant slow-down if both processes are in tight loops. 180 1.1 matt */ 181 1.1 matt 182 1.1 matt /* Include header files */ 183 1.1 matt 184 1.319 skrll #include "opt_arm_debug.h" 185 1.134 thorpej #include "opt_cpuoptions.h" 186 1.1 matt #include "opt_ddb.h" 187 1.435 skrll #include "opt_efi.h" 188 1.137 martin #include "opt_lockdebug.h" 189 1.137 martin #include "opt_multiprocessor.h" 190 1.1 matt 191 1.271 matt #ifdef MULTIPROCESSOR 192 1.271 matt #define _INTR_PRIVATE 193 1.271 matt #endif 194 1.271 matt 195 1.384 skrll #include <sys/cdefs.h> 196 1.443 skrll __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.443 2024/04/13 12:28:01 skrll Exp $"); 197 1.384 skrll 198 1.171 matt #include <sys/param.h> 199 1.1 matt #include <sys/types.h> 200 1.414 skrll 201 1.417 skrll #include <sys/asan.h> 202 1.414 skrll #include <sys/atomic.h> 203 1.384 skrll #include <sys/bus.h> 204 1.384 skrll #include <sys/cpu.h> 205 1.384 skrll #include <sys/intr.h> 206 1.1 matt #include <sys/kernel.h> 207 1.384 skrll #include <sys/kernhist.h> 208 1.384 skrll #include <sys/kmem.h> 209 1.384 skrll #include <sys/pool.h> 210 1.1 matt #include <sys/proc.h> 211 1.186 matt #include <sys/sysctl.h> 212 1.384 skrll #include <sys/systm.h> 213 1.225 para 214 1.1 matt #include <uvm/uvm.h> 215 1.328 skrll #include <uvm/pmap/pmap_pvt.h> 216 1.1 matt 217 1.263 matt #include <arm/locore.h> 218 1.16 chris 219 1.372 bouyer #ifdef DDB 220 1.372 bouyer #include <arm/db_machdep.h> 221 1.372 bouyer #endif 222 1.372 bouyer 223 1.366 skrll #ifdef VERBOSE_INIT_ARM 224 1.366 skrll #define VPRINTF(...) printf(__VA_ARGS__) 225 1.366 skrll #else 226 1.369 skrll #define VPRINTF(...) 
__nothing 227 1.366 skrll #endif 228 1.366 skrll 229 1.435 skrll #if defined(EFI_RUNTIME) 230 1.435 skrll #if !defined(ARM_MMU_EXTENDED) 231 1.435 skrll #error EFI_RUNTIME is only supported with ARM_MMU_EXTENDED 232 1.435 skrll #endif 233 1.435 skrll #endif 234 1.435 skrll 235 1.134 thorpej /* 236 1.134 thorpej * pmap_kernel() points here 237 1.134 thorpej */ 238 1.271 matt static struct pmap kernel_pmap_store = { 239 1.271 matt #ifndef ARM_MMU_EXTENDED 240 1.271 matt .pm_activated = true, 241 1.271 matt .pm_domain = PMAP_DOMAIN_KERNEL, 242 1.271 matt .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL, 243 1.271 matt #endif 244 1.271 matt }; 245 1.271 matt struct pmap * const kernel_pmap_ptr = &kernel_pmap_store; 246 1.271 matt #undef pmap_kernel 247 1.271 matt #define pmap_kernel() (&kernel_pmap_store) 248 1.435 skrll 249 1.435 skrll #if defined(EFI_RUNTIME) 250 1.435 skrll static struct pmap efirt_pmap; 251 1.435 skrll 252 1.435 skrll struct pmap * 253 1.435 skrll pmap_efirt(void) 254 1.435 skrll { 255 1.435 skrll return &efirt_pmap; 256 1.435 skrll } 257 1.435 skrll #endif 258 1.435 skrll 259 1.241 matt #ifdef PMAP_NEED_ALLOC_POOLPAGE 260 1.241 matt int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT; 261 1.241 matt #endif 262 1.1 matt 263 1.10 chris /* 264 1.134 thorpej * Pool and cache that pmap structures are allocated from. 265 1.134 thorpej * We use a cache to avoid clearing the pm_l2[] array (1KB) 266 1.134 thorpej * in pmap_create(). 267 1.134 thorpej */ 268 1.168 ad static struct pool_cache pmap_cache; 269 1.48 chris 270 1.48 chris /* 271 1.134 thorpej * Pool of PV structures 272 1.10 chris */ 273 1.134 thorpej static struct pool pmap_pv_pool; 274 1.134 thorpej static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); 275 1.134 thorpej static void pmap_bootstrap_pv_page_free(struct pool *, void *); 276 1.134 thorpej static struct pool_allocator pmap_bootstrap_pv_allocator = { 277 1.134 thorpej pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free 278 1.134 thorpej }; 279 1.10 chris 280 1.134 thorpej /* 281 1.134 thorpej * Pool and cache of l2_dtable structures. 282 1.134 thorpej * We use a cache to avoid clearing the structures when they're 283 1.134 thorpej * allocated. (196 bytes) 284 1.134 thorpej */ 285 1.134 thorpej static struct pool_cache pmap_l2dtable_cache; 286 1.134 thorpej static vaddr_t pmap_kernel_l2dtable_kva; 287 1.10 chris 288 1.111 thorpej /* 289 1.134 thorpej * Pool and cache of L2 page descriptors. 290 1.134 thorpej * We use a cache to avoid clearing the descriptor table 291 1.134 thorpej * when they're allocated. 
(1KB) 292 1.111 thorpej */ 293 1.134 thorpej static struct pool_cache pmap_l2ptp_cache; 294 1.134 thorpej static vaddr_t pmap_kernel_l2ptp_kva; 295 1.134 thorpej static paddr_t pmap_kernel_l2ptp_phys; 296 1.111 thorpej 297 1.183 matt #ifdef PMAPCOUNTERS 298 1.174 matt #define PMAP_EVCNT_INITIALIZER(name) \ 299 1.174 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) 300 1.174 matt 301 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 302 1.194 matt static struct evcnt pmap_ev_vac_clean_one = 303 1.194 matt PMAP_EVCNT_INITIALIZER("clean page (1 color)"); 304 1.194 matt static struct evcnt pmap_ev_vac_flush_one = 305 1.194 matt PMAP_EVCNT_INITIALIZER("flush page (1 color)"); 306 1.194 matt static struct evcnt pmap_ev_vac_flush_lots = 307 1.194 matt PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); 308 1.195 matt static struct evcnt pmap_ev_vac_flush_lots2 = 309 1.195 matt PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); 310 1.194 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); 311 1.194 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); 312 1.194 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); 313 1.195 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); 314 1.194 matt 315 1.174 matt static struct evcnt pmap_ev_vac_color_new = 316 1.174 matt PMAP_EVCNT_INITIALIZER("new page color"); 317 1.174 matt static struct evcnt pmap_ev_vac_color_reuse = 318 1.174 matt PMAP_EVCNT_INITIALIZER("ok first page color"); 319 1.174 matt static struct evcnt pmap_ev_vac_color_ok = 320 1.174 matt PMAP_EVCNT_INITIALIZER("ok page color"); 321 1.182 matt static struct evcnt pmap_ev_vac_color_blind = 322 1.182 matt PMAP_EVCNT_INITIALIZER("blind page color"); 323 1.174 matt static struct evcnt pmap_ev_vac_color_change = 324 1.174 matt PMAP_EVCNT_INITIALIZER("change page color"); 325 1.174 matt static struct evcnt pmap_ev_vac_color_erase = 326 1.174 matt PMAP_EVCNT_INITIALIZER("erase page color"); 327 1.174 matt static struct evcnt pmap_ev_vac_color_none = 328 1.174 matt PMAP_EVCNT_INITIALIZER("no page color"); 329 1.174 matt static struct evcnt pmap_ev_vac_color_restore = 330 1.174 matt PMAP_EVCNT_INITIALIZER("restore page color"); 331 1.174 matt 332 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); 333 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); 334 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); 335 1.182 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); 336 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); 337 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); 338 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); 339 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); 340 1.174 matt #endif 341 1.174 matt 342 1.174 matt static struct evcnt pmap_ev_mappings = 343 1.174 matt PMAP_EVCNT_INITIALIZER("pages mapped"); 344 1.174 matt static struct evcnt pmap_ev_unmappings = 345 1.174 matt PMAP_EVCNT_INITIALIZER("pages unmapped"); 346 1.174 matt static struct evcnt pmap_ev_remappings = 347 1.174 matt PMAP_EVCNT_INITIALIZER("pages remapped"); 348 1.174 matt 349 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_mappings); 350 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_unmappings); 351 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_remappings); 352 1.174 matt 353 1.174 matt static struct evcnt pmap_ev_kernel_mappings = 354 1.174 matt PMAP_EVCNT_INITIALIZER("kernel pages mapped"); 355 1.174 matt static struct evcnt pmap_ev_kernel_unmappings = 356 1.174 matt PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); 357 1.174 matt static struct evcnt 
pmap_ev_kernel_remappings = 358 1.174 matt PMAP_EVCNT_INITIALIZER("kernel pages remapped"); 359 1.174 matt 360 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); 361 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); 362 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); 363 1.174 matt 364 1.174 matt static struct evcnt pmap_ev_kenter_mappings = 365 1.174 matt PMAP_EVCNT_INITIALIZER("kenter pages mapped"); 366 1.174 matt static struct evcnt pmap_ev_kenter_unmappings = 367 1.174 matt PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); 368 1.174 matt static struct evcnt pmap_ev_kenter_remappings = 369 1.174 matt PMAP_EVCNT_INITIALIZER("kenter pages remapped"); 370 1.174 matt static struct evcnt pmap_ev_pt_mappings = 371 1.174 matt PMAP_EVCNT_INITIALIZER("page table pages mapped"); 372 1.174 matt 373 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); 374 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); 375 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); 376 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); 377 1.174 matt 378 1.271 matt static struct evcnt pmap_ev_fixup_mod = 379 1.271 matt PMAP_EVCNT_INITIALIZER("page modification emulations"); 380 1.271 matt static struct evcnt pmap_ev_fixup_ref = 381 1.271 matt PMAP_EVCNT_INITIALIZER("page reference emulations"); 382 1.271 matt static struct evcnt pmap_ev_fixup_exec = 383 1.271 matt PMAP_EVCNT_INITIALIZER("exec pages fixed up"); 384 1.271 matt static struct evcnt pmap_ev_fixup_pdes = 385 1.271 matt PMAP_EVCNT_INITIALIZER("pdes fixed up"); 386 1.271 matt #ifndef ARM_MMU_EXTENDED 387 1.271 matt static struct evcnt pmap_ev_fixup_ptesync = 388 1.271 matt PMAP_EVCNT_INITIALIZER("ptesync fixed"); 389 1.271 matt #endif 390 1.271 matt 391 1.271 matt EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod); 392 1.271 matt EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref); 393 1.271 matt EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec); 394 1.271 matt EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes); 395 1.271 matt #ifndef ARM_MMU_EXTENDED 396 1.271 matt EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync); 397 1.271 matt #endif 398 1.271 matt 399 1.174 matt #ifdef PMAP_CACHE_VIPT 400 1.174 matt static struct evcnt pmap_ev_exec_mappings = 401 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages mapped"); 402 1.174 matt static struct evcnt pmap_ev_exec_cached = 403 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages cached"); 404 1.174 matt 405 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); 406 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); 407 1.174 matt 408 1.174 matt static struct evcnt pmap_ev_exec_synced = 409 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages synced"); 410 1.174 matt static struct evcnt pmap_ev_exec_synced_map = 411 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); 412 1.174 matt static struct evcnt pmap_ev_exec_synced_unmap = 413 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); 414 1.174 matt static struct evcnt pmap_ev_exec_synced_remap = 415 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); 416 1.174 matt static struct evcnt pmap_ev_exec_synced_clearbit = 417 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); 418 1.345 skrll #ifndef ARM_MMU_EXTENDED 419 1.174 matt static struct evcnt pmap_ev_exec_synced_kremove = 420 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); 421 1.271 matt #endif 422 1.174 matt 423 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); 424 1.274 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); 425 1.271 matt #ifndef ARM_MMU_EXTENDED 426 1.174 matt 
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); 427 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); 428 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); 429 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); 430 1.271 matt #endif 431 1.174 matt 432 1.174 matt static struct evcnt pmap_ev_exec_discarded_unmap = 433 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); 434 1.174 matt static struct evcnt pmap_ev_exec_discarded_zero = 435 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); 436 1.174 matt static struct evcnt pmap_ev_exec_discarded_copy = 437 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); 438 1.174 matt static struct evcnt pmap_ev_exec_discarded_page_protect = 439 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); 440 1.174 matt static struct evcnt pmap_ev_exec_discarded_clearbit = 441 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); 442 1.174 matt static struct evcnt pmap_ev_exec_discarded_kremove = 443 1.174 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); 444 1.271 matt #ifdef ARM_MMU_EXTENDED 445 1.271 matt static struct evcnt pmap_ev_exec_discarded_modfixup = 446 1.271 matt PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)"); 447 1.271 matt #endif 448 1.174 matt 449 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); 450 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); 451 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); 452 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); 453 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); 454 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); 455 1.271 matt #ifdef ARM_MMU_EXTENDED 456 1.271 matt EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup); 457 1.271 matt #endif 458 1.174 matt #endif /* PMAP_CACHE_VIPT */ 459 1.174 matt 460 1.174 matt static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); 461 1.174 matt static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); 462 1.174 matt static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); 463 1.174 matt 464 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_updates); 465 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_collects); 466 1.174 matt EVCNT_ATTACH_STATIC(pmap_ev_activations); 467 1.174 matt 468 1.174 matt #define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) 469 1.174 matt #else 470 1.174 matt #define PMAPCOUNT(x) ((void)0) 471 1.174 matt #endif 472 1.174 matt 473 1.348 skrll #ifdef ARM_MMU_EXTENDED 474 1.348 skrll void pmap_md_pdetab_activate(pmap_t, struct lwp *); 475 1.348 skrll void pmap_md_pdetab_deactivate(pmap_t pm); 476 1.348 skrll #endif 477 1.348 skrll 478 1.134 thorpej /* 479 1.134 thorpej * pmap copy/zero page, and mem(5) hook point 480 1.134 thorpej */ 481 1.54 thorpej static pt_entry_t *csrc_pte, *cdst_pte; 482 1.54 thorpej static vaddr_t csrcp, cdstp; 483 1.271 matt #ifdef MULTIPROCESSOR 484 1.271 matt static size_t cnptes; 485 1.271 matt #define cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) 486 1.271 matt #define cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT)) 487 1.271 matt #define cpu_csrcp(o) (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o)) 488 1.271 matt #define cpu_cdstp(o) (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o)) 489 1.271 matt #else 490 1.271 matt #define cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT)) 491 1.271 matt #define cpu_cdst_pte(o) (cdst_pte + ((o) >> 
L2_S_SHIFT)) 492 1.271 matt #define cpu_csrcp(o) (csrcp + (o)) 493 1.271 matt #define cpu_cdstp(o) (cdstp + (o)) 494 1.271 matt #endif 495 1.271 matt vaddr_t memhook; /* used by mem.c & others */ 496 1.271 matt kmutex_t memlock __cacheline_aligned; /* used by mem.c & others */ 497 1.271 matt kmutex_t pmap_lock __cacheline_aligned; 498 1.373 bouyer kmutex_t kpm_lock __cacheline_aligned; 499 1.161 christos extern void *msgbufaddr; 500 1.186 matt int pmap_kmpages; 501 1.17 chris /* 502 1.134 thorpej * Flag to indicate if pmap_init() has done its thing 503 1.134 thorpej */ 504 1.159 thorpej bool pmap_initialized; 505 1.134 thorpej 506 1.284 matt #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) 507 1.284 matt /* 508 1.324 matt * Virtual end of direct-mapped memory 509 1.284 matt */ 510 1.323 matt vaddr_t pmap_directlimit; 511 1.284 matt #endif 512 1.284 matt 513 1.134 thorpej /* 514 1.134 thorpej * Misc. locking data structures 515 1.17 chris */ 516 1.1 matt 517 1.271 matt static inline void 518 1.271 matt pmap_acquire_pmap_lock(pmap_t pm) 519 1.271 matt { 520 1.372 bouyer #if defined(MULTIPROCESSOR) && defined(DDB) 521 1.373 bouyer if (__predict_false(db_onproc != NULL)) 522 1.372 bouyer return; 523 1.372 bouyer #endif 524 1.379 skrll 525 1.394 ad mutex_enter(&pm->pm_lock); 526 1.271 matt } 527 1.271 matt 528 1.271 matt static inline void 529 1.271 matt pmap_release_pmap_lock(pmap_t pm) 530 1.271 matt { 531 1.372 bouyer #if defined(MULTIPROCESSOR) && defined(DDB) 532 1.373 bouyer if (__predict_false(db_onproc != NULL)) 533 1.372 bouyer return; 534 1.372 bouyer #endif 535 1.394 ad mutex_exit(&pm->pm_lock); 536 1.271 matt } 537 1.271 matt 538 1.271 matt static inline void 539 1.271 matt pmap_acquire_page_lock(struct vm_page_md *md) 540 1.271 matt { 541 1.271 matt mutex_enter(&pmap_lock); 542 1.271 matt } 543 1.271 matt 544 1.271 matt static inline void 545 1.271 matt pmap_release_page_lock(struct vm_page_md *md) 546 1.271 matt { 547 1.271 matt mutex_exit(&pmap_lock); 548 1.271 matt } 549 1.271 matt 550 1.441 rin static inline int __diagused 551 1.271 matt pmap_page_locked_p(struct vm_page_md *md) 552 1.271 matt { 553 1.271 matt return mutex_owned(&pmap_lock); 554 1.271 matt } 555 1.1 matt 556 1.33 chris 557 1.69 thorpej /* 558 1.134 thorpej * Metadata for L1 translation tables. 559 1.69 thorpej */ 560 1.271 matt #ifndef ARM_MMU_EXTENDED 561 1.134 thorpej struct l1_ttable { 562 1.134 thorpej /* Entry on the L1 Table list */ 563 1.134 thorpej SLIST_ENTRY(l1_ttable) l1_link; 564 1.1 matt 565 1.134 thorpej /* Entry on the L1 Least Recently Used list */ 566 1.134 thorpej TAILQ_ENTRY(l1_ttable) l1_lru; 567 1.1 matt 568 1.134 thorpej /* Track how many domains are allocated from this L1 */ 569 1.134 thorpej volatile u_int l1_domain_use_count; 570 1.1 matt 571 1.134 thorpej /* 572 1.134 thorpej * A free-list of domain numbers for this L1. 573 1.134 thorpej * We avoid using ffs() and a bitmap to track domains since ffs() 574 1.134 thorpej * is slow on ARM. 575 1.134 thorpej */ 576 1.242 skrll uint8_t l1_domain_first; 577 1.242 skrll uint8_t l1_domain_free[PMAP_DOMAINS]; 578 1.1 matt 579 1.134 thorpej /* Physical address of this L1 page table */ 580 1.134 thorpej paddr_t l1_physaddr; 581 1.1 matt 582 1.134 thorpej /* KVA of this L1 page table */ 583 1.134 thorpej pd_entry_t *l1_kva; 584 1.134 thorpej }; 585 1.1 matt 586 1.134 thorpej /* 587 1.134 thorpej * L1 Page Tables are tracked using a Least Recently Used list. 588 1.134 thorpej * - New L1s are allocated from the HEAD. 
589 1.383 skrll * - Freed L1s are added to the TAIL.
590 1.134 thorpej * - Recently accessed L1s (where an 'access' is some change to one of
591 1.134 thorpej * the userland pmaps which owns this L1) are moved to the TAIL.
592 1.17 chris */
593 1.134 thorpej static TAILQ_HEAD(, l1_ttable) l1_lru_list;
594 1.226 matt static kmutex_t l1_lru_lock __cacheline_aligned;
595 1.17 chris
596 1.134 thorpej /*
597 1.134 thorpej * A list of all L1 tables
598 1.134 thorpej */
599 1.134 thorpej static SLIST_HEAD(, l1_ttable) l1_list;
600 1.271 matt #endif /* ARM_MMU_EXTENDED */
601 1.17 chris
602 1.17 chris /*
603 1.134 thorpej * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
604 1.134 thorpej *
605 1.134 thorpej * This is normally 16MB worth of L2 page descriptors for any given pmap.
606 1.134 thorpej * Reference counts are maintained for L2 descriptors so they can be
607 1.134 thorpej * freed when empty.
608 1.17 chris */
609 1.299 matt struct l2_bucket {
610 1.299 matt pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */
611 1.299 matt paddr_t l2b_pa; /* Physical address of same */
612 1.299 matt u_short l2b_l1slot; /* This L2 table's L1 index */
613 1.299 matt u_short l2b_occupancy; /* How many active descriptors */
614 1.299 matt };
615 1.299 matt
616 1.134 thorpej struct l2_dtable {
617 1.134 thorpej /* The number of L2 page descriptors allocated to this l2_dtable */
618 1.134 thorpej u_int l2_occupancy;
619 1.17 chris
620 1.134 thorpej /* List of L2 page descriptors */
621 1.299 matt struct l2_bucket l2_bucket[L2_BUCKET_SIZE];
622 1.17 chris };
623 1.17 chris
624 1.17 chris /*
625 1.134 thorpej * Given an L1 table index, calculate the corresponding l2_dtable index
626 1.134 thorpej * and bucket index within the l2_dtable.
627 1.17 chris */
628 1.271 matt #define L2_BUCKET_XSHIFT (L2_BUCKET_XLOG2 - L1_S_SHIFT)
629 1.271 matt #define L2_BUCKET_XFRAME (~(vaddr_t)0 << L2_BUCKET_XLOG2)
630 1.271 matt #define L2_BUCKET_IDX(l1slot) ((l1slot) >> L2_BUCKET_XSHIFT)
631 1.271 matt #define L2_IDX(l1slot) (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2)
632 1.271 matt #define L2_BUCKET(l1slot) (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1))
633 1.271 matt
634 1.271 matt __CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE));
635 1.271 matt __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1));
636 1.17 chris
637 1.134 thorpej /*
638 1.134 thorpej * Given a virtual address, this macro returns the
639 1.134 thorpej * virtual address required to drop into the next L2 bucket.
640 1.134 thorpej */
641 1.271 matt #define L2_NEXT_BUCKET_VA(va) (((va) & L2_BUCKET_XFRAME) + L2_BUCKET_XSIZE)
642 1.17 chris
643 1.17 chris /*
644 1.134 thorpej * L2 allocation.
645 1.17 chris */
646 1.134 thorpej #define pmap_alloc_l2_dtable() \
647 1.134 thorpej pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
648 1.134 thorpej #define pmap_free_l2_dtable(l2) \
649 1.134 thorpej pool_cache_put(&pmap_l2dtable_cache, (l2))
650 1.134 thorpej #define pmap_alloc_l2_ptp(pap) \
651 1.134 thorpej ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
652 1.134 thorpej PR_NOWAIT, (pap)))
653 1.1 matt
654 1.1 matt /*
655 1.134 thorpej * We try to map the page tables write-through, if possible. However, not
656 1.134 thorpej * all CPUs have a write-through cache mode, so on those we have to sync
657 1.134 thorpej * the cache when we frob page tables.
658 1.113 thorpej *
659 1.134 thorpej * We try to evaluate this at compile time, if possible. However, it's
660 1.134 thorpej * not always possible to do that, hence this run-time var.
661 1.134 thorpej */
662 1.134 thorpej int pmap_needs_pte_sync;
663 1.113 thorpej
664 1.113 thorpej /*
665 1.134 thorpej * Real definition of pv_entry.
666 1.113 thorpej */
667 1.134 thorpej struct pv_entry {
668 1.183 matt SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */
669 1.134 thorpej pmap_t pv_pmap; /* pmap where mapping lies */
670 1.134 thorpej vaddr_t pv_va; /* virtual address for mapping */
671 1.134 thorpej u_int pv_flags; /* flags */
672 1.134 thorpej };
673 1.113 thorpej
674 1.113 thorpej /*
675 1.304 skrll * Macros to determine if a mapping might be resident in the
676 1.304 skrll * instruction/data cache and/or TLB
677 1.17 chris */
678 1.271 matt #if ARM_MMU_V7 > 0 && !defined(ARM_MMU_EXTENDED)
679 1.253 matt /*
680 1.253 matt * Speculative loads by Cortex cores can cause TLB entries to be filled even if
681 1.253 matt * there are no explicit accesses, so there may always be TLB entries to
682 1.253 matt * flush. If we used ASIDs then this would not be a problem.
683 1.253 matt */
684 1.253 matt #define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC)
685 1.304 skrll #define PV_BEEN_REFD(f) (true)
686 1.253 matt #else
687 1.134 thorpej #define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
688 1.304 skrll #define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
689 1.253 matt #endif
690 1.174 matt #define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0)
691 1.268 matt #define PV_IS_KENTRY_P(f) (((f) & PVF_KENTRY) != 0)
692 1.268 matt #define PV_IS_WRITE_P(f) (((f) & PVF_WRITE) != 0)
693 1.17 chris
694 1.17 chris /*
695 1.134 thorpej * Local prototypes
696 1.1 matt */
697 1.271 matt static bool pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t, size_t);
698 1.134 thorpej static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
699 1.134 thorpej pt_entry_t **);
700 1.292 joerg static bool pmap_is_current(pmap_t) __unused;
701 1.159 thorpej static bool pmap_is_cached(pmap_t);
702 1.215 uebayasi static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
703 1.134 thorpej pmap_t, vaddr_t, u_int);
704 1.215 uebayasi static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
705 1.215 uebayasi static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
706 1.215 uebayasi static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
707 1.134 thorpej u_int, u_int);
708 1.17 chris
709 1.134 thorpej static void pmap_pinit(pmap_t);
710 1.134 thorpej static int pmap_pmap_ctor(void *, void *, int);
711 1.17 chris
712 1.134 thorpej static void pmap_alloc_l1(pmap_t);
713 1.134 thorpej static void pmap_free_l1(pmap_t);
714 1.271 matt #ifndef ARM_MMU_EXTENDED
715 1.134 thorpej static void pmap_use_l1(pmap_t);
716 1.271 matt #endif
717 1.17 chris
718 1.134 thorpej static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
719 1.134 thorpej static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
720 1.134 thorpej static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
721 1.134 thorpej static int pmap_l2ptp_ctor(void *, void *, int);
722 1.134 thorpej static int pmap_l2dtable_ctor(void *, void *, int);
723 1.51 chris
724 1.215 uebayasi static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
725 1.174 matt #ifdef PMAP_CACHE_VIVT
726 1.215 uebayasi static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
727 1.215 uebayasi static void pmap_vac_me_user(struct
vm_page_md *, paddr_t, pmap_t, vaddr_t); 728 1.174 matt #endif 729 1.17 chris 730 1.215 uebayasi static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); 731 1.174 matt #ifdef PMAP_CACHE_VIVT 732 1.271 matt static bool pmap_clean_page(struct vm_page_md *, bool); 733 1.174 matt #endif 734 1.174 matt #ifdef PMAP_CACHE_VIPT 735 1.215 uebayasi static void pmap_syncicache_page(struct vm_page_md *, paddr_t); 736 1.194 matt enum pmap_flush_op { 737 1.194 matt PMAP_FLUSH_PRIMARY, 738 1.194 matt PMAP_FLUSH_SECONDARY, 739 1.194 matt PMAP_CLEAN_PRIMARY 740 1.194 matt }; 741 1.271 matt #ifndef ARM_MMU_EXTENDED 742 1.215 uebayasi static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); 743 1.174 matt #endif 744 1.271 matt #endif 745 1.215 uebayasi static void pmap_page_remove(struct vm_page_md *, paddr_t); 746 1.328 skrll static void pmap_pv_remove(paddr_t); 747 1.17 chris 748 1.271 matt #ifndef ARM_MMU_EXTENDED 749 1.134 thorpej static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); 750 1.271 matt #endif 751 1.134 thorpej static vaddr_t kernel_pt_lookup(paddr_t); 752 1.17 chris 753 1.380 skrll #ifdef ARM_MMU_EXTENDED 754 1.380 skrll static struct pool_cache pmap_l1tt_cache; 755 1.380 skrll 756 1.381 skrll static int pmap_l1tt_ctor(void *, void *, int); 757 1.381 skrll static void * pmap_l1tt_alloc(struct pool *, int); 758 1.381 skrll static void pmap_l1tt_free(struct pool *, void *); 759 1.380 skrll 760 1.380 skrll static struct pool_allocator pmap_l1tt_allocator = { 761 1.380 skrll .pa_alloc = pmap_l1tt_alloc, 762 1.380 skrll .pa_free = pmap_l1tt_free, 763 1.380 skrll .pa_pagesz = L1TT_SIZE, 764 1.380 skrll }; 765 1.380 skrll #endif 766 1.17 chris 767 1.17 chris /* 768 1.134 thorpej * Misc variables 769 1.134 thorpej */ 770 1.134 thorpej vaddr_t virtual_avail; 771 1.134 thorpej vaddr_t virtual_end; 772 1.134 thorpej vaddr_t pmap_curmaxkvaddr; 773 1.17 chris 774 1.196 nonaka paddr_t avail_start; 775 1.196 nonaka paddr_t avail_end; 776 1.17 chris 777 1.174 matt pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); 778 1.174 matt pv_addr_t kernelpages; 779 1.174 matt pv_addr_t kernel_l1pt; 780 1.174 matt pv_addr_t systempage; 781 1.435 skrll #if defined(EFI_RUNTIME) 782 1.435 skrll pv_addr_t efirt_l1pt; 783 1.435 skrll #endif 784 1.17 chris 785 1.251 matt #ifdef PMAP_CACHE_VIPT 786 1.251 matt #define PMAP_VALIDATE_MD_PAGE(md) \ 787 1.251 matt KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ 788 1.251 matt "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ 789 1.251 matt (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); 790 1.251 matt #endif /* PMAP_CACHE_VIPT */ 791 1.1 matt /* 792 1.134 thorpej * A bunch of routines to conditionally flush the caches/TLB depending 793 1.134 thorpej * on whether the specified pmap actually needs to be flushed at any 794 1.134 thorpej * given time. 
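 *
 * In summary: with ARM_MMU_EXTENDED the single-entry flush below is
 * simply forwarded to pmap_tlb_invalidate_addr(); otherwise the pmap's
 * cached TLB state (pm_cstate) is consulted first so that a pmap which
 * has never been resident on this CPU is not flushed at all.  For
 * example, an illustrative call such as
 *
 *	pmap_tlb_flush_SE(pm, va, PVF_REF | PVF_EXEC);
 *
 * only issues cpu_tlb_flushID_SE()/cpu_tlb_flushD_SE() when
 * pm_cstate.cs_tlb_id is non-zero.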
795 1.1 matt */
796 1.157 perry static inline void
797 1.259 matt pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags)
798 1.134 thorpej {
799 1.271 matt #ifdef ARM_MMU_EXTENDED
800 1.271 matt pmap_tlb_invalidate_addr(pm, va);
801 1.271 matt #else
802 1.259 matt if (pm->pm_cstate.cs_tlb_id != 0) {
803 1.259 matt if (PV_BEEN_EXECD(flags)) {
804 1.259 matt cpu_tlb_flushID_SE(va);
805 1.259 matt } else if (PV_BEEN_REFD(flags)) {
806 1.259 matt cpu_tlb_flushD_SE(va);
807 1.259 matt }
808 1.259 matt }
809 1.271 matt #endif /* ARM_MMU_EXTENDED */
810 1.1 matt }
811 1.1 matt
812 1.336 skrll #ifndef ARM_MMU_EXTENDED
813 1.157 perry static inline void
814 1.134 thorpej pmap_tlb_flushID(pmap_t pm)
815 1.1 matt {
816 1.134 thorpej if (pm->pm_cstate.cs_tlb_id) {
817 1.134 thorpej cpu_tlb_flushID();
818 1.253 matt #if ARM_MMU_V7 == 0
819 1.253 matt /*
820 1.253 matt * Speculative loads by Cortex cores can cause TLB entries to
821 1.253 matt * be filled even if there are no explicit accesses, so there
822 1.253 matt * may always be TLB entries to flush. If we used ASIDs
823 1.253 matt * then it would not be a problem.
824 1.253 matt * This is not true for other CPUs.
825 1.253 matt */
826 1.134 thorpej pm->pm_cstate.cs_tlb = 0;
827 1.259 matt #endif /* ARM_MMU_V7 */
828 1.1 matt }
829 1.134 thorpej }
830 1.1 matt
831 1.157 perry static inline void
832 1.134 thorpej pmap_tlb_flushD(pmap_t pm)
833 1.134 thorpej {
834 1.134 thorpej if (pm->pm_cstate.cs_tlb_d) {
835 1.134 thorpej cpu_tlb_flushD();
836 1.253 matt #if ARM_MMU_V7 == 0
837 1.253 matt /*
838 1.253 matt * Speculative loads by Cortex cores can cause TLB entries to
839 1.253 matt * be filled even if there are no explicit accesses, so there
840 1.253 matt * may always be TLB entries to flush. If we used ASIDs
841 1.253 matt * then it would not be a problem.
842 1.253 matt * This is not true for other CPUs.
843 1.253 matt */
844 1.134 thorpej pm->pm_cstate.cs_tlb_d = 0;
845 1.260 matt #endif /* ARM_MMU_V7 */
846 1.1 matt }
847 1.308 matt }
848 1.271 matt #endif /* ARM_MMU_EXTENDED */
849 1.1 matt
850 1.174 matt #ifdef PMAP_CACHE_VIVT
851 1.157 perry static inline void
852 1.259 matt pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags)
853 1.17 chris {
854 1.259 matt if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) {
855 1.259 matt cpu_idcache_wbinv_range(va, PAGE_SIZE);
856 1.259 matt } else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) {
857 1.134 thorpej if (do_inv) {
858 1.259 matt if (flags & PVF_WRITE)
859 1.259 matt cpu_dcache_wbinv_range(va, PAGE_SIZE);
860 1.134 thorpej else
861 1.259 matt cpu_dcache_inv_range(va, PAGE_SIZE);
862 1.259 matt } else if (flags & PVF_WRITE) {
863 1.259 matt cpu_dcache_wb_range(va, PAGE_SIZE);
864 1.259 matt }
865 1.1 matt }
866 1.134 thorpej }
867 1.1 matt
868 1.157 perry static inline void
869 1.259 matt pmap_cache_wbinv_all(pmap_t pm, u_int flags)
870 1.134 thorpej {
871 1.259 matt if (PV_BEEN_EXECD(flags)) {
872 1.259 matt if (pm->pm_cstate.cs_cache_id) {
873 1.259 matt cpu_idcache_wbinv_all();
874 1.259 matt pm->pm_cstate.cs_cache = 0;
875 1.259 matt }
876 1.259 matt } else if (pm->pm_cstate.cs_cache_d) {
877 1.134 thorpej cpu_dcache_wbinv_all();
878 1.134 thorpej pm->pm_cstate.cs_cache_d = 0;
879 1.134 thorpej }
880 1.134 thorpej }
881 1.174 matt #endif /* PMAP_CACHE_VIVT */
882 1.1 matt
883 1.258 matt static inline uint8_t
884 1.258 matt pmap_domain(pmap_t pm)
885 1.258 matt {
886 1.271 matt #ifdef ARM_MMU_EXTENDED
887 1.271 matt return pm == pmap_kernel() ?
PMAP_DOMAIN_KERNEL : PMAP_DOMAIN_USER;
888 1.271 matt #else
889 1.258 matt return pm->pm_domain;
890 1.271 matt #endif
891 1.258 matt }
892 1.258 matt
893 1.258 matt static inline pd_entry_t *
894 1.258 matt pmap_l1_kva(pmap_t pm)
895 1.258 matt {
896 1.271 matt #ifdef ARM_MMU_EXTENDED
897 1.271 matt return pm->pm_l1;
898 1.271 matt #else
899 1.258 matt return pm->pm_l1->l1_kva;
900 1.271 matt #endif
901 1.258 matt }
902 1.258 matt
903 1.159 thorpej static inline bool
904 1.134 thorpej pmap_is_current(pmap_t pm)
905 1.1 matt {
906 1.182 matt if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
907 1.174 matt return true;
908 1.1 matt
909 1.174 matt return false;
910 1.134 thorpej }
911 1.1 matt
912 1.159 thorpej static inline bool
913 1.134 thorpej pmap_is_cached(pmap_t pm)
914 1.134 thorpej {
915 1.271 matt #ifdef ARM_MMU_EXTENDED
916 1.318 matt if (pm == pmap_kernel())
917 1.318 matt return true;
918 1.318 matt #ifdef MULTIPROCESSOR
919 1.318 matt // Is this pmap active on any CPU?
920 1.318 matt if (!kcpuset_iszero(pm->pm_active))
921 1.318 matt return true;
922 1.318 matt #else
923 1.271 matt struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
924 1.318 matt // Is this pmap active?
925 1.318 matt if (PMAP_PAI_ASIDVALID_P(PMAP_PAI(pm, ti), ti))
926 1.271 matt return true;
927 1.318 matt #endif
928 1.271 matt #else
929 1.267 matt struct cpu_info * const ci = curcpu();
930 1.271 matt if (pm == pmap_kernel() || ci->ci_pmap_lastuser == NULL
931 1.271 matt || ci->ci_pmap_lastuser == pm)
932 1.271 matt return true;
933 1.271 matt #endif /* ARM_MMU_EXTENDED */
934 1.17 chris
935 1.174 matt return false;
936 1.134 thorpej }
937 1.1 matt
938 1.134 thorpej /*
939 1.134 thorpej * PTE_SYNC_CURRENT:
940 1.134 thorpej *
941 1.134 thorpej * Make sure the pte is written out to RAM.
942 1.134 thorpej * We need to do this in any of the following cases:
943 1.134 thorpej * - We're dealing with the kernel pmap.
944 1.134 thorpej * - There is no pmap active in the cache/tlb.
945 1.134 thorpej * - The specified pmap is 'active' in the cache/tlb.
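 *
 * These are exactly the cases in which pmap_is_cached() above returns
 * true.  A hypothetical caller (illustrative only; the real callers
 * appear later in this file) would pair it with the PTE store:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC_CURRENT(pm, ptep);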
946 1.134 thorpej */
947 1.316 skrll
948 1.344 christos #ifdef PMAP_INCLUDE_PTE_SYNC
949 1.316 skrll static inline void
950 1.316 skrll pmap_pte_sync_current(pmap_t pm, pt_entry_t *ptep)
951 1.316 skrll {
952 1.316 skrll if (PMAP_NEEDS_PTE_SYNC && pmap_is_cached(pm))
953 1.316 skrll PTE_SYNC(ptep);
954 1.422 skrll dsb(sy);
955 1.316 skrll }
956 1.316 skrll
957 1.344 christos # define PTE_SYNC_CURRENT(pm, ptep) pmap_pte_sync_current(pm, ptep)
958 1.134 thorpej #else
959 1.344 christos # define PTE_SYNC_CURRENT(pm, ptep) __nothing
960 1.134 thorpej #endif
961 1.1 matt
962 1.1 matt /*
963 1.17 chris * main pv_entry manipulation functions:
964 1.49 thorpej * pmap_enter_pv: enter a mapping onto a vm_page list
965 1.249 skrll * pmap_remove_pv: remove a mapping from a vm_page list
966 1.17 chris *
967 1.17 chris * NOTE: pmap_enter_pv expects to lock the pvh itself
968 1.250 skrll * pmap_remove_pv expects the caller to lock the pvh before calling
969 1.17 chris */
970 1.17 chris
971 1.17 chris /*
972 1.49 thorpej * pmap_enter_pv: enter a mapping onto a vm_page list
973 1.17 chris *
974 1.17 chris * => caller should hold the proper lock on pmap_main_lock
975 1.17 chris * => caller should have pmap locked
976 1.49 thorpej * => we will gain the lock on the vm_page and allocate the new pv_entry
977 1.17 chris * => caller should adjust ptp's wire_count before calling
978 1.17 chris * => caller should not adjust pmap's wire_count
979 1.17 chris */
980 1.134 thorpej static void
981 1.215 uebayasi pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
982 1.134 thorpej vaddr_t va, u_int flags)
983 1.134 thorpej {
984 1.408 skrll UVMHIST_FUNC(__func__);
985 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx",
986 1.408 skrll (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va);
987 1.408 skrll UVMHIST_LOG(maphist, "...pv %#jx flags %#jx",
988 1.408 skrll (uintptr_t)pv, flags, 0, 0);
989 1.408 skrll
990 1.182 matt struct pv_entry **pvp;
991 1.17 chris
992 1.205 uebayasi pv->pv_pmap = pm;
993 1.205 uebayasi pv->pv_va = va;
994 1.205 uebayasi pv->pv_flags = flags;
995 1.134 thorpej
996 1.215 uebayasi pvp = &SLIST_FIRST(&md->pvh_list);
997 1.182 matt #ifdef PMAP_CACHE_VIPT
998 1.182 matt /*
999 1.185 matt * Insert unmanaged entries, writeable first, at the head of
1000 1.185 matt * the pv list.
1001 1.182 matt */
1002 1.268 matt if (__predict_true(!PV_IS_KENTRY_P(flags))) {
1003 1.268 matt while (*pvp != NULL && PV_IS_KENTRY_P((*pvp)->pv_flags))
1004 1.183 matt pvp = &SLIST_NEXT(*pvp, pv_link);
1005 1.268 matt }
1006 1.268 matt if (!PV_IS_WRITE_P(flags)) {
1007 1.268 matt while (*pvp != NULL && PV_IS_WRITE_P((*pvp)->pv_flags))
1008 1.185 matt pvp = &SLIST_NEXT(*pvp, pv_link);
1009 1.182 matt }
1010 1.182 matt #endif
1011 1.205 uebayasi SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */
1012 1.205 uebayasi *pvp = pv; /* ...
locked list */ 1013 1.215 uebayasi md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); 1014 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 1015 1.205 uebayasi if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) 1016 1.215 uebayasi md->pvh_attrs |= PVF_KMOD; 1017 1.215 uebayasi if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) 1018 1.215 uebayasi md->pvh_attrs |= PVF_DIRTY; 1019 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1020 1.183 matt #endif 1021 1.134 thorpej if (pm == pmap_kernel()) { 1022 1.174 matt PMAPCOUNT(kernel_mappings); 1023 1.134 thorpej if (flags & PVF_WRITE) 1024 1.215 uebayasi md->krw_mappings++; 1025 1.134 thorpej else 1026 1.215 uebayasi md->kro_mappings++; 1027 1.206 uebayasi } else { 1028 1.206 uebayasi if (flags & PVF_WRITE) 1029 1.215 uebayasi md->urw_mappings++; 1030 1.206 uebayasi else 1031 1.215 uebayasi md->uro_mappings++; 1032 1.206 uebayasi } 1033 1.174 matt 1034 1.174 matt #ifdef PMAP_CACHE_VIPT 1035 1.271 matt #ifndef ARM_MMU_EXTENDED 1036 1.174 matt /* 1037 1.251 matt * Even though pmap_vac_me_harder will set PVF_WRITE for us, 1038 1.251 matt * do it here as well to keep the mappings & KVF_WRITE consistent. 1039 1.251 matt */ 1040 1.251 matt if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) { 1041 1.251 matt md->pvh_attrs |= PVF_WRITE; 1042 1.251 matt } 1043 1.271 matt #endif 1044 1.251 matt /* 1045 1.174 matt * If this is an exec mapping and its the first exec mapping 1046 1.174 matt * for this page, make sure to sync the I-cache. 1047 1.174 matt */ 1048 1.174 matt if (PV_IS_EXEC_P(flags)) { 1049 1.215 uebayasi if (!PV_IS_EXEC_P(md->pvh_attrs)) { 1050 1.215 uebayasi pmap_syncicache_page(md, pa); 1051 1.174 matt PMAPCOUNT(exec_synced_map); 1052 1.174 matt } 1053 1.174 matt PMAPCOUNT(exec_mappings); 1054 1.174 matt } 1055 1.174 matt #endif 1056 1.174 matt 1057 1.174 matt PMAPCOUNT(mappings); 1058 1.134 thorpej 1059 1.205 uebayasi if (pv->pv_flags & PVF_WIRED) 1060 1.134 thorpej ++pm->pm_stats.wired_count; 1061 1.17 chris } 1062 1.17 chris 1063 1.17 chris /* 1064 1.134 thorpej * 1065 1.134 thorpej * pmap_find_pv: Find a pv entry 1066 1.134 thorpej * 1067 1.134 thorpej * => caller should hold lock on vm_page 1068 1.134 thorpej */ 1069 1.157 perry static inline struct pv_entry * 1070 1.215 uebayasi pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) 1071 1.134 thorpej { 1072 1.134 thorpej struct pv_entry *pv; 1073 1.134 thorpej 1074 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1075 1.134 thorpej if (pm == pv->pv_pmap && va == pv->pv_va) 1076 1.134 thorpej break; 1077 1.134 thorpej } 1078 1.134 thorpej 1079 1.387 skrll return pv; 1080 1.134 thorpej } 1081 1.134 thorpej 1082 1.134 thorpej /* 1083 1.134 thorpej * pmap_remove_pv: try to remove a mapping from a pv_list 1084 1.17 chris * 1085 1.17 chris * => caller should hold proper lock on pmap_main_lock 1086 1.17 chris * => pmap should be locked 1087 1.49 thorpej * => caller should hold lock on vm_page [so that attrs can be adjusted] 1088 1.17 chris * => caller should adjust ptp's wire_count and free PTP if needed 1089 1.17 chris * => caller should NOT adjust pmap's wire_count 1090 1.205 uebayasi * => we return the removed pv 1091 1.17 chris */ 1092 1.134 thorpej static struct pv_entry * 1093 1.215 uebayasi pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1094 1.17 chris { 1095 1.408 skrll UVMHIST_FUNC(__func__); 1096 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", 1097 
1.408 skrll (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); 1098 1.408 skrll 1099 1.205 uebayasi struct pv_entry *pv, **prevptr; 1100 1.17 chris 1101 1.215 uebayasi prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ 1102 1.205 uebayasi pv = *prevptr; 1103 1.134 thorpej 1104 1.205 uebayasi while (pv) { 1105 1.205 uebayasi if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ 1106 1.408 skrll UVMHIST_LOG(maphist, "pm %#jx md %#jx flags %#jx", 1107 1.408 skrll (uintptr_t)pm, (uintptr_t)md, pv->pv_flags, 0); 1108 1.205 uebayasi if (pv->pv_flags & PVF_WIRED) { 1109 1.156 scw --pm->pm_stats.wired_count; 1110 1.156 scw } 1111 1.205 uebayasi *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ 1112 1.134 thorpej if (pm == pmap_kernel()) { 1113 1.174 matt PMAPCOUNT(kernel_unmappings); 1114 1.205 uebayasi if (pv->pv_flags & PVF_WRITE) 1115 1.215 uebayasi md->krw_mappings--; 1116 1.134 thorpej else 1117 1.215 uebayasi md->kro_mappings--; 1118 1.206 uebayasi } else { 1119 1.206 uebayasi if (pv->pv_flags & PVF_WRITE) 1120 1.215 uebayasi md->urw_mappings--; 1121 1.206 uebayasi else 1122 1.215 uebayasi md->uro_mappings--; 1123 1.206 uebayasi } 1124 1.174 matt 1125 1.174 matt PMAPCOUNT(unmappings); 1126 1.174 matt #ifdef PMAP_CACHE_VIPT 1127 1.174 matt /* 1128 1.174 matt * If this page has had an exec mapping, then if 1129 1.174 matt * this was the last mapping, discard the contents, 1130 1.174 matt * otherwise sync the i-cache for this page. 1131 1.174 matt */ 1132 1.215 uebayasi if (PV_IS_EXEC_P(md->pvh_attrs)) { 1133 1.215 uebayasi if (SLIST_EMPTY(&md->pvh_list)) { 1134 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 1135 1.174 matt PMAPCOUNT(exec_discarded_unmap); 1136 1.345 skrll } else if (pv->pv_flags & PVF_WRITE) { 1137 1.215 uebayasi pmap_syncicache_page(md, pa); 1138 1.174 matt PMAPCOUNT(exec_synced_unmap); 1139 1.174 matt } 1140 1.174 matt } 1141 1.174 matt #endif /* PMAP_CACHE_VIPT */ 1142 1.17 chris break; 1143 1.17 chris } 1144 1.205 uebayasi prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ 1145 1.205 uebayasi pv = *prevptr; /* advance */ 1146 1.17 chris } 1147 1.134 thorpej 1148 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 1149 1.182 matt /* 1150 1.185 matt * If we no longer have a WRITEABLE KENTRY at the head of list, 1151 1.185 matt * clear the KMOD attribute from the page. 1152 1.185 matt */ 1153 1.215 uebayasi if (SLIST_FIRST(&md->pvh_list) == NULL 1154 1.215 uebayasi || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) 1155 1.215 uebayasi md->pvh_attrs &= ~PVF_KMOD; 1156 1.185 matt 1157 1.185 matt /* 1158 1.182 matt * If this was a writeable page and there are no more writeable 1159 1.183 matt * mappings (ignoring KMPAGE), clear the WRITE flag and writeback 1160 1.183 matt * the contents to memory. 
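 * This only matters for VIPT caches with a preferred cache colour in
 * use (arm_cache_prefer_mask != 0); krw_mappings/urw_mappings count
 * the remaining kernel and user writeable mappings of the page.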
1161 1.182 matt */ 1162 1.251 matt if (arm_cache_prefer_mask != 0) { 1163 1.251 matt if (md->krw_mappings + md->urw_mappings == 0) 1164 1.251 matt md->pvh_attrs &= ~PVF_WRITE; 1165 1.251 matt PMAP_VALIDATE_MD_PAGE(md); 1166 1.251 matt } 1167 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1168 1.271 matt #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ 1169 1.182 matt 1170 1.346 skrll /* return removed pv */ 1171 1.346 skrll return pv; 1172 1.17 chris } 1173 1.17 chris 1174 1.17 chris /* 1175 1.17 chris * 1176 1.17 chris * pmap_modify_pv: Update pv flags 1177 1.17 chris * 1178 1.49 thorpej * => caller should hold lock on vm_page [so that attrs can be adjusted] 1179 1.17 chris * => caller should NOT adjust pmap's wire_count 1180 1.29 rearnsha * => caller must call pmap_vac_me_harder() if writable status of a page 1181 1.29 rearnsha * may have changed. 1182 1.17 chris * => we return the old flags 1183 1.286 skrll * 1184 1.1 matt * Modify a physical-virtual mapping in the pv table 1185 1.1 matt */ 1186 1.134 thorpej static u_int 1187 1.215 uebayasi pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, 1188 1.134 thorpej u_int clr_mask, u_int set_mask) 1189 1.1 matt { 1190 1.1 matt struct pv_entry *npv; 1191 1.1 matt u_int flags, oflags; 1192 1.408 skrll UVMHIST_FUNC(__func__); 1193 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", 1194 1.408 skrll (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); 1195 1.408 skrll UVMHIST_LOG(maphist, "... clr %#jx set %#jx", clr_mask, set_mask, 0, 0); 1196 1.1 matt 1197 1.268 matt KASSERT(!PV_IS_KENTRY_P(clr_mask)); 1198 1.268 matt KASSERT(!PV_IS_KENTRY_P(set_mask)); 1199 1.185 matt 1200 1.408 skrll if ((npv = pmap_find_pv(md, pm, va)) == NULL) { 1201 1.408 skrll UVMHIST_LOG(maphist, "<--- done (not found)", 0, 0, 0, 0); 1202 1.387 skrll return 0; 1203 1.408 skrll } 1204 1.134 thorpej 1205 1.1 matt /* 1206 1.1 matt * There is at least one VA mapping this page. 
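 * Update the page's REF/MOD attributes, the pmap's wired count and
 * the per-page writeable-mapping counters below to match the new
 * flags.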
1207 1.1 matt */ 1208 1.1 matt 1209 1.183 matt if (clr_mask & (PVF_REF | PVF_MOD)) { 1210 1.215 uebayasi md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); 1211 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 1212 1.215 uebayasi if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) 1213 1.215 uebayasi md->pvh_attrs |= PVF_DIRTY; 1214 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1215 1.271 matt #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ 1216 1.183 matt } 1217 1.134 thorpej 1218 1.134 thorpej oflags = npv->pv_flags; 1219 1.134 thorpej npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; 1220 1.134 thorpej 1221 1.134 thorpej if ((flags ^ oflags) & PVF_WIRED) { 1222 1.134 thorpej if (flags & PVF_WIRED) 1223 1.134 thorpej ++pm->pm_stats.wired_count; 1224 1.134 thorpej else 1225 1.134 thorpej --pm->pm_stats.wired_count; 1226 1.134 thorpej } 1227 1.134 thorpej 1228 1.134 thorpej if ((flags ^ oflags) & PVF_WRITE) { 1229 1.134 thorpej if (pm == pmap_kernel()) { 1230 1.134 thorpej if (flags & PVF_WRITE) { 1231 1.215 uebayasi md->krw_mappings++; 1232 1.215 uebayasi md->kro_mappings--; 1233 1.134 thorpej } else { 1234 1.215 uebayasi md->kro_mappings++; 1235 1.215 uebayasi md->krw_mappings--; 1236 1.1 matt } 1237 1.134 thorpej } else { 1238 1.206 uebayasi if (flags & PVF_WRITE) { 1239 1.215 uebayasi md->urw_mappings++; 1240 1.215 uebayasi md->uro_mappings--; 1241 1.206 uebayasi } else { 1242 1.215 uebayasi md->uro_mappings++; 1243 1.215 uebayasi md->urw_mappings--; 1244 1.206 uebayasi } 1245 1.1 matt } 1246 1.1 matt } 1247 1.174 matt #ifdef PMAP_CACHE_VIPT 1248 1.251 matt if (arm_cache_prefer_mask != 0) { 1249 1.251 matt if (md->urw_mappings + md->krw_mappings == 0) { 1250 1.251 matt md->pvh_attrs &= ~PVF_WRITE; 1251 1.251 matt } else { 1252 1.251 matt md->pvh_attrs |= PVF_WRITE; 1253 1.251 matt } 1254 1.247 matt } 1255 1.174 matt /* 1256 1.174 matt * We have two cases here: the first is from enter_pv (new exec 1257 1.174 matt * page), the second is a combined pmap_remove_pv/pmap_enter_pv. 1258 1.174 matt * Since in latter, pmap_enter_pv won't do anything, we just have 1259 1.174 matt * to do what pmap_remove_pv would do. 1260 1.174 matt */ 1261 1.215 uebayasi if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs)) 1262 1.215 uebayasi || (PV_IS_EXEC_P(md->pvh_attrs) 1263 1.174 matt || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { 1264 1.215 uebayasi pmap_syncicache_page(md, pa); 1265 1.174 matt PMAPCOUNT(exec_synced_remap); 1266 1.174 matt } 1267 1.345 skrll #ifndef ARM_MMU_EXTENDED 1268 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1269 1.271 matt #endif /* !ARM_MMU_EXTENDED */ 1270 1.271 matt #endif /* PMAP_CACHE_VIPT */ 1271 1.174 matt 1272 1.174 matt PMAPCOUNT(remappings); 1273 1.134 thorpej 1274 1.408 skrll UVMHIST_LOG(maphist, "<--- done", 0, 0, 0, 0); 1275 1.408 skrll 1276 1.387 skrll return oflags; 1277 1.1 matt } 1278 1.1 matt 1279 1.380 skrll 1280 1.380 skrll #if defined(ARM_MMU_EXTENDED) 1281 1.380 skrll int 1282 1.380 skrll pmap_maxproc_set(int nmaxproc) 1283 1.380 skrll { 1284 1.380 skrll static const char pmap_l1ttpool_warnmsg[] = 1285 1.380 skrll "WARNING: l1ttpool limit reached; increase kern.maxproc"; 1286 1.380 skrll 1287 1.403 chs pool_cache_prime(&pmap_l1tt_cache, nmaxproc); 1288 1.380 skrll 1289 1.380 skrll /* 1290 1.380 skrll * Set the hard limit on the pmap_l1tt_cache to the number 1291 1.380 skrll * of processes the kernel is to support. 
Log the limit 1292 1.380 skrll * reached message max once a minute. 1293 1.380 skrll */ 1294 1.380 skrll pool_cache_sethardlimit(&pmap_l1tt_cache, nmaxproc, 1295 1.380 skrll pmap_l1ttpool_warnmsg, 60); 1296 1.380 skrll 1297 1.380 skrll return 0; 1298 1.380 skrll } 1299 1.380 skrll 1300 1.380 skrll #endif 1301 1.380 skrll 1302 1.134 thorpej /* 1303 1.134 thorpej * Allocate an L1 translation table for the specified pmap. 1304 1.134 thorpej * This is called at pmap creation time. 1305 1.134 thorpej */ 1306 1.134 thorpej static void 1307 1.134 thorpej pmap_alloc_l1(pmap_t pm) 1308 1.1 matt { 1309 1.271 matt #ifdef ARM_MMU_EXTENDED 1310 1.380 skrll vaddr_t va = (vaddr_t)pool_cache_get_paddr(&pmap_l1tt_cache, PR_WAITOK, 1311 1.380 skrll &pm->pm_l1_pa); 1312 1.271 matt 1313 1.271 matt pm->pm_l1 = (pd_entry_t *)va; 1314 1.380 skrll PTE_SYNC_RANGE(pm->pm_l1, L1TT_SIZE / sizeof(pt_entry_t)); 1315 1.271 matt #else 1316 1.134 thorpej struct l1_ttable *l1; 1317 1.242 skrll uint8_t domain; 1318 1.134 thorpej 1319 1.134 thorpej /* 1320 1.134 thorpej * Remove the L1 at the head of the LRU list 1321 1.134 thorpej */ 1322 1.226 matt mutex_spin_enter(&l1_lru_lock); 1323 1.134 thorpej l1 = TAILQ_FIRST(&l1_lru_list); 1324 1.134 thorpej KDASSERT(l1 != NULL); 1325 1.134 thorpej TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1326 1.1 matt 1327 1.134 thorpej /* 1328 1.134 thorpej * Pick the first available domain number, and update 1329 1.134 thorpej * the link to the next number. 1330 1.134 thorpej */ 1331 1.134 thorpej domain = l1->l1_domain_first; 1332 1.134 thorpej l1->l1_domain_first = l1->l1_domain_free[domain]; 1333 1.115 thorpej 1334 1.134 thorpej /* 1335 1.134 thorpej * If there are still free domain numbers in this L1, 1336 1.134 thorpej * put it back on the TAIL of the LRU list. 1337 1.134 thorpej */ 1338 1.134 thorpej if (++l1->l1_domain_use_count < PMAP_DOMAINS) 1339 1.134 thorpej TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1340 1.1 matt 1341 1.226 matt mutex_spin_exit(&l1_lru_lock); 1342 1.1 matt 1343 1.134 thorpej /* 1344 1.134 thorpej * Fix up the relevant bits in the pmap structure 1345 1.134 thorpej */ 1346 1.134 thorpej pm->pm_l1 = l1; 1347 1.230 matt pm->pm_domain = domain + 1; 1348 1.271 matt #endif 1349 1.1 matt } 1350 1.1 matt 1351 1.1 matt /* 1352 1.134 thorpej * Free an L1 translation table. 1353 1.134 thorpej * This is called at pmap destruction time. 1354 1.1 matt */ 1355 1.134 thorpej static void 1356 1.134 thorpej pmap_free_l1(pmap_t pm) 1357 1.1 matt { 1358 1.271 matt #ifdef ARM_MMU_EXTENDED 1359 1.380 skrll pool_cache_put_paddr(&pmap_l1tt_cache, (void *)pm->pm_l1, pm->pm_l1_pa); 1360 1.380 skrll 1361 1.271 matt pm->pm_l1 = NULL; 1362 1.271 matt pm->pm_l1_pa = 0; 1363 1.271 matt #else 1364 1.134 thorpej struct l1_ttable *l1 = pm->pm_l1; 1365 1.1 matt 1366 1.226 matt mutex_spin_enter(&l1_lru_lock); 1367 1.1 matt 1368 1.134 thorpej /* 1369 1.134 thorpej * If this L1 is currently on the LRU list, remove it. 
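 *
 * (Added note on the bookkeeping shared with pmap_alloc_l1() above,
 * illustrative only: the free ARM domain numbers of an L1 are kept in an
 * index-linked free list threaded through l1_domain_free[], with
 * l1_domain_first as its head.  For example, if l1_domain_first == 3 and
 * l1_domain_free[3] == 7, pmap_alloc_l1() hands out domain 3, records it
 * as pm_domain = 3 + 1, and leaves 7 at the head; the code below pushes
 * the freed number back onto the front of that list.)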
1370 1.134 thorpej */ 1371 1.134 thorpej if (l1->l1_domain_use_count < PMAP_DOMAINS) 1372 1.134 thorpej TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1373 1.1 matt 1374 1.1 matt /* 1375 1.134 thorpej * Free up the domain number which was allocated to the pmap 1376 1.1 matt */ 1377 1.258 matt l1->l1_domain_free[pmap_domain(pm) - 1] = l1->l1_domain_first; 1378 1.258 matt l1->l1_domain_first = pmap_domain(pm) - 1; 1379 1.134 thorpej l1->l1_domain_use_count--; 1380 1.1 matt 1381 1.134 thorpej /* 1382 1.134 thorpej * The L1 now must have at least 1 free domain, so add 1383 1.134 thorpej * it back to the LRU list. If the use count is zero, 1384 1.134 thorpej * put it at the head of the list, otherwise it goes 1385 1.134 thorpej * to the tail. 1386 1.134 thorpej */ 1387 1.134 thorpej if (l1->l1_domain_use_count == 0) 1388 1.134 thorpej TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); 1389 1.134 thorpej else 1390 1.134 thorpej TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1391 1.54 thorpej 1392 1.226 matt mutex_spin_exit(&l1_lru_lock); 1393 1.271 matt #endif /* ARM_MMU_EXTENDED */ 1394 1.134 thorpej } 1395 1.54 thorpej 1396 1.271 matt #ifndef ARM_MMU_EXTENDED 1397 1.157 perry static inline void 1398 1.134 thorpej pmap_use_l1(pmap_t pm) 1399 1.134 thorpej { 1400 1.134 thorpej struct l1_ttable *l1; 1401 1.54 thorpej 1402 1.134 thorpej /* 1403 1.134 thorpej * Do nothing if we're in interrupt context. 1404 1.134 thorpej * Access to an L1 by the kernel pmap must not affect 1405 1.134 thorpej * the LRU list. 1406 1.134 thorpej */ 1407 1.171 matt if (cpu_intr_p() || pm == pmap_kernel()) 1408 1.134 thorpej return; 1409 1.54 thorpej 1410 1.134 thorpej l1 = pm->pm_l1; 1411 1.1 matt 1412 1.17 chris /* 1413 1.134 thorpej * If the L1 is not currently on the LRU list, just return 1414 1.17 chris */ 1415 1.134 thorpej if (l1->l1_domain_use_count == PMAP_DOMAINS) 1416 1.134 thorpej return; 1417 1.134 thorpej 1418 1.226 matt mutex_spin_enter(&l1_lru_lock); 1419 1.1 matt 1420 1.10 chris /* 1421 1.134 thorpej * Check the use count again, now that we've acquired the lock 1422 1.10 chris */ 1423 1.134 thorpej if (l1->l1_domain_use_count == PMAP_DOMAINS) { 1424 1.226 matt mutex_spin_exit(&l1_lru_lock); 1425 1.134 thorpej return; 1426 1.134 thorpej } 1427 1.111 thorpej 1428 1.111 thorpej /* 1429 1.134 thorpej * Move the L1 to the back of the LRU list 1430 1.111 thorpej */ 1431 1.134 thorpej TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1432 1.134 thorpej TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1433 1.111 thorpej 1434 1.226 matt mutex_spin_exit(&l1_lru_lock); 1435 1.1 matt } 1436 1.271 matt #endif /* !ARM_MMU_EXTENDED */ 1437 1.1 matt 1438 1.1 matt /* 1439 1.134 thorpej * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *) 1440 1.1 matt * 1441 1.134 thorpej * Free an L2 descriptor table. 1442 1.1 matt */ 1443 1.157 perry static inline void 1444 1.271 matt #if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT) 1445 1.271 matt pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa) 1446 1.271 matt #else 1447 1.134 thorpej pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa) 1448 1.134 thorpej #endif 1449 1.1 matt { 1450 1.271 matt #if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT) 1451 1.1 matt /* 1452 1.134 thorpej * Note: With a write-back cache, we may need to sync this 1453 1.134 thorpej * L2 table before re-using it. 
1454 1.134 thorpej * This is because it may have belonged to a non-current 1455 1.134 thorpej * pmap, in which case the cache syncs would have been 1456 1.174 matt * skipped for the pages that were being unmapped. If the 1457 1.134 thorpej * L2 table were then to be immediately re-allocated to 1458 1.134 thorpej * the *current* pmap, it may well contain stale mappings 1459 1.134 thorpej * which have not yet been cleared by a cache write-back 1460 1.134 thorpej * and so would still be visible to the mmu. 1461 1.1 matt */ 1462 1.134 thorpej if (need_sync) 1463 1.134 thorpej PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1464 1.271 matt #endif /* PMAP_INCLUDE_PTE_SYNC && PMAP_CACHE_VIVT */ 1465 1.134 thorpej pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa); 1466 1.1 matt } 1467 1.1 matt 1468 1.1 matt /* 1469 1.134 thorpej * Returns a pointer to the L2 bucket associated with the specified pmap 1470 1.134 thorpej * and VA, or NULL if no L2 bucket exists for the address. 1471 1.1 matt */ 1472 1.157 perry static inline struct l2_bucket * 1473 1.134 thorpej pmap_get_l2_bucket(pmap_t pm, vaddr_t va) 1474 1.134 thorpej { 1475 1.271 matt const size_t l1slot = l1pte_index(va); 1476 1.134 thorpej struct l2_dtable *l2; 1477 1.134 thorpej struct l2_bucket *l2b; 1478 1.1 matt 1479 1.271 matt if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL || 1480 1.271 matt (l2b = &l2->l2_bucket[L2_BUCKET(l1slot)])->l2b_kva == NULL) 1481 1.387 skrll return NULL; 1482 1.1 matt 1483 1.387 skrll return l2b; 1484 1.1 matt } 1485 1.1 matt 1486 1.1 matt /* 1487 1.134 thorpej * Returns a pointer to the L2 bucket associated with the specified pmap 1488 1.134 thorpej * and VA. 1489 1.1 matt * 1490 1.134 thorpej * If no L2 bucket exists, perform the necessary allocations to put an L2 1491 1.134 thorpej * bucket/page table in place. 1492 1.1 matt * 1493 1.134 thorpej * Note that if a new L2 bucket/page was allocated, the caller *must* 1494 1.286 skrll * increment the bucket occupancy counter appropriately *before* 1495 1.134 thorpej * releasing the pmap's lock to ensure no other thread or cpu deallocates 1496 1.134 thorpej * the bucket/page in the meantime. 1497 1.1 matt */ 1498 1.134 thorpej static struct l2_bucket * 1499 1.134 thorpej pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va) 1500 1.134 thorpej { 1501 1.271 matt const size_t l1slot = l1pte_index(va); 1502 1.134 thorpej struct l2_dtable *l2; 1503 1.134 thorpej 1504 1.271 matt if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) { 1505 1.134 thorpej /* 1506 1.134 thorpej * No mapping at this address, as there is 1507 1.134 thorpej * no entry in the L1 table. 1508 1.134 thorpej * Need to allocate a new l2_dtable. 1509 1.134 thorpej */ 1510 1.134 thorpej if ((l2 = pmap_alloc_l2_dtable()) == NULL) 1511 1.387 skrll return NULL; 1512 1.134 thorpej 1513 1.134 thorpej /* 1514 1.134 thorpej * Link it into the parent pmap 1515 1.134 thorpej */ 1516 1.271 matt pm->pm_l2[L2_IDX(l1slot)] = l2; 1517 1.134 thorpej } 1518 1.1 matt 1519 1.271 matt struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; 1520 1.1 matt 1521 1.10 chris /* 1522 1.134 thorpej * Fetch pointer to the L2 page table associated with the address. 1523 1.10 chris */ 1524 1.134 thorpej if (l2b->l2b_kva == NULL) { 1525 1.134 thorpej pt_entry_t *ptep; 1526 1.134 thorpej 1527 1.134 thorpej /* 1528 1.134 thorpej * No L2 page table has been allocated. Chances are, this 1529 1.134 thorpej * is because we just allocated the l2_dtable, above. 
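 *
 * (Added note, illustrative only: the two-level metadata lookup used by
 * pmap_get_l2_bucket() and by this function is, in outline,
 *	l1slot = l1pte_index(va);                   index of the L1 slot
 *	l2     = pm->pm_l2[L2_IDX(l1slot)];         sparse l2_dtable array
 *	l2b    = &l2->l2_bucket[L2_BUCKET(l1slot)]; kva/pa of the L2 table
 * and either level may still be missing, which is exactly what is being
 * filled in here.)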
1530 1.134 thorpej */ 1531 1.271 matt if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_pa)) == NULL) { 1532 1.134 thorpej /* 1533 1.134 thorpej * Oops, no more L2 page tables available at this 1534 1.134 thorpej * time. We may need to deallocate the l2_dtable 1535 1.134 thorpej * if we allocated a new one above. 1536 1.134 thorpej */ 1537 1.134 thorpej if (l2->l2_occupancy == 0) { 1538 1.271 matt pm->pm_l2[L2_IDX(l1slot)] = NULL; 1539 1.134 thorpej pmap_free_l2_dtable(l2); 1540 1.134 thorpej } 1541 1.387 skrll return NULL; 1542 1.134 thorpej } 1543 1.1 matt 1544 1.134 thorpej l2->l2_occupancy++; 1545 1.134 thorpej l2b->l2b_kva = ptep; 1546 1.271 matt l2b->l2b_l1slot = l1slot; 1547 1.271 matt 1548 1.271 matt #ifdef ARM_MMU_EXTENDED 1549 1.271 matt /* 1550 1.271 matt * We know there will be a mapping here, so simply 1551 1.271 matt * enter this PTP into the L1 now. 1552 1.271 matt */ 1553 1.271 matt pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; 1554 1.271 matt pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa 1555 1.271 matt | L1_C_DOM(pmap_domain(pm)); 1556 1.271 matt KASSERT(*pdep == 0); 1557 1.271 matt l1pte_setone(pdep, npde); 1558 1.322 skrll PDE_SYNC(pdep); 1559 1.271 matt #endif 1560 1.134 thorpej } 1561 1.16 chris 1562 1.387 skrll return l2b; 1563 1.1 matt } 1564 1.1 matt 1565 1.1 matt /* 1566 1.134 thorpej * One or more mappings in the specified L2 descriptor table have just been 1567 1.134 thorpej * invalidated. 1568 1.1 matt * 1569 1.134 thorpej * Garbage collect the metadata and descriptor table itself if necessary. 1570 1.1 matt * 1571 1.134 thorpej * The pmap lock must be acquired when this is called (not necessary 1572 1.134 thorpej * for the kernel pmap). 1573 1.1 matt */ 1574 1.134 thorpej static void 1575 1.134 thorpej pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) 1576 1.1 matt { 1577 1.134 thorpej KDASSERT(count <= l2b->l2b_occupancy); 1578 1.1 matt 1579 1.134 thorpej /* 1580 1.134 thorpej * Update the bucket's reference count according to how many 1581 1.134 thorpej * PTEs the caller has just invalidated. 1582 1.134 thorpej */ 1583 1.134 thorpej l2b->l2b_occupancy -= count; 1584 1.1 matt 1585 1.1 matt /* 1586 1.134 thorpej * Note: 1587 1.134 thorpej * 1588 1.134 thorpej * Level 2 page tables allocated to the kernel pmap are never freed 1589 1.134 thorpej * as that would require checking all Level 1 page tables and 1590 1.134 thorpej * removing any references to the Level 2 page table. See also the 1591 1.134 thorpej * comment elsewhere about never freeing bootstrap L2 descriptors. 1592 1.134 thorpej * 1593 1.134 thorpej * We make do with just invalidating the mapping in the L2 table. 1594 1.134 thorpej * 1595 1.134 thorpej * This isn't really a big deal in practice and, in fact, leads 1596 1.134 thorpej * to a performance win over time as we don't need to continually 1597 1.134 thorpej * alloc/free. 1598 1.1 matt */ 1599 1.134 thorpej if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) 1600 1.134 thorpej return; 1601 1.1 matt 1602 1.134 thorpej /* 1603 1.134 thorpej * There are no more valid mappings in this level 2 page table. 1604 1.134 thorpej * Go ahead and NULL-out the pointer in the bucket, then 1605 1.134 thorpej * free the page table. 
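 *
 * (Added note: the matching L1 slot is invalidated below as well, and if
 * this was the last bucket in use the containing l2_dtable is also
 * released, mirroring the allocation path in pmap_alloc_l2_bucket()
 * above.)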
1606 1.134 thorpej */ 1607 1.271 matt const size_t l1slot = l2b->l2b_l1slot; 1608 1.271 matt pt_entry_t * const ptep = l2b->l2b_kva; 1609 1.134 thorpej l2b->l2b_kva = NULL; 1610 1.1 matt 1611 1.271 matt pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; 1612 1.273 matt pd_entry_t pde __diagused = *pdep; 1613 1.1 matt 1614 1.271 matt #ifdef ARM_MMU_EXTENDED 1615 1.271 matt /* 1616 1.271 matt * Invalidate the L1 slot. 1617 1.271 matt */ 1618 1.271 matt KASSERT((pde & L1_TYPE_MASK) == L1_TYPE_C); 1619 1.271 matt #else 1620 1.134 thorpej /* 1621 1.271 matt * If the L1 slot matches the pmap's domain number, then invalidate it. 1622 1.134 thorpej */ 1623 1.271 matt if ((pde & (L1_C_DOM_MASK|L1_TYPE_MASK)) 1624 1.271 matt == (L1_C_DOM(pmap_domain(pm))|L1_TYPE_C)) { 1625 1.271 matt #endif 1626 1.271 matt l1pte_setone(pdep, 0); 1627 1.271 matt PDE_SYNC(pdep); 1628 1.271 matt #ifndef ARM_MMU_EXTENDED 1629 1.1 matt } 1630 1.271 matt #endif 1631 1.1 matt 1632 1.134 thorpej /* 1633 1.134 thorpej * Release the L2 descriptor table back to the pool cache. 1634 1.134 thorpej */ 1635 1.271 matt #if defined(PMAP_INCLUDE_PTE_SYNC) && defined(PMAP_CACHE_VIVT) 1636 1.271 matt pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_pa); 1637 1.134 thorpej #else 1638 1.271 matt pmap_free_l2_ptp(ptep, l2b->l2b_pa); 1639 1.134 thorpej #endif 1640 1.134 thorpej 1641 1.134 thorpej /* 1642 1.134 thorpej * Update the reference count in the associated l2_dtable 1643 1.134 thorpej */ 1644 1.271 matt struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)]; 1645 1.134 thorpej if (--l2->l2_occupancy > 0) 1646 1.134 thorpej return; 1647 1.1 matt 1648 1.134 thorpej /* 1649 1.134 thorpej * There are no more valid mappings in any of the Level 1 1650 1.134 thorpej * slots managed by this l2_dtable. Go ahead and NULL-out 1651 1.134 thorpej * the pointer in the parent pmap and free the l2_dtable. 1652 1.134 thorpej */ 1653 1.271 matt pm->pm_l2[L2_IDX(l1slot)] = NULL; 1654 1.134 thorpej pmap_free_l2_dtable(l2); 1655 1.1 matt } 1656 1.1 matt 1657 1.380 skrll #if defined(ARM_MMU_EXTENDED) 1658 1.380 skrll /* 1659 1.380 skrll * Pool cache constructors for L1 translation tables 1660 1.380 skrll */ 1661 1.380 skrll 1662 1.380 skrll static int 1663 1.380 skrll pmap_l1tt_ctor(void *arg, void *v, int flags) 1664 1.380 skrll { 1665 1.380 skrll #ifndef PMAP_INCLUDE_PTE_SYNC 1666 1.380 skrll #error not supported 1667 1.380 skrll #endif 1668 1.380 skrll 1669 1.380 skrll memset(v, 0, L1TT_SIZE); 1670 1.380 skrll PTE_SYNC_RANGE(v, L1TT_SIZE / sizeof(pt_entry_t)); 1671 1.380 skrll return 0; 1672 1.380 skrll } 1673 1.380 skrll #endif 1674 1.380 skrll 1675 1.1 matt /* 1676 1.134 thorpej * Pool cache constructors for L2 descriptor tables, metadata and pmap 1677 1.134 thorpej * structures. 1678 1.1 matt */ 1679 1.134 thorpej static int 1680 1.134 thorpej pmap_l2ptp_ctor(void *arg, void *v, int flags) 1681 1.1 matt { 1682 1.134 thorpej #ifndef PMAP_INCLUDE_PTE_SYNC 1683 1.134 thorpej vaddr_t va = (vaddr_t)v & ~PGOFSET; 1684 1.134 thorpej 1685 1.134 thorpej /* 1686 1.134 thorpej * The mappings for these page tables were initially made using 1687 1.134 thorpej * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- 1688 1.134 thorpej * mode will not be right for page table mappings. To avoid 1689 1.134 thorpej * polluting the pmap_kenter_pa() code with a special case for 1690 1.134 thorpej * page tables, we simply fix up the cache-mode here if it's not 1691 1.134 thorpej * correct. 
1692 1.134 thorpej */ 1693 1.271 matt if (pte_l2_s_cache_mode != pte_l2_s_cache_mode_pt) { 1694 1.271 matt const struct l2_bucket * const l2b = 1695 1.271 matt pmap_get_l2_bucket(pmap_kernel(), va); 1696 1.271 matt KASSERTMSG(l2b != NULL, "%#lx", va); 1697 1.271 matt pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 1698 1.271 matt const pt_entry_t opte = *ptep; 1699 1.1 matt 1700 1.271 matt if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 1701 1.271 matt /* 1702 1.271 matt * Page tables must have the cache-mode set correctly. 1703 1.271 matt */ 1704 1.343 skrll const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) 1705 1.271 matt | pte_l2_s_cache_mode_pt; 1706 1.271 matt l2pte_set(ptep, npte, opte); 1707 1.271 matt PTE_SYNC(ptep); 1708 1.271 matt cpu_tlb_flushD_SE(va); 1709 1.271 matt cpu_cpwait(); 1710 1.271 matt } 1711 1.134 thorpej } 1712 1.134 thorpej #endif 1713 1.1 matt 1714 1.134 thorpej memset(v, 0, L2_TABLE_SIZE_REAL); 1715 1.134 thorpej PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1716 1.387 skrll return 0; 1717 1.1 matt } 1718 1.1 matt 1719 1.134 thorpej static int 1720 1.134 thorpej pmap_l2dtable_ctor(void *arg, void *v, int flags) 1721 1.93 thorpej { 1722 1.93 thorpej 1723 1.134 thorpej memset(v, 0, sizeof(struct l2_dtable)); 1724 1.387 skrll return 0; 1725 1.134 thorpej } 1726 1.93 thorpej 1727 1.134 thorpej static int 1728 1.134 thorpej pmap_pmap_ctor(void *arg, void *v, int flags) 1729 1.134 thorpej { 1730 1.93 thorpej 1731 1.134 thorpej memset(v, 0, sizeof(struct pmap)); 1732 1.387 skrll return 0; 1733 1.93 thorpej } 1734 1.93 thorpej 1735 1.165 scw static void 1736 1.165 scw pmap_pinit(pmap_t pm) 1737 1.165 scw { 1738 1.257 matt #ifndef ARM_HAS_VBAR 1739 1.165 scw struct l2_bucket *l2b; 1740 1.165 scw 1741 1.165 scw if (vector_page < KERNEL_BASE) { 1742 1.165 scw /* 1743 1.165 scw * Map the vector page. 1744 1.165 scw */ 1745 1.165 scw pmap_enter(pm, vector_page, systempage.pv_pa, 1746 1.262 matt VM_PROT_READ | VM_PROT_EXECUTE, 1747 1.262 matt VM_PROT_READ | VM_PROT_EXECUTE | PMAP_WIRED); 1748 1.165 scw pmap_update(pm); 1749 1.165 scw 1750 1.271 matt pm->pm_pl1vec = pmap_l1_kva(pm) + l1pte_index(vector_page); 1751 1.165 scw l2b = pmap_get_l2_bucket(pm, vector_page); 1752 1.271 matt KASSERTMSG(l2b != NULL, "%#lx", vector_page); 1753 1.271 matt pm->pm_l1vec = l2b->l2b_pa | L1_C_PROTO | 1754 1.258 matt L1_C_DOM(pmap_domain(pm)); 1755 1.165 scw } else 1756 1.165 scw pm->pm_pl1vec = NULL; 1757 1.257 matt #endif 1758 1.165 scw } 1759 1.165 scw 1760 1.174 matt #ifdef PMAP_CACHE_VIVT 1761 1.93 thorpej /* 1762 1.134 thorpej * Since we have a virtually indexed cache, we may need to inhibit caching if 1763 1.134 thorpej * there is more than one mapping and at least one of them is writable. 1764 1.134 thorpej * Since we purge the cache on every context switch, we only need to check for 1765 1.134 thorpej * other mappings within the same pmap, or kernel_pmap. 1766 1.134 thorpej * This function is also called when a page is unmapped, to possibly reenable 1767 1.134 thorpej * caching on any remaining mappings. 
1768 1.134 thorpej * 1769 1.134 thorpej * The code implements the following logic, where: 1770 1.134 thorpej * 1771 1.134 thorpej * KW = # of kernel read/write pages 1772 1.134 thorpej * KR = # of kernel read only pages 1773 1.134 thorpej * UW = # of user read/write pages 1774 1.134 thorpej * UR = # of user read only pages 1775 1.286 skrll * 1776 1.134 thorpej * KC = kernel mapping is cacheable 1777 1.134 thorpej * UC = user mapping is cacheable 1778 1.93 thorpej * 1779 1.134 thorpej * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 1780 1.134 thorpej * +--------------------------------------------- 1781 1.134 thorpej * UW=0,UR=0 | --- KC=1 KC=1 KC=0 1782 1.134 thorpej * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 1783 1.134 thorpej * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 1784 1.134 thorpej * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 1785 1.93 thorpej */ 1786 1.111 thorpej 1787 1.134 thorpej static const int pmap_vac_flags[4][4] = { 1788 1.134 thorpej {-1, 0, 0, PVF_KNC}, 1789 1.134 thorpej {0, 0, PVF_NC, PVF_NC}, 1790 1.134 thorpej {0, PVF_NC, PVF_NC, PVF_NC}, 1791 1.134 thorpej {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} 1792 1.134 thorpej }; 1793 1.93 thorpej 1794 1.157 perry static inline int 1795 1.215 uebayasi pmap_get_vac_flags(const struct vm_page_md *md) 1796 1.134 thorpej { 1797 1.134 thorpej int kidx, uidx; 1798 1.93 thorpej 1799 1.134 thorpej kidx = 0; 1800 1.215 uebayasi if (md->kro_mappings || md->krw_mappings > 1) 1801 1.134 thorpej kidx |= 1; 1802 1.215 uebayasi if (md->krw_mappings) 1803 1.134 thorpej kidx |= 2; 1804 1.134 thorpej 1805 1.134 thorpej uidx = 0; 1806 1.215 uebayasi if (md->uro_mappings || md->urw_mappings > 1) 1807 1.134 thorpej uidx |= 1; 1808 1.215 uebayasi if (md->urw_mappings) 1809 1.134 thorpej uidx |= 2; 1810 1.111 thorpej 1811 1.387 skrll return pmap_vac_flags[uidx][kidx]; 1812 1.111 thorpej } 1813 1.111 thorpej 1814 1.157 perry static inline void 1815 1.215 uebayasi pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1816 1.111 thorpej { 1817 1.134 thorpej int nattr; 1818 1.134 thorpej 1819 1.215 uebayasi nattr = pmap_get_vac_flags(md); 1820 1.111 thorpej 1821 1.134 thorpej if (nattr < 0) { 1822 1.215 uebayasi md->pvh_attrs &= ~PVF_NC; 1823 1.134 thorpej return; 1824 1.134 thorpej } 1825 1.93 thorpej 1826 1.215 uebayasi if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) 1827 1.134 thorpej return; 1828 1.111 thorpej 1829 1.134 thorpej if (pm == pmap_kernel()) 1830 1.215 uebayasi pmap_vac_me_kpmap(md, pa, pm, va); 1831 1.134 thorpej else 1832 1.215 uebayasi pmap_vac_me_user(md, pa, pm, va); 1833 1.134 thorpej 1834 1.215 uebayasi md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; 1835 1.93 thorpej } 1836 1.93 thorpej 1837 1.134 thorpej static void 1838 1.215 uebayasi pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1839 1.1 matt { 1840 1.134 thorpej u_int u_cacheable, u_entries; 1841 1.134 thorpej struct pv_entry *pv; 1842 1.134 thorpej pmap_t last_pmap = pm; 1843 1.134 thorpej 1844 1.286 skrll /* 1845 1.134 thorpej * Pass one, see if there are both kernel and user pmaps for 1846 1.134 thorpej * this page. Calculate whether there are user-writable or 1847 1.134 thorpej * kernel-writable pages. 
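 *
 * (Added worked example for the KW/KR/UW/UR table above, illustrative
 * only: a page with one kernel read-only mapping and one user read-only
 * mapping has kidx = 1 and uidx = 1, and pmap_vac_flags[1][1] == 0, so
 * both mappings stay cacheable.  Add a user read/write mapping and uidx
 * becomes 3, selecting PVF_NC, so every mapping of the page is made
 * non-cacheable.)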
1848 1.134 thorpej */ 1849 1.134 thorpej u_cacheable = 0; 1850 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1851 1.134 thorpej if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) 1852 1.134 thorpej u_cacheable++; 1853 1.1 matt } 1854 1.1 matt 1855 1.215 uebayasi u_entries = md->urw_mappings + md->uro_mappings; 1856 1.1 matt 1857 1.286 skrll /* 1858 1.134 thorpej * We know we have just been updating a kernel entry, so if 1859 1.134 thorpej * all user pages are already cacheable, then there is nothing 1860 1.134 thorpej * further to do. 1861 1.134 thorpej */ 1862 1.215 uebayasi if (md->k_mappings == 0 && u_cacheable == u_entries) 1863 1.134 thorpej return; 1864 1.1 matt 1865 1.134 thorpej if (u_entries) { 1866 1.286 skrll /* 1867 1.134 thorpej * Scan over the list again, for each entry, if it 1868 1.134 thorpej * might not be set correctly, call pmap_vac_me_user 1869 1.134 thorpej * to recalculate the settings. 1870 1.134 thorpej */ 1871 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1872 1.286 skrll /* 1873 1.134 thorpej * We know kernel mappings will get set 1874 1.134 thorpej * correctly in other calls. We also know 1875 1.134 thorpej * that if the pmap is the same as last_pmap 1876 1.134 thorpej * then we've just handled this entry. 1877 1.134 thorpej */ 1878 1.134 thorpej if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) 1879 1.134 thorpej continue; 1880 1.1 matt 1881 1.286 skrll /* 1882 1.134 thorpej * If there are kernel entries and this page 1883 1.134 thorpej * is writable but non-cacheable, then we can 1884 1.286 skrll * skip this entry also. 1885 1.134 thorpej */ 1886 1.215 uebayasi if (md->k_mappings && 1887 1.134 thorpej (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 1888 1.134 thorpej (PVF_NC | PVF_WRITE)) 1889 1.134 thorpej continue; 1890 1.111 thorpej 1891 1.286 skrll /* 1892 1.286 skrll * Similarly if there are no kernel-writable 1893 1.286 skrll * entries and the page is already 1894 1.134 thorpej * read-only/cacheable. 1895 1.134 thorpej */ 1896 1.215 uebayasi if (md->krw_mappings == 0 && 1897 1.134 thorpej (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) 1898 1.134 thorpej continue; 1899 1.5 toshii 1900 1.286 skrll /* 1901 1.134 thorpej * For some of the remaining cases, we know 1902 1.134 thorpej * that we must recalculate, but for others we 1903 1.134 thorpej * can't tell if they are correct or not, so 1904 1.134 thorpej * we recalculate anyway. 1905 1.134 thorpej */ 1906 1.215 uebayasi pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); 1907 1.134 thorpej } 1908 1.48 chris 1909 1.215 uebayasi if (md->k_mappings == 0) 1910 1.134 thorpej return; 1911 1.111 thorpej } 1912 1.111 thorpej 1913 1.215 uebayasi pmap_vac_me_user(md, pa, pm, va); 1914 1.134 thorpej } 1915 1.111 thorpej 1916 1.134 thorpej static void 1917 1.215 uebayasi pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1918 1.134 thorpej { 1919 1.134 thorpej pmap_t kpmap = pmap_kernel(); 1920 1.184 dogcow struct pv_entry *pv, *npv = NULL; 1921 1.134 thorpej u_int entries = 0; 1922 1.134 thorpej u_int writable = 0; 1923 1.134 thorpej u_int cacheable_entries = 0; 1924 1.134 thorpej u_int kern_cacheable = 0; 1925 1.134 thorpej u_int other_writable = 0; 1926 1.48 chris 1927 1.134 thorpej /* 1928 1.134 thorpej * Count mappings and writable mappings in this pmap. 1929 1.134 thorpej * Include kernel mappings as part of our own. 1930 1.134 thorpej * Keep a pointer to the first one. 
1931 1.134 thorpej */ 1932 1.188 matt npv = NULL; 1933 1.271 matt KASSERT(pmap_page_locked_p(md)); 1934 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1935 1.134 thorpej /* Count mappings in the same pmap */ 1936 1.134 thorpej if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { 1937 1.134 thorpej if (entries++ == 0) 1938 1.134 thorpej npv = pv; 1939 1.1 matt 1940 1.134 thorpej /* Cacheable mappings */ 1941 1.134 thorpej if ((pv->pv_flags & PVF_NC) == 0) { 1942 1.134 thorpej cacheable_entries++; 1943 1.134 thorpej if (kpmap == pv->pv_pmap) 1944 1.134 thorpej kern_cacheable++; 1945 1.134 thorpej } 1946 1.110 thorpej 1947 1.134 thorpej /* Writable mappings */ 1948 1.134 thorpej if (pv->pv_flags & PVF_WRITE) 1949 1.134 thorpej ++writable; 1950 1.355 skrll } else if (pv->pv_flags & PVF_WRITE) 1951 1.134 thorpej other_writable = 1; 1952 1.134 thorpej } 1953 1.1 matt 1954 1.134 thorpej /* 1955 1.134 thorpej * Enable or disable caching as necessary. 1956 1.134 thorpej * Note: the first entry might be part of the kernel pmap, 1957 1.134 thorpej * so we can't assume this is indicative of the state of the 1958 1.134 thorpej * other (maybe non-kpmap) entries. 1959 1.134 thorpej */ 1960 1.134 thorpej if ((entries > 1 && writable) || 1961 1.134 thorpej (entries > 0 && pm == kpmap && other_writable)) { 1962 1.271 matt if (cacheable_entries == 0) { 1963 1.134 thorpej return; 1964 1.271 matt } 1965 1.1 matt 1966 1.183 matt for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { 1967 1.134 thorpej if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || 1968 1.134 thorpej (pv->pv_flags & PVF_NC)) 1969 1.134 thorpej continue; 1970 1.1 matt 1971 1.134 thorpej pv->pv_flags |= PVF_NC; 1972 1.26 rearnsha 1973 1.262 matt struct l2_bucket * const l2b 1974 1.262 matt = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1975 1.271 matt KASSERTMSG(l2b != NULL, "%#lx", va); 1976 1.262 matt pt_entry_t * const ptep 1977 1.262 matt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1978 1.262 matt const pt_entry_t opte = *ptep; 1979 1.262 matt pt_entry_t npte = opte & ~L2_S_CACHE_MASK; 1980 1.134 thorpej 1981 1.259 matt if ((va != pv->pv_va || pm != pv->pv_pmap) 1982 1.343 skrll && l2pte_valid_p(opte)) { 1983 1.259 matt pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va, 1984 1.259 matt true, pv->pv_flags); 1985 1.259 matt pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, 1986 1.259 matt pv->pv_flags); 1987 1.134 thorpej } 1988 1.1 matt 1989 1.262 matt l2pte_set(ptep, npte, opte); 1990 1.134 thorpej PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1991 1.134 thorpej } 1992 1.134 thorpej cpu_cpwait(); 1993 1.355 skrll } else if (entries > cacheable_entries) { 1994 1.1 matt /* 1995 1.134 thorpej * Turn cacheing back on for some pages. If it is a kernel 1996 1.134 thorpej * page, only do so if there are no other writable pages. 
1997 1.1 matt */ 1998 1.183 matt for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { 1999 1.134 thorpej if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && 2000 1.134 thorpej (kpmap != pv->pv_pmap || other_writable))) 2001 1.134 thorpej continue; 2002 1.134 thorpej 2003 1.134 thorpej pv->pv_flags &= ~PVF_NC; 2004 1.1 matt 2005 1.262 matt struct l2_bucket * const l2b 2006 1.262 matt = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 2007 1.271 matt KASSERTMSG(l2b != NULL, "%#lx", va); 2008 1.262 matt pt_entry_t * const ptep 2009 1.262 matt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2010 1.262 matt const pt_entry_t opte = *ptep; 2011 1.262 matt pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) 2012 1.262 matt | pte_l2_s_cache_mode; 2013 1.134 thorpej 2014 1.266 matt if (l2pte_valid_p(opte)) { 2015 1.259 matt pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, 2016 1.259 matt pv->pv_flags); 2017 1.134 thorpej } 2018 1.1 matt 2019 1.262 matt l2pte_set(ptep, npte, opte); 2020 1.134 thorpej PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 2021 1.134 thorpej } 2022 1.111 thorpej } 2023 1.1 matt } 2024 1.174 matt #endif 2025 1.174 matt 2026 1.174 matt #ifdef PMAP_CACHE_VIPT 2027 1.174 matt static void 2028 1.215 uebayasi pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 2029 1.174 matt { 2030 1.408 skrll 2031 1.271 matt #ifndef ARM_MMU_EXTENDED 2032 1.182 matt struct pv_entry *pv; 2033 1.174 matt vaddr_t tst_mask; 2034 1.174 matt bool bad_alias; 2035 1.183 matt const u_int 2036 1.215 uebayasi rw_mappings = md->urw_mappings + md->krw_mappings, 2037 1.215 uebayasi ro_mappings = md->uro_mappings + md->kro_mappings; 2038 1.174 matt 2039 1.174 matt /* do we need to do anything? */ 2040 1.174 matt if (arm_cache_prefer_mask == 0) 2041 1.174 matt return; 2042 1.174 matt 2043 1.408 skrll UVMHIST_FUNC(__func__); 2044 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx pm %#jx va %#jx", 2045 1.408 skrll (uintptr_t)md, (uintptr_t)pa, (uintptr_t)pm, va); 2046 1.174 matt 2047 1.182 matt KASSERT(!va || pm); 2048 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2049 1.174 matt 2050 1.174 matt /* Already a conflict? */ 2051 1.215 uebayasi if (__predict_false(md->pvh_attrs & PVF_NC)) { 2052 1.174 matt /* just an add, things are already non-cached */ 2053 1.215 uebayasi KASSERT(!(md->pvh_attrs & PVF_DIRTY)); 2054 1.215 uebayasi KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2055 1.174 matt bad_alias = false; 2056 1.174 matt if (va) { 2057 1.174 matt PMAPCOUNT(vac_color_none); 2058 1.174 matt bad_alias = true; 2059 1.215 uebayasi KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2060 1.174 matt goto fixup; 2061 1.174 matt } 2062 1.215 uebayasi pv = SLIST_FIRST(&md->pvh_list); 2063 1.174 matt /* the list can't be empty because it would be cachable */ 2064 1.215 uebayasi if (md->pvh_attrs & PVF_KMPAGE) { 2065 1.215 uebayasi tst_mask = md->pvh_attrs; 2066 1.174 matt } else { 2067 1.174 matt KASSERT(pv); 2068 1.174 matt tst_mask = pv->pv_va; 2069 1.183 matt pv = SLIST_NEXT(pv, pv_link); 2070 1.174 matt } 2071 1.179 matt /* 2072 1.179 matt * Only check for a bad alias if we have writable mappings. 2073 1.179 matt */ 2074 1.183 matt tst_mask &= arm_cache_prefer_mask; 2075 1.251 matt if (rw_mappings > 0) { 2076 1.183 matt for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { 2077 1.179 matt /* if there's a bad alias, stop checking. 
*/ 2078 1.179 matt if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) 2079 1.179 matt bad_alias = true; 2080 1.179 matt } 2081 1.215 uebayasi md->pvh_attrs |= PVF_WRITE; 2082 1.183 matt if (!bad_alias) 2083 1.215 uebayasi md->pvh_attrs |= PVF_DIRTY; 2084 1.183 matt } else { 2085 1.194 matt /* 2086 1.194 matt * We have only read-only mappings. Let's see if there 2087 1.194 matt * are multiple colors in use or if we mapped a KMPAGE. 2088 1.194 matt * If the latter, we have a bad alias. If the former, 2089 1.194 matt * we need to remember that. 2090 1.194 matt */ 2091 1.194 matt for (; pv; pv = SLIST_NEXT(pv, pv_link)) { 2092 1.194 matt if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { 2093 1.215 uebayasi if (md->pvh_attrs & PVF_KMPAGE) 2094 1.194 matt bad_alias = true; 2095 1.194 matt break; 2096 1.194 matt } 2097 1.194 matt } 2098 1.215 uebayasi md->pvh_attrs &= ~PVF_WRITE; 2099 1.194 matt /* 2100 1.286 skrll * No KMPAGE and we exited early, so we must have 2101 1.194 matt * multiple color mappings. 2102 1.194 matt */ 2103 1.194 matt if (!bad_alias && pv != NULL) 2104 1.215 uebayasi md->pvh_attrs |= PVF_MULTCLR; 2105 1.174 matt } 2106 1.194 matt 2107 1.174 matt /* If no conflicting colors, set everything back to cached */ 2108 1.174 matt if (!bad_alias) { 2109 1.183 matt #ifdef DEBUG 2110 1.215 uebayasi if ((md->pvh_attrs & PVF_WRITE) 2111 1.183 matt || ro_mappings < 2) { 2112 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) 2113 1.183 matt KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); 2114 1.183 matt } 2115 1.183 matt #endif 2116 1.215 uebayasi md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; 2117 1.215 uebayasi md->pvh_attrs |= tst_mask | PVF_COLORED; 2118 1.185 matt /* 2119 1.185 matt * Restore DIRTY bit if page is modified 2120 1.185 matt */ 2121 1.215 uebayasi if (md->pvh_attrs & PVF_DMOD) 2122 1.215 uebayasi md->pvh_attrs |= PVF_DIRTY; 2123 1.183 matt PMAPCOUNT(vac_color_restore); 2124 1.174 matt } else { 2125 1.215 uebayasi KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); 2126 1.215 uebayasi KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); 2127 1.174 matt } 2128 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2129 1.215 uebayasi KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2130 1.174 matt } else if (!va) { 2131 1.251 matt KASSERT(pmap_is_page_colored_p(md)); 2132 1.215 uebayasi KASSERT(!(md->pvh_attrs & PVF_WRITE) 2133 1.215 uebayasi || (md->pvh_attrs & PVF_DIRTY)); 2134 1.194 matt if (rw_mappings == 0) { 2135 1.215 uebayasi md->pvh_attrs &= ~PVF_WRITE; 2136 1.194 matt if (ro_mappings == 1 2137 1.215 uebayasi && (md->pvh_attrs & PVF_MULTCLR)) { 2138 1.194 matt /* 2139 1.194 matt * If this is the last readonly mapping 2140 1.194 matt * but it doesn't match the current color 2141 1.194 matt * for the page, change the current color 2142 1.194 matt * to match this last readonly mapping. 
2143 1.194 matt */ 2144 1.215 uebayasi pv = SLIST_FIRST(&md->pvh_list); 2145 1.215 uebayasi tst_mask = (md->pvh_attrs ^ pv->pv_va) 2146 1.194 matt & arm_cache_prefer_mask; 2147 1.194 matt if (tst_mask) { 2148 1.215 uebayasi md->pvh_attrs ^= tst_mask; 2149 1.194 matt PMAPCOUNT(vac_color_change); 2150 1.194 matt } 2151 1.194 matt } 2152 1.194 matt } 2153 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2154 1.215 uebayasi KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2155 1.174 matt return; 2156 1.215 uebayasi } else if (!pmap_is_page_colored_p(md)) { 2157 1.174 matt /* not colored so we just use its color */ 2158 1.215 uebayasi KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); 2159 1.215 uebayasi KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2160 1.174 matt PMAPCOUNT(vac_color_new); 2161 1.215 uebayasi md->pvh_attrs &= PAGE_SIZE - 1; 2162 1.215 uebayasi md->pvh_attrs |= PVF_COLORED 2163 1.183 matt | (va & arm_cache_prefer_mask) 2164 1.183 matt | (rw_mappings > 0 ? PVF_WRITE : 0); 2165 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2166 1.215 uebayasi KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2167 1.174 matt return; 2168 1.215 uebayasi } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { 2169 1.182 matt bad_alias = false; 2170 1.183 matt if (rw_mappings > 0) { 2171 1.182 matt /* 2172 1.194 matt * We now have writeable mappings and if we have 2173 1.194 matt * readonly mappings in more than once color, we have 2174 1.194 matt * an aliasing problem. Regardless mark the page as 2175 1.194 matt * writeable. 2176 1.182 matt */ 2177 1.215 uebayasi if (md->pvh_attrs & PVF_MULTCLR) { 2178 1.194 matt if (ro_mappings < 2) { 2179 1.194 matt /* 2180 1.194 matt * If we only have less than two 2181 1.194 matt * read-only mappings, just flush the 2182 1.194 matt * non-primary colors from the cache. 2183 1.194 matt */ 2184 1.215 uebayasi pmap_flush_page(md, pa, 2185 1.194 matt PMAP_FLUSH_SECONDARY); 2186 1.194 matt } else { 2187 1.194 matt bad_alias = true; 2188 1.182 matt } 2189 1.182 matt } 2190 1.215 uebayasi md->pvh_attrs |= PVF_WRITE; 2191 1.182 matt } 2192 1.182 matt /* If no conflicting colors, set everything back to cached */ 2193 1.182 matt if (!bad_alias) { 2194 1.183 matt #ifdef DEBUG 2195 1.183 matt if (rw_mappings > 0 2196 1.215 uebayasi || (md->pvh_attrs & PMAP_KMPAGE)) { 2197 1.215 uebayasi tst_mask = md->pvh_attrs & arm_cache_prefer_mask; 2198 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) 2199 1.183 matt KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); 2200 1.183 matt } 2201 1.183 matt #endif 2202 1.215 uebayasi if (SLIST_EMPTY(&md->pvh_list)) 2203 1.182 matt PMAPCOUNT(vac_color_reuse); 2204 1.182 matt else 2205 1.182 matt PMAPCOUNT(vac_color_ok); 2206 1.183 matt 2207 1.182 matt /* matching color, just return */ 2208 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2209 1.215 uebayasi KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2210 1.182 matt return; 2211 1.182 matt } 2212 1.215 uebayasi KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); 2213 1.215 uebayasi KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); 2214 1.182 matt 2215 1.182 matt /* color conflict. evict from cache. 
*/
2216 1.182 matt
2217 1.215 uebayasi	pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2218 1.215 uebayasi	md->pvh_attrs &= ~PVF_COLORED;
2219 1.215 uebayasi	md->pvh_attrs |= PVF_NC;
2220 1.215 uebayasi	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2221 1.215 uebayasi	KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2222 1.183 matt	PMAPCOUNT(vac_color_erase);
2223 1.183 matt	} else if (rw_mappings == 0
2224 1.215 uebayasi	    && (md->pvh_attrs & PVF_KMPAGE) == 0) {
2225 1.215 uebayasi	KASSERT((md->pvh_attrs & PVF_WRITE) == 0);
2226 1.183 matt
2227 1.183 matt	/*
2228 1.183 matt	* If the page has dirty cache lines, clean it.
2229 1.183 matt	*/
2230 1.215 uebayasi	if (md->pvh_attrs & PVF_DIRTY)
2231 1.215 uebayasi	pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
2232 1.183 matt
2233 1.179 matt	/*
2234 1.183 matt	* If this is the first remapping (we know that there are no
2235 1.183 matt	* writeable mappings), then this is a simple color change.
2236 1.183 matt	* Otherwise this is a secondary r/o mapping, which means
2237 1.183 matt	* we don't have to do anything.
2238 1.179 matt	*/
2239 1.183 matt	if (ro_mappings == 1) {
2240 1.215 uebayasi	KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
2241 1.215 uebayasi	md->pvh_attrs &= PAGE_SIZE - 1;
2242 1.215 uebayasi	md->pvh_attrs |= (va & arm_cache_prefer_mask);
2243 1.183 matt	PMAPCOUNT(vac_color_change);
2244 1.183 matt	} else {
2245 1.183 matt	PMAPCOUNT(vac_color_blind);
2246 1.183 matt	}
2247 1.215 uebayasi	md->pvh_attrs |= PVF_MULTCLR;
2248 1.215 uebayasi	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2249 1.215 uebayasi	KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2250 1.174 matt	return;
2251 1.174 matt	} else {
2252 1.183 matt	if (rw_mappings > 0)
2253 1.215 uebayasi	md->pvh_attrs |= PVF_WRITE;
2254 1.182 matt
2255 1.174 matt	/* color conflict. evict from cache. */
2256 1.215 uebayasi	pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2257 1.174 matt
2258 1.174 matt	/* the list can't be empty because this was an enter/modify */
2259 1.215 uebayasi	pv = SLIST_FIRST(&md->pvh_list);
2260 1.215 uebayasi	if ((md->pvh_attrs & PVF_KMPAGE) == 0) {
2261 1.183 matt	KASSERT(pv);
2262 1.183 matt	/*
2263 1.183 matt	* If there's only one mapped page, change color to the
2264 1.185 matt	* page's new color and return.  Restore the DIRTY bit
2265 1.185 matt	* that was erased by pmap_flush_page.
2266 1.183 matt	*/
2267 1.183 matt	if (SLIST_NEXT(pv, pv_link) == NULL) {
2268 1.215 uebayasi	md->pvh_attrs &= PAGE_SIZE - 1;
2269 1.215 uebayasi	md->pvh_attrs |= (va & arm_cache_prefer_mask);
2270 1.215 uebayasi	if (md->pvh_attrs & PVF_DMOD)
2271 1.215 uebayasi	md->pvh_attrs |= PVF_DIRTY;
2272 1.183 matt	PMAPCOUNT(vac_color_change);
2273 1.215 uebayasi	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2274 1.215 uebayasi	KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2275 1.215 uebayasi	KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2276 1.183 matt	return;
2277 1.183 matt	}
2278 1.174 matt	}
2279 1.174 matt	bad_alias = true;
2280 1.215 uebayasi	md->pvh_attrs &= ~PVF_COLORED;
2281 1.215 uebayasi	md->pvh_attrs |= PVF_NC;
2282 1.174 matt	PMAPCOUNT(vac_color_erase);
2283 1.215 uebayasi	KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2284 1.174 matt	}
2285 1.174 matt
2286 1.174 matt	fixup:
2287 1.215 uebayasi	KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2288 1.174 matt
2289 1.174 matt	/*
2290 1.174 matt	* Turn caching on/off for all pages.
2291 1.174 matt	*/
2292 1.215 uebayasi	SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2293 1.262 matt	struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap,
2294 1.262 matt	    pv->pv_va);
2295 1.271 matt	KASSERTMSG(l2b != NULL, "%#lx", va);
2296 1.262 matt	pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2297 1.262 matt	const pt_entry_t opte = *ptep;
2298 1.262 matt	pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
2299 1.174 matt	if (bad_alias) {
2300 1.174 matt	pv->pv_flags |= PVF_NC;
2301 1.174 matt	} else {
2302 1.174 matt	pv->pv_flags &= ~PVF_NC;
2303 1.262 matt	npte |= pte_l2_s_cache_mode;
2304 1.174 matt	}
2305 1.183 matt
2306 1.262 matt	if (opte == npte) /* only update if there's a change */
2307 1.174 matt	continue;
2308 1.174 matt
2309 1.343 skrll	if (l2pte_valid_p(opte)) {
2310 1.262 matt	pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags);
2311 1.174 matt	}
2312 1.174 matt
2313 1.262 matt	l2pte_set(ptep, npte, opte);
2314 1.174 matt	PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
2315 1.174 matt	}
2316 1.271 matt	#endif /* !ARM_MMU_EXTENDED */
2317 1.174 matt	}
2318 1.174 matt	#endif /* PMAP_CACHE_VIPT */
2319 1.174 matt
2320 1.1 matt
2321 1.1 matt	/*
2322 1.134 thorpej	* Modify pte bits for all ptes corresponding to the given physical address.
2323 1.134 thorpej	* We use `maskbits' rather than `clearbits' because we're always passing
2324 1.134 thorpej	* constants and the latter would require an extra inversion at run-time.
2325 1.1 matt	*/
2326 1.134 thorpej	static void
2327 1.215 uebayasi	pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
2328 1.1 matt	{
2329 1.134 thorpej	struct pv_entry *pv;
2330 1.174 matt	#ifdef PMAP_CACHE_VIPT
2331 1.215 uebayasi	const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
2332 1.345 skrll	bool need_syncicache = false;
2333 1.443 skrll	#ifndef ARM_MMU_EXTENDED
2334 1.262 matt	bool need_vac_me_harder = false;
2335 1.174 matt	#endif
2336 1.443 skrll	#endif /* PMAP_CACHE_VIPT */
2337 1.1 matt
2338 1.408 skrll	UVMHIST_FUNC(__func__);
2339 1.408 skrll	UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx",
2340 1.408 skrll	    (uintptr_t)md, pa, maskbits, 0);
2341 1.1 matt
2342 1.174 matt	#ifdef PMAP_CACHE_VIPT
2343 1.174 matt	/*
2344 1.174 matt	* If we might want to sync the I-cache and we've modified it,
2345 1.174 matt	* then we know we definitely need to sync or discard it.
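 *
 * (Added note, illustrative only: callers are expected to use this
 * function roughly as
 *	pmap_clearbit(md, pa, PVF_WRITE | PVF_MOD);	write-protect and clean
 *	pmap_clearbit(md, pa, PVF_REF);			force a re-reference fault
 * i.e. maskbits names the pv flags and saved attributes to take away
 * from every mapping of the page at once.)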
2346 1.174 matt */ 2347 1.262 matt if (want_syncicache) { 2348 1.345 skrll if (md->pvh_attrs & PVF_MOD) { 2349 1.345 skrll need_syncicache = true; 2350 1.345 skrll } 2351 1.262 matt } 2352 1.174 matt #endif 2353 1.271 matt KASSERT(pmap_page_locked_p(md)); 2354 1.271 matt 2355 1.17 chris /* 2356 1.134 thorpej * Clear saved attributes (modify, reference) 2357 1.17 chris */ 2358 1.215 uebayasi md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); 2359 1.134 thorpej 2360 1.215 uebayasi if (SLIST_EMPTY(&md->pvh_list)) { 2361 1.345 skrll #if defined(PMAP_CACHE_VIPT) 2362 1.174 matt if (need_syncicache) { 2363 1.174 matt /* 2364 1.174 matt * No one has it mapped, so just discard it. The next 2365 1.174 matt * exec remapping will cause it to be synced. 2366 1.174 matt */ 2367 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 2368 1.174 matt PMAPCOUNT(exec_discarded_clearbit); 2369 1.174 matt } 2370 1.174 matt #endif 2371 1.17 chris return; 2372 1.1 matt } 2373 1.1 matt 2374 1.17 chris /* 2375 1.425 skrll * Loop over all current mappings setting/clearing as appropriate 2376 1.17 chris */ 2377 1.405 ad for (pv = SLIST_FIRST(&md->pvh_list); pv != NULL;) { 2378 1.271 matt pmap_t pm = pv->pv_pmap; 2379 1.271 matt const vaddr_t va = pv->pv_va; 2380 1.271 matt const u_int oflags = pv->pv_flags; 2381 1.271 matt #ifndef ARM_MMU_EXTENDED 2382 1.185 matt /* 2383 1.185 matt * Kernel entries are unmanaged and as such not to be changed. 2384 1.185 matt */ 2385 1.407 skrll if (PV_IS_KENTRY_P(oflags)) { 2386 1.405 ad pv = SLIST_NEXT(pv, pv_link); 2387 1.185 matt continue; 2388 1.405 ad } 2389 1.271 matt #endif 2390 1.48 chris 2391 1.405 ad /* 2392 1.405 ad * Try to get a hold on the pmap's lock. We must do this 2393 1.405 ad * while still holding the page locked, to know that the 2394 1.405 ad * page is still associated with the pmap and the mapping is 2395 1.405 ad * in place. If a hold can't be had, unlock and wait for 2396 1.405 ad * the pmap's lock to become available and retry. The pmap 2397 1.405 ad * must be ref'd over this dance to stop it disappearing 2398 1.405 ad * behind us. 2399 1.405 ad */ 2400 1.405 ad if (!mutex_tryenter(&pm->pm_lock)) { 2401 1.405 ad pmap_reference(pm); 2402 1.405 ad pmap_release_page_lock(md); 2403 1.405 ad pmap_acquire_pmap_lock(pm); 2404 1.405 ad /* nothing, just wait for it */ 2405 1.271 matt pmap_release_pmap_lock(pm); 2406 1.405 ad pmap_destroy(pm); 2407 1.405 ad /* Restart from the beginning. */ 2408 1.271 matt pmap_acquire_page_lock(md); 2409 1.405 ad pv = SLIST_FIRST(&md->pvh_list); 2410 1.271 matt continue; 2411 1.271 matt } 2412 1.405 ad pv->pv_flags &= ~maskbits; 2413 1.405 ad 2414 1.405 ad struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); 2415 1.271 matt KASSERTMSG(l2b != NULL, "%#lx", va); 2416 1.1 matt 2417 1.262 matt pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 2418 1.262 matt const pt_entry_t opte = *ptep; 2419 1.443 skrll pt_entry_t npte = opte; 2420 1.443 skrll 2421 1.443 skrll #if defined(ARM_MMU_EXTENDED) 2422 1.443 skrll if ((maskbits & PVF_EXEC) != 0 && l2pte_valid_p(opte)) { 2423 1.443 skrll KASSERT((opte & L2_TYPE_S) != 0); 2424 1.443 skrll npte |= L2_XS_XN; 2425 1.443 skrll } 2426 1.271 matt 2427 1.302 matt KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 
0 : L2_XS_nG));
2428 1.301 nonaka	#endif
2429 1.114 thorpej
2430 1.408 skrll	UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx",
2431 1.408 skrll	    (uintptr_t)pv, (uintptr_t)pm, va, oflags);
2432 1.114 thorpej
2433 1.134 thorpej	if (maskbits & (PVF_WRITE|PVF_MOD)) {
2434 1.174 matt	#ifdef PMAP_CACHE_VIVT
2435 1.271 matt	if ((oflags & PVF_NC)) {
2436 1.286 skrll	/*
2437 1.134 thorpej	* Entry is not cacheable:
2438 1.134 thorpej	*
2439 1.286 skrll	* Don't turn caching on again if this is a
2440 1.134 thorpej	* modified emulation. This would be
2441 1.134 thorpej	* inconsistent with the settings created by
2442 1.134 thorpej	* pmap_vac_me_harder(). Otherwise, it's safe
2443 1.134 thorpej	* to re-enable caching.
2444 1.134 thorpej	*
2445 1.134 thorpej	* There's no need to call pmap_vac_me_harder()
2446 1.134 thorpej	* here: all pages are losing their write
2447 1.134 thorpej	* permission.
2448 1.134 thorpej	*/
2449 1.134 thorpej	if (maskbits & PVF_WRITE) {
2450 1.134 thorpej	npte |= pte_l2_s_cache_mode;
2451 1.134 thorpej	pv->pv_flags &= ~PVF_NC;
2452 1.134 thorpej	}
2453 1.355 skrll	} else if (l2pte_writable_p(opte)) {
2454 1.286 skrll	/*
2455 1.134 thorpej	* Entry is writable/cacheable: check if the pmap
2456 1.134 thorpej	* is current; if it is, flush it, otherwise it
2457 1.134 thorpej	* won't be in the cache
2458 1.134 thorpej	*/
2459 1.271 matt	pmap_cache_wbinv_page(pm, va,
2460 1.259 matt	    (maskbits & PVF_REF) != 0,
2461 1.259 matt	    oflags|PVF_WRITE);
2462 1.134 thorpej	}
2463 1.174 matt	#endif
2464 1.111 thorpej
2465 1.134 thorpej	/* make the pte read only */
2466 1.214 jmcneill	npte = l2pte_set_readonly(npte);
2467 1.111 thorpej
2468 1.405 ad	if ((maskbits & oflags & PVF_WRITE)) {
2469 1.134 thorpej	/*
2470 1.134 thorpej	* Keep alias accounting up to date
2471 1.134 thorpej	*/
2472 1.271 matt	if (pm == pmap_kernel()) {
2473 1.215 uebayasi	md->krw_mappings--;
2474 1.215 uebayasi	md->kro_mappings++;
2475 1.174 matt	} else {
2476 1.215 uebayasi	md->urw_mappings--;
2477 1.215 uebayasi	md->uro_mappings++;
2478 1.134 thorpej	}
2479 1.174 matt	#ifdef PMAP_CACHE_VIPT
2480 1.251 matt	if (arm_cache_prefer_mask != 0) {
2481 1.251 matt	if (md->urw_mappings + md->krw_mappings == 0) {
2482 1.251 matt	md->pvh_attrs &= ~PVF_WRITE;
2483 1.251 matt	} else {
2484 1.251 matt	PMAP_VALIDATE_MD_PAGE(md);
2485 1.251 matt	}
2486 1.247 matt	}
2487 1.174 matt	if (want_syncicache)
2488 1.174 matt	need_syncicache = true;
2489 1.345 skrll	#ifndef ARM_MMU_EXTENDED
2490 1.183 matt	need_vac_me_harder = true;
2491 1.174 matt	#endif
2492 1.271 matt	#endif /* PMAP_CACHE_VIPT */
2493 1.134 thorpej	}
2494 1.134 thorpej	}
2495 1.1 matt
2496 1.134 thorpej	if (maskbits & PVF_REF) {
2497 1.271 matt	if (true
2498 1.271 matt	#ifndef ARM_MMU_EXTENDED
2499 1.271 matt	    && (oflags & PVF_NC) == 0
2500 1.271 matt	#endif
2501 1.259 matt	    && (maskbits & (PVF_WRITE|PVF_MOD)) == 0
2502 1.266 matt	    && l2pte_valid_p(npte)) {
2503 1.183 matt	#ifdef PMAP_CACHE_VIVT
2504 1.134 thorpej	/*
2505 1.134 thorpej	* Check npte here; we may have already
2506 1.134 thorpej	* done the wbinv above, and the validity
2507 1.134 thorpej	* of the PTE is the same for opte and
2508 1.134 thorpej	* npte.
2509 1.134 thorpej	*/
2510 1.271 matt	pmap_cache_wbinv_page(pm, va, true, oflags);
2511 1.183 matt	#endif
2512 1.134 thorpej	}
2513 1.1 matt
2514 1.134 thorpej	/*
2515 1.134 thorpej	* Make the PTE invalid so that we will take a
2516 1.134 thorpej	* page fault the next time the mapping is
2517 1.134 thorpej	* referenced.
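 *
 * (Added note: this is how the referenced bit is emulated on ARM MMUs
 * without one in hardware.  Clearing PVF_REF downgrades the PTE to
 * L2_TYPE_INV, the next access traps, and the fault path is expected to
 * re-validate the PTE and set PVF_REF again.)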
2518 1.134 thorpej */ 2519 1.134 thorpej npte &= ~L2_TYPE_MASK; 2520 1.134 thorpej npte |= L2_TYPE_INV; 2521 1.134 thorpej } 2522 1.1 matt 2523 1.134 thorpej if (npte != opte) { 2524 1.307 skrll l2pte_reset(ptep); 2525 1.134 thorpej PTE_SYNC(ptep); 2526 1.262 matt 2527 1.134 thorpej /* Flush the TLB entry if a current pmap. */ 2528 1.271 matt pmap_tlb_flush_SE(pm, va, oflags); 2529 1.307 skrll 2530 1.307 skrll l2pte_set(ptep, npte, 0); 2531 1.307 skrll PTE_SYNC(ptep); 2532 1.134 thorpej } 2533 1.1 matt 2534 1.134 thorpej pmap_release_pmap_lock(pm); 2535 1.133 thorpej 2536 1.408 skrll UVMHIST_LOG(maphist, "pm %#jx va %#jx opte %#jx npte %#jx", 2537 1.408 skrll (uintptr_t)pm, va, opte, npte); 2538 1.405 ad 2539 1.405 ad /* Move to next entry. */ 2540 1.405 ad pv = SLIST_NEXT(pv, pv_link); 2541 1.134 thorpej } 2542 1.133 thorpej 2543 1.345 skrll #if defined(PMAP_CACHE_VIPT) 2544 1.174 matt /* 2545 1.174 matt * If we need to sync the I-cache and we haven't done it yet, do it. 2546 1.174 matt */ 2547 1.262 matt if (need_syncicache) { 2548 1.215 uebayasi pmap_syncicache_page(md, pa); 2549 1.174 matt PMAPCOUNT(exec_synced_clearbit); 2550 1.174 matt } 2551 1.345 skrll #ifndef ARM_MMU_EXTENDED 2552 1.183 matt /* 2553 1.187 skrll * If we are changing this to read-only, we need to call vac_me_harder 2554 1.183 matt * so we can change all the read-only pages to cacheable. We pretend 2555 1.183 matt * this as a page deletion. 2556 1.183 matt */ 2557 1.183 matt if (need_vac_me_harder) { 2558 1.215 uebayasi if (md->pvh_attrs & PVF_NC) 2559 1.215 uebayasi pmap_vac_me_harder(md, pa, NULL, 0); 2560 1.183 matt } 2561 1.345 skrll #endif /* !ARM_MMU_EXTENDED */ 2562 1.345 skrll #endif /* PMAP_CACHE_VIPT */ 2563 1.1 matt } 2564 1.1 matt 2565 1.1 matt /* 2566 1.134 thorpej * pmap_clean_page() 2567 1.134 thorpej * 2568 1.134 thorpej * This is a local function used to work out the best strategy to clean 2569 1.134 thorpej * a single page referenced by its entry in the PV table. It's used by 2570 1.309 skrll * pmap_copy_page, pmap_zero_page and maybe some others later on. 2571 1.134 thorpej * 2572 1.134 thorpej * Its policy is effectively: 2573 1.134 thorpej * o If there are no mappings, we don't bother doing anything with the cache. 2574 1.134 thorpej * o If there is one mapping, we clean just that page. 2575 1.134 thorpej * o If there are multiple mappings, we clean the entire cache. 2576 1.134 thorpej * 2577 1.134 thorpej * So that some functions can be further optimised, it returns 0 if it didn't 2578 1.134 thorpej * clean the entire cache, or 1 if it did. 2579 1.134 thorpej * 2580 1.134 thorpej * XXX One bug in this routine is that if the pv_entry has a single page 2581 1.134 thorpej * mapped at 0x00000000 a whole cache clean will be performed rather than 2582 1.134 thorpej * just the 1 page. Since this should not occur in everyday use and if it does 2583 1.134 thorpej * it will just result in not the most efficient clean for the page. 
2584 1.1 matt */ 2585 1.174 matt #ifdef PMAP_CACHE_VIVT 2586 1.271 matt static bool 2587 1.271 matt pmap_clean_page(struct vm_page_md *md, bool is_src) 2588 1.1 matt { 2589 1.271 matt struct pv_entry *pv; 2590 1.211 he pmap_t pm_to_clean = NULL; 2591 1.271 matt bool cache_needs_cleaning = false; 2592 1.271 matt vaddr_t page_to_clean = 0; 2593 1.134 thorpej u_int flags = 0; 2594 1.17 chris 2595 1.108 thorpej /* 2596 1.134 thorpej * Since we flush the cache each time we change to a different 2597 1.134 thorpej * user vmspace, we only need to flush the page if it is in the 2598 1.134 thorpej * current pmap. 2599 1.17 chris */ 2600 1.271 matt KASSERT(pmap_page_locked_p(md)); 2601 1.271 matt SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 2602 1.271 matt if (pmap_is_current(pv->pv_pmap)) { 2603 1.271 matt flags |= pv->pv_flags; 2604 1.108 thorpej /* 2605 1.286 skrll * The page is mapped non-cacheable in 2606 1.17 chris * this map. No need to flush the cache. 2607 1.17 chris */ 2608 1.271 matt if (pv->pv_flags & PVF_NC) { 2609 1.17 chris #ifdef DIAGNOSTIC 2610 1.271 matt KASSERT(!cache_needs_cleaning); 2611 1.17 chris #endif 2612 1.17 chris break; 2613 1.271 matt } else if (is_src && (pv->pv_flags & PVF_WRITE) == 0) 2614 1.17 chris continue; 2615 1.108 thorpej if (cache_needs_cleaning) { 2616 1.17 chris page_to_clean = 0; 2617 1.17 chris break; 2618 1.134 thorpej } else { 2619 1.271 matt page_to_clean = pv->pv_va; 2620 1.271 matt pm_to_clean = pv->pv_pmap; 2621 1.134 thorpej } 2622 1.271 matt cache_needs_cleaning = true; 2623 1.17 chris } 2624 1.1 matt } 2625 1.1 matt 2626 1.108 thorpej if (page_to_clean) { 2627 1.259 matt pmap_cache_wbinv_page(pm_to_clean, page_to_clean, 2628 1.259 matt !is_src, flags | PVF_REF); 2629 1.108 thorpej } else if (cache_needs_cleaning) { 2630 1.209 uebayasi pmap_t const pm = curproc->p_vmspace->vm_map.pmap; 2631 1.209 uebayasi 2632 1.259 matt pmap_cache_wbinv_all(pm, flags); 2633 1.271 matt return true; 2634 1.1 matt } 2635 1.271 matt return false; 2636 1.1 matt } 2637 1.174 matt #endif 2638 1.174 matt 2639 1.174 matt #ifdef PMAP_CACHE_VIPT 2640 1.174 matt /* 2641 1.174 matt * Sync a page with the I-cache. Since this is a VIPT, we must pick the 2642 1.174 matt * right cache alias to make sure we flush the right stuff. 2643 1.174 matt */ 2644 1.174 matt void 2645 1.215 uebayasi pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) 2646 1.174 matt { 2647 1.271 matt pmap_t kpm = pmap_kernel(); 2648 1.271 matt const size_t way_size = arm_pcache.icache_type == CACHE_TYPE_PIPT 2649 1.271 matt ? PAGE_SIZE 2650 1.271 matt : arm_pcache.icache_way_size; 2651 1.174 matt 2652 1.408 skrll UVMHIST_FUNC(__func__); 2653 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx (attrs=%#jx)", 2654 1.408 skrll (uintptr_t)md, pa, md->pvh_attrs, 0); 2655 1.408 skrll 2656 1.174 matt /* 2657 1.174 matt * No need to clean the page if it's non-cached. 
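 *
 * (Added note, illustrative only: for a PIPT I-cache, way_size above is
 * just PAGE_SIZE, so one temporary mapping is enough; for a VIPT I-cache
 * the loops below map the same physical page at every page-sized offset
 * within one cache way and then sync the whole way, so the correct alias
 * is flushed no matter which color the page was mapped at.)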
2658 1.174 matt */
2659 1.271 matt #ifndef ARM_MMU_EXTENDED
2660 1.215 uebayasi if (md->pvh_attrs & PVF_NC)
2661 1.174 matt return;
2662 1.215 uebayasi KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
2663 1.271 matt #endif
2664 1.271 matt
2665 1.284 matt pt_entry_t * const ptep = cpu_cdst_pte(0);
2666 1.284 matt const vaddr_t dstp = cpu_cdstp(0);
2667 1.271 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
2668 1.284 matt if (way_size <= PAGE_SIZE) {
2669 1.284 matt bool ok = false;
2670 1.284 matt vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp);
2671 1.284 matt if (ok) {
2672 1.284 matt cpu_icache_sync_range(vdstp, way_size);
2673 1.284 matt return;
2674 1.284 matt }
2675 1.271 matt }
2676 1.271 matt #endif
2677 1.174 matt
2678 1.174 matt /*
2679 1.271 matt * We don't worry about the color of the exec page; we map the
2680 1.271 matt * same page at every page offset in the way and then icache_sync
2681 1.271 matt * the entire way, which makes sure the page is cleaned.
2682 1.174 matt */
2683 1.271 matt const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode
2684 1.271 matt | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
2685 1.271 matt
2686 1.271 matt for (size_t i = 0, j = 0; i < way_size;
2687 1.271 matt i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) {
2688 1.307 skrll l2pte_reset(ptep + j);
2689 1.307 skrll PTE_SYNC(ptep + j);
2690 1.307 skrll
2691 1.271 matt pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC);
2692 1.271 matt /*
2693 1.271 matt * Set up a PTE to flush these cache lines.
2694 1.271 matt */
2695 1.271 matt l2pte_set(ptep + j, npte, 0);
2696 1.271 matt }
2697 1.271 matt PTE_SYNC_RANGE(ptep, way_size / L2_S_SIZE);
2698 1.174 matt
2699 1.174 matt /*
2700 1.174 matt * Flush it.
2701 1.174 matt */
2702 1.271 matt cpu_icache_sync_range(dstp, way_size);
2703 1.271 matt
2704 1.271 matt for (size_t i = 0, j = 0; i < way_size;
2705 1.271 matt i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) {
2706 1.271 matt /*
2707 1.271 matt * Unmap the page(s).
2708 1.271 matt */
2709 1.271 matt l2pte_reset(ptep + j);
2710 1.402 skrll PTE_SYNC(ptep + j);
2711 1.402 skrll
2712 1.271 matt pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC);
2713 1.271 matt }
2714 1.174 matt
2715 1.215 uebayasi md->pvh_attrs |= PVF_EXEC;
2716 1.174 matt PMAPCOUNT(exec_synced);
2717 1.174 matt }
2718 1.174 matt
2719 1.271 matt #ifndef ARM_MMU_EXTENDED
2720 1.174 matt void
2721 1.215 uebayasi pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush)
2722 1.174 matt {
2723 1.194 matt vsize_t va_offset, end_va;
2724 1.254 matt bool wbinv_p;
2725 1.174 matt
2726 1.194 matt if (arm_cache_prefer_mask == 0)
2727 1.194 matt return;
2728 1.174 matt
2729 1.408 skrll UVMHIST_FUNC(__func__);
2730 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx flush %#jx",
2731 1.408 skrll (uintptr_t)md, pa, flush, 0);
2732 1.408 skrll
2733 1.194 matt switch (flush) {
2734 1.194 matt case PMAP_FLUSH_PRIMARY:
2735 1.215 uebayasi if (md->pvh_attrs & PVF_MULTCLR) {
2736 1.194 matt va_offset = 0;
2737 1.194 matt end_va = arm_cache_prefer_mask;
2738 1.215 uebayasi md->pvh_attrs &= ~PVF_MULTCLR;
2739 1.194 matt PMAPCOUNT(vac_flush_lots);
2740 1.194 matt } else {
2741 1.215 uebayasi va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2742 1.194 matt end_va = va_offset;
2743 1.194 matt PMAPCOUNT(vac_flush_one);
2744 1.194 matt }
2745 1.194 matt /*
2746 1.194 matt * Mark that the page is no longer dirty.
2747 1.194 matt */ 2748 1.215 uebayasi md->pvh_attrs &= ~PVF_DIRTY; 2749 1.254 matt wbinv_p = true; 2750 1.194 matt break; 2751 1.194 matt case PMAP_FLUSH_SECONDARY: 2752 1.194 matt va_offset = 0; 2753 1.194 matt end_va = arm_cache_prefer_mask; 2754 1.254 matt wbinv_p = true; 2755 1.215 uebayasi md->pvh_attrs &= ~PVF_MULTCLR; 2756 1.194 matt PMAPCOUNT(vac_flush_lots); 2757 1.194 matt break; 2758 1.194 matt case PMAP_CLEAN_PRIMARY: 2759 1.215 uebayasi va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2760 1.194 matt end_va = va_offset; 2761 1.254 matt wbinv_p = false; 2762 1.185 matt /* 2763 1.185 matt * Mark that the page is no longer dirty. 2764 1.185 matt */ 2765 1.215 uebayasi if ((md->pvh_attrs & PVF_DMOD) == 0) 2766 1.215 uebayasi md->pvh_attrs &= ~PVF_DIRTY; 2767 1.194 matt PMAPCOUNT(vac_clean_one); 2768 1.194 matt break; 2769 1.194 matt default: 2770 1.194 matt return; 2771 1.185 matt } 2772 1.174 matt 2773 1.215 uebayasi KASSERT(!(md->pvh_attrs & PVF_NC)); 2774 1.194 matt 2775 1.410 kre UVMHIST_LOG(maphist, "md %#jx (attrs=%#jx)", (uintptr_t)md, 2776 1.410 kre md->pvh_attrs, 0, 0); 2777 1.194 matt 2778 1.254 matt const size_t scache_line_size = arm_scache.dcache_line_size; 2779 1.254 matt 2780 1.194 matt for (; va_offset <= end_va; va_offset += PAGE_SIZE) { 2781 1.271 matt pt_entry_t * const ptep = cpu_cdst_pte(va_offset); 2782 1.271 matt const vaddr_t dstp = cpu_cdstp(va_offset); 2783 1.262 matt const pt_entry_t opte = *ptep; 2784 1.194 matt 2785 1.194 matt if (flush == PMAP_FLUSH_SECONDARY 2786 1.215 uebayasi && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) 2787 1.194 matt continue; 2788 1.194 matt 2789 1.271 matt pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); 2790 1.194 matt /* 2791 1.194 matt * Set up a PTE with the right coloring to flush 2792 1.194 matt * existing cache entries. 2793 1.194 matt */ 2794 1.262 matt const pt_entry_t npte = L2_S_PROTO 2795 1.215 uebayasi | pa 2796 1.194 matt | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) 2797 1.194 matt | pte_l2_s_cache_mode; 2798 1.262 matt l2pte_set(ptep, npte, opte); 2799 1.194 matt PTE_SYNC(ptep); 2800 1.194 matt 2801 1.194 matt /* 2802 1.262 matt * Flush it. Make sure to flush secondary cache too since 2803 1.262 matt * bus_dma will ignore uncached pages. 2804 1.194 matt */ 2805 1.254 matt if (scache_line_size != 0) { 2806 1.286 skrll cpu_dcache_wb_range(dstp, PAGE_SIZE); 2807 1.254 matt if (wbinv_p) { 2808 1.286 skrll cpu_sdcache_wbinv_range(dstp, pa, PAGE_SIZE); 2809 1.271 matt cpu_dcache_inv_range(dstp, PAGE_SIZE); 2810 1.254 matt } else { 2811 1.271 matt cpu_sdcache_wb_range(dstp, pa, PAGE_SIZE); 2812 1.254 matt } 2813 1.254 matt } else { 2814 1.254 matt if (wbinv_p) { 2815 1.271 matt cpu_dcache_wbinv_range(dstp, PAGE_SIZE); 2816 1.254 matt } else { 2817 1.271 matt cpu_dcache_wb_range(dstp, PAGE_SIZE); 2818 1.254 matt } 2819 1.254 matt } 2820 1.194 matt 2821 1.194 matt /* 2822 1.194 matt * Restore the page table entry since we might have interrupted 2823 1.194 matt * pmap_zero_page or pmap_copy_page which was already using 2824 1.194 matt * this pte. 
2825 1.194 matt */ 2826 1.271 matt if (opte) { 2827 1.271 matt l2pte_set(ptep, opte, npte); 2828 1.271 matt } else { 2829 1.271 matt l2pte_reset(ptep); 2830 1.271 matt } 2831 1.194 matt PTE_SYNC(ptep); 2832 1.271 matt pmap_tlb_flush_SE(pmap_kernel(), dstp, PVF_REF | PVF_EXEC); 2833 1.194 matt } 2834 1.174 matt } 2835 1.271 matt #endif /* ARM_MMU_EXTENDED */ 2836 1.174 matt #endif /* PMAP_CACHE_VIPT */ 2837 1.1 matt 2838 1.1 matt /* 2839 1.134 thorpej * Routine: pmap_page_remove 2840 1.134 thorpej * Function: 2841 1.134 thorpej * Removes this physical page from 2842 1.134 thorpej * all physical maps in which it resides. 2843 1.134 thorpej * Reflects back modify bits to the pager. 2844 1.1 matt */ 2845 1.134 thorpej static void 2846 1.215 uebayasi pmap_page_remove(struct vm_page_md *md, paddr_t pa) 2847 1.1 matt { 2848 1.134 thorpej struct l2_bucket *l2b; 2849 1.271 matt struct pv_entry *pv; 2850 1.208 uebayasi pt_entry_t *ptep; 2851 1.271 matt #ifndef ARM_MMU_EXTENDED 2852 1.271 matt bool flush = false; 2853 1.271 matt #endif 2854 1.271 matt u_int flags = 0; 2855 1.134 thorpej 2856 1.408 skrll UVMHIST_FUNC(__func__); 2857 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0); 2858 1.71 thorpej 2859 1.426 skrll kpreempt_disable(); 2860 1.418 skrll pmap_acquire_page_lock(md); 2861 1.271 matt struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); 2862 1.271 matt if (*pvp == NULL) { 2863 1.174 matt #ifdef PMAP_CACHE_VIPT 2864 1.174 matt /* 2865 1.174 matt * We *know* the page contents are about to be replaced. 2866 1.174 matt * Discard the exec contents 2867 1.174 matt */ 2868 1.215 uebayasi if (PV_IS_EXEC_P(md->pvh_attrs)) 2869 1.174 matt PMAPCOUNT(exec_discarded_page_protect); 2870 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 2871 1.251 matt PMAP_VALIDATE_MD_PAGE(md); 2872 1.174 matt #endif 2873 1.271 matt pmap_release_page_lock(md); 2874 1.426 skrll kpreempt_enable(); 2875 1.426 skrll 2876 1.134 thorpej return; 2877 1.134 thorpej } 2878 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 2879 1.215 uebayasi KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); 2880 1.174 matt #endif 2881 1.79 thorpej 2882 1.1 matt /* 2883 1.134 thorpej * Clear alias counts 2884 1.1 matt */ 2885 1.182 matt #ifdef PMAP_CACHE_VIVT 2886 1.215 uebayasi md->k_mappings = 0; 2887 1.182 matt #endif 2888 1.215 uebayasi md->urw_mappings = md->uro_mappings = 0; 2889 1.134 thorpej 2890 1.174 matt #ifdef PMAP_CACHE_VIVT 2891 1.271 matt pmap_clean_page(md, false); 2892 1.174 matt #endif 2893 1.134 thorpej 2894 1.405 ad for (pv = *pvp; pv != NULL;) { 2895 1.271 matt pmap_t pm = pv->pv_pmap; 2896 1.271 matt #ifndef ARM_MMU_EXTENDED 2897 1.209 uebayasi if (flush == false && pmap_is_current(pm)) 2898 1.160 thorpej flush = true; 2899 1.271 matt #endif 2900 1.134 thorpej 2901 1.405 ad #ifdef PMAP_CACHE_VIPT 2902 1.405 ad if (pm == pmap_kernel() && PV_IS_KENTRY_P(pv->pv_flags)) { 2903 1.405 ad /* If this was unmanaged mapping, it must be ignored. */ 2904 1.405 ad pvp = &SLIST_NEXT(pv, pv_link); 2905 1.405 ad pv = *pvp; 2906 1.405 ad continue; 2907 1.405 ad } 2908 1.405 ad #endif 2909 1.405 ad 2910 1.405 ad /* 2911 1.405 ad * Try to get a hold on the pmap's lock. We must do this 2912 1.405 ad * while still holding the page locked, to know that the 2913 1.405 ad * page is still associated with the pmap and the mapping is 2914 1.405 ad * in place. If a hold can't be had, unlock and wait for 2915 1.405 ad * the pmap's lock to become available and retry. 
The pmap 2916 1.405 ad * must be ref'd over this dance to stop it disappearing 2917 1.405 ad * behind us. 2918 1.405 ad */ 2919 1.405 ad if (!mutex_tryenter(&pm->pm_lock)) { 2920 1.405 ad pmap_reference(pm); 2921 1.405 ad pmap_release_page_lock(md); 2922 1.405 ad pmap_acquire_pmap_lock(pm); 2923 1.405 ad /* nothing, just wait for it */ 2924 1.405 ad pmap_release_pmap_lock(pm); 2925 1.405 ad pmap_destroy(pm); 2926 1.405 ad /* Restart from the beginning. */ 2927 1.405 ad pmap_acquire_page_lock(md); 2928 1.405 ad pvp = &SLIST_FIRST(&md->pvh_list); 2929 1.405 ad pv = *pvp; 2930 1.405 ad continue; 2931 1.405 ad } 2932 1.405 ad 2933 1.182 matt if (pm == pmap_kernel()) { 2934 1.182 matt #ifdef PMAP_CACHE_VIPT 2935 1.182 matt if (pv->pv_flags & PVF_WRITE) 2936 1.215 uebayasi md->krw_mappings--; 2937 1.182 matt else 2938 1.215 uebayasi md->kro_mappings--; 2939 1.182 matt #endif 2940 1.174 matt PMAPCOUNT(kernel_unmappings); 2941 1.182 matt } 2942 1.271 matt *pvp = SLIST_NEXT(pv, pv_link); /* remove from list */ 2943 1.174 matt PMAPCOUNT(unmappings); 2944 1.174 matt 2945 1.271 matt pmap_release_page_lock(md); 2946 1.134 thorpej 2947 1.134 thorpej l2b = pmap_get_l2_bucket(pm, pv->pv_va); 2948 1.271 matt KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va); 2949 1.134 thorpej 2950 1.134 thorpej ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2951 1.134 thorpej 2952 1.134 thorpej /* 2953 1.134 thorpej * Update statistics 2954 1.134 thorpej */ 2955 1.134 thorpej --pm->pm_stats.resident_count; 2956 1.134 thorpej 2957 1.134 thorpej /* Wired bit */ 2958 1.134 thorpej if (pv->pv_flags & PVF_WIRED) 2959 1.134 thorpej --pm->pm_stats.wired_count; 2960 1.88 thorpej 2961 1.134 thorpej flags |= pv->pv_flags; 2962 1.88 thorpej 2963 1.134 thorpej /* 2964 1.134 thorpej * Invalidate the PTEs. 2965 1.134 thorpej */ 2966 1.262 matt l2pte_reset(ptep); 2967 1.134 thorpej PTE_SYNC_CURRENT(pm, ptep); 2968 1.307 skrll 2969 1.307 skrll #ifdef ARM_MMU_EXTENDED 2970 1.307 skrll pmap_tlb_invalidate_addr(pm, pv->pv_va); 2971 1.307 skrll #endif 2972 1.307 skrll 2973 1.290 skrll pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE); 2974 1.307 skrll 2975 1.271 matt pmap_release_pmap_lock(pm); 2976 1.88 thorpej 2977 1.134 thorpej pool_put(&pmap_pv_pool, pv); 2978 1.271 matt pmap_acquire_page_lock(md); 2979 1.405 ad 2980 1.182 matt /* 2981 1.404 skrll * Restart at the beginning of the list. 2982 1.182 matt */ 2983 1.271 matt pvp = &SLIST_FIRST(&md->pvh_list); 2984 1.405 ad pv = *pvp; 2985 1.271 matt } 2986 1.271 matt /* 2987 1.271 matt * if we reach the end of the list and there are still mappings, they 2988 1.271 matt * might be able to be cached now. And they must be kernel mappings. 2989 1.271 matt */ 2990 1.271 matt if (!SLIST_EMPTY(&md->pvh_list)) { 2991 1.271 matt pmap_vac_me_harder(md, pa, pmap_kernel(), 0); 2992 1.134 thorpej } 2993 1.271 matt 2994 1.174 matt #ifdef PMAP_CACHE_VIPT 2995 1.174 matt /* 2996 1.182 matt * Its EXEC cache is now gone. 
2997 1.174 matt */ 2998 1.215 uebayasi if (PV_IS_EXEC_P(md->pvh_attrs)) 2999 1.174 matt PMAPCOUNT(exec_discarded_page_protect); 3000 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 3001 1.215 uebayasi KASSERT(md->urw_mappings == 0); 3002 1.215 uebayasi KASSERT(md->uro_mappings == 0); 3003 1.271 matt #ifndef ARM_MMU_EXTENDED 3004 1.251 matt if (arm_cache_prefer_mask != 0) { 3005 1.251 matt if (md->krw_mappings == 0) 3006 1.251 matt md->pvh_attrs &= ~PVF_WRITE; 3007 1.251 matt PMAP_VALIDATE_MD_PAGE(md); 3008 1.251 matt } 3009 1.271 matt #endif /* ARM_MMU_EXTENDED */ 3010 1.271 matt #endif /* PMAP_CACHE_VIPT */ 3011 1.271 matt pmap_release_page_lock(md); 3012 1.88 thorpej 3013 1.271 matt #ifndef ARM_MMU_EXTENDED 3014 1.134 thorpej if (flush) { 3015 1.152 scw /* 3016 1.212 skrll * Note: We can't use pmap_tlb_flush{I,D}() here since that 3017 1.152 scw * would need a subsequent call to pmap_update() to ensure 3018 1.152 scw * curpm->pm_cstate.cs_all is reset. Our callers are not 3019 1.152 scw * required to do that (see pmap(9)), so we can't modify 3020 1.152 scw * the current pmap's state. 3021 1.152 scw */ 3022 1.134 thorpej if (PV_BEEN_EXECD(flags)) 3023 1.152 scw cpu_tlb_flushID(); 3024 1.134 thorpej else 3025 1.152 scw cpu_tlb_flushD(); 3026 1.134 thorpej } 3027 1.88 thorpej cpu_cpwait(); 3028 1.271 matt #endif /* ARM_MMU_EXTENDED */ 3029 1.426 skrll 3030 1.426 skrll kpreempt_enable(); 3031 1.88 thorpej } 3032 1.1 matt 3033 1.134 thorpej /* 3034 1.134 thorpej * pmap_t pmap_create(void) 3035 1.286 skrll * 3036 1.134 thorpej * Create a new pmap structure from scratch. 3037 1.17 chris */ 3038 1.134 thorpej pmap_t 3039 1.134 thorpej pmap_create(void) 3040 1.17 chris { 3041 1.134 thorpej pmap_t pm; 3042 1.134 thorpej 3043 1.168 ad pm = pool_cache_get(&pmap_cache, PR_WAITOK); 3044 1.79 thorpej 3045 1.394 ad mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_NONE); 3046 1.222 rmind 3047 1.394 ad pm->pm_refs = 1; 3048 1.134 thorpej pm->pm_stats.wired_count = 0; 3049 1.134 thorpej pm->pm_stats.resident_count = 1; 3050 1.271 matt #ifdef ARM_MMU_EXTENDED 3051 1.271 matt #ifdef MULTIPROCESSOR 3052 1.271 matt kcpuset_create(&pm->pm_active, true); 3053 1.271 matt kcpuset_create(&pm->pm_onproc, true); 3054 1.271 matt #endif 3055 1.271 matt #else 3056 1.134 thorpej pm->pm_cstate.cs_all = 0; 3057 1.271 matt #endif 3058 1.134 thorpej pmap_alloc_l1(pm); 3059 1.79 thorpej 3060 1.17 chris /* 3061 1.134 thorpej * Note: The pool cache ensures that the pm_l2[] array is already 3062 1.134 thorpej * initialised to zero. 3063 1.17 chris */ 3064 1.32 thorpej 3065 1.134 thorpej pmap_pinit(pm); 3066 1.134 thorpej 3067 1.387 skrll return pm; 3068 1.17 chris } 3069 1.134 thorpej 3070 1.220 macallan u_int 3071 1.220 macallan arm32_mmap_flags(paddr_t pa) 3072 1.220 macallan { 3073 1.220 macallan /* 3074 1.220 macallan * the upper 8 bits in pmap_enter()'s flags are reserved for MD stuff 3075 1.220 macallan * and we're using the upper bits in page numbers to pass flags around 3076 1.220 macallan * so we might as well use the same bits 3077 1.220 macallan */ 3078 1.220 macallan return (u_int)pa & PMAP_MD_MASK; 3079 1.220 macallan } 3080 1.1 matt /* 3081 1.198 cegger * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, 3082 1.198 cegger * u_int flags) 3083 1.286 skrll * 3084 1.134 thorpej * Insert the given physical page (p) at 3085 1.134 thorpej * the specified virtual address (v) in the 3086 1.134 thorpej * target physical map with the protection requested. 
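 *
 * (Editorial example, not part of the original comment: a typical call
 * from machine-independent code looks roughly like
 *
 *	error = pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_CANFAIL | PMAP_WIRED);
 *
 * where PMAP_CANFAIL requests an ENOMEM return instead of a panic on
 * resource shortage; "map" and "pg" are illustrative names only.)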
3087 1.1 matt *
3088 1.134 thorpej * NB: This is the only routine which MAY NOT lazy-evaluate
3089 1.134 thorpej * or lose information. That is, this routine must actually
3090 1.134 thorpej * insert this page into the given map NOW.
3091 1.1 matt */
3092 1.134 thorpej int
3093 1.198 cegger pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
3094 1.1 matt {
3095 1.134 thorpej struct l2_bucket *l2b;
3096 1.134 thorpej struct vm_page *pg, *opg;
3097 1.134 thorpej u_int nflags;
3098 1.134 thorpej u_int oflags;
3099 1.435 skrll const bool kpm_p = pm == pmap_kernel();
3100 1.435 skrll #if defined(EFI_RUNTIME)
3101 1.435 skrll const bool efirt_p = pm == pmap_efirt();
3102 1.435 skrll #else
3103 1.435 skrll const bool efirt_p = false;
3104 1.435 skrll #endif
3105 1.257 matt #ifdef ARM_HAS_VBAR
3106 1.257 matt const bool vector_page_p = false;
3107 1.257 matt #else
3108 1.257 matt const bool vector_page_p = (va == vector_page);
3109 1.257 matt #endif
3110 1.373 bouyer struct pmap_page *pp = pmap_pv_tracked(pa);
3111 1.373 bouyer struct pv_entry *new_pv = NULL;
3112 1.373 bouyer struct pv_entry *old_pv = NULL;
3113 1.373 bouyer int error = 0;
3114 1.71 thorpej
3115 1.406 skrll UVMHIST_FUNC(__func__);
3116 1.406 skrll UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx",
3117 1.359 pgoyette (uintptr_t)pm, va, pa, prot);
3118 1.359 pgoyette UVMHIST_LOG(maphist, " flag %#jx", flags, 0, 0, 0);
3119 1.71 thorpej
3120 1.134 thorpej KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
3121 1.134 thorpej KDASSERT(((va | pa) & PGOFSET) == 0);
3122 1.79 thorpej
3123 1.71 thorpej /*
3124 1.134 thorpej * Get a pointer to the page. Later on in this function, we
3125 1.134 thorpej * test for a managed page by checking pg != NULL.
3126 1.71 thorpej */
3127 1.134 thorpej pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
3128 1.373 bouyer /*
3129 1.373 bouyer * If we may need a new pv entry, allocate it now, as we can't do
3130 1.373 bouyer * it with the kernel pmap locked.
3131 1.373 bouyer */
3132 1.373 bouyer if (pg || pp)
3133 1.373 bouyer new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
3134 1.134 thorpej
3135 1.134 thorpej nflags = 0;
3136 1.134 thorpej if (prot & VM_PROT_WRITE)
3137 1.134 thorpej nflags |= PVF_WRITE;
3138 1.134 thorpej if (prot & VM_PROT_EXECUTE)
3139 1.134 thorpej nflags |= PVF_EXEC;
3140 1.134 thorpej if (flags & PMAP_WIRED)
3141 1.134 thorpej nflags |= PVF_WIRED;
3142 1.134 thorpej
3143 1.426 skrll kpreempt_disable();
3144 1.134 thorpej pmap_acquire_pmap_lock(pm);
3145 1.1 matt
3146 1.1 matt /*
3147 1.134 thorpej * Fetch the L2 bucket which maps this page, allocating one if
3148 1.134 thorpej * necessary for user pmaps.
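 *
 * (Editorial note: for the kernel pmap the L2 tables are expected to
 * have been provided up front, e.g. by pmap_growkernel(), so a plain
 * lookup is enough; only user pmaps may need a bucket allocated here.)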
3149 1.1 matt */ 3150 1.271 matt if (kpm_p) { 3151 1.134 thorpej l2b = pmap_get_l2_bucket(pm, va); 3152 1.271 matt } else { 3153 1.134 thorpej l2b = pmap_alloc_l2_bucket(pm, va); 3154 1.271 matt } 3155 1.134 thorpej if (l2b == NULL) { 3156 1.134 thorpej if (flags & PMAP_CANFAIL) { 3157 1.134 thorpej pmap_release_pmap_lock(pm); 3158 1.426 skrll kpreempt_enable(); 3159 1.426 skrll 3160 1.373 bouyer error = ENOMEM; 3161 1.373 bouyer goto free_pv; 3162 1.134 thorpej } 3163 1.134 thorpej panic("pmap_enter: failed to allocate L2 bucket"); 3164 1.134 thorpej } 3165 1.262 matt pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; 3166 1.262 matt const pt_entry_t opte = *ptep; 3167 1.262 matt pt_entry_t npte = pa; 3168 1.134 thorpej oflags = 0; 3169 1.88 thorpej 3170 1.134 thorpej if (opte) { 3171 1.134 thorpej /* 3172 1.134 thorpej * There is already a mapping at this address. 3173 1.134 thorpej * If the physical address is different, lookup the 3174 1.134 thorpej * vm_page. 3175 1.134 thorpej */ 3176 1.328 skrll if (l2pte_pa(opte) != pa) { 3177 1.328 skrll KASSERT(!pmap_pv_tracked(pa)); 3178 1.134 thorpej opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3179 1.328 skrll } else 3180 1.134 thorpej opg = pg; 3181 1.134 thorpej } else 3182 1.134 thorpej opg = NULL; 3183 1.88 thorpej 3184 1.328 skrll if (pg || pp) { 3185 1.328 skrll KASSERT((pg != NULL) != (pp != NULL)); 3186 1.328 skrll struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) : 3187 1.328 skrll PMAP_PAGE_TO_MD(pp); 3188 1.215 uebayasi 3189 1.423 skrll UVMHIST_LOG(maphist, " pg %#jx pp %#jx pvh_attrs %#jx " 3190 1.423 skrll "nflags %#jx", (uintptr_t)pg, (uintptr_t)pp, 3191 1.423 skrll md->pvh_attrs, nflags); 3192 1.423 skrll 3193 1.134 thorpej /* 3194 1.134 thorpej * This is to be a managed mapping. 3195 1.134 thorpej */ 3196 1.271 matt pmap_acquire_page_lock(md); 3197 1.251 matt if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { 3198 1.134 thorpej /* 3199 1.134 thorpej * - The access type indicates that we don't need 3200 1.134 thorpej * to do referenced emulation. 3201 1.134 thorpej * OR 3202 1.134 thorpej * - The physical page has already been referenced 3203 1.134 thorpej * so no need to re-do referenced emulation here. 3204 1.134 thorpej */ 3205 1.214 jmcneill npte |= l2pte_set_readonly(L2_S_PROTO); 3206 1.88 thorpej 3207 1.134 thorpej nflags |= PVF_REF; 3208 1.88 thorpej 3209 1.134 thorpej if ((prot & VM_PROT_WRITE) != 0 && 3210 1.134 thorpej ((flags & VM_PROT_WRITE) != 0 || 3211 1.215 uebayasi (md->pvh_attrs & PVF_MOD) != 0)) { 3212 1.134 thorpej /* 3213 1.134 thorpej * This is a writable mapping, and the 3214 1.134 thorpej * page's mod state indicates it has 3215 1.134 thorpej * already been modified. Make it 3216 1.134 thorpej * writable from the outset. 3217 1.134 thorpej */ 3218 1.214 jmcneill npte = l2pte_set_writable(npte); 3219 1.134 thorpej nflags |= PVF_MOD; 3220 1.134 thorpej } 3221 1.271 matt 3222 1.271 matt #ifdef ARM_MMU_EXTENDED 3223 1.286 skrll /* 3224 1.271 matt * If the page has been cleaned, then the pvh_attrs 3225 1.271 matt * will have PVF_EXEC set, so mark it execute so we 3226 1.271 matt * don't get an access fault when trying to execute 3227 1.271 matt * from it. 3228 1.271 matt */ 3229 1.271 matt if (md->pvh_attrs & nflags & PVF_EXEC) { 3230 1.271 matt npte &= ~L2_XS_XN; 3231 1.271 matt } 3232 1.271 matt #endif 3233 1.134 thorpej } else { 3234 1.134 thorpej /* 3235 1.134 thorpej * Need to do page referenced emulation. 
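 *
 * (Editorial note: i.e. leave the PTE invalid (L2_TYPE_INV) so that
 * the first access faults and the fault handler can record the
 * reference and install a valid PTE; see pmap_fault_fixup().)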
3236 1.134 thorpej */ 3237 1.134 thorpej npte |= L2_TYPE_INV; 3238 1.134 thorpej } 3239 1.88 thorpej 3240 1.252 macallan if (flags & ARM32_MMAP_WRITECOMBINE) { 3241 1.252 macallan npte |= pte_l2_s_wc_mode; 3242 1.252 macallan } else 3243 1.252 macallan npte |= pte_l2_s_cache_mode; 3244 1.1 matt 3245 1.328 skrll if (pg != NULL && pg == opg) { 3246 1.134 thorpej /* 3247 1.134 thorpej * We're changing the attrs of an existing mapping. 3248 1.134 thorpej */ 3249 1.215 uebayasi oflags = pmap_modify_pv(md, pa, pm, va, 3250 1.134 thorpej PVF_WRITE | PVF_EXEC | PVF_WIRED | 3251 1.134 thorpej PVF_MOD | PVF_REF, nflags); 3252 1.1 matt 3253 1.174 matt #ifdef PMAP_CACHE_VIVT 3254 1.134 thorpej /* 3255 1.134 thorpej * We may need to flush the cache if we're 3256 1.134 thorpej * doing rw-ro... 3257 1.134 thorpej */ 3258 1.134 thorpej if (pm->pm_cstate.cs_cache_d && 3259 1.134 thorpej (oflags & PVF_NC) == 0 && 3260 1.214 jmcneill l2pte_writable_p(opte) && 3261 1.134 thorpej (prot & VM_PROT_WRITE) == 0) 3262 1.134 thorpej cpu_dcache_wb_range(va, PAGE_SIZE); 3263 1.174 matt #endif 3264 1.134 thorpej } else { 3265 1.271 matt struct pv_entry *pv; 3266 1.134 thorpej /* 3267 1.134 thorpej * New mapping, or changing the backing page 3268 1.134 thorpej * of an existing mapping. 3269 1.134 thorpej */ 3270 1.134 thorpej if (opg) { 3271 1.215 uebayasi struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3272 1.215 uebayasi paddr_t opa = VM_PAGE_TO_PHYS(opg); 3273 1.215 uebayasi 3274 1.134 thorpej /* 3275 1.134 thorpej * Replacing an existing mapping with a new one. 3276 1.134 thorpej * It is part of our managed memory so we 3277 1.134 thorpej * must remove it from the PV list 3278 1.134 thorpej */ 3279 1.215 uebayasi pv = pmap_remove_pv(omd, opa, pm, va); 3280 1.215 uebayasi pmap_vac_me_harder(omd, opa, pm, 0); 3281 1.205 uebayasi oflags = pv->pv_flags; 3282 1.1 matt 3283 1.174 matt #ifdef PMAP_CACHE_VIVT 3284 1.134 thorpej /* 3285 1.134 thorpej * If the old mapping was valid (ref/mod 3286 1.134 thorpej * emulation creates 'invalid' mappings 3287 1.134 thorpej * initially) then make sure to frob 3288 1.134 thorpej * the cache. 3289 1.134 thorpej */ 3290 1.266 matt if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { 3291 1.259 matt pmap_cache_wbinv_page(pm, va, true, 3292 1.259 matt oflags); 3293 1.134 thorpej } 3294 1.174 matt #endif 3295 1.277 matt } else { 3296 1.373 bouyer pv = new_pv; 3297 1.373 bouyer new_pv = NULL; 3298 1.277 matt if (pv == NULL) { 3299 1.373 bouyer pmap_release_page_lock(md); 3300 1.277 matt pmap_release_pmap_lock(pm); 3301 1.277 matt if ((flags & PMAP_CANFAIL) == 0) 3302 1.277 matt panic("pmap_enter: " 3303 1.277 matt "no pv entries"); 3304 1.277 matt 3305 1.291 skrll pmap_free_l2_bucket(pm, l2b, 0); 3306 1.277 matt UVMHIST_LOG(maphist, " <-- done (ENOMEM)", 3307 1.277 matt 0, 0, 0, 0); 3308 1.427 skrll kpreempt_enable(); 3309 1.427 skrll 3310 1.387 skrll return ENOMEM; 3311 1.277 matt } 3312 1.134 thorpej } 3313 1.25 rearnsha 3314 1.215 uebayasi pmap_enter_pv(md, pa, pv, pm, va, nflags); 3315 1.25 rearnsha } 3316 1.271 matt pmap_release_page_lock(md); 3317 1.134 thorpej } else { 3318 1.134 thorpej /* 3319 1.134 thorpej * We're mapping an unmanaged page. 3320 1.134 thorpej * These are always readable, and possibly writable, from 3321 1.134 thorpej * the get go as we don't need to track ref/mod status. 
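 *
 * (Editorial note: "unmanaged" means there is no vm_page/pmap_page for
 * this physical address - typically device memory, or EFI runtime
 * regions when EFI_RUNTIME is defined - so no pv entry is kept.)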
3322 1.134 thorpej */ 3323 1.214 jmcneill npte |= l2pte_set_readonly(L2_S_PROTO); 3324 1.134 thorpej if (prot & VM_PROT_WRITE) 3325 1.214 jmcneill npte = l2pte_set_writable(npte); 3326 1.25 rearnsha 3327 1.435 skrll if (efirt_p) { 3328 1.435 skrll if (prot & VM_PROT_EXECUTE) { 3329 1.435 skrll npte &= ~L2_XS_XN; /* and executable */ 3330 1.435 skrll } 3331 1.435 skrll } 3332 1.435 skrll 3333 1.134 thorpej /* 3334 1.134 thorpej * Make sure the vector table is mapped cacheable 3335 1.134 thorpej */ 3336 1.271 matt if ((vector_page_p && !kpm_p) 3337 1.257 matt || (flags & ARM32_MMAP_CACHEABLE)) { 3338 1.134 thorpej npte |= pte_l2_s_cache_mode; 3339 1.271 matt #ifdef ARM_MMU_EXTENDED 3340 1.271 matt npte &= ~L2_XS_XN; /* and executable */ 3341 1.271 matt #endif 3342 1.220 macallan } else if (flags & ARM32_MMAP_WRITECOMBINE) { 3343 1.220 macallan npte |= pte_l2_s_wc_mode; 3344 1.220 macallan } 3345 1.134 thorpej if (opg) { 3346 1.134 thorpej /* 3347 1.134 thorpej * Looks like there's an existing 'managed' mapping 3348 1.134 thorpej * at this address. 3349 1.25 rearnsha */ 3350 1.215 uebayasi struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3351 1.215 uebayasi paddr_t opa = VM_PAGE_TO_PHYS(opg); 3352 1.215 uebayasi 3353 1.271 matt pmap_acquire_page_lock(omd); 3354 1.373 bouyer old_pv = pmap_remove_pv(omd, opa, pm, va); 3355 1.215 uebayasi pmap_vac_me_harder(omd, opa, pm, 0); 3356 1.373 bouyer oflags = old_pv->pv_flags; 3357 1.271 matt pmap_release_page_lock(omd); 3358 1.134 thorpej 3359 1.174 matt #ifdef PMAP_CACHE_VIVT 3360 1.266 matt if (!(oflags & PVF_NC) && l2pte_valid_p(opte)) { 3361 1.259 matt pmap_cache_wbinv_page(pm, va, true, oflags); 3362 1.134 thorpej } 3363 1.174 matt #endif 3364 1.25 rearnsha } 3365 1.25 rearnsha } 3366 1.25 rearnsha 3367 1.134 thorpej /* 3368 1.134 thorpej * Make sure userland mappings get the right permissions 3369 1.134 thorpej */ 3370 1.271 matt if (!vector_page_p && !kpm_p) { 3371 1.134 thorpej npte |= L2_S_PROT_U; 3372 1.271 matt #ifdef ARM_MMU_EXTENDED 3373 1.271 matt npte |= L2_XS_nG; /* user pages are not global */ 3374 1.271 matt #endif 3375 1.257 matt } 3376 1.25 rearnsha 3377 1.134 thorpej /* 3378 1.134 thorpej * Keep the stats up to date 3379 1.134 thorpej */ 3380 1.134 thorpej if (opte == 0) { 3381 1.271 matt l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; 3382 1.134 thorpej pm->pm_stats.resident_count++; 3383 1.286 skrll } 3384 1.1 matt 3385 1.359 pgoyette UVMHIST_LOG(maphist, " opte %#jx npte %#jx", opte, npte, 0, 0); 3386 1.1 matt 3387 1.274 matt #if defined(ARM_MMU_EXTENDED) 3388 1.274 matt /* 3389 1.274 matt * If exec protection was requested but the page hasn't been synced, 3390 1.274 matt * sync it now and allow execution from it. 3391 1.274 matt */ 3392 1.435 skrll 3393 1.274 matt if ((nflags & PVF_EXEC) && (npte & L2_XS_XN)) { 3394 1.274 matt struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3395 1.274 matt npte &= ~L2_XS_XN; 3396 1.274 matt pmap_syncicache_page(md, pa); 3397 1.274 matt PMAPCOUNT(exec_synced_map); 3398 1.274 matt } 3399 1.274 matt #endif 3400 1.1 matt /* 3401 1.134 thorpej * If this is just a wiring change, the two PTEs will be 3402 1.134 thorpej * identical, so there's no need to update the page table. 
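 *
 * (Editorial note: when the PTE does change, the code below clears the
 * old entry and flushes the TLB before installing the new one, a
 * break-before-make style update, so no stale translation is visible
 * in between.)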
3403 1.1 matt */ 3404 1.134 thorpej if (npte != opte) { 3405 1.307 skrll l2pte_reset(ptep); 3406 1.307 skrll PTE_SYNC(ptep); 3407 1.310 skrll if (l2pte_valid_p(opte)) { 3408 1.310 skrll pmap_tlb_flush_SE(pm, va, oflags); 3409 1.310 skrll } 3410 1.307 skrll l2pte_set(ptep, npte, 0); 3411 1.237 matt PTE_SYNC(ptep); 3412 1.271 matt #ifndef ARM_MMU_EXTENDED 3413 1.271 matt bool is_cached = pmap_is_cached(pm); 3414 1.134 thorpej if (is_cached) { 3415 1.134 thorpej /* 3416 1.134 thorpej * We only need to frob the cache/tlb if this pmap 3417 1.134 thorpej * is current 3418 1.134 thorpej */ 3419 1.266 matt if (!vector_page_p && l2pte_valid_p(npte)) { 3420 1.25 rearnsha /* 3421 1.134 thorpej * This mapping is likely to be accessed as 3422 1.134 thorpej * soon as we return to userland. Fix up the 3423 1.134 thorpej * L1 entry to avoid taking another 3424 1.134 thorpej * page/domain fault. 3425 1.25 rearnsha */ 3426 1.271 matt pd_entry_t *pdep = pmap_l1_kva(pm) 3427 1.271 matt + l1pte_index(va); 3428 1.271 matt pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa 3429 1.271 matt | L1_C_DOM(pmap_domain(pm)); 3430 1.271 matt if (*pdep != pde) { 3431 1.271 matt l1pte_setone(pdep, pde); 3432 1.322 skrll PDE_SYNC(pdep); 3433 1.12 chris } 3434 1.1 matt } 3435 1.1 matt } 3436 1.134 thorpej 3437 1.359 pgoyette UVMHIST_LOG(maphist, " is_cached %jd cs 0x%08jx", 3438 1.271 matt is_cached, pm->pm_cstate.cs_all, 0, 0); 3439 1.134 thorpej 3440 1.134 thorpej if (pg != NULL) { 3441 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3442 1.215 uebayasi 3443 1.271 matt pmap_acquire_page_lock(md); 3444 1.215 uebayasi pmap_vac_me_harder(md, pa, pm, va); 3445 1.271 matt pmap_release_page_lock(md); 3446 1.1 matt } 3447 1.274 matt #endif 3448 1.1 matt } 3449 1.185 matt #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) 3450 1.188 matt if (pg) { 3451 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3452 1.215 uebayasi 3453 1.271 matt pmap_acquire_page_lock(md); 3454 1.271 matt #ifndef ARM_MMU_EXTENDED 3455 1.271 matt KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3456 1.227 matt #endif 3457 1.251 matt PMAP_VALIDATE_MD_PAGE(md); 3458 1.271 matt pmap_release_page_lock(md); 3459 1.188 matt } 3460 1.183 matt #endif 3461 1.134 thorpej 3462 1.134 thorpej pmap_release_pmap_lock(pm); 3463 1.426 skrll kpreempt_enable(); 3464 1.373 bouyer 3465 1.373 bouyer if (old_pv) 3466 1.373 bouyer pool_put(&pmap_pv_pool, old_pv); 3467 1.373 bouyer free_pv: 3468 1.373 bouyer if (new_pv) 3469 1.373 bouyer pool_put(&pmap_pv_pool, new_pv); 3470 1.426 skrll 3471 1.387 skrll return error; 3472 1.1 matt } 3473 1.1 matt 3474 1.1 matt /* 3475 1.1 matt * pmap_remove() 3476 1.1 matt * 3477 1.1 matt * pmap_remove is responsible for nuking a number of mappings for a range 3478 1.1 matt * of virtual address space in the current pmap. To do this efficiently 3479 1.1 matt * is interesting, because in a number of cases a wide virtual address 3480 1.1 matt * range may be supplied that contains few actual mappings. So, the 3481 1.1 matt * optimisations are: 3482 1.134 thorpej * 1. Skip over hunks of address space for which no L1 or L2 entry exists. 3483 1.1 matt * 2. Build up a list of pages we've hit, up to a maximum, so we can 3484 1.1 matt * maybe do just a partial cache clean. This path of execution is 3485 1.1 matt * complicated by the fact that the cache must be flushed _before_ 3486 1.1 matt * the PTE is nuked, being a VAC :-) 3487 1.134 thorpej * 3. 
If we're called after UVM calls pmap_remove_all(), we can defer 3488 1.134 thorpej * all invalidations until pmap_update(), since pmap_remove_all() has 3489 1.134 thorpej * already flushed the cache. 3490 1.134 thorpej * 4. Maybe later fast-case a single page, but I don't think this is 3491 1.1 matt * going to make _that_ much difference overall. 3492 1.1 matt */ 3493 1.1 matt 3494 1.134 thorpej #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3495 1.1 matt 3496 1.1 matt void 3497 1.200 rmind pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) 3498 1.1 matt { 3499 1.373 bouyer SLIST_HEAD(,pv_entry) opv_list; 3500 1.373 bouyer struct pv_entry *pv, *npv; 3501 1.406 skrll UVMHIST_FUNC(__func__); 3502 1.406 skrll UVMHIST_CALLARGS(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)", 3503 1.359 pgoyette (uintptr_t)pm, sva, eva, 0); 3504 1.1 matt 3505 1.401 skrll #ifdef PMAP_FAULTINFO 3506 1.401 skrll curpcb->pcb_faultinfo.pfi_faultaddr = 0; 3507 1.401 skrll curpcb->pcb_faultinfo.pfi_repeats = 0; 3508 1.401 skrll curpcb->pcb_faultinfo.pfi_faultptep = NULL; 3509 1.401 skrll #endif 3510 1.401 skrll 3511 1.373 bouyer SLIST_INIT(&opv_list); 3512 1.17 chris /* 3513 1.134 thorpej * we lock in the pmap => pv_head direction 3514 1.17 chris */ 3515 1.426 skrll kpreempt_disable(); 3516 1.134 thorpej pmap_acquire_pmap_lock(pm); 3517 1.134 thorpej 3518 1.348 skrll #ifndef ARM_MMU_EXTENDED 3519 1.348 skrll u_int cleanlist_idx, total, cnt; 3520 1.348 skrll struct { 3521 1.348 skrll vaddr_t va; 3522 1.348 skrll pt_entry_t *ptep; 3523 1.348 skrll } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 3524 1.348 skrll 3525 1.134 thorpej if (pm->pm_remove_all || !pmap_is_cached(pm)) { 3526 1.134 thorpej cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3527 1.134 thorpej if (pm->pm_cstate.cs_tlb == 0) 3528 1.160 thorpej pm->pm_remove_all = true; 3529 1.134 thorpej } else 3530 1.134 thorpej cleanlist_idx = 0; 3531 1.134 thorpej total = 0; 3532 1.348 skrll #endif 3533 1.134 thorpej 3534 1.1 matt while (sva < eva) { 3535 1.134 thorpej /* 3536 1.134 thorpej * Do one L2 bucket's worth at a time. 3537 1.134 thorpej */ 3538 1.348 skrll vaddr_t next_bucket = L2_NEXT_BUCKET_VA(sva); 3539 1.134 thorpej if (next_bucket > eva) 3540 1.134 thorpej next_bucket = eva; 3541 1.134 thorpej 3542 1.262 matt struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva); 3543 1.134 thorpej if (l2b == NULL) { 3544 1.134 thorpej sva = next_bucket; 3545 1.134 thorpej continue; 3546 1.134 thorpej } 3547 1.134 thorpej 3548 1.262 matt pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3549 1.348 skrll u_int mappings = 0; 3550 1.134 thorpej 3551 1.348 skrll for (;sva < next_bucket; 3552 1.262 matt sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) { 3553 1.262 matt pt_entry_t opte = *ptep; 3554 1.134 thorpej 3555 1.262 matt if (opte == 0) { 3556 1.156 scw /* Nothing here, move along */ 3557 1.1 matt continue; 3558 1.1 matt } 3559 1.1 matt 3560 1.259 matt u_int flags = PVF_REF; 3561 1.262 matt paddr_t pa = l2pte_pa(opte); 3562 1.262 matt struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 3563 1.1 matt 3564 1.1 matt /* 3565 1.134 thorpej * Update flags. In a number of circumstances, 3566 1.134 thorpej * we could cluster a lot of these and do a 3567 1.134 thorpej * number of sequential pages in one go. 
3568 1.1 matt */
3569 1.262 matt if (pg != NULL) {
3570 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3571 1.215 uebayasi
3572 1.271 matt pmap_acquire_page_lock(md);
3573 1.215 uebayasi pv = pmap_remove_pv(md, pa, pm, sva);
3574 1.215 uebayasi pmap_vac_me_harder(md, pa, pm, 0);
3575 1.271 matt pmap_release_page_lock(md);
3576 1.205 uebayasi if (pv != NULL) {
3577 1.261 matt if (pm->pm_remove_all == false) {
3578 1.261 matt flags = pv->pv_flags;
3579 1.261 matt }
3580 1.373 bouyer SLIST_INSERT_HEAD(&opv_list,
3581 1.373 bouyer pv, pv_link);
3582 1.134 thorpej }
3583 1.134 thorpej }
3584 1.271 matt mappings += PAGE_SIZE / L2_S_SIZE;
3585 1.156 scw
3586 1.266 matt if (!l2pte_valid_p(opte)) {
3587 1.156 scw /*
3588 1.156 scw * Ref/Mod emulation is still active for this
3589 1.156 scw * mapping, therefore it has not yet been
3590 1.156 scw * accessed. No need to frob the cache/tlb.
3591 1.156 scw */
3592 1.262 matt l2pte_reset(ptep);
3593 1.134 thorpej PTE_SYNC_CURRENT(pm, ptep);
3594 1.134 thorpej continue;
3595 1.134 thorpej }
3596 1.1 matt
3597 1.271 matt #ifdef ARM_MMU_EXTENDED
3598 1.348 skrll l2pte_reset(ptep);
3599 1.348 skrll PTE_SYNC(ptep);
3600 1.348 skrll if (__predict_false(pm->pm_remove_all == false)) {
3601 1.348 skrll pmap_tlb_flush_SE(pm, sva, flags);
3602 1.271 matt }
3603 1.348 skrll #else
3604 1.1 matt if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3605 1.1 matt /* Add to the clean list. */
3606 1.174 matt cleanlist[cleanlist_idx].ptep = ptep;
3607 1.134 thorpej cleanlist[cleanlist_idx].va =
3608 1.259 matt sva | (flags & PVF_EXEC);
3609 1.1 matt cleanlist_idx++;
3610 1.271 matt } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3611 1.1 matt /* Nuke everything if needed. */
3612 1.174 matt #ifdef PMAP_CACHE_VIVT
3613 1.259 matt pmap_cache_wbinv_all(pm, PVF_EXEC);
3614 1.174 matt #endif
3615 1.1 matt /*
3616 1.1 matt * Roll back the previous PTE list,
3617 1.1 matt * and zero out the current PTE.
3618 1.1 matt */ 3619 1.113 thorpej for (cnt = 0; 3620 1.134 thorpej cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { 3621 1.262 matt l2pte_reset(cleanlist[cnt].ptep); 3622 1.181 scw PTE_SYNC(cleanlist[cnt].ptep); 3623 1.1 matt } 3624 1.262 matt l2pte_reset(ptep); 3625 1.134 thorpej PTE_SYNC(ptep); 3626 1.1 matt cleanlist_idx++; 3627 1.160 thorpej pm->pm_remove_all = true; 3628 1.1 matt } else { 3629 1.262 matt l2pte_reset(ptep); 3630 1.134 thorpej PTE_SYNC(ptep); 3631 1.160 thorpej if (pm->pm_remove_all == false) { 3632 1.259 matt pmap_tlb_flush_SE(pm, sva, flags); 3633 1.134 thorpej } 3634 1.134 thorpej } 3635 1.348 skrll #endif 3636 1.134 thorpej } 3637 1.134 thorpej 3638 1.348 skrll #ifndef ARM_MMU_EXTENDED 3639 1.134 thorpej /* 3640 1.134 thorpej * Deal with any left overs 3641 1.134 thorpej */ 3642 1.134 thorpej if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { 3643 1.134 thorpej total += cleanlist_idx; 3644 1.134 thorpej for (cnt = 0; cnt < cleanlist_idx; cnt++) { 3645 1.307 skrll l2pte_reset(cleanlist[cnt].ptep); 3646 1.307 skrll PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); 3647 1.259 matt vaddr_t va = cleanlist[cnt].va; 3648 1.134 thorpej if (pm->pm_cstate.cs_all != 0) { 3649 1.259 matt vaddr_t clva = va & ~PAGE_MASK; 3650 1.259 matt u_int flags = va & PVF_EXEC; 3651 1.174 matt #ifdef PMAP_CACHE_VIVT 3652 1.259 matt pmap_cache_wbinv_page(pm, clva, true, 3653 1.259 matt PVF_REF | PVF_WRITE | flags); 3654 1.174 matt #endif 3655 1.259 matt pmap_tlb_flush_SE(pm, clva, 3656 1.259 matt PVF_REF | flags); 3657 1.134 thorpej } 3658 1.1 matt } 3659 1.1 matt 3660 1.1 matt /* 3661 1.134 thorpej * If it looks like we're removing a whole bunch 3662 1.134 thorpej * of mappings, it's faster to just write-back 3663 1.134 thorpej * the whole cache now and defer TLB flushes until 3664 1.134 thorpej * pmap_update() is called. 
3665 1.1 matt */ 3666 1.134 thorpej if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) 3667 1.134 thorpej cleanlist_idx = 0; 3668 1.134 thorpej else { 3669 1.134 thorpej cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3670 1.174 matt #ifdef PMAP_CACHE_VIVT 3671 1.259 matt pmap_cache_wbinv_all(pm, PVF_EXEC); 3672 1.174 matt #endif 3673 1.160 thorpej pm->pm_remove_all = true; 3674 1.134 thorpej } 3675 1.134 thorpej } 3676 1.348 skrll #endif /* ARM_MMU_EXTENDED */ 3677 1.290 skrll 3678 1.290 skrll pmap_free_l2_bucket(pm, l2b, mappings); 3679 1.288 matt pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE); 3680 1.134 thorpej } 3681 1.134 thorpej 3682 1.134 thorpej pmap_release_pmap_lock(pm); 3683 1.426 skrll kpreempt_enable(); 3684 1.426 skrll 3685 1.373 bouyer SLIST_FOREACH_SAFE(pv, &opv_list, pv_link, npv) { 3686 1.373 bouyer pool_put(&pmap_pv_pool, pv); 3687 1.373 bouyer } 3688 1.134 thorpej } 3689 1.134 thorpej 3690 1.358 flxd #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3691 1.182 matt static struct pv_entry * 3692 1.182 matt pmap_kremove_pg(struct vm_page *pg, vaddr_t va) 3693 1.182 matt { 3694 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3695 1.215 uebayasi paddr_t pa = VM_PAGE_TO_PHYS(pg); 3696 1.182 matt struct pv_entry *pv; 3697 1.182 matt 3698 1.215 uebayasi KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); 3699 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); 3700 1.271 matt KASSERT(pmap_page_locked_p(md)); 3701 1.182 matt 3702 1.215 uebayasi pv = pmap_remove_pv(md, pa, pmap_kernel(), va); 3703 1.271 matt KASSERTMSG(pv, "pg %p (pa #%lx) va %#lx", pg, pa, va); 3704 1.268 matt KASSERT(PV_IS_KENTRY_P(pv->pv_flags)); 3705 1.182 matt 3706 1.182 matt /* 3707 1.375 skrll * We are removing a writeable mapping to a cached exec page, if 3708 1.375 skrll * it's the last mapping then clear its execness otherwise sync 3709 1.182 matt * the page to the icache. 3710 1.182 matt */ 3711 1.215 uebayasi if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC 3712 1.182 matt && (pv->pv_flags & PVF_WRITE) != 0) { 3713 1.215 uebayasi if (SLIST_EMPTY(&md->pvh_list)) { 3714 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 3715 1.182 matt PMAPCOUNT(exec_discarded_kremove); 3716 1.182 matt } else { 3717 1.215 uebayasi pmap_syncicache_page(md, pa); 3718 1.182 matt PMAPCOUNT(exec_synced_kremove); 3719 1.182 matt } 3720 1.182 matt } 3721 1.215 uebayasi pmap_vac_me_harder(md, pa, pmap_kernel(), 0); 3722 1.182 matt 3723 1.182 matt return pv; 3724 1.182 matt } 3725 1.358 flxd #endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */ 3726 1.182 matt 3727 1.134 thorpej /* 3728 1.134 thorpej * pmap_kenter_pa: enter an unmanaged, wired kernel mapping 3729 1.134 thorpej * 3730 1.134 thorpej * We assume there is already sufficient KVM space available 3731 1.134 thorpej * to do this, as we can't allocate L2 descriptor tables/metadata 3732 1.134 thorpej * from here. 3733 1.134 thorpej */ 3734 1.134 thorpej void 3735 1.201 cegger pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 3736 1.134 thorpej { 3737 1.358 flxd #ifdef PMAP_CACHE_VIVT 3738 1.358 flxd struct vm_page *pg = (flags & PMAP_KMPAGE) ? 
PHYS_TO_VM_PAGE(pa) : NULL; 3739 1.358 flxd #endif 3740 1.358 flxd #ifdef PMAP_CACHE_VIPT 3741 1.174 matt struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 3742 1.174 matt struct vm_page *opg; 3743 1.271 matt #ifndef ARM_MMU_EXTENDED 3744 1.182 matt struct pv_entry *pv = NULL; 3745 1.174 matt #endif 3746 1.358 flxd #endif 3747 1.277 matt struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL; 3748 1.174 matt 3749 1.271 matt UVMHIST_FUNC(__func__); 3750 1.271 matt 3751 1.271 matt if (pmap_initialized) { 3752 1.406 skrll UVMHIST_CALLARGS(maphist, 3753 1.406 skrll "va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx", va, pa, prot, 3754 1.406 skrll flags); 3755 1.271 matt } 3756 1.134 thorpej 3757 1.426 skrll kpreempt_disable(); 3758 1.271 matt pmap_t kpm = pmap_kernel(); 3759 1.320 matt pmap_acquire_pmap_lock(kpm); 3760 1.271 matt struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); 3761 1.271 matt const size_t l1slot __diagused = l1pte_index(va); 3762 1.271 matt KASSERTMSG(l2b != NULL, 3763 1.271 matt "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p", 3764 1.271 matt va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)], 3765 1.271 matt kpm->pm_l2[L2_IDX(l1slot)] 3766 1.271 matt ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)] 3767 1.271 matt : NULL, 3768 1.271 matt kpm->pm_l2[L2_IDX(l1slot)] 3769 1.271 matt ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva 3770 1.286 skrll : NULL); 3771 1.271 matt KASSERT(l2b->l2b_kva != NULL); 3772 1.134 thorpej 3773 1.262 matt pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 3774 1.262 matt const pt_entry_t opte = *ptep; 3775 1.134 thorpej 3776 1.174 matt if (opte == 0) { 3777 1.174 matt PMAPCOUNT(kenter_mappings); 3778 1.271 matt l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE; 3779 1.174 matt } else { 3780 1.174 matt PMAPCOUNT(kenter_remappings); 3781 1.358 flxd #ifdef PMAP_CACHE_VIPT 3782 1.174 matt opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3783 1.432 riastrad struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3784 1.358 flxd if (opg && arm_cache_prefer_mask != 0) { 3785 1.174 matt KASSERT(opg != pg); 3786 1.215 uebayasi KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); 3787 1.213 cegger KASSERT((flags & PMAP_KMPAGE) == 0); 3788 1.271 matt #ifndef ARM_MMU_EXTENDED 3789 1.277 matt pmap_acquire_page_lock(omd); 3790 1.182 matt pv = pmap_kremove_pg(opg, va); 3791 1.277 matt pmap_release_page_lock(omd); 3792 1.271 matt #endif 3793 1.174 matt } 3794 1.358 flxd #endif 3795 1.266 matt if (l2pte_valid_p(opte)) { 3796 1.307 skrll l2pte_reset(ptep); 3797 1.307 skrll PTE_SYNC(ptep); 3798 1.174 matt #ifdef PMAP_CACHE_VIVT 3799 1.174 matt cpu_dcache_wbinv_range(va, PAGE_SIZE); 3800 1.174 matt #endif 3801 1.174 matt cpu_tlb_flushD_SE(va); 3802 1.174 matt cpu_cpwait(); 3803 1.174 matt } 3804 1.174 matt } 3805 1.320 matt pmap_release_pmap_lock(kpm); 3806 1.364 skrll pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot); 3807 1.134 thorpej 3808 1.364 skrll if (flags & PMAP_PTE) { 3809 1.364 skrll KASSERT((flags & PMAP_CACHE_MASK) == 0); 3810 1.364 skrll if (!(flags & PMAP_NOCACHE)) 3811 1.364 skrll npte |= pte_l2_s_cache_mode_pt; 3812 1.364 skrll } else { 3813 1.388 skrll switch (flags & (PMAP_CACHE_MASK | PMAP_DEV_MASK)) { 3814 1.388 skrll case PMAP_DEV ... 
PMAP_DEV | PMAP_CACHE_MASK: 3815 1.388 skrll break; 3816 1.364 skrll case PMAP_NOCACHE: 3817 1.388 skrll npte |= pte_l2_s_nocache_mode; 3818 1.364 skrll break; 3819 1.364 skrll case PMAP_WRITE_COMBINE: 3820 1.364 skrll npte |= pte_l2_s_wc_mode; 3821 1.364 skrll break; 3822 1.364 skrll default: 3823 1.364 skrll npte |= pte_l2_s_cache_mode; 3824 1.364 skrll break; 3825 1.364 skrll } 3826 1.364 skrll } 3827 1.271 matt #ifdef ARM_MMU_EXTENDED 3828 1.271 matt if (prot & VM_PROT_EXECUTE) 3829 1.271 matt npte &= ~L2_XS_XN; 3830 1.271 matt #endif 3831 1.307 skrll l2pte_set(ptep, npte, 0); 3832 1.134 thorpej PTE_SYNC(ptep); 3833 1.174 matt 3834 1.174 matt if (pg) { 3835 1.213 cegger if (flags & PMAP_KMPAGE) { 3836 1.215 uebayasi KASSERT(md->urw_mappings == 0); 3837 1.215 uebayasi KASSERT(md->uro_mappings == 0); 3838 1.215 uebayasi KASSERT(md->krw_mappings == 0); 3839 1.215 uebayasi KASSERT(md->kro_mappings == 0); 3840 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3841 1.186 matt KASSERT(pv == NULL); 3842 1.207 uebayasi KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); 3843 1.215 uebayasi KASSERT((md->pvh_attrs & PVF_NC) == 0); 3844 1.182 matt /* if there is a color conflict, evict from cache. */ 3845 1.215 uebayasi if (pmap_is_page_colored_p(md) 3846 1.215 uebayasi && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { 3847 1.183 matt PMAPCOUNT(vac_color_change); 3848 1.215 uebayasi pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 3849 1.215 uebayasi } else if (md->pvh_attrs & PVF_MULTCLR) { 3850 1.195 matt /* 3851 1.195 matt * If this page has multiple colors, expunge 3852 1.195 matt * them. 3853 1.195 matt */ 3854 1.195 matt PMAPCOUNT(vac_flush_lots2); 3855 1.215 uebayasi pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); 3856 1.183 matt } 3857 1.278 matt /* 3858 1.278 matt * Since this is a KMPAGE, there can be no contention 3859 1.278 matt * for this page so don't lock it. 3860 1.278 matt */ 3861 1.215 uebayasi md->pvh_attrs &= PAGE_SIZE - 1; 3862 1.271 matt md->pvh_attrs |= PVF_KMPAGE | PVF_COLORED | PVF_DIRTY 3863 1.183 matt | (va & arm_cache_prefer_mask); 3864 1.271 matt #else /* !PMAP_CACHE_VIPT || ARM_MMU_EXTENDED */ 3865 1.215 uebayasi md->pvh_attrs |= PVF_KMPAGE; 3866 1.186 matt #endif 3867 1.278 matt atomic_inc_32(&pmap_kmpages); 3868 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3869 1.358 flxd } else if (arm_cache_prefer_mask != 0) { 3870 1.182 matt if (pv == NULL) { 3871 1.182 matt pv = pool_get(&pmap_pv_pool, PR_NOWAIT); 3872 1.182 matt KASSERT(pv != NULL); 3873 1.182 matt } 3874 1.271 matt pmap_acquire_page_lock(md); 3875 1.215 uebayasi pmap_enter_pv(md, pa, pv, pmap_kernel(), va, 3876 1.182 matt PVF_WIRED | PVF_KENTRY 3877 1.183 matt | (prot & VM_PROT_WRITE ? 
PVF_WRITE : 0)); 3878 1.183 matt if ((prot & VM_PROT_WRITE) 3879 1.215 uebayasi && !(md->pvh_attrs & PVF_NC)) 3880 1.215 uebayasi md->pvh_attrs |= PVF_DIRTY; 3881 1.215 uebayasi KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3882 1.215 uebayasi pmap_vac_me_harder(md, pa, pmap_kernel(), va); 3883 1.271 matt pmap_release_page_lock(md); 3884 1.186 matt #endif 3885 1.179 matt } 3886 1.358 flxd #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3887 1.182 matt } else { 3888 1.182 matt if (pv != NULL) 3889 1.182 matt pool_put(&pmap_pv_pool, pv); 3890 1.186 matt #endif 3891 1.174 matt } 3892 1.426 skrll kpreempt_enable(); 3893 1.426 skrll 3894 1.271 matt if (pmap_initialized) { 3895 1.359 pgoyette UVMHIST_LOG(maphist, " <-- done (ptep %#jx: %#jx -> %#jx)", 3896 1.359 pgoyette (uintptr_t)ptep, opte, npte, 0); 3897 1.271 matt } 3898 1.277 matt 3899 1.134 thorpej } 3900 1.134 thorpej 3901 1.134 thorpej void 3902 1.134 thorpej pmap_kremove(vaddr_t va, vsize_t len) 3903 1.134 thorpej { 3904 1.271 matt #ifdef UVMHIST 3905 1.271 matt u_int total_mappings = 0; 3906 1.271 matt #endif 3907 1.174 matt 3908 1.174 matt PMAPCOUNT(kenter_unmappings); 3909 1.134 thorpej 3910 1.406 skrll UVMHIST_FUNC(__func__); 3911 1.406 skrll UVMHIST_CALLARGS(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0); 3912 1.271 matt 3913 1.271 matt const vaddr_t eva = va + len; 3914 1.373 bouyer pmap_t kpm = pmap_kernel(); 3915 1.134 thorpej 3916 1.426 skrll kpreempt_disable(); 3917 1.373 bouyer pmap_acquire_pmap_lock(kpm); 3918 1.320 matt 3919 1.134 thorpej while (va < eva) { 3920 1.271 matt vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va); 3921 1.134 thorpej if (next_bucket > eva) 3922 1.134 thorpej next_bucket = eva; 3923 1.134 thorpej 3924 1.307 skrll struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); 3925 1.134 thorpej KDASSERT(l2b != NULL); 3926 1.134 thorpej 3927 1.262 matt pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; 3928 1.262 matt pt_entry_t *ptep = sptep; 3929 1.271 matt u_int mappings = 0; 3930 1.134 thorpej 3931 1.134 thorpej while (va < next_bucket) { 3932 1.262 matt const pt_entry_t opte = *ptep; 3933 1.262 matt struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3934 1.262 matt if (opg != NULL) { 3935 1.215 uebayasi struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3936 1.215 uebayasi 3937 1.215 uebayasi if (omd->pvh_attrs & PVF_KMPAGE) { 3938 1.215 uebayasi KASSERT(omd->urw_mappings == 0); 3939 1.215 uebayasi KASSERT(omd->uro_mappings == 0); 3940 1.215 uebayasi KASSERT(omd->krw_mappings == 0); 3941 1.215 uebayasi KASSERT(omd->kro_mappings == 0); 3942 1.215 uebayasi omd->pvh_attrs &= ~PVF_KMPAGE; 3943 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3944 1.251 matt if (arm_cache_prefer_mask != 0) { 3945 1.251 matt omd->pvh_attrs &= ~PVF_WRITE; 3946 1.251 matt } 3947 1.186 matt #endif 3948 1.278 matt atomic_dec_32(&pmap_kmpages); 3949 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3950 1.358 flxd } else if (arm_cache_prefer_mask != 0) { 3951 1.278 matt pmap_acquire_page_lock(omd); 3952 1.182 matt pool_put(&pmap_pv_pool, 3953 1.182 matt pmap_kremove_pg(opg, va)); 3954 1.278 matt pmap_release_page_lock(omd); 3955 1.186 matt #endif 3956 1.179 matt } 3957 1.174 matt } 3958 1.266 matt if (l2pte_valid_p(opte)) { 3959 1.307 skrll l2pte_reset(ptep); 3960 1.307 skrll PTE_SYNC(ptep); 3961 1.174 matt #ifdef PMAP_CACHE_VIVT 3962 1.134 thorpej cpu_dcache_wbinv_range(va, PAGE_SIZE); 3963 1.174 matt #endif 3964 1.134 thorpej 
cpu_tlb_flushD_SE(va); 3965 1.307 skrll 3966 1.271 matt mappings += PAGE_SIZE / L2_S_SIZE; 3967 1.134 thorpej } 3968 1.134 thorpej va += PAGE_SIZE; 3969 1.262 matt ptep += PAGE_SIZE / L2_S_SIZE; 3970 1.134 thorpej } 3971 1.287 matt KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u", 3972 1.287 matt mappings, l2b->l2b_occupancy); 3973 1.134 thorpej l2b->l2b_occupancy -= mappings; 3974 1.307 skrll //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 3975 1.271 matt #ifdef UVMHIST 3976 1.271 matt total_mappings += mappings; 3977 1.271 matt #endif 3978 1.134 thorpej } 3979 1.373 bouyer pmap_release_pmap_lock(kpm); 3980 1.134 thorpej cpu_cpwait(); 3981 1.426 skrll kpreempt_enable(); 3982 1.426 skrll 3983 1.359 pgoyette UVMHIST_LOG(maphist, " <--- done (%ju mappings removed)", 3984 1.271 matt total_mappings, 0, 0, 0); 3985 1.134 thorpej } 3986 1.134 thorpej 3987 1.159 thorpej bool 3988 1.134 thorpej pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 3989 1.134 thorpej { 3990 1.365 ryo 3991 1.365 ryo return pmap_extract_coherency(pm, va, pap, NULL); 3992 1.365 ryo } 3993 1.365 ryo 3994 1.365 ryo bool 3995 1.365 ryo pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp) 3996 1.365 ryo { 3997 1.134 thorpej struct l2_dtable *l2; 3998 1.271 matt pd_entry_t *pdep, pde; 3999 1.134 thorpej pt_entry_t *ptep, pte; 4000 1.134 thorpej paddr_t pa; 4001 1.271 matt u_int l1slot; 4002 1.365 ryo bool coherent; 4003 1.134 thorpej 4004 1.426 skrll kpreempt_disable(); 4005 1.134 thorpej pmap_acquire_pmap_lock(pm); 4006 1.134 thorpej 4007 1.271 matt l1slot = l1pte_index(va); 4008 1.271 matt pdep = pmap_l1_kva(pm) + l1slot; 4009 1.271 matt pde = *pdep; 4010 1.134 thorpej 4011 1.271 matt if (l1pte_section_p(pde)) { 4012 1.134 thorpej /* 4013 1.134 thorpej * These should only happen for pmap_kernel() 4014 1.134 thorpej */ 4015 1.134 thorpej KDASSERT(pm == pmap_kernel()); 4016 1.134 thorpej pmap_release_pmap_lock(pm); 4017 1.428 skrll kpreempt_enable(); 4018 1.428 skrll 4019 1.235 matt #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 4020 1.271 matt if (l1pte_supersection_p(pde)) { 4021 1.271 matt pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET); 4022 1.235 matt } else 4023 1.235 matt #endif 4024 1.271 matt pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET); 4025 1.365 ryo coherent = (pde & L1_S_CACHE_MASK) == 0; 4026 1.134 thorpej } else { 4027 1.134 thorpej /* 4028 1.134 thorpej * Note that we can't rely on the validity of the L1 4029 1.134 thorpej * descriptor as an indication that a mapping exists. 4030 1.134 thorpej * We have to look it up in the L2 dtable. 
4031 1.134 thorpej */ 4032 1.271 matt l2 = pm->pm_l2[L2_IDX(l1slot)]; 4033 1.134 thorpej 4034 1.134 thorpej if (l2 == NULL || 4035 1.271 matt (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { 4036 1.134 thorpej pmap_release_pmap_lock(pm); 4037 1.426 skrll kpreempt_enable(); 4038 1.426 skrll 4039 1.174 matt return false; 4040 1.134 thorpej } 4041 1.134 thorpej 4042 1.283 matt pte = ptep[l2pte_index(va)]; 4043 1.134 thorpej pmap_release_pmap_lock(pm); 4044 1.426 skrll kpreempt_enable(); 4045 1.134 thorpej 4046 1.134 thorpej if (pte == 0) 4047 1.174 matt return false; 4048 1.134 thorpej 4049 1.134 thorpej switch (pte & L2_TYPE_MASK) { 4050 1.134 thorpej case L2_TYPE_L: 4051 1.134 thorpej pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 4052 1.365 ryo coherent = (pte & L2_L_CACHE_MASK) == 0; 4053 1.134 thorpej break; 4054 1.134 thorpej 4055 1.134 thorpej default: 4056 1.283 matt pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK); 4057 1.365 ryo coherent = (pte & L2_S_CACHE_MASK) == 0; 4058 1.134 thorpej break; 4059 1.134 thorpej } 4060 1.134 thorpej } 4061 1.134 thorpej 4062 1.134 thorpej if (pap != NULL) 4063 1.134 thorpej *pap = pa; 4064 1.134 thorpej 4065 1.365 ryo if (coherentp != NULL) 4066 1.365 ryo *coherentp = (pm == pmap_kernel() && coherent); 4067 1.365 ryo 4068 1.174 matt return true; 4069 1.134 thorpej } 4070 1.134 thorpej 4071 1.328 skrll /* 4072 1.328 skrll * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps 4073 1.328 skrll * that map it 4074 1.328 skrll */ 4075 1.328 skrll 4076 1.328 skrll static void 4077 1.328 skrll pmap_pv_remove(paddr_t pa) 4078 1.328 skrll { 4079 1.328 skrll struct pmap_page *pp; 4080 1.328 skrll 4081 1.426 skrll KASSERT(kpreempt_disabled()); 4082 1.328 skrll pp = pmap_pv_tracked(pa); 4083 1.328 skrll if (pp == NULL) 4084 1.328 skrll panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR, 4085 1.328 skrll pa); 4086 1.328 skrll 4087 1.328 skrll struct vm_page_md *md = PMAP_PAGE_TO_MD(pp); 4088 1.328 skrll pmap_page_remove(md, pa); 4089 1.328 skrll } 4090 1.328 skrll 4091 1.328 skrll void 4092 1.328 skrll pmap_pv_protect(paddr_t pa, vm_prot_t prot) 4093 1.328 skrll { 4094 1.328 skrll 4095 1.328 skrll /* the only case is remove at the moment */ 4096 1.328 skrll KASSERT(prot == VM_PROT_NONE); 4097 1.328 skrll pmap_pv_remove(pa); 4098 1.328 skrll } 4099 1.328 skrll 4100 1.134 thorpej void 4101 1.134 thorpej pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 4102 1.134 thorpej { 4103 1.134 thorpej struct l2_bucket *l2b; 4104 1.134 thorpej vaddr_t next_bucket; 4105 1.134 thorpej 4106 1.408 skrll UVMHIST_FUNC(__func__); 4107 1.408 skrll UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...#%jx prot %#jx", 4108 1.408 skrll (uintptr_t)pm, sva, eva, prot); 4109 1.134 thorpej 4110 1.134 thorpej if ((prot & VM_PROT_READ) == 0) { 4111 1.134 thorpej pmap_remove(pm, sva, eva); 4112 1.134 thorpej return; 4113 1.134 thorpej } 4114 1.134 thorpej 4115 1.134 thorpej if (prot & VM_PROT_WRITE) { 4116 1.134 thorpej /* 4117 1.134 thorpej * If this is a read->write transition, just ignore it and let 4118 1.134 thorpej * uvm_fault() take care of it later. 
4119 1.134 thorpej */ 4120 1.134 thorpej return; 4121 1.134 thorpej } 4122 1.134 thorpej 4123 1.426 skrll kpreempt_disable(); 4124 1.134 thorpej pmap_acquire_pmap_lock(pm); 4125 1.134 thorpej 4126 1.307 skrll #ifndef ARM_MMU_EXTENDED 4127 1.262 matt const bool flush = eva - sva >= PAGE_SIZE * 4; 4128 1.307 skrll u_int flags = 0; 4129 1.307 skrll #endif 4130 1.262 matt u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); 4131 1.134 thorpej 4132 1.134 thorpej while (sva < eva) { 4133 1.271 matt next_bucket = L2_NEXT_BUCKET_VA(sva); 4134 1.134 thorpej if (next_bucket > eva) 4135 1.134 thorpej next_bucket = eva; 4136 1.134 thorpej 4137 1.134 thorpej l2b = pmap_get_l2_bucket(pm, sva); 4138 1.134 thorpej if (l2b == NULL) { 4139 1.134 thorpej sva = next_bucket; 4140 1.134 thorpej continue; 4141 1.134 thorpej } 4142 1.134 thorpej 4143 1.271 matt pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)]; 4144 1.134 thorpej 4145 1.134 thorpej while (sva < next_bucket) { 4146 1.271 matt const pt_entry_t opte = *ptep; 4147 1.271 matt if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) { 4148 1.134 thorpej struct vm_page *pg; 4149 1.307 skrll #ifndef ARM_MMU_EXTENDED 4150 1.134 thorpej u_int f; 4151 1.307 skrll #endif 4152 1.134 thorpej 4153 1.174 matt #ifdef PMAP_CACHE_VIVT 4154 1.174 matt /* 4155 1.174 matt * OK, at this point, we know we're doing 4156 1.174 matt * write-protect operation. If the pmap is 4157 1.174 matt * active, write-back the page. 4158 1.174 matt */ 4159 1.264 kiyohara pmap_cache_wbinv_page(pm, sva, false, 4160 1.264 kiyohara PVF_REF | PVF_WRITE); 4161 1.174 matt #endif 4162 1.174 matt 4163 1.271 matt pg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 4164 1.271 matt pt_entry_t npte = l2pte_set_readonly(opte); 4165 1.307 skrll l2pte_reset(ptep); 4166 1.307 skrll PTE_SYNC(ptep); 4167 1.307 skrll #ifdef ARM_MMU_EXTENDED 4168 1.307 skrll pmap_tlb_flush_SE(pm, sva, PVF_REF); 4169 1.307 skrll #endif 4170 1.307 skrll l2pte_set(ptep, npte, 0); 4171 1.134 thorpej PTE_SYNC(ptep); 4172 1.134 thorpej 4173 1.134 thorpej if (pg != NULL) { 4174 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4175 1.215 uebayasi paddr_t pa = VM_PAGE_TO_PHYS(pg); 4176 1.215 uebayasi 4177 1.271 matt pmap_acquire_page_lock(md); 4178 1.307 skrll #ifndef ARM_MMU_EXTENDED 4179 1.327 skrll f = 4180 1.307 skrll #endif 4181 1.307 skrll pmap_modify_pv(md, pa, pm, sva, 4182 1.307 skrll clr_mask, 0); 4183 1.215 uebayasi pmap_vac_me_harder(md, pa, pm, sva); 4184 1.271 matt pmap_release_page_lock(md); 4185 1.307 skrll #ifndef ARM_MMU_EXTENDED 4186 1.226 matt } else { 4187 1.134 thorpej f = PVF_REF | PVF_EXEC; 4188 1.226 matt } 4189 1.134 thorpej 4190 1.262 matt if (flush) { 4191 1.134 thorpej flags |= f; 4192 1.259 matt } else { 4193 1.259 matt pmap_tlb_flush_SE(pm, sva, f); 4194 1.307 skrll #endif 4195 1.259 matt } 4196 1.1 matt } 4197 1.134 thorpej 4198 1.134 thorpej sva += PAGE_SIZE; 4199 1.271 matt ptep += PAGE_SIZE / L2_S_SIZE; 4200 1.134 thorpej } 4201 1.1 matt } 4202 1.1 matt 4203 1.307 skrll #ifndef ARM_MMU_EXTENDED 4204 1.134 thorpej if (flush) { 4205 1.262 matt if (PV_BEEN_EXECD(flags)) { 4206 1.134 thorpej pmap_tlb_flushID(pm); 4207 1.262 matt } else if (PV_BEEN_REFD(flags)) { 4208 1.134 thorpej pmap_tlb_flushD(pm); 4209 1.262 matt } 4210 1.134 thorpej } 4211 1.307 skrll #endif 4212 1.262 matt 4213 1.262 matt pmap_release_pmap_lock(pm); 4214 1.426 skrll kpreempt_enable(); 4215 1.134 thorpej } 4216 1.134 thorpej 4217 1.134 thorpej void 4218 1.174 matt pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t 
eva) 4219 1.174 matt { 4220 1.174 matt struct l2_bucket *l2b; 4221 1.174 matt pt_entry_t *ptep; 4222 1.174 matt vaddr_t next_bucket; 4223 1.174 matt vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; 4224 1.174 matt 4225 1.408 skrll UVMHIST_FUNC(__func__); 4226 1.408 skrll UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...#%jx", 4227 1.408 skrll (uintptr_t)pm, sva, eva, 0); 4228 1.174 matt 4229 1.174 matt pmap_acquire_pmap_lock(pm); 4230 1.174 matt 4231 1.174 matt while (sva < eva) { 4232 1.271 matt next_bucket = L2_NEXT_BUCKET_VA(sva); 4233 1.174 matt if (next_bucket > eva) 4234 1.174 matt next_bucket = eva; 4235 1.174 matt 4236 1.174 matt l2b = pmap_get_l2_bucket(pm, sva); 4237 1.174 matt if (l2b == NULL) { 4238 1.174 matt sva = next_bucket; 4239 1.174 matt continue; 4240 1.174 matt } 4241 1.174 matt 4242 1.174 matt for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; 4243 1.174 matt sva < next_bucket; 4244 1.271 matt sva += page_size, 4245 1.271 matt ptep += PAGE_SIZE / L2_S_SIZE, 4246 1.271 matt page_size = PAGE_SIZE) { 4247 1.266 matt if (l2pte_valid_p(*ptep)) { 4248 1.174 matt cpu_icache_sync_range(sva, 4249 1.368 riastrad uimin(page_size, eva - sva)); 4250 1.174 matt } 4251 1.174 matt } 4252 1.174 matt } 4253 1.174 matt 4254 1.174 matt pmap_release_pmap_lock(pm); 4255 1.174 matt } 4256 1.174 matt 4257 1.174 matt void 4258 1.134 thorpej pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 4259 1.134 thorpej { 4260 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4261 1.215 uebayasi paddr_t pa = VM_PAGE_TO_PHYS(pg); 4262 1.134 thorpej 4263 1.408 skrll UVMHIST_FUNC(__func__); 4264 1.408 skrll UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx prot %#jx", 4265 1.408 skrll (uintptr_t)md, pa, prot, 0); 4266 1.134 thorpej 4267 1.134 thorpej switch(prot) { 4268 1.174 matt case VM_PROT_READ|VM_PROT_WRITE: 4269 1.271 matt #if defined(ARM_MMU_EXTENDED) 4270 1.271 matt pmap_acquire_page_lock(md); 4271 1.215 uebayasi pmap_clearbit(md, pa, PVF_EXEC); 4272 1.271 matt pmap_release_page_lock(md); 4273 1.174 matt break; 4274 1.174 matt #endif 4275 1.134 thorpej case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 4276 1.174 matt break; 4277 1.134 thorpej 4278 1.134 thorpej case VM_PROT_READ: 4279 1.271 matt #if defined(ARM_MMU_EXTENDED) 4280 1.271 matt pmap_acquire_page_lock(md); 4281 1.215 uebayasi pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); 4282 1.271 matt pmap_release_page_lock(md); 4283 1.174 matt break; 4284 1.174 matt #endif 4285 1.134 thorpej case VM_PROT_READ|VM_PROT_EXECUTE: 4286 1.271 matt pmap_acquire_page_lock(md); 4287 1.215 uebayasi pmap_clearbit(md, pa, PVF_WRITE); 4288 1.271 matt pmap_release_page_lock(md); 4289 1.134 thorpej break; 4290 1.134 thorpej 4291 1.134 thorpej default: 4292 1.215 uebayasi pmap_page_remove(md, pa); 4293 1.134 thorpej break; 4294 1.134 thorpej } 4295 1.134 thorpej } 4296 1.134 thorpej 4297 1.134 thorpej /* 4298 1.134 thorpej * pmap_clear_modify: 4299 1.134 thorpej * 4300 1.134 thorpej * Clear the "modified" attribute for a page. 
4301 1.134 thorpej */ 4302 1.159 thorpej bool 4303 1.134 thorpej pmap_clear_modify(struct vm_page *pg) 4304 1.134 thorpej { 4305 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4306 1.215 uebayasi paddr_t pa = VM_PAGE_TO_PHYS(pg); 4307 1.159 thorpej bool rv; 4308 1.134 thorpej 4309 1.271 matt pmap_acquire_page_lock(md); 4310 1.226 matt 4311 1.215 uebayasi if (md->pvh_attrs & PVF_MOD) { 4312 1.160 thorpej rv = true; 4313 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 4314 1.194 matt /* 4315 1.194 matt * If we are going to clear the modified bit and there are 4316 1.194 matt * no other modified bits set, flush the page to memory and 4317 1.194 matt * mark it clean. 4318 1.194 matt */ 4319 1.215 uebayasi if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) 4320 1.215 uebayasi pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); 4321 1.194 matt #endif 4322 1.215 uebayasi pmap_clearbit(md, pa, PVF_MOD); 4323 1.271 matt } else { 4324 1.160 thorpej rv = false; 4325 1.271 matt } 4326 1.271 matt pmap_release_page_lock(md); 4327 1.134 thorpej 4328 1.271 matt return rv; 4329 1.134 thorpej } 4330 1.134 thorpej 4331 1.134 thorpej /* 4332 1.134 thorpej * pmap_clear_reference: 4333 1.134 thorpej * 4334 1.134 thorpej * Clear the "referenced" attribute for a page. 4335 1.134 thorpej */ 4336 1.159 thorpej bool 4337 1.134 thorpej pmap_clear_reference(struct vm_page *pg) 4338 1.134 thorpej { 4339 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4340 1.215 uebayasi paddr_t pa = VM_PAGE_TO_PHYS(pg); 4341 1.159 thorpej bool rv; 4342 1.134 thorpej 4343 1.271 matt pmap_acquire_page_lock(md); 4344 1.226 matt 4345 1.215 uebayasi if (md->pvh_attrs & PVF_REF) { 4346 1.160 thorpej rv = true; 4347 1.215 uebayasi pmap_clearbit(md, pa, PVF_REF); 4348 1.271 matt } else { 4349 1.160 thorpej rv = false; 4350 1.271 matt } 4351 1.271 matt pmap_release_page_lock(md); 4352 1.134 thorpej 4353 1.271 matt return rv; 4354 1.134 thorpej } 4355 1.134 thorpej 4356 1.134 thorpej /* 4357 1.134 thorpej * pmap_is_modified: 4358 1.134 thorpej * 4359 1.134 thorpej * Test if a page has the "modified" attribute. 4360 1.134 thorpej */ 4361 1.134 thorpej /* See <arm/arm32/pmap.h> */ 4362 1.134 thorpej 4363 1.134 thorpej /* 4364 1.134 thorpej * pmap_is_referenced: 4365 1.134 thorpej * 4366 1.134 thorpej * Test if a page has the "referenced" attribute. 4367 1.134 thorpej */ 4368 1.134 thorpej /* See <arm/arm32/pmap.h> */ 4369 1.134 thorpej 4370 1.271 matt #if defined(ARM_MMU_EXTENDED) && 0 4371 1.271 matt int 4372 1.271 matt pmap_prefetchabt_fixup(void *v) 4373 1.271 matt { 4374 1.271 matt struct trapframe * const tf = v; 4375 1.271 matt vaddr_t va = trunc_page(tf->tf_pc); 4376 1.271 matt int rv = ABORT_FIXUP_FAILED; 4377 1.271 matt 4378 1.271 matt if (!TRAP_USERMODE(tf) && va < VM_MAXUSER_ADDRESS) 4379 1.271 matt return rv; 4380 1.271 matt 4381 1.271 matt kpreempt_disable(); 4382 1.271 matt pmap_t pm = curcpu()->ci_pmap_cur; 4383 1.271 matt const size_t l1slot = l1pte_index(va); 4384 1.271 matt struct l2_dtable * const l2 = pm->pm_l2[L2_IDX(l1slot)]; 4385 1.271 matt if (l2 == NULL) 4386 1.271 matt goto out; 4387 1.271 matt 4388 1.271 matt struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; 4389 1.271 matt if (l2b->l2b_kva == NULL) 4390 1.271 matt goto out; 4391 1.271 matt 4392 1.271 matt /* 4393 1.271 matt * Check the PTE itself. 
4394 1.286 skrll */ 4395 1.271 matt pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 4396 1.271 matt const pt_entry_t opte = *ptep; 4397 1.271 matt if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0) 4398 1.271 matt goto out; 4399 1.271 matt 4400 1.343 skrll paddr_t pa = l2pte_pa(opte); 4401 1.271 matt struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 4402 1.271 matt KASSERT(pg != NULL); 4403 1.271 matt 4404 1.271 matt struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 4405 1.271 matt 4406 1.271 matt pmap_acquire_page_lock(md); 4407 1.271 matt struct pv_entry * const pv = pmap_find_pv(md, pm, va); 4408 1.271 matt KASSERT(pv != NULL); 4409 1.271 matt 4410 1.271 matt if (PV_IS_EXEC_P(pv->pv_flags)) { 4411 1.307 skrll l2pte_reset(ptep); 4412 1.307 skrll PTE_SYNC(ptep); 4413 1.307 skrll pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); 4414 1.271 matt if (!PV_IS_EXEC_P(md->pvh_attrs)) { 4415 1.271 matt pmap_syncicache_page(md, pa); 4416 1.271 matt } 4417 1.271 matt rv = ABORT_FIXUP_RETURN; 4418 1.307 skrll l2pte_set(ptep, opte & ~L2_XS_XN, 0); 4419 1.307 skrll PTE_SYNC(ptep); 4420 1.271 matt } 4421 1.271 matt pmap_release_page_lock(md); 4422 1.271 matt 4423 1.271 matt out: 4424 1.271 matt kpreempt_enable(); 4425 1.426 skrll 4426 1.271 matt return rv; 4427 1.271 matt } 4428 1.271 matt #endif 4429 1.271 matt 4430 1.134 thorpej int 4431 1.134 thorpej pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) 4432 1.134 thorpej { 4433 1.134 thorpej struct l2_dtable *l2; 4434 1.134 thorpej struct l2_bucket *l2b; 4435 1.134 thorpej paddr_t pa; 4436 1.271 matt const size_t l1slot = l1pte_index(va); 4437 1.134 thorpej int rv = 0; 4438 1.134 thorpej 4439 1.406 skrll UVMHIST_FUNC(__func__); 4440 1.406 skrll UVMHIST_CALLARGS(maphist, "pm=%#jx, va=%#jx, ftype=%#jx, user=%jd", 4441 1.406 skrll (uintptr_t)pm, va, ftype, user); 4442 1.271 matt 4443 1.271 matt va = trunc_page(va); 4444 1.271 matt 4445 1.271 matt KASSERT(!user || (pm != pmap_kernel())); 4446 1.271 matt 4447 1.271 matt #ifdef ARM_MMU_EXTENDED 4448 1.359 pgoyette UVMHIST_LOG(maphist, " ti=%#jx pai=%#jx asid=%#jx", 4449 1.363 skrll (uintptr_t)cpu_tlb_info(curcpu()), 4450 1.359 pgoyette (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())), 4451 1.359 pgoyette (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0); 4452 1.271 matt #endif 4453 1.271 matt 4454 1.426 skrll kpreempt_disable(); 4455 1.134 thorpej pmap_acquire_pmap_lock(pm); 4456 1.134 thorpej 4457 1.134 thorpej /* 4458 1.134 thorpej * If there is no l2_dtable for this address, then the process 4459 1.134 thorpej * has no business accessing it. 4460 1.134 thorpej * 4461 1.134 thorpej * Note: This will catch userland processes trying to access 4462 1.134 thorpej * kernel addresses. 4463 1.134 thorpej */ 4464 1.271 matt l2 = pm->pm_l2[L2_IDX(l1slot)]; 4465 1.271 matt if (l2 == NULL) { 4466 1.359 pgoyette UVMHIST_LOG(maphist, " no l2 for l1slot %#jx", l1slot, 0, 0, 0); 4467 1.134 thorpej goto out; 4468 1.271 matt } 4469 1.134 thorpej 4470 1.1 matt /* 4471 1.134 thorpej * Likewise if there is no L2 descriptor table 4472 1.1 matt */ 4473 1.271 matt l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; 4474 1.271 matt if (l2b->l2b_kva == NULL) { 4475 1.359 pgoyette UVMHIST_LOG(maphist, " <-- done (no ptep for l1slot %#jx)", 4476 1.359 pgoyette l1slot, 0, 0, 0); 4477 1.134 thorpej goto out; 4478 1.271 matt } 4479 1.134 thorpej 4480 1.134 thorpej /* 4481 1.134 thorpej * Check the PTE itself. 
4482 1.134 thorpej */ 4483 1.271 matt pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 4484 1.271 matt pt_entry_t const opte = *ptep; 4485 1.271 matt if (opte == 0 || (opte & L2_TYPE_MASK) == L2_TYPE_L) { 4486 1.424 skrll UVMHIST_LOG(maphist, " <-- done (empty pte)", 4487 1.424 skrll 0, 0, 0, 0); 4488 1.134 thorpej goto out; 4489 1.271 matt } 4490 1.134 thorpej 4491 1.271 matt #ifndef ARM_HAS_VBAR 4492 1.134 thorpej /* 4493 1.134 thorpej * Catch a userland access to the vector page mapped at 0x0 4494 1.134 thorpej */ 4495 1.271 matt if (user && (opte & L2_S_PROT_U) == 0) { 4496 1.271 matt UVMHIST_LOG(maphist, " <-- done (vector_page)", 0, 0, 0, 0); 4497 1.134 thorpej goto out; 4498 1.271 matt } 4499 1.271 matt #endif 4500 1.134 thorpej 4501 1.271 matt pa = l2pte_pa(opte); 4502 1.424 skrll UVMHIST_LOG(maphist, " pa %#jx opte %#jx ", pa, opte, 0, 0); 4503 1.134 thorpej 4504 1.271 matt if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(opte)) { 4505 1.134 thorpej /* 4506 1.134 thorpej * This looks like a good candidate for "page modified" 4507 1.134 thorpej * emulation... 4508 1.134 thorpej */ 4509 1.134 thorpej struct pv_entry *pv; 4510 1.134 thorpej struct vm_page *pg; 4511 1.134 thorpej 4512 1.134 thorpej /* Extract the physical address of the page */ 4513 1.271 matt if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { 4514 1.271 matt UVMHIST_LOG(maphist, " <-- done (mod/ref unmanaged page)", 0, 0, 0, 0); 4515 1.134 thorpej goto out; 4516 1.271 matt } 4517 1.134 thorpej 4518 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4519 1.215 uebayasi 4520 1.134 thorpej /* Get the current flags for this page. */ 4521 1.271 matt pmap_acquire_page_lock(md); 4522 1.215 uebayasi pv = pmap_find_pv(md, pm, va); 4523 1.268 matt if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { 4524 1.271 matt pmap_release_page_lock(md); 4525 1.271 matt UVMHIST_LOG(maphist, " <-- done (mod/ref emul: no PV)", 0, 0, 0, 0); 4526 1.134 thorpej goto out; 4527 1.134 thorpej } 4528 1.134 thorpej 4529 1.134 thorpej /* 4530 1.134 thorpej * Do the flags say this page is writable? If not then it 4531 1.134 thorpej * is a genuine write fault. If yes then the write fault is 4532 1.134 thorpej * our fault as we did not reflect the write access in the 4533 1.134 thorpej * PTE. Now we know a write has occurred we can correct this 4534 1.134 thorpej * and also set the modified bit 4535 1.134 thorpej */ 4536 1.134 thorpej if ((pv->pv_flags & PVF_WRITE) == 0) { 4537 1.271 matt pmap_release_page_lock(md); 4538 1.424 skrll UVMHIST_LOG(maphist, " <-- done (write fault)", 0, 0, 0, 0); 4539 1.134 thorpej goto out; 4540 1.134 thorpej } 4541 1.134 thorpej 4542 1.215 uebayasi md->pvh_attrs |= PVF_REF | PVF_MOD; 4543 1.134 thorpej pv->pv_flags |= PVF_REF | PVF_MOD; 4544 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 4545 1.185 matt /* 4546 1.185 matt * If there are cacheable mappings for this page, mark it dirty. 4547 1.185 matt */ 4548 1.215 uebayasi if ((md->pvh_attrs & PVF_NC) == 0) 4549 1.215 uebayasi md->pvh_attrs |= PVF_DIRTY; 4550 1.185 matt #endif 4551 1.271 matt #ifdef ARM_MMU_EXTENDED 4552 1.271 matt if (md->pvh_attrs & PVF_EXEC) { 4553 1.271 matt md->pvh_attrs &= ~PVF_EXEC; 4554 1.271 matt PMAPCOUNT(exec_discarded_modfixup); 4555 1.271 matt } 4556 1.271 matt #endif 4557 1.271 matt pmap_release_page_lock(md); 4558 1.134 thorpej 4559 1.286 skrll /* 4560 1.134 thorpej * Re-enable write permissions for the page. 
No need to call 4561 1.134 thorpej * pmap_vac_me_harder(), since this is just a 4562 1.134 thorpej * modified-emulation fault, and the PVF_WRITE bit isn't 4563 1.134 thorpej * changing. We've already set the cacheable bits based on 4564 1.134 thorpej * the assumption that we can write to this page. 4565 1.134 thorpej */ 4566 1.271 matt const pt_entry_t npte = 4567 1.271 matt l2pte_set_writable((opte & ~L2_TYPE_MASK) | L2_S_PROTO) 4568 1.271 matt #ifdef ARM_MMU_EXTENDED 4569 1.271 matt | (pm != pmap_kernel() ? L2_XS_nG : 0) 4570 1.271 matt #endif 4571 1.271 matt | 0; 4572 1.307 skrll l2pte_reset(ptep); 4573 1.307 skrll PTE_SYNC(ptep); 4574 1.307 skrll pmap_tlb_flush_SE(pm, va, 4575 1.307 skrll (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); 4576 1.307 skrll l2pte_set(ptep, npte, 0); 4577 1.134 thorpej PTE_SYNC(ptep); 4578 1.271 matt PMAPCOUNT(fixup_mod); 4579 1.134 thorpej rv = 1; 4580 1.359 pgoyette UVMHIST_LOG(maphist, " <-- done (mod/ref emul: changed pte " 4581 1.359 pgoyette "from %#jx to %#jx)", opte, npte, 0, 0); 4582 1.271 matt } else if ((opte & L2_TYPE_MASK) == L2_TYPE_INV) { 4583 1.134 thorpej /* 4584 1.134 thorpej * This looks like a good candidate for "page referenced" 4585 1.134 thorpej * emulation. 4586 1.134 thorpej */ 4587 1.134 thorpej struct vm_page *pg; 4588 1.134 thorpej 4589 1.134 thorpej /* Extract the physical address of the page */ 4590 1.271 matt if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { 4591 1.271 matt UVMHIST_LOG(maphist, " <-- done (ref emul: unmanaged page)", 0, 0, 0, 0); 4592 1.134 thorpej goto out; 4593 1.271 matt } 4594 1.134 thorpej 4595 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4596 1.215 uebayasi 4597 1.134 thorpej /* Get the current flags for this page. */ 4598 1.271 matt pmap_acquire_page_lock(md); 4599 1.271 matt struct pv_entry *pv = pmap_find_pv(md, pm, va); 4600 1.268 matt if (pv == NULL || PV_IS_KENTRY_P(pv->pv_flags)) { 4601 1.271 matt pmap_release_page_lock(md); 4602 1.271 matt UVMHIST_LOG(maphist, " <-- done (ref emul no PV)", 0, 0, 0, 0); 4603 1.134 thorpej goto out; 4604 1.134 thorpej } 4605 1.134 thorpej 4606 1.215 uebayasi md->pvh_attrs |= PVF_REF; 4607 1.134 thorpej pv->pv_flags |= PVF_REF; 4608 1.1 matt 4609 1.271 matt pt_entry_t npte = 4610 1.271 matt l2pte_set_readonly((opte & ~L2_TYPE_MASK) | L2_S_PROTO); 4611 1.271 matt #ifdef ARM_MMU_EXTENDED 4612 1.271 matt if (pm != pmap_kernel()) { 4613 1.271 matt npte |= L2_XS_nG; 4614 1.271 matt } 4615 1.271 matt /* 4616 1.271 matt * If we got called from prefetch abort, then ftype will have 4617 1.271 matt * VM_PROT_EXECUTE set. Now see if we have no-execute set in 4618 1.271 matt * the PTE. 4619 1.271 matt */ 4620 1.271 matt if (user && (ftype & VM_PROT_EXECUTE) && (npte & L2_XS_XN)) { 4621 1.271 matt /* 4622 1.271 matt * Is this a mapping of an executable page? 4623 1.271 matt */ 4624 1.271 matt if ((pv->pv_flags & PVF_EXEC) == 0) { 4625 1.281 skrll pmap_release_page_lock(md); 4626 1.271 matt UVMHIST_LOG(maphist, " <-- done (ref emul: no exec)", 4627 1.271 matt 0, 0, 0, 0); 4628 1.271 matt goto out; 4629 1.271 matt } 4630 1.271 matt /* 4631 1.271 matt * If we haven't synced the page, do so now. 
4632 1.271 matt */ 4633 1.271 matt if ((md->pvh_attrs & PVF_EXEC) == 0) { 4634 1.359 pgoyette UVMHIST_LOG(maphist, " ref emul: syncicache " 4635 1.359 pgoyette "page #%#jx", pa, 0, 0, 0); 4636 1.271 matt pmap_syncicache_page(md, pa); 4637 1.271 matt PMAPCOUNT(fixup_exec); 4638 1.271 matt } 4639 1.271 matt npte &= ~L2_XS_XN; 4640 1.271 matt } 4641 1.271 matt #endif /* ARM_MMU_EXTENDED */ 4642 1.271 matt pmap_release_page_lock(md); 4643 1.307 skrll l2pte_reset(ptep); 4644 1.307 skrll PTE_SYNC(ptep); 4645 1.307 skrll pmap_tlb_flush_SE(pm, va, 4646 1.307 skrll (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); 4647 1.307 skrll l2pte_set(ptep, npte, 0); 4648 1.271 matt PTE_SYNC(ptep); 4649 1.271 matt PMAPCOUNT(fixup_ref); 4650 1.271 matt rv = 1; 4651 1.359 pgoyette UVMHIST_LOG(maphist, " <-- done (ref emul: changed pte from " 4652 1.359 pgoyette "%#jx to %#jx)", opte, npte, 0, 0); 4653 1.271 matt #ifdef ARM_MMU_EXTENDED 4654 1.271 matt } else if (user && (ftype & VM_PROT_EXECUTE) && (opte & L2_XS_XN)) { 4655 1.271 matt struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 4656 1.271 matt if (pg == NULL) { 4657 1.271 matt UVMHIST_LOG(maphist, " <-- done (unmanaged page)", 0, 0, 0, 0); 4658 1.271 matt goto out; 4659 1.271 matt } 4660 1.271 matt 4661 1.271 matt struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 4662 1.271 matt 4663 1.271 matt /* Get the current flags for this page. */ 4664 1.271 matt pmap_acquire_page_lock(md); 4665 1.271 matt struct pv_entry * const pv = pmap_find_pv(md, pm, va); 4666 1.271 matt if (pv == NULL || (pv->pv_flags & PVF_EXEC) == 0) { 4667 1.271 matt pmap_release_page_lock(md); 4668 1.271 matt UVMHIST_LOG(maphist, " <-- done (no PV or not EXEC)", 0, 0, 0, 0); 4669 1.271 matt goto out; 4670 1.271 matt } 4671 1.134 thorpej 4672 1.271 matt /* 4673 1.271 matt * If we haven't synced the page, do so now. 4674 1.271 matt */ 4675 1.271 matt if ((md->pvh_attrs & PVF_EXEC) == 0) { 4676 1.359 pgoyette UVMHIST_LOG(maphist, "syncicache page #%#jx", 4677 1.271 matt pa, 0, 0, 0); 4678 1.271 matt pmap_syncicache_page(md, pa); 4679 1.271 matt } 4680 1.271 matt pmap_release_page_lock(md); 4681 1.271 matt /* 4682 1.271 matt * Turn off no-execute. 4683 1.271 matt */ 4684 1.271 matt KASSERT(opte & L2_XS_nG); 4685 1.307 skrll l2pte_reset(ptep); 4686 1.307 skrll PTE_SYNC(ptep); 4687 1.307 skrll pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); 4688 1.307 skrll l2pte_set(ptep, opte & ~L2_XS_XN, 0); 4689 1.134 thorpej PTE_SYNC(ptep); 4690 1.134 thorpej rv = 1; 4691 1.271 matt PMAPCOUNT(fixup_exec); 4692 1.359 pgoyette UVMHIST_LOG(maphist, "exec: changed pte from %#jx to %#jx", 4693 1.271 matt opte, opte & ~L2_XS_XN, 0, 0); 4694 1.271 matt #endif 4695 1.134 thorpej } 4696 1.134 thorpej 4697 1.271 matt #ifndef ARM_MMU_EXTENDED 4698 1.134 thorpej /* 4699 1.134 thorpej * We know there is a valid mapping here, so simply 4700 1.134 thorpej * fix up the L1 if necessary. 4701 1.134 thorpej */ 4702 1.271 matt pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot; 4703 1.271 matt pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm)); 4704 1.271 matt if (*pdep != pde) { 4705 1.271 matt l1pte_setone(pdep, pde); 4706 1.322 skrll PDE_SYNC(pdep); 4707 1.134 thorpej rv = 1; 4708 1.271 matt PMAPCOUNT(fixup_pdes); 4709 1.134 thorpej } 4710 1.271 matt #endif 4711 1.134 thorpej 4712 1.134 thorpej #ifdef CPU_SA110 4713 1.134 thorpej /* 4714 1.134 thorpej * There are bugs in the rev K SA110. This is a check for one 4715 1.134 thorpej * of them. 
4716 1.134 thorpej */ 4717 1.134 thorpej if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && 4718 1.134 thorpej curcpu()->ci_arm_cpurev < 3) { 4719 1.134 thorpej /* Always current pmap */ 4720 1.271 matt if (l2pte_valid_p(opte)) { 4721 1.134 thorpej extern int kernel_debug; 4722 1.134 thorpej if (kernel_debug & 1) { 4723 1.134 thorpej struct proc *p = curlwp->l_proc; 4724 1.134 thorpej printf("prefetch_abort: page is already " 4725 1.271 matt "mapped - pte=%p *pte=%08x\n", ptep, opte); 4726 1.134 thorpej printf("prefetch_abort: pc=%08lx proc=%p " 4727 1.134 thorpej "process=%s\n", va, p, p->p_comm); 4728 1.134 thorpej printf("prefetch_abort: far=%08x fs=%x\n", 4729 1.134 thorpej cpu_faultaddress(), cpu_faultstatus()); 4730 1.113 thorpej } 4731 1.134 thorpej #ifdef DDB 4732 1.134 thorpej if (kernel_debug & 2) 4733 1.134 thorpej Debugger(); 4734 1.134 thorpej #endif 4735 1.134 thorpej rv = 1; 4736 1.1 matt } 4737 1.1 matt } 4738 1.134 thorpej #endif /* CPU_SA110 */ 4739 1.104 thorpej 4740 1.271 matt #ifndef ARM_MMU_EXTENDED 4741 1.238 matt /* 4742 1.238 matt * If 'rv == 0' at this point, it generally indicates that there is a 4743 1.238 matt * stale TLB entry for the faulting address. That might be due to a 4744 1.238 matt * wrong setting of pmap_needs_pte_sync. So set it and retry. 4745 1.238 matt */ 4746 1.271 matt if (rv == 0 4747 1.271 matt && pm->pm_l1->l1_domain_use_count == 1 4748 1.238 matt && pmap_needs_pte_sync == 0) { 4749 1.240 matt pmap_needs_pte_sync = 1; 4750 1.239 matt PTE_SYNC(ptep); 4751 1.271 matt PMAPCOUNT(fixup_ptesync); 4752 1.238 matt rv = 1; 4753 1.238 matt } 4754 1.271 matt #endif 4755 1.238 matt 4756 1.311 skrll #ifndef MULTIPROCESSOR 4757 1.271 matt #if defined(DEBUG) || 1 4758 1.134 thorpej /* 4759 1.134 thorpej * If 'rv == 0' at this point, it generally indicates that there is a 4760 1.134 thorpej * stale TLB entry for the faulting address. This happens when two or 4761 1.134 thorpej * more processes are sharing an L1. Since we don't flush the TLB on 4762 1.134 thorpej * a context switch between such processes, we can take domain faults 4763 1.134 thorpej * for mappings which exist at the same VA in both processes. EVEN IF 4764 1.134 thorpej * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for 4765 1.134 thorpej * example. 4766 1.134 thorpej * 4767 1.134 thorpej * This is extremely likely to happen if pmap_enter() updated the L1 4768 1.134 thorpej * entry for a recently entered mapping. In this case, the TLB is 4769 1.134 thorpej * flushed for the new mapping, but there may still be TLB entries for 4770 1.134 thorpej * other mappings belonging to other processes in the 1MB range 4771 1.134 thorpej * covered by the L1 entry. 4772 1.134 thorpej * 4773 1.134 thorpej * Since 'rv == 0', we know that the L1 already contains the correct 4774 1.134 thorpej * value, so the fault must be due to a stale TLB entry. 4775 1.134 thorpej * 4776 1.134 thorpej * Since we always need to flush the TLB anyway in the case where we 4777 1.134 thorpej * fixed up the L1, or frobbed the L2 PTE, we effectively deal with 4778 1.134 thorpej * stale TLB entries dynamically. 4779 1.134 thorpej * 4780 1.134 thorpej * However, the above condition can ONLY happen if the current L1 is 4781 1.134 thorpej * being shared. If it happens when the L1 is unshared, it indicates 4782 1.134 thorpej * that other parts of the pmap are not doing their job WRT managing 4783 1.134 thorpej * the TLB. 
4784 1.134 thorpej */ 4785 1.271 matt if (rv == 0 4786 1.271 matt #ifndef ARM_MMU_EXTENDED 4787 1.271 matt && pm->pm_l1->l1_domain_use_count == 1 4788 1.271 matt #endif 4789 1.271 matt && true) { 4790 1.271 matt #ifdef DEBUG 4791 1.134 thorpej extern int last_fault_code; 4792 1.271 matt #else 4793 1.271 matt int last_fault_code = ftype & VM_PROT_EXECUTE 4794 1.271 matt ? armreg_ifsr_read() 4795 1.271 matt : armreg_dfsr_read(); 4796 1.271 matt #endif 4797 1.134 thorpej printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", 4798 1.134 thorpej pm, va, ftype); 4799 1.271 matt printf("fixup: l2 %p, l2b %p, ptep %p, pte %#x\n", 4800 1.271 matt l2, l2b, ptep, opte); 4801 1.271 matt 4802 1.271 matt #ifndef ARM_MMU_EXTENDED 4803 1.271 matt printf("fixup: pdep %p, pde %#x, fsr %#x\n", 4804 1.271 matt pdep, pde, last_fault_code); 4805 1.271 matt #else 4806 1.271 matt printf("fixup: pdep %p, pde %#x, ttbcr %#x\n", 4807 1.271 matt &pmap_l1_kva(pm)[l1slot], pmap_l1_kva(pm)[l1slot], 4808 1.271 matt armreg_ttbcr_read()); 4809 1.271 matt printf("fixup: fsr %#x cpm %p casid %#x contextidr %#x dacr %#x\n", 4810 1.271 matt last_fault_code, curcpu()->ci_pmap_cur, 4811 1.271 matt curcpu()->ci_pmap_asid_cur, 4812 1.271 matt armreg_contextidr_read(), armreg_dacr_read()); 4813 1.271 matt #ifdef _ARM_ARCH_7 4814 1.271 matt if (ftype & VM_PROT_WRITE) 4815 1.271 matt armreg_ats1cuw_write(va); 4816 1.271 matt else 4817 1.271 matt armreg_ats1cur_write(va); 4818 1.422 skrll isb(); 4819 1.271 matt printf("fixup: par %#x\n", armreg_par_read()); 4820 1.271 matt #endif 4821 1.271 matt #endif 4822 1.134 thorpej #ifdef DDB 4823 1.272 matt extern int kernel_debug; 4824 1.255 skrll 4825 1.272 matt if (kernel_debug & 2) { 4826 1.272 matt pmap_release_pmap_lock(pm); 4827 1.272 matt #ifdef UVMHIST 4828 1.272 matt KERNHIST_DUMP(maphist); 4829 1.272 matt #endif 4830 1.271 matt cpu_Debugger(); 4831 1.272 matt pmap_acquire_pmap_lock(pm); 4832 1.272 matt } 4833 1.134 thorpej #endif 4834 1.134 thorpej } 4835 1.134 thorpej #endif 4836 1.311 skrll #endif 4837 1.134 thorpej 4838 1.313 skrll #ifndef ARM_MMU_EXTENDED 4839 1.313 skrll /* Flush the TLB in the shared L1 case - see comment above */ 4840 1.313 skrll pmap_tlb_flush_SE(pm, va, 4841 1.313 skrll (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); 4842 1.313 skrll #endif 4843 1.313 skrll 4844 1.134 thorpej rv = 1; 4845 1.104 thorpej 4846 1.134 thorpej out: 4847 1.134 thorpej pmap_release_pmap_lock(pm); 4848 1.426 skrll kpreempt_enable(); 4849 1.134 thorpej 4850 1.387 skrll return rv; 4851 1.134 thorpej } 4852 1.134 thorpej 4853 1.134 thorpej /* 4854 1.134 thorpej * Routine: pmap_procwr 4855 1.134 thorpej * 4856 1.1 matt * Function: 4857 1.134 thorpej * Synchronize caches corresponding to [addr, addr+len) in p. 4858 1.134 thorpej * 4859 1.134 thorpej */ 4860 1.134 thorpej void 4861 1.134 thorpej pmap_procwr(struct proc *p, vaddr_t va, int len) 4862 1.134 thorpej { 4863 1.345 skrll #ifndef ARM_MMU_EXTENDED 4864 1.345 skrll 4865 1.134 thorpej /* We only need to do anything if it is the current process. */ 4866 1.134 thorpej if (p == curproc) 4867 1.134 thorpej cpu_icache_sync_range(va, len); 4868 1.345 skrll #endif 4869 1.134 thorpej } 4870 1.134 thorpej 4871 1.134 thorpej /* 4872 1.134 thorpej * Routine: pmap_unwire 4873 1.134 thorpej * Function: Clear the wired attribute for a map/virtual-address pair. 4874 1.134 thorpej * 4875 1.134 thorpej * In/out conditions: 4876 1.134 thorpej * The mapping must already exist in the pmap. 
4877 1.1 matt */ 4878 1.134 thorpej void 4879 1.134 thorpej pmap_unwire(pmap_t pm, vaddr_t va) 4880 1.134 thorpej { 4881 1.134 thorpej struct l2_bucket *l2b; 4882 1.134 thorpej pt_entry_t *ptep, pte; 4883 1.134 thorpej struct vm_page *pg; 4884 1.134 thorpej paddr_t pa; 4885 1.134 thorpej 4886 1.408 skrll UVMHIST_FUNC(__func__); 4887 1.408 skrll UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pm, va, 0, 0); 4888 1.134 thorpej 4889 1.426 skrll kpreempt_disable(); 4890 1.134 thorpej pmap_acquire_pmap_lock(pm); 4891 1.134 thorpej 4892 1.134 thorpej l2b = pmap_get_l2_bucket(pm, va); 4893 1.134 thorpej KDASSERT(l2b != NULL); 4894 1.134 thorpej 4895 1.134 thorpej ptep = &l2b->l2b_kva[l2pte_index(va)]; 4896 1.134 thorpej pte = *ptep; 4897 1.134 thorpej 4898 1.134 thorpej /* Extract the physical address of the page */ 4899 1.134 thorpej pa = l2pte_pa(pte); 4900 1.1 matt 4901 1.134 thorpej if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 4902 1.134 thorpej /* Update the wired bit in the pv entry for this page. */ 4903 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4904 1.215 uebayasi 4905 1.271 matt pmap_acquire_page_lock(md); 4906 1.215 uebayasi (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); 4907 1.271 matt pmap_release_page_lock(md); 4908 1.134 thorpej } 4909 1.134 thorpej 4910 1.134 thorpej pmap_release_pmap_lock(pm); 4911 1.426 skrll kpreempt_enable(); 4912 1.419 skrll 4913 1.420 skrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 4914 1.134 thorpej } 4915 1.134 thorpej 4916 1.348 skrll #ifdef ARM_MMU_EXTENDED 4917 1.348 skrll void 4918 1.348 skrll pmap_md_pdetab_activate(pmap_t pm, struct lwp *l) 4919 1.348 skrll { 4920 1.406 skrll UVMHIST_FUNC(__func__); 4921 1.406 skrll struct cpu_info * const ci = curcpu(); 4922 1.406 skrll struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci)); 4923 1.406 skrll 4924 1.406 skrll UVMHIST_CALLARGS(maphist, "pm %#jx (pm->pm_l1_pa %08jx asid %ju)", 4925 1.406 skrll (uintptr_t)pm, pm->pm_l1_pa, pai->pai_asid, 0); 4926 1.348 skrll 4927 1.348 skrll /* 4928 1.348 skrll * Assume that TTBR1 has only global mappings and TTBR0 only 4929 1.348 skrll * has non-global mappings. To prevent speculation from doing 4930 1.348 skrll * evil things we disable translation table walks using TTBR0 4931 1.348 skrll * before setting the CONTEXTIDR (ASID) or new TTBR0 value. 4932 1.348 skrll * Once both are set, table walks are reenabled. 4933 1.348 skrll */ 4934 1.348 skrll const uint32_t old_ttbcr = armreg_ttbcr_read(); 4935 1.348 skrll armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); 4936 1.422 skrll isb(); 4937 1.348 skrll 4938 1.348 skrll pmap_tlb_asid_acquire(pm, l); 4939 1.348 skrll 4940 1.348 skrll cpu_setttb(pm->pm_l1_pa, pai->pai_asid); 4941 1.348 skrll /* 4942 1.348 skrll * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0 4943 1.348 skrll * have been updated. 
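	 *
	 * (pmap_md_pdetab_deactivate() below relies on the same TTBCR_S_PD0
	 * mechanism to keep TTBR0 walks disabled while switching back to the
	 * kernel translation table.)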
4944 1.348 skrll */ 4945 1.422 skrll isb(); 4946 1.348 skrll 4947 1.348 skrll if (pm != pmap_kernel()) { 4948 1.348 skrll armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0); 4949 1.348 skrll } 4950 1.348 skrll cpu_cpwait(); 4951 1.348 skrll 4952 1.348 skrll KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u", 4953 1.348 skrll ci->ci_pmap_asid_cur, pai->pai_asid); 4954 1.348 skrll ci->ci_pmap_cur = pm; 4955 1.348 skrll } 4956 1.348 skrll 4957 1.348 skrll void 4958 1.348 skrll pmap_md_pdetab_deactivate(pmap_t pm) 4959 1.348 skrll { 4960 1.348 skrll 4961 1.406 skrll UVMHIST_FUNC(__func__); 4962 1.406 skrll UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0); 4963 1.348 skrll 4964 1.348 skrll kpreempt_disable(); 4965 1.348 skrll struct cpu_info * const ci = curcpu(); 4966 1.348 skrll /* 4967 1.348 skrll * Disable translation table walks from TTBR0 while no pmap has been 4968 1.348 skrll * activated. 4969 1.348 skrll */ 4970 1.348 skrll const uint32_t old_ttbcr = armreg_ttbcr_read(); 4971 1.348 skrll armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); 4972 1.422 skrll isb(); 4973 1.348 skrll pmap_tlb_asid_deactivate(pm); 4974 1.348 skrll cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID); 4975 1.422 skrll isb(); 4976 1.348 skrll 4977 1.348 skrll ci->ci_pmap_cur = pmap_kernel(); 4978 1.348 skrll KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u", 4979 1.348 skrll ci->ci_pmap_asid_cur); 4980 1.348 skrll kpreempt_enable(); 4981 1.348 skrll } 4982 1.348 skrll #endif 4983 1.348 skrll 4984 1.435 skrll 4985 1.435 skrll #if defined(EFI_RUNTIME) 4986 1.435 skrll void 4987 1.435 skrll pmap_activate_efirt(void) 4988 1.435 skrll { 4989 1.435 skrll struct pmap * const pm = &efirt_pmap; 4990 1.435 skrll 4991 1.435 skrll UVMHIST_FUNC(__func__); 4992 1.435 skrll UVMHIST_CALLARGS(maphist, " (pm=%#jx)", (uintptr_t)pm, 0, 0, 0); 4993 1.435 skrll 4994 1.437 skrll KASSERT(kpreempt_disabled()); 4995 1.437 skrll 4996 1.437 skrll struct cpu_info * const ci = curcpu(); 4997 1.437 skrll struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci)); 4998 1.437 skrll 4999 1.435 skrll PMAPCOUNT(activations); 5000 1.435 skrll 5001 1.435 skrll /* 5002 1.435 skrll * Assume that TTBR1 has only global mappings and TTBR0 only 5003 1.435 skrll * has non-global mappings. To prevent speculation from doing 5004 1.435 skrll * evil things we disable translation table walks using TTBR0 5005 1.435 skrll * before setting the CONTEXTIDR (ASID) or new TTBR0 value. 5006 1.435 skrll * Once both are set, table walks are reenabled. 5007 1.435 skrll */ 5008 1.435 skrll const uint32_t old_ttbcr = armreg_ttbcr_read(); 5009 1.435 skrll armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); 5010 1.435 skrll isb(); 5011 1.435 skrll 5012 1.435 skrll armreg_contextidr_write(pai->pai_asid); 5013 1.435 skrll armreg_ttbr_write(pm->pm_l1_pa | 5014 1.435 skrll (ci->ci_mpidr ? TTBR_MPATTR : TTBR_UPATTR)); 5015 1.435 skrll /* 5016 1.435 skrll * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0 5017 1.435 skrll * have been updated. 
5018 1.435 skrll */ 5019 1.435 skrll isb(); 5020 1.435 skrll 5021 1.435 skrll armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0); 5022 1.435 skrll 5023 1.435 skrll ci->ci_pmap_asid_cur = pai->pai_asid; 5024 1.435 skrll ci->ci_pmap_cur = pm; 5025 1.435 skrll 5026 1.435 skrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5027 1.435 skrll } 5028 1.435 skrll 5029 1.435 skrll #endif 5030 1.435 skrll 5031 1.435 skrll 5032 1.134 thorpej void 5033 1.173 scw pmap_activate(struct lwp *l) 5034 1.1 matt { 5035 1.165 scw extern int block_userspace_access; 5036 1.271 matt pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap; 5037 1.271 matt 5038 1.406 skrll UVMHIST_FUNC(__func__); 5039 1.406 skrll UVMHIST_CALLARGS(maphist, "l=%#jx pm=%#jx", (uintptr_t)l, 5040 1.406 skrll (uintptr_t)npm, 0, 0); 5041 1.165 scw 5042 1.437 skrll #ifdef ARM_MMU_EXTENDED 5043 1.437 skrll KASSERT(kpreempt_disabled()); 5044 1.437 skrll #endif 5045 1.437 skrll 5046 1.348 skrll struct cpu_info * const ci = curcpu(); 5047 1.348 skrll 5048 1.173 scw /* 5049 1.173 scw * If activating a non-current lwp or the current lwp is 5050 1.173 scw * already active, just return. 5051 1.173 scw */ 5052 1.271 matt if (false 5053 1.271 matt || l != curlwp 5054 1.271 matt #ifdef ARM_MMU_EXTENDED 5055 1.271 matt || (ci->ci_pmap_cur == npm && 5056 1.271 matt (npm == pmap_kernel() 5057 1.271 matt /* || PMAP_PAI_ASIDVALID_P(pai, cpu_tlb_info(ci)) */)) 5058 1.271 matt #else 5059 1.271 matt || npm->pm_activated == true 5060 1.271 matt #endif 5061 1.271 matt || false) { 5062 1.359 pgoyette UVMHIST_LOG(maphist, " <-- (same pmap)", (uintptr_t)curlwp, 5063 1.359 pgoyette (uintptr_t)l, 0, 0); 5064 1.173 scw return; 5065 1.271 matt } 5066 1.173 scw 5067 1.271 matt #ifndef ARM_MMU_EXTENDED 5068 1.271 matt const uint32_t ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) 5069 1.271 matt | (DOMAIN_CLIENT << (pmap_domain(npm) * 2)); 5070 1.134 thorpej 5071 1.165 scw /* 5072 1.165 scw * If TTB and DACR are unchanged, short-circuit all the 5073 1.165 scw * TLB/cache management stuff. 5074 1.165 scw */ 5075 1.271 matt pmap_t opm = ci->ci_lastlwp 5076 1.271 matt ? ci->ci_lastlwp->l_proc->p_vmspace->vm_map.pmap 5077 1.271 matt : NULL; 5078 1.271 matt if (opm != NULL) { 5079 1.271 matt uint32_t odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) 5080 1.271 matt | (DOMAIN_CLIENT << (pmap_domain(opm) * 2)); 5081 1.134 thorpej 5082 1.165 scw if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) 5083 1.165 scw goto all_done; 5084 1.271 matt } 5085 1.271 matt #endif /* !ARM_MMU_EXTENDED */ 5086 1.134 thorpej 5087 1.174 matt PMAPCOUNT(activations); 5088 1.165 scw block_userspace_access = 1; 5089 1.134 thorpej 5090 1.271 matt #ifndef ARM_MMU_EXTENDED 5091 1.165 scw /* 5092 1.165 scw * If switching to a user vmspace which is different to the 5093 1.165 scw * most recent one, and the most recent one is potentially 5094 1.165 scw * live in the cache, we must write-back and invalidate the 5095 1.165 scw * entire cache. 5096 1.165 scw */ 5097 1.271 matt pmap_t rpm = ci->ci_pmap_lastuser; 5098 1.203 scw 5099 1.347 skrll /* 5100 1.347 skrll * XXXSCW: There's a corner case here which can leave turds in the 5101 1.347 skrll * cache as reported in kern/41058. They're probably left over during 5102 1.347 skrll * tear-down and switching away from an exiting process. Until the root 5103 1.347 skrll * cause is identified and fixed, zap the cache when switching pmaps. 5104 1.347 skrll * This will result in a few unnecessary cache flushes, but that's 5105 1.347 skrll * better than silently corrupting data. 
5106 1.347 skrll */ 5107 1.203 scw #if 0 5108 1.165 scw if (npm != pmap_kernel() && rpm && npm != rpm && 5109 1.165 scw rpm->pm_cstate.cs_cache) { 5110 1.165 scw rpm->pm_cstate.cs_cache = 0; 5111 1.174 matt #ifdef PMAP_CACHE_VIVT 5112 1.165 scw cpu_idcache_wbinv_all(); 5113 1.174 matt #endif 5114 1.165 scw } 5115 1.203 scw #else 5116 1.203 scw if (rpm) { 5117 1.203 scw rpm->pm_cstate.cs_cache = 0; 5118 1.203 scw if (npm == pmap_kernel()) 5119 1.267 matt ci->ci_pmap_lastuser = NULL; 5120 1.203 scw #ifdef PMAP_CACHE_VIVT 5121 1.203 scw cpu_idcache_wbinv_all(); 5122 1.203 scw #endif 5123 1.203 scw } 5124 1.203 scw #endif 5125 1.134 thorpej 5126 1.165 scw /* No interrupts while we frob the TTB/DACR */ 5127 1.271 matt uint32_t oldirqstate = disable_interrupts(IF32_bits); 5128 1.271 matt #endif /* !ARM_MMU_EXTENDED */ 5129 1.1 matt 5130 1.257 matt #ifndef ARM_HAS_VBAR 5131 1.165 scw /* 5132 1.165 scw * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 5133 1.165 scw * entry corresponding to 'vector_page' in the incoming L1 table 5134 1.165 scw * before switching to it otherwise subsequent interrupts/exceptions 5135 1.165 scw * (including domain faults!) will jump into hyperspace. 5136 1.165 scw */ 5137 1.165 scw if (npm->pm_pl1vec != NULL) { 5138 1.165 scw cpu_tlb_flushID_SE((u_int)vector_page); 5139 1.165 scw cpu_cpwait(); 5140 1.165 scw *npm->pm_pl1vec = npm->pm_l1vec; 5141 1.165 scw PTE_SYNC(npm->pm_pl1vec); 5142 1.165 scw } 5143 1.257 matt #endif 5144 1.1 matt 5145 1.271 matt #ifdef ARM_MMU_EXTENDED 5146 1.348 skrll pmap_md_pdetab_activate(npm, l); 5147 1.271 matt #else 5148 1.165 scw cpu_domains(ndacr); 5149 1.165 scw if (npm == pmap_kernel() || npm == rpm) { 5150 1.134 thorpej /* 5151 1.165 scw * Switching to a kernel thread, or back to the 5152 1.165 scw * same user vmspace as before... Simply update 5153 1.165 scw * the TTB (no TLB flush required) 5154 1.134 thorpej */ 5155 1.237 matt cpu_setttb(npm->pm_l1->l1_physaddr, false); 5156 1.165 scw cpu_cpwait(); 5157 1.165 scw } else { 5158 1.165 scw /* 5159 1.165 scw * Otherwise, update TTB and flush TLB 5160 1.165 scw */ 5161 1.165 scw cpu_context_switch(npm->pm_l1->l1_physaddr); 5162 1.165 scw if (rpm != NULL) 5163 1.165 scw rpm->pm_cstate.cs_tlb = 0; 5164 1.165 scw } 5165 1.165 scw 5166 1.165 scw restore_interrupts(oldirqstate); 5167 1.271 matt #endif /* ARM_MMU_EXTENDED */ 5168 1.165 scw 5169 1.165 scw block_userspace_access = 0; 5170 1.165 scw 5171 1.271 matt #ifndef ARM_MMU_EXTENDED 5172 1.165 scw all_done: 5173 1.165 scw /* 5174 1.165 scw * The new pmap is resident. Make sure it's marked 5175 1.165 scw * as resident in the cache/TLB. 
5176 1.165 scw */ 5177 1.165 scw npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 5178 1.165 scw if (npm != pmap_kernel()) 5179 1.267 matt ci->ci_pmap_lastuser = npm; 5180 1.1 matt 5181 1.165 scw /* The old pmap is not longer active */ 5182 1.271 matt if (opm != npm) { 5183 1.271 matt if (opm != NULL) 5184 1.271 matt opm->pm_activated = false; 5185 1.1 matt 5186 1.271 matt /* But the new one is */ 5187 1.271 matt npm->pm_activated = true; 5188 1.271 matt } 5189 1.348 skrll ci->ci_pmap_cur = npm; 5190 1.271 matt #endif 5191 1.271 matt UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5192 1.165 scw } 5193 1.1 matt 5194 1.435 skrll 5195 1.165 scw void 5196 1.134 thorpej pmap_deactivate(struct lwp *l) 5197 1.134 thorpej { 5198 1.271 matt pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap; 5199 1.271 matt 5200 1.406 skrll UVMHIST_FUNC(__func__); 5201 1.406 skrll UVMHIST_CALLARGS(maphist, "l=%#jx (pm=%#jx)", (uintptr_t)l, 5202 1.406 skrll (uintptr_t)pm, 0, 0); 5203 1.165 scw 5204 1.271 matt #ifdef ARM_MMU_EXTENDED 5205 1.437 skrll KASSERT(kpreempt_disabled()); 5206 1.348 skrll pmap_md_pdetab_deactivate(pm); 5207 1.271 matt #else 5208 1.178 scw /* 5209 1.178 scw * If the process is exiting, make sure pmap_activate() does 5210 1.178 scw * a full MMU context-switch and cache flush, which we might 5211 1.178 scw * otherwise skip. See PR port-arm/38950. 5212 1.178 scw */ 5213 1.178 scw if (l->l_proc->p_sflag & PS_WEXIT) 5214 1.267 matt curcpu()->ci_lastlwp = NULL; 5215 1.178 scw 5216 1.271 matt pm->pm_activated = false; 5217 1.271 matt #endif 5218 1.271 matt UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5219 1.1 matt } 5220 1.1 matt 5221 1.435 skrll 5222 1.435 skrll #if defined(EFI_RUNTIME) 5223 1.435 skrll void 5224 1.435 skrll pmap_deactivate_efirt(void) 5225 1.435 skrll { 5226 1.435 skrll UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 5227 1.435 skrll 5228 1.437 skrll KASSERT(kpreempt_disabled()); 5229 1.435 skrll struct cpu_info * const ci = curcpu(); 5230 1.435 skrll 5231 1.435 skrll /* 5232 1.435 skrll * Disable translation table walks from TTBR0 while no pmap has been 5233 1.435 skrll * activated. 5234 1.435 skrll */ 5235 1.435 skrll const uint32_t old_ttbcr = armreg_ttbcr_read(); 5236 1.435 skrll armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0); 5237 1.435 skrll isb(); 5238 1.435 skrll 5239 1.435 skrll armreg_contextidr_write(KERNEL_PID); 5240 1.435 skrll isb(); 5241 1.435 skrll 5242 1.435 skrll KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u", 5243 1.435 skrll ci->ci_pmap_asid_cur); 5244 1.435 skrll 5245 1.435 skrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5246 1.435 skrll } 5247 1.435 skrll #endif 5248 1.435 skrll 5249 1.435 skrll 5250 1.1 matt void 5251 1.134 thorpej pmap_update(pmap_t pm) 5252 1.1 matt { 5253 1.1 matt 5254 1.406 skrll UVMHIST_FUNC(__func__); 5255 1.406 skrll UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm, 5256 1.359 pgoyette pm->pm_remove_all, 0, 0); 5257 1.337 skrll 5258 1.348 skrll #ifndef ARM_MMU_EXTENDED 5259 1.134 thorpej if (pm->pm_remove_all) { 5260 1.134 thorpej /* 5261 1.134 thorpej * Finish up the pmap_remove_all() optimisation by flushing 5262 1.134 thorpej * the TLB. 5263 1.134 thorpej */ 5264 1.134 thorpej pmap_tlb_flushID(pm); 5265 1.160 thorpej pm->pm_remove_all = false; 5266 1.134 thorpej } 5267 1.1 matt 5268 1.134 thorpej if (pmap_is_current(pm)) { 5269 1.107 thorpej /* 5270 1.134 thorpej * If we're dealing with a current userland pmap, move its L1 5271 1.134 thorpej * to the end of the LRU. 
5272 1.107 thorpej */ 5273 1.134 thorpej if (pm != pmap_kernel()) 5274 1.134 thorpej pmap_use_l1(pm); 5275 1.134 thorpej 5276 1.1 matt /* 5277 1.134 thorpej * We can assume we're done with frobbing the cache/tlb for 5278 1.134 thorpej * now. Make sure any future pmap ops don't skip cache/tlb 5279 1.134 thorpej * flushes. 5280 1.1 matt */ 5281 1.134 thorpej pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 5282 1.1 matt } 5283 1.348 skrll #else 5284 1.348 skrll 5285 1.348 skrll kpreempt_disable(); 5286 1.348 skrll #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1 5287 1.348 skrll u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0); 5288 1.348 skrll if (pending && pmap_tlb_shootdown_bystanders(pmap)) { 5289 1.348 skrll PMAP_COUNT(shootdown_ipis); 5290 1.348 skrll } 5291 1.348 skrll #endif 5292 1.348 skrll 5293 1.348 skrll /* 5294 1.348 skrll * If pmap_remove_all was called, we deactivated ourselves and released 5295 1.348 skrll * our ASID. Now we have to reactivate ourselves. 5296 1.348 skrll */ 5297 1.348 skrll if (__predict_false(pm->pm_remove_all)) { 5298 1.348 skrll pm->pm_remove_all = false; 5299 1.348 skrll 5300 1.348 skrll KASSERT(pm != pmap_kernel()); 5301 1.348 skrll pmap_md_pdetab_activate(pm, curlwp); 5302 1.348 skrll } 5303 1.348 skrll 5304 1.353 jmcneill if (arm_has_mpext_p) 5305 1.353 jmcneill armreg_bpiallis_write(0); 5306 1.353 jmcneill else 5307 1.353 jmcneill armreg_bpiall_write(0); 5308 1.353 jmcneill 5309 1.348 skrll kpreempt_enable(); 5310 1.348 skrll 5311 1.348 skrll KASSERTMSG(pm == pmap_kernel() 5312 1.348 skrll || curcpu()->ci_pmap_cur != pm 5313 1.348 skrll || pm->pm_pai[0].pai_asid == curcpu()->ci_pmap_asid_cur, 5314 1.348 skrll "pmap/asid %p/%#x != %s cur pmap/asid %p/%#x", pm, 5315 1.348 skrll pm->pm_pai[0].pai_asid, curcpu()->ci_data.cpu_name, 5316 1.348 skrll curcpu()->ci_pmap_cur, curcpu()->ci_pmap_asid_cur); 5317 1.271 matt #endif 5318 1.1 matt 5319 1.174 matt PMAPCOUNT(updates); 5320 1.174 matt 5321 1.96 thorpej /* 5322 1.134 thorpej * make sure TLB/cache operations have completed. 5323 1.96 thorpej */ 5324 1.134 thorpej cpu_cpwait(); 5325 1.337 skrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5326 1.134 thorpej } 5327 1.134 thorpej 5328 1.399 ad bool 5329 1.134 thorpej pmap_remove_all(pmap_t pm) 5330 1.134 thorpej { 5331 1.96 thorpej 5332 1.419 skrll UVMHIST_FUNC(__func__); 5333 1.421 skrll UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0); 5334 1.419 skrll 5335 1.419 skrll KASSERT(pm != pmap_kernel()); 5336 1.419 skrll 5337 1.426 skrll kpreempt_disable(); 5338 1.1 matt /* 5339 1.134 thorpej * The vmspace described by this pmap is about to be torn down. 5340 1.134 thorpej * Until pmap_update() is called, UVM will only make calls 5341 1.134 thorpej * to pmap_remove(). We can make life much simpler by flushing 5342 1.134 thorpej * the cache now, and deferring TLB invalidation to pmap_update(). 
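	 *
	 * (The deferred work is picked up in pmap_update(): it checks
	 * pm_remove_all and either performs the TLB flush in the
	 * !ARM_MMU_EXTENDED case, or re-activates the pmap in the
	 * ARM_MMU_EXTENDED case after the ASID has been released here.)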
5343 1.1 matt */ 5344 1.174 matt #ifdef PMAP_CACHE_VIVT 5345 1.259 matt pmap_cache_wbinv_all(pm, PVF_EXEC); 5346 1.174 matt #endif 5347 1.348 skrll #ifdef ARM_MMU_EXTENDED 5348 1.348 skrll #ifdef MULTIPROCESSOR 5349 1.348 skrll struct cpu_info * const ci = curcpu(); 5350 1.348 skrll // This should be the last CPU with this pmap onproc 5351 1.348 skrll KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci))); 5352 1.348 skrll if (kcpuset_isset(pm->pm_onproc, cpu_index(ci))) 5353 1.348 skrll #endif 5354 1.348 skrll pmap_tlb_asid_deactivate(pm); 5355 1.348 skrll #ifdef MULTIPROCESSOR 5356 1.348 skrll KASSERT(kcpuset_iszero(pm->pm_onproc)); 5357 1.348 skrll #endif 5358 1.348 skrll 5359 1.348 skrll pmap_tlb_asid_release_all(pm); 5360 1.348 skrll #endif 5361 1.160 thorpej pm->pm_remove_all = true; 5362 1.426 skrll kpreempt_enable(); 5363 1.419 skrll 5364 1.420 skrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5365 1.399 ad return false; 5366 1.1 matt } 5367 1.1 matt 5368 1.1 matt /* 5369 1.134 thorpej * Retire the given physical map from service. 5370 1.134 thorpej * Should only be called if the map contains no valid mappings. 5371 1.1 matt */ 5372 1.134 thorpej void 5373 1.134 thorpej pmap_destroy(pmap_t pm) 5374 1.1 matt { 5375 1.406 skrll UVMHIST_FUNC(__func__); 5376 1.406 skrll UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm, 5377 1.406 skrll pm ? pm->pm_remove_all : 0, 0, 0); 5378 1.337 skrll 5379 1.134 thorpej if (pm == NULL) 5380 1.134 thorpej return; 5381 1.1 matt 5382 1.134 thorpej if (pm->pm_remove_all) { 5383 1.336 skrll #ifdef ARM_MMU_EXTENDED 5384 1.338 skrll pmap_tlb_asid_release_all(pm); 5385 1.336 skrll #else 5386 1.134 thorpej pmap_tlb_flushID(pm); 5387 1.336 skrll #endif 5388 1.160 thorpej pm->pm_remove_all = false; 5389 1.1 matt } 5390 1.79 thorpej 5391 1.49 thorpej /* 5392 1.134 thorpej * Drop reference count 5393 1.49 thorpej */ 5394 1.436 riastrad membar_release(); 5395 1.394 ad if (atomic_dec_uint_nv(&pm->pm_refs) > 0) { 5396 1.271 matt #ifndef ARM_MMU_EXTENDED 5397 1.134 thorpej if (pmap_is_current(pm)) { 5398 1.134 thorpej if (pm != pmap_kernel()) 5399 1.134 thorpej pmap_use_l1(pm); 5400 1.134 thorpej pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 5401 1.134 thorpej } 5402 1.271 matt #endif 5403 1.134 thorpej return; 5404 1.134 thorpej } 5405 1.436 riastrad membar_acquire(); 5406 1.66 thorpej 5407 1.1 matt /* 5408 1.134 thorpej * reference count is zero, free pmap resources and then free pmap. 
5409 1.1 matt */ 5410 1.134 thorpej 5411 1.257 matt #ifndef ARM_HAS_VBAR 5412 1.134 thorpej if (vector_page < KERNEL_BASE) { 5413 1.165 scw KDASSERT(!pmap_is_current(pm)); 5414 1.147 scw 5415 1.134 thorpej /* Remove the vector page mapping */ 5416 1.134 thorpej pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); 5417 1.134 thorpej pmap_update(pm); 5418 1.1 matt } 5419 1.257 matt #endif 5420 1.1 matt 5421 1.134 thorpej pmap_free_l1(pm); 5422 1.134 thorpej 5423 1.271 matt #ifdef ARM_MMU_EXTENDED 5424 1.271 matt #ifdef MULTIPROCESSOR 5425 1.271 matt kcpuset_destroy(pm->pm_active); 5426 1.271 matt kcpuset_destroy(pm->pm_onproc); 5427 1.271 matt #endif 5428 1.271 matt #else 5429 1.267 matt struct cpu_info * const ci = curcpu(); 5430 1.267 matt if (ci->ci_pmap_lastuser == pm) 5431 1.267 matt ci->ci_pmap_lastuser = NULL; 5432 1.271 matt #endif 5433 1.165 scw 5434 1.394 ad mutex_destroy(&pm->pm_lock); 5435 1.168 ad pool_cache_put(&pmap_cache, pm); 5436 1.337 skrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5437 1.134 thorpej } 5438 1.134 thorpej 5439 1.134 thorpej 5440 1.134 thorpej /* 5441 1.134 thorpej * void pmap_reference(pmap_t pm) 5442 1.134 thorpej * 5443 1.134 thorpej * Add a reference to the specified pmap. 5444 1.134 thorpej */ 5445 1.134 thorpej void 5446 1.134 thorpej pmap_reference(pmap_t pm) 5447 1.134 thorpej { 5448 1.1 matt 5449 1.134 thorpej if (pm == NULL) 5450 1.134 thorpej return; 5451 1.1 matt 5452 1.271 matt #ifndef ARM_MMU_EXTENDED 5453 1.134 thorpej pmap_use_l1(pm); 5454 1.271 matt #endif 5455 1.104 thorpej 5456 1.394 ad atomic_inc_uint(&pm->pm_refs); 5457 1.134 thorpej } 5458 1.49 thorpej 5459 1.214 jmcneill #if (ARM_MMU_V6 + ARM_MMU_V7) > 0 5460 1.174 matt 5461 1.174 matt static struct evcnt pmap_prefer_nochange_ev = 5462 1.174 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); 5463 1.174 matt static struct evcnt pmap_prefer_change_ev = 5464 1.174 matt EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); 5465 1.174 matt 5466 1.174 matt EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); 5467 1.174 matt EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); 5468 1.174 matt 5469 1.174 matt void 5470 1.174 matt pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) 5471 1.174 matt { 5472 1.174 matt vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); 5473 1.174 matt vaddr_t va = *vap; 5474 1.174 matt vaddr_t diff = (hint - va) & mask; 5475 1.174 matt if (diff == 0) { 5476 1.174 matt pmap_prefer_nochange_ev.ev_count++; 5477 1.174 matt } else { 5478 1.174 matt pmap_prefer_change_ev.ev_count++; 5479 1.174 matt if (__predict_false(td)) 5480 1.174 matt va -= mask + 1; 5481 1.174 matt *vap = va + diff; 5482 1.174 matt } 5483 1.174 matt } 5484 1.214 jmcneill #endif /* ARM_MMU_V6 | ARM_MMU_V7 */ 5485 1.174 matt 5486 1.134 thorpej /* 5487 1.134 thorpej * pmap_zero_page() 5488 1.286 skrll * 5489 1.134 thorpej * Zero a given physical page by mapping it at a page hook point. 5490 1.134 thorpej * In doing the zero page op, the page we zero is mapped cachable, as with 5491 1.134 thorpej * StrongARM accesses to non-cached pages are non-burst making writing 5492 1.134 thorpej * _any_ bulk data very slow. 
5493 1.134 thorpej */ 5494 1.214 jmcneill #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 5495 1.134 thorpej void 5496 1.271 matt pmap_zero_page_generic(paddr_t pa) 5497 1.134 thorpej { 5498 1.174 matt #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 5499 1.271 matt struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 5500 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 5501 1.174 matt #endif 5502 1.244 matt #if defined(PMAP_CACHE_VIPT) 5503 1.174 matt /* Choose the last page color it had, if any */ 5504 1.215 uebayasi const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 5505 1.174 matt #else 5506 1.174 matt const vsize_t va_offset = 0; 5507 1.174 matt #endif 5508 1.244 matt #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) 5509 1.244 matt /* 5510 1.244 matt * Is this page mapped at its natural color? 5511 1.244 matt * If we have all of memory mapped, then just convert PA to VA. 5512 1.244 matt */ 5513 1.284 matt bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT 5514 1.271 matt || va_offset == (pa & arm_cache_prefer_mask); 5515 1.271 matt const vaddr_t vdstp = okcolor 5516 1.284 matt ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset)) 5517 1.271 matt : cpu_cdstp(va_offset); 5518 1.244 matt #else 5519 1.244 matt const bool okcolor = false; 5520 1.271 matt const vaddr_t vdstp = cpu_cdstp(va_offset); 5521 1.244 matt #endif 5522 1.271 matt pt_entry_t * const ptep = cpu_cdst_pte(va_offset); 5523 1.1 matt 5524 1.244 matt 5525 1.174 matt #ifdef DEBUG 5526 1.215 uebayasi if (!SLIST_EMPTY(&md->pvh_list)) 5527 1.134 thorpej panic("pmap_zero_page: page has mappings"); 5528 1.134 thorpej #endif 5529 1.1 matt 5530 1.271 matt KDASSERT((pa & PGOFSET) == 0); 5531 1.120 chris 5532 1.244 matt if (!okcolor) { 5533 1.244 matt /* 5534 1.244 matt * Hook in the page, zero it, and purge the cache for that 5535 1.244 matt * zeroed page. Invalidate the TLB as needed. 5536 1.244 matt */ 5537 1.271 matt const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode 5538 1.271 matt | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); 5539 1.271 matt l2pte_set(ptep, npte, 0); 5540 1.244 matt PTE_SYNC(ptep); 5541 1.271 matt cpu_tlb_flushD_SE(vdstp); 5542 1.244 matt cpu_cpwait(); 5543 1.284 matt #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \ 5544 1.284 matt && !defined(ARM_MMU_EXTENDED) 5545 1.244 matt /* 5546 1.244 matt * If we are direct-mapped and our color isn't ok, then before 5547 1.244 matt * we bzero the page invalidate its contents from the cache and 5548 1.244 matt * reset the color to its natural color. 5549 1.244 matt */ 5550 1.271 matt cpu_dcache_inv_range(vdstp, PAGE_SIZE); 5551 1.244 matt md->pvh_attrs &= ~arm_cache_prefer_mask; 5552 1.271 matt md->pvh_attrs |= (pa & arm_cache_prefer_mask); 5553 1.244 matt #endif 5554 1.244 matt } 5555 1.244 matt bzero_page(vdstp); 5556 1.244 matt if (!okcolor) { 5557 1.244 matt /* 5558 1.244 matt * Unmap the page. 5559 1.244 matt */ 5560 1.271 matt l2pte_reset(ptep); 5561 1.244 matt PTE_SYNC(ptep); 5562 1.271 matt cpu_tlb_flushD_SE(vdstp); 5563 1.174 matt #ifdef PMAP_CACHE_VIVT 5564 1.271 matt cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); 5565 1.174 matt #endif 5566 1.244 matt } 5567 1.174 matt #ifdef PMAP_CACHE_VIPT 5568 1.174 matt /* 5569 1.174 matt * This page is now cache resident so it now has a page color. 5570 1.174 matt * Any contents have been obliterated so clear the EXEC flag. 
5571 1.174 matt */ 5572 1.271 matt #ifndef ARM_MMU_EXTENDED 5573 1.215 uebayasi if (!pmap_is_page_colored_p(md)) { 5574 1.174 matt PMAPCOUNT(vac_color_new); 5575 1.215 uebayasi md->pvh_attrs |= PVF_COLORED; 5576 1.174 matt } 5577 1.271 matt md->pvh_attrs |= PVF_DIRTY; 5578 1.271 matt #endif 5579 1.215 uebayasi if (PV_IS_EXEC_P(md->pvh_attrs)) { 5580 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 5581 1.174 matt PMAPCOUNT(exec_discarded_zero); 5582 1.174 matt } 5583 1.174 matt #endif 5584 1.134 thorpej } 5585 1.174 matt #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ 5586 1.1 matt 5587 1.134 thorpej #if ARM_MMU_XSCALE == 1 5588 1.134 thorpej void 5589 1.271 matt pmap_zero_page_xscale(paddr_t pa) 5590 1.134 thorpej { 5591 1.134 thorpej #ifdef DEBUG 5592 1.271 matt struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 5593 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 5594 1.1 matt 5595 1.215 uebayasi if (!SLIST_EMPTY(&md->pvh_list)) 5596 1.134 thorpej panic("pmap_zero_page: page has mappings"); 5597 1.134 thorpej #endif 5598 1.1 matt 5599 1.271 matt KDASSERT((pa & PGOFSET) == 0); 5600 1.1 matt 5601 1.134 thorpej /* 5602 1.134 thorpej * Hook in the page, zero it, and purge the cache for that 5603 1.134 thorpej * zeroed page. Invalidate the TLB as needed. 5604 1.134 thorpej */ 5605 1.286 skrll 5606 1.271 matt pt_entry_t npte = L2_S_PROTO | pa | 5607 1.134 thorpej L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 5608 1.174 matt L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 5609 1.271 matt l2pte_set(cdst_pte, npte, 0); 5610 1.134 thorpej PTE_SYNC(cdst_pte); 5611 1.134 thorpej cpu_tlb_flushD_SE(cdstp); 5612 1.134 thorpej cpu_cpwait(); 5613 1.134 thorpej bzero_page(cdstp); 5614 1.134 thorpej xscale_cache_clean_minidata(); 5615 1.271 matt l2pte_reset(cdst_pte); 5616 1.271 matt PTE_SYNC(cdst_pte); 5617 1.134 thorpej } 5618 1.134 thorpej #endif /* ARM_MMU_XSCALE == 1 */ 5619 1.1 matt 5620 1.134 thorpej /* pmap_pageidlezero() 5621 1.134 thorpej * 5622 1.134 thorpej * The same as above, except that we assume that the page is not 5623 1.134 thorpej * mapped. This means we never have to flush the cache first. Called 5624 1.134 thorpej * from the idle loop. 5625 1.134 thorpej */ 5626 1.159 thorpej bool 5627 1.271 matt pmap_pageidlezero(paddr_t pa) 5628 1.134 thorpej { 5629 1.160 thorpej bool rv = true; 5630 1.174 matt #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 5631 1.271 matt struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 5632 1.215 uebayasi struct vm_page_md *md = VM_PAGE_TO_MD(pg); 5633 1.174 matt #endif 5634 1.174 matt #ifdef PMAP_CACHE_VIPT 5635 1.174 matt /* Choose the last page color it had, if any */ 5636 1.215 uebayasi const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 5637 1.174 matt #else 5638 1.174 matt const vsize_t va_offset = 0; 5639 1.174 matt #endif 5640 1.271 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 5641 1.284 matt bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT 5642 1.271 matt || va_offset == (pa & arm_cache_prefer_mask); 5643 1.271 matt const vaddr_t vdstp = okcolor 5644 1.284 matt ? 
pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset)) 5645 1.271 matt : cpu_cdstp(va_offset); 5646 1.271 matt #else 5647 1.271 matt const bool okcolor = false; 5648 1.271 matt const vaddr_t vdstp = cpu_cdstp(va_offset); 5649 1.271 matt #endif 5650 1.271 matt pt_entry_t * const ptep = cpu_cdst_pte(va_offset); 5651 1.174 matt 5652 1.174 matt 5653 1.134 thorpej #ifdef DEBUG 5654 1.215 uebayasi if (!SLIST_EMPTY(&md->pvh_list)) 5655 1.134 thorpej panic("pmap_pageidlezero: page has mappings"); 5656 1.1 matt #endif 5657 1.1 matt 5658 1.271 matt KDASSERT((pa & PGOFSET) == 0); 5659 1.134 thorpej 5660 1.271 matt if (!okcolor) { 5661 1.271 matt /* 5662 1.271 matt * Hook in the page, zero it, and purge the cache for that 5663 1.271 matt * zeroed page. Invalidate the TLB as needed. 5664 1.271 matt */ 5665 1.271 matt const pt_entry_t npte = L2_S_PROTO | pa | 5666 1.271 matt L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 5667 1.271 matt l2pte_set(ptep, npte, 0); 5668 1.271 matt PTE_SYNC(ptep); 5669 1.271 matt cpu_tlb_flushD_SE(vdstp); 5670 1.271 matt cpu_cpwait(); 5671 1.271 matt } 5672 1.1 matt 5673 1.271 matt uint64_t *ptr = (uint64_t *)vdstp; 5674 1.271 matt for (size_t i = 0; i < PAGE_SIZE / sizeof(*ptr); i++) { 5675 1.174 matt if (sched_curcpu_runnable_p() != 0) { 5676 1.134 thorpej /* 5677 1.134 thorpej * A process has become ready. Abort now, 5678 1.134 thorpej * so we don't keep it waiting while we 5679 1.134 thorpej * do slow memory access to finish this 5680 1.134 thorpej * page. 5681 1.134 thorpej */ 5682 1.160 thorpej rv = false; 5683 1.134 thorpej break; 5684 1.134 thorpej } 5685 1.134 thorpej *ptr++ = 0; 5686 1.11 chris } 5687 1.1 matt 5688 1.174 matt #ifdef PMAP_CACHE_VIVT 5689 1.134 thorpej if (rv) 5690 1.286 skrll /* 5691 1.134 thorpej * if we aborted we'll rezero this page again later so don't 5692 1.134 thorpej * purge it unless we finished it 5693 1.134 thorpej */ 5694 1.271 matt cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); 5695 1.174 matt #elif defined(PMAP_CACHE_VIPT) 5696 1.174 matt /* 5697 1.174 matt * This page is now cache resident so it now has a page color. 5698 1.174 matt * Any contents have been obliterated so clear the EXEC flag. 5699 1.174 matt */ 5700 1.271 matt #ifndef ARM_MMU_EXTENDED 5701 1.215 uebayasi if (!pmap_is_page_colored_p(md)) { 5702 1.174 matt PMAPCOUNT(vac_color_new); 5703 1.215 uebayasi md->pvh_attrs |= PVF_COLORED; 5704 1.174 matt } 5705 1.271 matt #endif 5706 1.215 uebayasi if (PV_IS_EXEC_P(md->pvh_attrs)) { 5707 1.215 uebayasi md->pvh_attrs &= ~PVF_EXEC; 5708 1.174 matt PMAPCOUNT(exec_discarded_zero); 5709 1.174 matt } 5710 1.174 matt #endif 5711 1.174 matt /* 5712 1.174 matt * Unmap the page. 5713 1.174 matt */ 5714 1.271 matt if (!okcolor) { 5715 1.271 matt l2pte_reset(ptep); 5716 1.271 matt PTE_SYNC(ptep); 5717 1.271 matt cpu_tlb_flushD_SE(vdstp); 5718 1.271 matt } 5719 1.1 matt 5720 1.271 matt return rv; 5721 1.1 matt } 5722 1.286 skrll 5723 1.48 chris /* 5724 1.134 thorpej * pmap_copy_page() 5725 1.48 chris * 5726 1.134 thorpej * Copy one physical page into another, by mapping the pages into 5727 1.134 thorpej * hook points. The same comment regarding cachability as in 5728 1.134 thorpej * pmap_zero_page also applies here. 
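 *
 * As in pmap_zero_page(), on VIPT caches both pages are mapped at
 * their last-used cache colour, and the direct map is used for a page
 * whenever its colour already matches (or the cache is PIPT).  On
 * VIVT caches the source page is cleaned first so that no dirty alias
 * exists while the temporary mappings are live.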
5729 1.48 chris */ 5730 1.214 jmcneill #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 5731 1.1 matt void 5732 1.134 thorpej pmap_copy_page_generic(paddr_t src, paddr_t dst) 5733 1.1 matt { 5734 1.174 matt struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); 5735 1.215 uebayasi struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); 5736 1.174 matt #if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 5737 1.174 matt struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); 5738 1.215 uebayasi struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); 5739 1.174 matt #endif 5740 1.174 matt #ifdef PMAP_CACHE_VIPT 5741 1.215 uebayasi const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; 5742 1.215 uebayasi const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; 5743 1.174 matt #else 5744 1.174 matt const vsize_t src_va_offset = 0; 5745 1.174 matt const vsize_t dst_va_offset = 0; 5746 1.174 matt #endif 5747 1.244 matt #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) 5748 1.244 matt /* 5749 1.244 matt * Is this page mapped at its natural color? 5750 1.244 matt * If we have all of memory mapped, then just convert PA to VA. 5751 1.244 matt */ 5752 1.284 matt bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT 5753 1.271 matt || src_va_offset == (src & arm_cache_prefer_mask); 5754 1.284 matt bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT 5755 1.271 matt || dst_va_offset == (dst & arm_cache_prefer_mask); 5756 1.244 matt const vaddr_t vsrcp = src_okcolor 5757 1.284 matt ? pmap_direct_mapped_phys(src, &src_okcolor, 5758 1.284 matt cpu_csrcp(src_va_offset)) 5759 1.271 matt : cpu_csrcp(src_va_offset); 5760 1.284 matt const vaddr_t vdstp = pmap_direct_mapped_phys(dst, &dst_okcolor, 5761 1.284 matt cpu_cdstp(dst_va_offset)); 5762 1.244 matt #else 5763 1.244 matt const bool src_okcolor = false; 5764 1.244 matt const bool dst_okcolor = false; 5765 1.271 matt const vaddr_t vsrcp = cpu_csrcp(src_va_offset); 5766 1.271 matt const vaddr_t vdstp = cpu_cdstp(dst_va_offset); 5767 1.244 matt #endif 5768 1.271 matt pt_entry_t * const src_ptep = cpu_csrc_pte(src_va_offset); 5769 1.271 matt pt_entry_t * const dst_ptep = cpu_cdst_pte(dst_va_offset); 5770 1.174 matt 5771 1.134 thorpej #ifdef DEBUG 5772 1.215 uebayasi if (!SLIST_EMPTY(&dst_md->pvh_list)) 5773 1.134 thorpej panic("pmap_copy_page: dst page has mappings"); 5774 1.134 thorpej #endif 5775 1.83 thorpej 5776 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 5777 1.215 uebayasi KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); 5778 1.174 matt #endif 5779 1.134 thorpej KDASSERT((src & PGOFSET) == 0); 5780 1.134 thorpej KDASSERT((dst & PGOFSET) == 0); 5781 1.105 thorpej 5782 1.134 thorpej /* 5783 1.134 thorpej * Clean the source page. Hold the source page's lock for 5784 1.134 thorpej * the duration of the copy so that no other mappings can 5785 1.134 thorpej * be created while we have a potentially aliased mapping. 5786 1.134 thorpej */ 5787 1.174 matt #ifdef PMAP_CACHE_VIVT 5788 1.271 matt pmap_acquire_page_lock(src_md); 5789 1.271 matt (void) pmap_clean_page(src_md, true); 5790 1.271 matt pmap_release_page_lock(src_md); 5791 1.174 matt #endif 5792 1.105 thorpej 5793 1.134 thorpej /* 5794 1.134 thorpej * Map the pages into the page hook points, copy them, and purge 5795 1.134 thorpej * the cache for the appropriate page. Invalidate the TLB 5796 1.134 thorpej * as required. 
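 * The source is mapped read-only (and, on VIPT caches, uncached if
 * the page is currently marked PVF_NC); the destination is mapped
 * read/write.  Each temporary mapping is torn down and its TLB entry
 * flushed again once the copy completes.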
5797 1.134 thorpej */ 5798 1.244 matt if (!src_okcolor) { 5799 1.271 matt const pt_entry_t nsrc_pte = L2_S_PROTO 5800 1.244 matt | src 5801 1.271 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 5802 1.244 matt | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) 5803 1.271 matt #else // defined(PMAP_CACHE_VIVT) || defined(ARM_MMU_EXTENDED) 5804 1.244 matt | pte_l2_s_cache_mode 5805 1.174 matt #endif 5806 1.244 matt | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); 5807 1.271 matt l2pte_set(src_ptep, nsrc_pte, 0); 5808 1.244 matt PTE_SYNC(src_ptep); 5809 1.271 matt cpu_tlb_flushD_SE(vsrcp); 5810 1.244 matt cpu_cpwait(); 5811 1.244 matt } 5812 1.244 matt if (!dst_okcolor) { 5813 1.271 matt const pt_entry_t ndst_pte = L2_S_PROTO | dst | 5814 1.244 matt L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 5815 1.271 matt l2pte_set(dst_ptep, ndst_pte, 0); 5816 1.244 matt PTE_SYNC(dst_ptep); 5817 1.271 matt cpu_tlb_flushD_SE(vdstp); 5818 1.244 matt cpu_cpwait(); 5819 1.244 matt #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) 5820 1.244 matt /* 5821 1.244 matt * If we are direct-mapped and our color isn't ok, then before 5822 1.244 matt * we bcopy to the new page invalidate its contents from the 5823 1.244 matt * cache and reset its color to its natural color. 5824 1.244 matt */ 5825 1.271 matt cpu_dcache_inv_range(vdstp, PAGE_SIZE); 5826 1.244 matt dst_md->pvh_attrs &= ~arm_cache_prefer_mask; 5827 1.244 matt dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); 5828 1.174 matt #endif 5829 1.244 matt } 5830 1.244 matt bcopy_page(vsrcp, vdstp); 5831 1.174 matt #ifdef PMAP_CACHE_VIVT 5832 1.244 matt cpu_dcache_inv_range(vsrcp, PAGE_SIZE); 5833 1.244 matt cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); 5834 1.174 matt #endif 5835 1.174 matt /* 5836 1.174 matt * Unmap the pages. 5837 1.174 matt */ 5838 1.244 matt if (!src_okcolor) { 5839 1.271 matt l2pte_reset(src_ptep); 5840 1.244 matt PTE_SYNC(src_ptep); 5841 1.271 matt cpu_tlb_flushD_SE(vsrcp); 5842 1.244 matt cpu_cpwait(); 5843 1.244 matt } 5844 1.244 matt if (!dst_okcolor) { 5845 1.271 matt l2pte_reset(dst_ptep); 5846 1.244 matt PTE_SYNC(dst_ptep); 5847 1.271 matt cpu_tlb_flushD_SE(vdstp); 5848 1.244 matt cpu_cpwait(); 5849 1.244 matt } 5850 1.174 matt #ifdef PMAP_CACHE_VIPT 5851 1.174 matt /* 5852 1.174 matt * Now that the destination page is in the cache, mark it as colored. 5853 1.174 matt * If this was an exec page, discard it. 
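 * On PIPT caches the recorded colour is simply reset to match the
 * physical address.  PVF_EXEC is cleared because the previous
 * contents, and any I-cache synchronisation done for them, are no
 * longer valid.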
5854 1.174 matt */ 5855 1.271 matt pmap_acquire_page_lock(dst_md); 5856 1.271 matt #ifndef ARM_MMU_EXTENDED 5857 1.271 matt if (arm_pcache.cache_type == CACHE_TYPE_PIPT) { 5858 1.271 matt dst_md->pvh_attrs &= ~arm_cache_prefer_mask; 5859 1.271 matt dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); 5860 1.271 matt } 5861 1.215 uebayasi if (!pmap_is_page_colored_p(dst_md)) { 5862 1.174 matt PMAPCOUNT(vac_color_new); 5863 1.215 uebayasi dst_md->pvh_attrs |= PVF_COLORED; 5864 1.174 matt } 5865 1.271 matt dst_md->pvh_attrs |= PVF_DIRTY; 5866 1.271 matt #endif 5867 1.215 uebayasi if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { 5868 1.215 uebayasi dst_md->pvh_attrs &= ~PVF_EXEC; 5869 1.174 matt PMAPCOUNT(exec_discarded_copy); 5870 1.174 matt } 5871 1.271 matt pmap_release_page_lock(dst_md); 5872 1.174 matt #endif 5873 1.1 matt } 5874 1.174 matt #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ 5875 1.1 matt 5876 1.134 thorpej #if ARM_MMU_XSCALE == 1 5877 1.1 matt void 5878 1.134 thorpej pmap_copy_page_xscale(paddr_t src, paddr_t dst) 5879 1.1 matt { 5880 1.226 matt struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 5881 1.226 matt struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); 5882 1.134 thorpej #ifdef DEBUG 5883 1.216 uebayasi struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst)); 5884 1.14 chs 5885 1.215 uebayasi if (!SLIST_EMPTY(&dst_md->pvh_list)) 5886 1.134 thorpej panic("pmap_copy_page: dst page has mappings"); 5887 1.134 thorpej #endif 5888 1.13 chris 5889 1.134 thorpej KDASSERT((src & PGOFSET) == 0); 5890 1.134 thorpej KDASSERT((dst & PGOFSET) == 0); 5891 1.14 chs 5892 1.134 thorpej /* 5893 1.134 thorpej * Clean the source page. Hold the source page's lock for 5894 1.134 thorpej * the duration of the copy so that no other mappings can 5895 1.134 thorpej * be created while we have a potentially aliased mapping. 5896 1.134 thorpej */ 5897 1.174 matt #ifdef PMAP_CACHE_VIVT 5898 1.271 matt pmap_acquire_page_lock(src_md); 5899 1.271 matt (void) pmap_clean_page(src_md, true); 5900 1.271 matt pmap_release_page_lock(src_md); 5901 1.174 matt #endif 5902 1.105 thorpej 5903 1.134 thorpej /* 5904 1.134 thorpej * Map the pages into the page hook points, copy them, and purge 5905 1.134 thorpej * the cache for the appropriate page. Invalidate the TLB 5906 1.134 thorpej * as required. 
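 * On XScale the temporary mappings are put through the mini-data
 * cache (via the TEX bits), so the copy can be cleaned cheaply with
 * xscale_cache_clean_minidata() afterwards.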
5907 1.134 thorpej */ 5908 1.296 matt const pt_entry_t nsrc_pte = L2_S_PROTO | src 5909 1.296 matt | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) 5910 1.296 matt | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 5911 1.296 matt l2pte_set(csrc_pte, nsrc_pte, 0); 5912 1.134 thorpej PTE_SYNC(csrc_pte); 5913 1.296 matt 5914 1.296 matt const pt_entry_t ndst_pte = L2_S_PROTO | dst 5915 1.296 matt | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) 5916 1.296 matt | L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 5917 1.296 matt l2pte_set(cdst_pte, ndst_pte, 0); 5918 1.134 thorpej PTE_SYNC(cdst_pte); 5919 1.296 matt 5920 1.134 thorpej cpu_tlb_flushD_SE(csrcp); 5921 1.134 thorpej cpu_tlb_flushD_SE(cdstp); 5922 1.134 thorpej cpu_cpwait(); 5923 1.134 thorpej bcopy_page(csrcp, cdstp); 5924 1.134 thorpej xscale_cache_clean_minidata(); 5925 1.296 matt l2pte_reset(csrc_pte); 5926 1.296 matt l2pte_reset(cdst_pte); 5927 1.296 matt PTE_SYNC(csrc_pte); 5928 1.296 matt PTE_SYNC(cdst_pte); 5929 1.1 matt } 5930 1.134 thorpej #endif /* ARM_MMU_XSCALE == 1 */ 5931 1.1 matt 5932 1.1 matt /* 5933 1.134 thorpej * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) 5934 1.1 matt * 5935 1.134 thorpej * Return the start and end addresses of the kernel's virtual space. 5936 1.134 thorpej * These values are setup in pmap_bootstrap and are updated as pages 5937 1.134 thorpej * are allocated. 5938 1.1 matt */ 5939 1.1 matt void 5940 1.134 thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end) 5941 1.1 matt { 5942 1.134 thorpej *start = virtual_avail; 5943 1.134 thorpej *end = virtual_end; 5944 1.1 matt } 5945 1.1 matt 5946 1.1 matt /* 5947 1.134 thorpej * Helper function for pmap_grow_l2_bucket() 5948 1.1 matt */ 5949 1.157 perry static inline int 5950 1.271 matt pmap_grow_map(vaddr_t va, paddr_t *pap) 5951 1.1 matt { 5952 1.2 matt paddr_t pa; 5953 1.1 matt 5954 1.386 skrll KASSERT((va & PGOFSET) == 0); 5955 1.386 skrll 5956 1.160 thorpej if (uvm.page_init_done == false) { 5957 1.174 matt #ifdef PMAP_STEAL_MEMORY 5958 1.174 matt pv_addr_t pv; 5959 1.174 matt pmap_boot_pagealloc(PAGE_SIZE, 5960 1.174 matt #ifdef PMAP_CACHE_VIPT 5961 1.174 matt arm_cache_prefer_mask, 5962 1.174 matt va & arm_cache_prefer_mask, 5963 1.174 matt #else 5964 1.174 matt 0, 0, 5965 1.174 matt #endif 5966 1.174 matt &pv); 5967 1.174 matt pa = pv.pv_pa; 5968 1.174 matt #else 5969 1.160 thorpej if (uvm_page_physget(&pa) == false) 5970 1.387 skrll return 1; 5971 1.174 matt #endif /* PMAP_STEAL_MEMORY */ 5972 1.134 thorpej } else { 5973 1.134 thorpej struct vm_page *pg; 5974 1.134 thorpej pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 5975 1.134 thorpej if (pg == NULL) 5976 1.387 skrll return 1; 5977 1.134 thorpej pa = VM_PAGE_TO_PHYS(pg); 5978 1.174 matt /* 5979 1.395 skrll * This new page must not have any mappings. 5980 1.174 matt */ 5981 1.275 matt struct vm_page_md *md __diagused = VM_PAGE_TO_MD(pg); 5982 1.215 uebayasi KASSERT(SLIST_EMPTY(&md->pvh_list)); 5983 1.134 thorpej } 5984 1.1 matt 5985 1.395 skrll /* 5986 1.396 skrll * Enter it via pmap_kenter_pa and let that routine do the hard work. 
5987 1.395 skrll */ 5988 1.397 skrll pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 5989 1.397 skrll PMAP_KMPAGE | PMAP_PTE); 5990 1.385 skrll 5991 1.134 thorpej if (pap) 5992 1.134 thorpej *pap = pa; 5993 1.1 matt 5994 1.174 matt PMAPCOUNT(pt_mappings); 5995 1.1 matt 5996 1.398 skrll const pmap_t kpm __diagused = pmap_kernel(); 5997 1.398 skrll struct l2_bucket * const l2b __diagused = pmap_get_l2_bucket(kpm, va); 5998 1.392 skrll KASSERT(l2b != NULL); 5999 1.392 skrll 6000 1.392 skrll pt_entry_t * const ptep __diagused = &l2b->l2b_kva[l2pte_index(va)]; 6001 1.398 skrll const pt_entry_t pte __diagused = *ptep; 6002 1.398 skrll KASSERT(l2pte_valid_p(pte)); 6003 1.398 skrll KASSERT((pte & L2_S_CACHE_MASK) == pte_l2_s_cache_mode_pt); 6004 1.392 skrll 6005 1.134 thorpej memset((void *)va, 0, PAGE_SIZE); 6006 1.398 skrll 6007 1.387 skrll return 0; 6008 1.1 matt } 6009 1.1 matt 6010 1.1 matt /* 6011 1.134 thorpej * This is the same as pmap_alloc_l2_bucket(), except that it is only 6012 1.134 thorpej * used by pmap_growkernel(). 6013 1.1 matt */ 6014 1.157 perry static inline struct l2_bucket * 6015 1.134 thorpej pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) 6016 1.1 matt { 6017 1.389 skrll const size_t l1slot = l1pte_index(va); 6018 1.134 thorpej struct l2_dtable *l2; 6019 1.134 thorpej vaddr_t nva; 6020 1.134 thorpej 6021 1.391 skrll CTASSERT((PAGE_SIZE % L2_TABLE_SIZE_REAL) == 0); 6022 1.271 matt if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) { 6023 1.134 thorpej /* 6024 1.134 thorpej * No mapping at this address, as there is 6025 1.134 thorpej * no entry in the L1 table. 6026 1.134 thorpej * Need to allocate a new l2_dtable. 6027 1.134 thorpej */ 6028 1.134 thorpej nva = pmap_kernel_l2dtable_kva; 6029 1.134 thorpej if ((nva & PGOFSET) == 0) { 6030 1.134 thorpej /* 6031 1.134 thorpej * Need to allocate a backing page 6032 1.134 thorpej */ 6033 1.271 matt if (pmap_grow_map(nva, NULL)) 6034 1.387 skrll return NULL; 6035 1.134 thorpej } 6036 1.1 matt 6037 1.134 thorpej l2 = (struct l2_dtable *)nva; 6038 1.134 thorpej nva += sizeof(struct l2_dtable); 6039 1.82 thorpej 6040 1.134 thorpej if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { 6041 1.134 thorpej /* 6042 1.134 thorpej * The new l2_dtable straddles a page boundary. 6043 1.134 thorpej * Map in another page to cover it. 6044 1.134 thorpej */ 6045 1.386 skrll if (pmap_grow_map(nva & ~PGOFSET, NULL)) 6046 1.387 skrll return NULL; 6047 1.134 thorpej } 6048 1.1 matt 6049 1.134 thorpej pmap_kernel_l2dtable_kva = nva; 6050 1.1 matt 6051 1.134 thorpej /* 6052 1.134 thorpej * Link it into the parent pmap 6053 1.134 thorpej */ 6054 1.271 matt pm->pm_l2[L2_IDX(l1slot)] = l2; 6055 1.82 thorpej } 6056 1.75 reinoud 6057 1.389 skrll struct l2_bucket * const l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; 6058 1.134 thorpej 6059 1.134 thorpej /* 6060 1.134 thorpej * Fetch pointer to the L2 page table associated with the address. 6061 1.134 thorpej */ 6062 1.134 thorpej if (l2b->l2b_kva == NULL) { 6063 1.134 thorpej pt_entry_t *ptep; 6064 1.134 thorpej 6065 1.134 thorpej /* 6066 1.134 thorpej * No L2 page table has been allocated. Chances are, this 6067 1.134 thorpej * is because we just allocated the l2_dtable, above. 
6068 1.134 thorpej */ 6069 1.134 thorpej nva = pmap_kernel_l2ptp_kva; 6070 1.134 thorpej ptep = (pt_entry_t *)nva; 6071 1.134 thorpej if ((nva & PGOFSET) == 0) { 6072 1.134 thorpej /* 6073 1.134 thorpej * Need to allocate a backing page 6074 1.134 thorpej */ 6075 1.271 matt if (pmap_grow_map(nva, &pmap_kernel_l2ptp_phys)) 6076 1.387 skrll return NULL; 6077 1.134 thorpej PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); 6078 1.134 thorpej } 6079 1.134 thorpej 6080 1.134 thorpej l2->l2_occupancy++; 6081 1.134 thorpej l2b->l2b_kva = ptep; 6082 1.271 matt l2b->l2b_l1slot = l1slot; 6083 1.271 matt l2b->l2b_pa = pmap_kernel_l2ptp_phys; 6084 1.134 thorpej 6085 1.134 thorpej pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 6086 1.134 thorpej pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 6087 1.82 thorpej } 6088 1.1 matt 6089 1.387 skrll return l2b; 6090 1.134 thorpej } 6091 1.134 thorpej 6092 1.134 thorpej vaddr_t 6093 1.134 thorpej pmap_growkernel(vaddr_t maxkvaddr) 6094 1.134 thorpej { 6095 1.408 skrll UVMHIST_FUNC(__func__); 6096 1.429 skrll UVMHIST_CALLARGS(maphist, "growing kernel from %#jx to %#jx", 6097 1.408 skrll pmap_curmaxkvaddr, maxkvaddr, 0, 0); 6098 1.408 skrll 6099 1.134 thorpej pmap_t kpm = pmap_kernel(); 6100 1.271 matt #ifndef ARM_MMU_EXTENDED 6101 1.134 thorpej struct l1_ttable *l1; 6102 1.271 matt #endif 6103 1.134 thorpej int s; 6104 1.134 thorpej 6105 1.134 thorpej if (maxkvaddr <= pmap_curmaxkvaddr) 6106 1.134 thorpej goto out; /* we are OK */ 6107 1.1 matt 6108 1.134 thorpej KDASSERT(maxkvaddr <= virtual_end); 6109 1.34 thorpej 6110 1.134 thorpej /* 6111 1.134 thorpej * whoops! we need to add kernel PTPs 6112 1.134 thorpej */ 6113 1.1 matt 6114 1.417 skrll vaddr_t pmap_maxkvaddr = pmap_curmaxkvaddr; 6115 1.417 skrll 6116 1.373 bouyer s = splvm(); /* to be safe */ 6117 1.373 bouyer mutex_enter(&kpm_lock); 6118 1.1 matt 6119 1.134 thorpej /* Map 1MB at a time */ 6120 1.417 skrll size_t l1slot = l1pte_index(pmap_maxkvaddr); 6121 1.271 matt #ifdef ARM_MMU_EXTENDED 6122 1.271 matt pd_entry_t * const spdep = &kpm->pm_l1[l1slot]; 6123 1.271 matt pd_entry_t *pdep = spdep; 6124 1.271 matt #endif 6125 1.271 matt for (;pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE, 6126 1.271 matt #ifdef ARM_MMU_EXTENDED 6127 1.271 matt pdep++, 6128 1.271 matt #endif 6129 1.271 matt l1slot++) { 6130 1.271 matt struct l2_bucket *l2b = 6131 1.271 matt pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); 6132 1.271 matt KASSERT(l2b != NULL); 6133 1.271 matt 6134 1.271 matt const pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa 6135 1.271 matt | L1_C_DOM(PMAP_DOMAIN_KERNEL); 6136 1.271 matt #ifdef ARM_MMU_EXTENDED 6137 1.390 skrll KASSERT(*pdep == 0); 6138 1.271 matt l1pte_setone(pdep, npde); 6139 1.271 matt #else 6140 1.430 skrll /* Distribute new L1 entry to all L1s */ 6141 1.134 thorpej SLIST_FOREACH(l1, &l1_list, l1_link) { 6142 1.271 matt pd_entry_t * const pdep = &l1->l1_kva[l1slot]; 6143 1.271 matt l1pte_setone(pdep, npde); 6144 1.271 matt PDE_SYNC(pdep); 6145 1.134 thorpej } 6146 1.271 matt #endif 6147 1.1 matt } 6148 1.271 matt #ifdef ARM_MMU_EXTENDED 6149 1.271 matt PDE_SYNC_RANGE(spdep, pdep - spdep); 6150 1.271 matt #endif 6151 1.1 matt 6152 1.271 matt #ifdef PMAP_CACHE_VIVT 6153 1.134 thorpej /* 6154 1.134 thorpej * flush out the cache, expensive but growkernel will happen so 6155 1.134 thorpej * rarely 6156 1.134 thorpej */ 6157 1.134 thorpej cpu_dcache_wbinv_all(); 6158 1.134 thorpej cpu_tlb_flushD(); 6159 1.134 thorpej cpu_cpwait(); 6160 1.271 matt #endif 6161 1.134 thorpej 6162 1.373 
bouyer mutex_exit(&kpm_lock); 6163 1.134 thorpej splx(s); 6164 1.1 matt 6165 1.417 skrll kasan_shadow_map((void *)pmap_maxkvaddr, 6166 1.417 skrll (size_t)(pmap_curmaxkvaddr - pmap_maxkvaddr)); 6167 1.417 skrll 6168 1.134 thorpej out: 6169 1.387 skrll return pmap_curmaxkvaddr; 6170 1.1 matt } 6171 1.1 matt 6172 1.134 thorpej /************************ Utility routines ****************************/ 6173 1.1 matt 6174 1.257 matt #ifndef ARM_HAS_VBAR 6175 1.134 thorpej /* 6176 1.134 thorpej * vector_page_setprot: 6177 1.134 thorpej * 6178 1.134 thorpej * Manipulate the protection of the vector page. 6179 1.134 thorpej */ 6180 1.134 thorpej void 6181 1.134 thorpej vector_page_setprot(int prot) 6182 1.11 chris { 6183 1.134 thorpej struct l2_bucket *l2b; 6184 1.134 thorpej pt_entry_t *ptep; 6185 1.134 thorpej 6186 1.256 matt #if defined(CPU_ARMV7) || defined(CPU_ARM11) 6187 1.256 matt /* 6188 1.256 matt * If we are using VBAR to use the vectors in the kernel, then it's 6189 1.256 matt * already mapped in the kernel text so no need to anything here. 6190 1.256 matt */ 6191 1.256 matt if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) { 6192 1.256 matt KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0); 6193 1.256 matt return; 6194 1.256 matt } 6195 1.256 matt #endif 6196 1.256 matt 6197 1.134 thorpej l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); 6198 1.271 matt KASSERT(l2b != NULL); 6199 1.17 chris 6200 1.134 thorpej ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; 6201 1.72 thorpej 6202 1.271 matt const pt_entry_t opte = *ptep; 6203 1.271 matt #ifdef ARM_MMU_EXTENDED 6204 1.271 matt const pt_entry_t npte = (opte & ~(L2_S_PROT_MASK|L2_XS_XN)) 6205 1.271 matt | L2_S_PROT(PTE_KERNEL, prot); 6206 1.271 matt #else 6207 1.271 matt const pt_entry_t npte = (opte & ~L2_S_PROT_MASK) 6208 1.271 matt | L2_S_PROT(PTE_KERNEL, prot); 6209 1.271 matt #endif 6210 1.271 matt l2pte_set(ptep, npte, opte); 6211 1.134 thorpej PTE_SYNC(ptep); 6212 1.134 thorpej cpu_tlb_flushD_SE(vector_page); 6213 1.32 thorpej cpu_cpwait(); 6214 1.17 chris } 6215 1.257 matt #endif 6216 1.17 chris 6217 1.17 chris /* 6218 1.134 thorpej * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 6219 1.160 thorpej * Returns true if the mapping exists, else false. 6220 1.134 thorpej * 6221 1.134 thorpej * NOTE: This function is only used by a couple of arm-specific modules. 6222 1.134 thorpej * It is not safe to take any pmap locks here, since we could be right 6223 1.134 thorpej * in the middle of debugging the pmap anyway... 6224 1.134 thorpej * 6225 1.160 thorpej * It is possible for this routine to return false even though a valid 6226 1.134 thorpej * mapping does exist. This is because we don't lock, so the metadata 6227 1.134 thorpej * state may be inconsistent. 6228 1.134 thorpej * 6229 1.134 thorpej * NOTE: We can return a NULL *ptp in the case where the L1 pde is 6230 1.134 thorpej * a "section" mapping. 
6231 1.1 matt */ 6232 1.159 thorpej bool 6233 1.134 thorpej pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) 6234 1.1 matt { 6235 1.134 thorpej struct l2_dtable *l2; 6236 1.271 matt pd_entry_t *pdep, pde; 6237 1.134 thorpej pt_entry_t *ptep; 6238 1.271 matt u_short l1slot; 6239 1.134 thorpej 6240 1.134 thorpej if (pm->pm_l1 == NULL) 6241 1.174 matt return false; 6242 1.134 thorpej 6243 1.271 matt l1slot = l1pte_index(va); 6244 1.271 matt *pdp = pdep = pmap_l1_kva(pm) + l1slot; 6245 1.271 matt pde = *pdep; 6246 1.1 matt 6247 1.271 matt if (l1pte_section_p(pde)) { 6248 1.134 thorpej *ptp = NULL; 6249 1.174 matt return true; 6250 1.1 matt } 6251 1.1 matt 6252 1.271 matt l2 = pm->pm_l2[L2_IDX(l1slot)]; 6253 1.134 thorpej if (l2 == NULL || 6254 1.271 matt (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { 6255 1.174 matt return false; 6256 1.29 rearnsha } 6257 1.21 chris 6258 1.134 thorpej *ptp = &ptep[l2pte_index(va)]; 6259 1.174 matt return true; 6260 1.1 matt } 6261 1.1 matt 6262 1.159 thorpej bool 6263 1.134 thorpej pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) 6264 1.1 matt { 6265 1.1 matt 6266 1.134 thorpej if (pm->pm_l1 == NULL) 6267 1.174 matt return false; 6268 1.50 thorpej 6269 1.271 matt *pdp = pmap_l1_kva(pm) + l1pte_index(va); 6270 1.50 thorpej 6271 1.174 matt return true; 6272 1.1 matt } 6273 1.1 matt 6274 1.134 thorpej /************************ Bootstrapping routines ****************************/ 6275 1.134 thorpej 6276 1.271 matt #ifndef ARM_MMU_EXTENDED 6277 1.134 thorpej static void 6278 1.134 thorpej pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) 6279 1.1 matt { 6280 1.134 thorpej int i; 6281 1.134 thorpej 6282 1.134 thorpej l1->l1_kva = l1pt; 6283 1.134 thorpej l1->l1_domain_use_count = 0; 6284 1.134 thorpej l1->l1_domain_first = 0; 6285 1.134 thorpej 6286 1.134 thorpej for (i = 0; i < PMAP_DOMAINS; i++) 6287 1.134 thorpej l1->l1_domain_free[i] = i + 1; 6288 1.1 matt 6289 1.134 thorpej /* 6290 1.134 thorpej * Copy the kernel's L1 entries to each new L1. 6291 1.134 thorpej */ 6292 1.134 thorpej if (pmap_initialized) 6293 1.258 matt memcpy(l1pt, pmap_l1_kva(pmap_kernel()), L1_TABLE_SIZE); 6294 1.50 thorpej 6295 1.134 thorpej if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, 6296 1.160 thorpej &l1->l1_physaddr) == false) 6297 1.134 thorpej panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); 6298 1.50 thorpej 6299 1.134 thorpej SLIST_INSERT_HEAD(&l1_list, l1, l1_link); 6300 1.134 thorpej TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 6301 1.1 matt } 6302 1.271 matt #endif /* !ARM_MMU_EXTENDED */ 6303 1.1 matt 6304 1.50 thorpej /* 6305 1.134 thorpej * pmap_bootstrap() is called from the board-specific initarm() routine 6306 1.134 thorpej * once the kernel L1/L2 descriptors tables have been set up. 6307 1.134 thorpej * 6308 1.134 thorpej * This is a somewhat convoluted process since pmap bootstrap is, effectively, 6309 1.134 thorpej * spread over a number of disparate files/functions. 6310 1.50 thorpej * 6311 1.134 thorpej * We are passed the following parameters 6312 1.134 thorpej * - vstart 6313 1.134 thorpej * 1MB-aligned start of managed kernel virtual memory. 6314 1.134 thorpej * - vend 6315 1.134 thorpej * 1MB-aligned end of managed kernel virtual memory. 6316 1.50 thorpej * 6317 1.371 skrll * We use 'kernel_l1pt' to build the metadata (struct l1_ttable and 6318 1.134 thorpej * struct l2_dtable) necessary to track kernel mappings. 
6319 1.50 thorpej */ 6320 1.134 thorpej #define PMAP_STATIC_L2_SIZE 16 6321 1.134 thorpej void 6322 1.174 matt pmap_bootstrap(vaddr_t vstart, vaddr_t vend) 6323 1.1 matt { 6324 1.271 matt static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; 6325 1.271 matt #ifndef ARM_MMU_EXTENDED 6326 1.134 thorpej static struct l1_ttable static_l1; 6327 1.134 thorpej struct l1_ttable *l1 = &static_l1; 6328 1.271 matt #endif 6329 1.134 thorpej struct l2_dtable *l2; 6330 1.134 thorpej struct l2_bucket *l2b; 6331 1.174 matt pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; 6332 1.134 thorpej pmap_t pm = pmap_kernel(); 6333 1.134 thorpej pt_entry_t *ptep; 6334 1.2 matt paddr_t pa; 6335 1.134 thorpej vsize_t size; 6336 1.271 matt int nptes, l2idx, l2next = 0; 6337 1.134 thorpej 6338 1.271 matt #ifdef ARM_MMU_EXTENDED 6339 1.271 matt KASSERT(pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt); 6340 1.271 matt KASSERT(pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt); 6341 1.271 matt #endif 6342 1.271 matt 6343 1.366 skrll VPRINTF("kpm "); 6344 1.134 thorpej /* 6345 1.134 thorpej * Initialise the kernel pmap object 6346 1.134 thorpej */ 6347 1.271 matt curcpu()->ci_pmap_cur = pm; 6348 1.434 skrll pm->pm_refs = 1; 6349 1.271 matt #ifdef ARM_MMU_EXTENDED 6350 1.271 matt pm->pm_l1 = l1pt; 6351 1.271 matt pm->pm_l1_pa = kernel_l1pt.pv_pa; 6352 1.366 skrll VPRINTF("tlb0 "); 6353 1.271 matt pmap_tlb_info_init(&pmap_tlb0_info); 6354 1.271 matt #ifdef MULTIPROCESSOR 6355 1.366 skrll VPRINTF("kcpusets "); 6356 1.271 matt pm->pm_onproc = kcpuset_running; 6357 1.271 matt pm->pm_active = kcpuset_running; 6358 1.271 matt #endif 6359 1.271 matt #else 6360 1.134 thorpej pm->pm_l1 = l1; 6361 1.271 matt #endif 6362 1.434 skrll mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM); 6363 1.222 rmind 6364 1.435 skrll 6365 1.435 skrll #if defined(EFI_RUNTIME) 6366 1.435 skrll VPRINTF("efirt "); 6367 1.435 skrll memset(&efirt_pmap, 0, sizeof(efirt_pmap)); 6368 1.435 skrll struct pmap * const efipm = &efirt_pmap; 6369 1.435 skrll struct pmap_asid_info * const efipai = PMAP_PAI(efipm, cpu_tlb_info(curcpu())); 6370 1.435 skrll 6371 1.435 skrll efipai->pai_asid = KERNEL_PID; 6372 1.435 skrll efipm->pm_refs = 1; 6373 1.435 skrll efipm->pm_stats.wired_count = 0; 6374 1.435 skrll efipm->pm_stats.resident_count = 1; 6375 1.435 skrll efipm->pm_l1 = (pd_entry_t *)efirt_l1pt.pv_va; 6376 1.435 skrll efipm->pm_l1_pa = efirt_l1pt.pv_pa; 6377 1.435 skrll // Needed? 6378 1.435 skrll #ifdef MULTIPROCESSOR 6379 1.435 skrll kcpuset_create(&efipm->pm_active, true); 6380 1.435 skrll kcpuset_create(&efipm->pm_onproc, true); 6381 1.435 skrll #endif 6382 1.438 skrll mutex_init(&efipm->pm_lock, MUTEX_DEFAULT, IPL_NONE); 6383 1.435 skrll #endif 6384 1.435 skrll 6385 1.366 skrll VPRINTF("locks "); 6386 1.373 bouyer /* 6387 1.373 bouyer * pmap_kenter_pa() and pmap_kremove() may be called from interrupt 6388 1.373 bouyer * context, so its locks have to be at IPL_VM 6389 1.373 bouyer */ 6390 1.373 bouyer mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_VM); 6391 1.373 bouyer mutex_init(&kpm_lock, MUTEX_DEFAULT, IPL_NONE); 6392 1.134 thorpej 6393 1.366 skrll VPRINTF("l1pt "); 6394 1.134 thorpej /* 6395 1.134 thorpej * Scan the L1 translation table created by initarm() and create 6396 1.134 thorpej * the required metadata for all valid mappings found in it. 
6397 1.134 thorpej */ 6398 1.275 matt for (size_t l1slot = 0; 6399 1.275 matt l1slot < L1_TABLE_SIZE / sizeof(pd_entry_t); 6400 1.271 matt l1slot++) { 6401 1.271 matt pd_entry_t pde = l1pt[l1slot]; 6402 1.134 thorpej 6403 1.134 thorpej /* 6404 1.134 thorpej * We're only interested in Coarse mappings. 6405 1.134 thorpej * pmap_extract() can deal with section mappings without 6406 1.134 thorpej * recourse to checking L2 metadata. 6407 1.134 thorpej */ 6408 1.134 thorpej if ((pde & L1_TYPE_MASK) != L1_TYPE_C) 6409 1.134 thorpej continue; 6410 1.134 thorpej 6411 1.134 thorpej /* 6412 1.134 thorpej * Lookup the KVA of this L2 descriptor table 6413 1.134 thorpej */ 6414 1.271 matt pa = l1pte_pa(pde); 6415 1.134 thorpej ptep = (pt_entry_t *)kernel_pt_lookup(pa); 6416 1.134 thorpej if (ptep == NULL) { 6417 1.134 thorpej panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", 6418 1.271 matt (u_int)l1slot << L1_S_SHIFT, pa); 6419 1.134 thorpej } 6420 1.134 thorpej 6421 1.134 thorpej /* 6422 1.134 thorpej * Fetch the associated L2 metadata structure. 6423 1.134 thorpej * Allocate a new one if necessary. 6424 1.134 thorpej */ 6425 1.271 matt if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL) { 6426 1.134 thorpej if (l2next == PMAP_STATIC_L2_SIZE) 6427 1.134 thorpej panic("pmap_bootstrap: out of static L2s"); 6428 1.271 matt pm->pm_l2[L2_IDX(l1slot)] = l2 = &static_l2[l2next++]; 6429 1.134 thorpej } 6430 1.134 thorpej 6431 1.134 thorpej /* 6432 1.134 thorpej * One more L1 slot tracked... 6433 1.134 thorpej */ 6434 1.134 thorpej l2->l2_occupancy++; 6435 1.134 thorpej 6436 1.134 thorpej /* 6437 1.134 thorpej * Fill in the details of the L2 descriptor in the 6438 1.134 thorpej * appropriate bucket. 6439 1.134 thorpej */ 6440 1.271 matt l2b = &l2->l2_bucket[L2_BUCKET(l1slot)]; 6441 1.134 thorpej l2b->l2b_kva = ptep; 6442 1.271 matt l2b->l2b_pa = pa; 6443 1.271 matt l2b->l2b_l1slot = l1slot; 6444 1.1 matt 6445 1.134 thorpej /* 6446 1.134 thorpej * Establish an initial occupancy count for this descriptor 6447 1.134 thorpej */ 6448 1.134 thorpej for (l2idx = 0; 6449 1.134 thorpej l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 6450 1.134 thorpej l2idx++) { 6451 1.134 thorpej if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { 6452 1.293 matt l2b->l2b_occupancy++; 6453 1.134 thorpej } 6454 1.134 thorpej } 6455 1.1 matt 6456 1.134 thorpej /* 6457 1.134 thorpej * Make sure the descriptor itself has the correct cache mode. 6458 1.146 jdolecek * If not, fix it, but whine about the problem. Port-meisters 6459 1.134 thorpej * should consider this a clue to fix up their initarm() 6460 1.134 thorpej * function. :) 6461 1.134 thorpej */ 6462 1.271 matt if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep, 1)) { 6463 1.134 thorpej printf("pmap_bootstrap: WARNING! wrong cache mode for " 6464 1.134 thorpej "L2 pte @ %p\n", ptep); 6465 1.134 thorpej } 6466 1.134 thorpej } 6467 1.61 thorpej 6468 1.366 skrll VPRINTF("cache(l1pt) "); 6469 1.134 thorpej /* 6470 1.134 thorpej * Ensure the primary (kernel) L1 has the correct cache mode for 6471 1.134 thorpej * a page table. Bitch if it is not correctly set. 6472 1.134 thorpej */ 6473 1.271 matt if (pmap_set_pt_cache_mode(l1pt, kernel_l1pt.pv_va, 6474 1.271 matt L1_TABLE_SIZE / L2_S_SIZE)) { 6475 1.271 matt printf("pmap_bootstrap: WARNING! 
wrong cache mode for " 6476 1.271 matt "primary L1 @ 0x%lx\n", kernel_l1pt.pv_va); 6477 1.1 matt } 6478 1.435 skrll #if defined(EFI_RUNTIME) 6479 1.435 skrll if (pmap_set_pt_cache_mode(l1pt, efirt_l1pt.pv_va, 6480 1.435 skrll L1_TABLE_SIZE / L2_S_SIZE)) { 6481 1.435 skrll printf("pmap_bootstrap: WARNING! wrong cache mode for " 6482 1.435 skrll "EFI RT L1 @ 0x%lx\n", efirt_l1pt.pv_va); 6483 1.435 skrll } 6484 1.435 skrll #endif 6485 1.1 matt 6486 1.271 matt #ifdef PMAP_CACHE_VIVT 6487 1.134 thorpej cpu_dcache_wbinv_all(); 6488 1.134 thorpej cpu_tlb_flushID(); 6489 1.134 thorpej cpu_cpwait(); 6490 1.271 matt #endif 6491 1.1 matt 6492 1.113 thorpej /* 6493 1.134 thorpej * now we allocate the "special" VAs which are used for tmp mappings 6494 1.134 thorpej * by the pmap (and other modules). we allocate the VAs by advancing 6495 1.134 thorpej * virtual_avail (note that there are no pages mapped at these VAs). 6496 1.134 thorpej * 6497 1.134 thorpej * Managed KVM space start from wherever initarm() tells us. 6498 1.113 thorpej */ 6499 1.134 thorpej virtual_avail = vstart; 6500 1.134 thorpej virtual_end = vend; 6501 1.113 thorpej 6502 1.366 skrll VPRINTF("specials "); 6503 1.416 skrll 6504 1.416 skrll pmap_alloc_specials(&virtual_avail, 1, &memhook, NULL); 6505 1.416 skrll 6506 1.174 matt #ifdef PMAP_CACHE_VIPT 6507 1.174 matt /* 6508 1.174 matt * If we have a VIPT cache, we need one page/pte per possible alias 6509 1.174 matt * page so we won't violate cache aliasing rules. 6510 1.174 matt */ 6511 1.286 skrll virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask; 6512 1.271 matt nptes = (arm_cache_prefer_mask >> L2_S_SHIFT) + 1; 6513 1.321 matt nptes = roundup(nptes, PAGE_SIZE / L2_S_SIZE); 6514 1.271 matt if (arm_pcache.icache_type != CACHE_TYPE_PIPT 6515 1.271 matt && arm_pcache.icache_way_size > nptes * L2_S_SIZE) { 6516 1.271 matt nptes = arm_pcache.icache_way_size >> L2_S_SHIFT; 6517 1.321 matt nptes = roundup(nptes, PAGE_SIZE / L2_S_SIZE); 6518 1.271 matt } 6519 1.174 matt #else 6520 1.271 matt nptes = PAGE_SIZE / L2_S_SIZE; 6521 1.271 matt #endif 6522 1.271 matt #ifdef MULTIPROCESSOR 6523 1.271 matt cnptes = nptes; 6524 1.271 matt nptes *= arm_cpu_max; 6525 1.174 matt #endif 6526 1.174 matt pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte); 6527 1.271 matt pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte, nptes); 6528 1.174 matt pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte); 6529 1.271 matt pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte, nptes); 6530 1.275 matt if (msgbufaddr == NULL) { 6531 1.275 matt pmap_alloc_specials(&virtual_avail, 6532 1.275 matt round_page(MSGBUFSIZE) / PAGE_SIZE, 6533 1.275 matt (void *)&msgbufaddr, NULL); 6534 1.275 matt } 6535 1.134 thorpej 6536 1.134 thorpej /* 6537 1.134 thorpej * Allocate a range of kernel virtual address space to be used 6538 1.134 thorpej * for L2 descriptor tables and metadata allocation in 6539 1.134 thorpej * pmap_growkernel(). 
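 * The range is sized from the not-yet-managed part of the kernel VA
 * space: one L2_TABLE_SIZE_REAL table per 1MB section, plus the
 * l2_dtable structures needed to track them.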
6540 1.134 thorpej */ 6541 1.415 skrll size = howmany(virtual_end - pmap_curmaxkvaddr, L1_S_SIZE); 6542 1.134 thorpej pmap_alloc_specials(&virtual_avail, 6543 1.134 thorpej round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, 6544 1.134 thorpej &pmap_kernel_l2ptp_kva, NULL); 6545 1.1 matt 6546 1.415 skrll size = howmany(size, L2_BUCKET_SIZE); 6547 1.134 thorpej pmap_alloc_specials(&virtual_avail, 6548 1.134 thorpej round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, 6549 1.134 thorpej &pmap_kernel_l2dtable_kva, NULL); 6550 1.1 matt 6551 1.271 matt #ifndef ARM_MMU_EXTENDED 6552 1.134 thorpej /* 6553 1.134 thorpej * init the static-global locks and global pmap list. 6554 1.134 thorpej */ 6555 1.226 matt mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM); 6556 1.1 matt 6557 1.134 thorpej /* 6558 1.134 thorpej * We can now initialise the first L1's metadata. 6559 1.134 thorpej */ 6560 1.134 thorpej SLIST_INIT(&l1_list); 6561 1.134 thorpej TAILQ_INIT(&l1_lru_list); 6562 1.174 matt pmap_init_l1(l1, l1pt); 6563 1.271 matt #endif /* ARM_MMU_EXTENDED */ 6564 1.1 matt 6565 1.257 matt #ifndef ARM_HAS_VBAR 6566 1.165 scw /* Set up vector page L1 details, if necessary */ 6567 1.165 scw if (vector_page < KERNEL_BASE) { 6568 1.271 matt pm->pm_pl1vec = pmap_l1_kva(pm) + l1pte_index(vector_page); 6569 1.165 scw l2b = pmap_get_l2_bucket(pm, vector_page); 6570 1.210 uebayasi KDASSERT(l2b != NULL); 6571 1.271 matt pm->pm_l1vec = l2b->l2b_pa | L1_C_PROTO | 6572 1.258 matt L1_C_DOM(pmap_domain(pm)); 6573 1.165 scw } else 6574 1.165 scw pm->pm_pl1vec = NULL; 6575 1.257 matt #endif 6576 1.165 scw 6577 1.366 skrll VPRINTF("pools "); 6578 1.1 matt /* 6579 1.168 ad * Initialize the pmap cache 6580 1.1 matt */ 6581 1.168 ad pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0, 6582 1.168 ad "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL); 6583 1.1 matt 6584 1.134 thorpej /* 6585 1.134 thorpej * Initialize the pv pool. 6586 1.134 thorpej */ 6587 1.134 thorpej pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl", 6588 1.162 ad &pmap_bootstrap_pv_allocator, IPL_NONE); 6589 1.29 rearnsha 6590 1.134 thorpej /* 6591 1.134 thorpej * Initialize the L2 dtable pool and cache. 
6592 1.134 thorpej */ 6593 1.168 ad pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0, 6594 1.168 ad 0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL); 6595 1.1 matt 6596 1.134 thorpej /* 6597 1.134 thorpej * Initialise the L2 descriptor table pool and cache 6598 1.134 thorpej */ 6599 1.367 skrll pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 6600 1.367 skrll L2_TABLE_SIZE_REAL, 0, 0, "l2ptppl", NULL, IPL_NONE, 6601 1.134 thorpej pmap_l2ptp_ctor, NULL, NULL); 6602 1.61 thorpej 6603 1.271 matt mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE); 6604 1.271 matt 6605 1.134 thorpej cpu_dcache_wbinv_all(); 6606 1.1 matt } 6607 1.1 matt 6608 1.271 matt static bool 6609 1.271 matt pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va, size_t nptes) 6610 1.1 matt { 6611 1.271 matt #ifdef ARM_MMU_EXTENDED 6612 1.271 matt return false; 6613 1.271 matt #else 6614 1.271 matt if (pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt 6615 1.271 matt && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt) 6616 1.271 matt return false; 6617 1.271 matt 6618 1.271 matt const vaddr_t eva = va + nptes * PAGE_SIZE; 6619 1.134 thorpej int rv = 0; 6620 1.134 thorpej 6621 1.271 matt while (va < eva) { 6622 1.271 matt /* 6623 1.271 matt * Make sure the descriptor itself has the correct cache mode 6624 1.271 matt */ 6625 1.271 matt pd_entry_t * const pdep = &kl1[l1pte_index(va)]; 6626 1.271 matt pd_entry_t pde = *pdep; 6627 1.134 thorpej 6628 1.271 matt if (l1pte_section_p(pde)) { 6629 1.374 skrll KASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0); 6630 1.271 matt if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { 6631 1.271 matt *pdep = (pde & ~L1_S_CACHE_MASK) | 6632 1.271 matt pte_l1_s_cache_mode_pt; 6633 1.271 matt PDE_SYNC(pdep); 6634 1.271 matt cpu_dcache_wbinv_range((vaddr_t)pdep, 6635 1.271 matt sizeof(*pdep)); 6636 1.271 matt rv = 1; 6637 1.271 matt } 6638 1.271 matt return rv; 6639 1.134 thorpej } 6640 1.271 matt vaddr_t pa = l1pte_pa(pde); 6641 1.271 matt pt_entry_t *ptep = (pt_entry_t *)kernel_pt_lookup(pa); 6642 1.134 thorpej if (ptep == NULL) 6643 1.271 matt panic("pmap_bootstrap: No PTP for va %#lx\n", va); 6644 1.134 thorpej 6645 1.271 matt ptep += l2pte_index(va); 6646 1.271 matt const pt_entry_t opte = *ptep; 6647 1.271 matt if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 6648 1.271 matt const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK) 6649 1.271 matt | pte_l2_s_cache_mode_pt; 6650 1.271 matt l2pte_set(ptep, npte, opte); 6651 1.134 thorpej PTE_SYNC(ptep); 6652 1.134 thorpej cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep)); 6653 1.134 thorpej rv = 1; 6654 1.134 thorpej } 6655 1.271 matt va += PAGE_SIZE; 6656 1.134 thorpej } 6657 1.134 thorpej 6658 1.387 skrll return rv; 6659 1.271 matt #endif 6660 1.134 thorpej } 6661 1.1 matt 6662 1.134 thorpej static void 6663 1.134 thorpej pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep) 6664 1.134 thorpej { 6665 1.134 thorpej vaddr_t va = *availp; 6666 1.134 thorpej struct l2_bucket *l2b; 6667 1.1 matt 6668 1.134 thorpej if (ptep) { 6669 1.134 thorpej l2b = pmap_get_l2_bucket(pmap_kernel(), va); 6670 1.134 thorpej if (l2b == NULL) 6671 1.134 thorpej panic("pmap_alloc_specials: no l2b for 0x%lx", va); 6672 1.62 thorpej 6673 1.351 skrll *ptep = &l2b->l2b_kva[l2pte_index(va)]; 6674 1.1 matt } 6675 1.1 matt 6676 1.134 thorpej *vap = va; 6677 1.134 thorpej *availp = va + (PAGE_SIZE * pages); 6678 1.134 thorpej } 6679 1.134 thorpej 6680 1.134 thorpej void 6681 1.134 thorpej pmap_init(void) 
6682 1.134 thorpej { 6683 1.1 matt 6684 1.113 thorpej /* 6685 1.134 thorpej * Set the available memory vars - These do not map to real memory 6686 1.134 thorpej * addresses and cannot as the physical memory is fragmented. 6687 1.134 thorpej * They are used by ps for %mem calculations. 6688 1.134 thorpej * One could argue whether this should be the entire memory or just 6689 1.134 thorpej * the memory that is useable in a user process. 6690 1.113 thorpej */ 6691 1.342 cherry avail_start = ptoa(uvm_physseg_get_avail_start(uvm_physseg_get_first())); 6692 1.342 cherry avail_end = ptoa(uvm_physseg_get_avail_end(uvm_physseg_get_last())); 6693 1.63 thorpej 6694 1.1 matt /* 6695 1.134 thorpej * Now we need to free enough pv_entry structures to allow us to get 6696 1.134 thorpej * the kmem_map/kmem_object allocated and inited (done after this 6697 1.134 thorpej * function is finished). to do this we allocate one bootstrap page out 6698 1.134 thorpej * of kernel_map and use it to provide an initial pool of pv_entry 6699 1.134 thorpej * structures. we never free this page. 6700 1.1 matt */ 6701 1.271 matt pool_setlowat(&pmap_pv_pool, (PAGE_SIZE / sizeof(struct pv_entry)) * 2); 6702 1.62 thorpej 6703 1.271 matt #ifdef ARM_MMU_EXTENDED 6704 1.380 skrll /* 6705 1.380 skrll * Initialise the L1 pool and cache. 6706 1.380 skrll */ 6707 1.380 skrll 6708 1.380 skrll pool_cache_bootstrap(&pmap_l1tt_cache, L1TT_SIZE, L1TT_SIZE, 6709 1.380 skrll 0, 0, "l1ttpl", &pmap_l1tt_allocator, IPL_NONE, pmap_l1tt_ctor, 6710 1.380 skrll NULL, NULL); 6711 1.380 skrll 6712 1.380 skrll int error __diagused = pmap_maxproc_set(maxproc); 6713 1.380 skrll KASSERT(error == 0); 6714 1.380 skrll 6715 1.271 matt pmap_tlb_info_evcnt_attach(&pmap_tlb0_info); 6716 1.271 matt #endif 6717 1.191 matt 6718 1.160 thorpej pmap_initialized = true; 6719 1.1 matt } 6720 1.17 chris 6721 1.134 thorpej static vaddr_t last_bootstrap_page = 0; 6722 1.134 thorpej static void *free_bootstrap_pages = NULL; 6723 1.1 matt 6724 1.134 thorpej static void * 6725 1.134 thorpej pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags) 6726 1.1 matt { 6727 1.134 thorpej extern void *pool_page_alloc(struct pool *, int); 6728 1.134 thorpej vaddr_t new_page; 6729 1.134 thorpej void *rv; 6730 1.134 thorpej 6731 1.134 thorpej if (pmap_initialized) 6732 1.387 skrll return pool_page_alloc(pp, flags); 6733 1.134 thorpej 6734 1.134 thorpej if (free_bootstrap_pages) { 6735 1.134 thorpej rv = free_bootstrap_pages; 6736 1.134 thorpej free_bootstrap_pages = *((void **)rv); 6737 1.387 skrll return rv; 6738 1.134 thorpej } 6739 1.134 thorpej 6740 1.271 matt KASSERT(kernel_map != NULL); 6741 1.151 yamt new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 6742 1.151 yamt UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 
0 : UVM_KMF_NOWAIT)); 6743 1.1 matt 6744 1.134 thorpej KASSERT(new_page > last_bootstrap_page); 6745 1.134 thorpej last_bootstrap_page = new_page; 6746 1.387 skrll return (void *)new_page; 6747 1.17 chris } 6748 1.17 chris 6749 1.134 thorpej static void 6750 1.134 thorpej pmap_bootstrap_pv_page_free(struct pool *pp, void *v) 6751 1.17 chris { 6752 1.134 thorpej extern void pool_page_free(struct pool *, void *); 6753 1.17 chris 6754 1.150 joff if ((vaddr_t)v <= last_bootstrap_page) { 6755 1.150 joff *((void **)v) = free_bootstrap_pages; 6756 1.150 joff free_bootstrap_pages = v; 6757 1.134 thorpej return; 6758 1.134 thorpej } 6759 1.114 thorpej 6760 1.150 joff if (pmap_initialized) { 6761 1.150 joff pool_page_free(pp, v); 6762 1.134 thorpej return; 6763 1.57 thorpej } 6764 1.17 chris } 6765 1.17 chris 6766 1.380 skrll 6767 1.380 skrll #if defined(ARM_MMU_EXTENDED) 6768 1.380 skrll static void * 6769 1.380 skrll pmap_l1tt_alloc(struct pool *pp, int flags) 6770 1.380 skrll { 6771 1.380 skrll struct pglist plist; 6772 1.380 skrll vaddr_t va; 6773 1.380 skrll 6774 1.380 skrll const int waitok = flags & PR_WAITOK; 6775 1.380 skrll 6776 1.380 skrll int error = uvm_pglistalloc(L1TT_SIZE, 0, -1, L1TT_SIZE, 0, &plist, 1, 6777 1.380 skrll waitok); 6778 1.380 skrll if (error) 6779 1.380 skrll panic("Cannot allocate L1TT physical pages, %d", error); 6780 1.380 skrll 6781 1.380 skrll struct vm_page *pg = TAILQ_FIRST(&plist); 6782 1.380 skrll #if !defined( __HAVE_MM_MD_DIRECT_MAPPED_PHYS) 6783 1.380 skrll 6784 1.380 skrll /* Allocate a L1 translation table VA */ 6785 1.380 skrll va = uvm_km_alloc(kernel_map, L1TT_SIZE, L1TT_SIZE, UVM_KMF_VAONLY); 6786 1.380 skrll if (va == 0) 6787 1.380 skrll panic("Cannot allocate L1TT KVA"); 6788 1.380 skrll 6789 1.380 skrll const vaddr_t eva = va + L1TT_SIZE; 6790 1.380 skrll vaddr_t mva = va; 6791 1.380 skrll while (pg && mva < eva) { 6792 1.380 skrll paddr_t pa = VM_PAGE_TO_PHYS(pg); 6793 1.380 skrll 6794 1.380 skrll pmap_kenter_pa(mva, pa, 6795 1.380 skrll VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE); 6796 1.380 skrll 6797 1.380 skrll mva += PAGE_SIZE; 6798 1.380 skrll pg = TAILQ_NEXT(pg, pageq.queue); 6799 1.380 skrll } 6800 1.380 skrll KASSERTMSG(pg == NULL && mva == eva, "pg %p mva %" PRIxVADDR 6801 1.380 skrll " eva %" PRIxVADDR, pg, mva, eva); 6802 1.380 skrll #else 6803 1.380 skrll bool ok; 6804 1.380 skrll paddr_t pa = VM_PAGE_TO_PHYS(pg); 6805 1.380 skrll va = pmap_direct_mapped_phys(pa, &ok, 0); 6806 1.380 skrll KASSERT(ok); 6807 1.380 skrll KASSERT(va >= KERNEL_BASE); 6808 1.380 skrll #endif 6809 1.380 skrll 6810 1.380 skrll return (void *)va; 6811 1.380 skrll } 6812 1.380 skrll 6813 1.380 skrll static void 6814 1.380 skrll pmap_l1tt_free(struct pool *pp, void *v) 6815 1.380 skrll { 6816 1.380 skrll vaddr_t va = (vaddr_t)v; 6817 1.380 skrll 6818 1.380 skrll #if !defined( __HAVE_MM_MD_DIRECT_MAPPED_PHYS) 6819 1.382 skrll uvm_km_free(kernel_map, va, L1TT_SIZE, UVM_KMF_WIRED); 6820 1.380 skrll #else 6821 1.382 skrll #if defined(KERNEL_BASE_VOFFSET) 6822 1.382 skrll paddr_t pa = va - KERNEL_BASE_VOFFSET; 6823 1.382 skrll #else 6824 1.382 skrll paddr_t pa = va - KERNEL_BASE + physical_start; 6825 1.382 skrll #endif 6826 1.380 skrll const paddr_t epa = pa + L1TT_SIZE; 6827 1.380 skrll 6828 1.380 skrll for (; pa < epa; pa += PAGE_SIZE) { 6829 1.380 skrll struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 6830 1.380 skrll uvm_pagefree(pg); 6831 1.380 skrll } 6832 1.380 skrll #endif 6833 1.380 skrll } 6834 1.380 skrll #endif 6835 1.380 skrll 6836 1.17 chris /* 6837 
1.134 thorpej * pmap_postinit() 6838 1.17 chris * 6839 1.134 thorpej * This routine is called after the vm and kmem subsystems have been 6840 1.134 thorpej * initialised. This allows the pmap code to perform any initialisation 6841 1.341 flxd * that can only be done once the memory allocation is in place. 6842 1.17 chris */ 6843 1.134 thorpej void 6844 1.134 thorpej pmap_postinit(void) 6845 1.17 chris { 6846 1.271 matt #ifndef ARM_MMU_EXTENDED 6847 1.134 thorpej extern paddr_t physical_start, physical_end; 6848 1.134 thorpej struct l1_ttable *l1; 6849 1.134 thorpej struct pglist plist; 6850 1.134 thorpej struct vm_page *m; 6851 1.271 matt pd_entry_t *pdep; 6852 1.134 thorpej vaddr_t va, eva; 6853 1.134 thorpej u_int loop, needed; 6854 1.134 thorpej int error; 6855 1.271 matt #endif 6856 1.114 thorpej 6857 1.271 matt pool_cache_setlowat(&pmap_l2ptp_cache, (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4); 6858 1.169 matt pool_cache_setlowat(&pmap_l2dtable_cache, 6859 1.134 thorpej (PAGE_SIZE / sizeof(struct l2_dtable)) * 2); 6860 1.17 chris 6861 1.271 matt #ifndef ARM_MMU_EXTENDED 6862 1.134 thorpej needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0); 6863 1.134 thorpej needed -= 1; 6864 1.48 chris 6865 1.225 para l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP); 6866 1.48 chris 6867 1.134 thorpej for (loop = 0; loop < needed; loop++, l1++) { 6868 1.134 thorpej /* Allocate a L1 page table */ 6869 1.151 yamt va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY); 6870 1.134 thorpej if (va == 0) 6871 1.134 thorpej panic("Cannot allocate L1 KVM"); 6872 1.134 thorpej 6873 1.134 thorpej error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, 6874 1.225 para physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1); 6875 1.134 thorpej if (error) 6876 1.134 thorpej panic("Cannot allocate L1 physical pages"); 6877 1.134 thorpej 6878 1.134 thorpej m = TAILQ_FIRST(&plist); 6879 1.134 thorpej eva = va + L1_TABLE_SIZE; 6880 1.271 matt pdep = (pd_entry_t *)va; 6881 1.48 chris 6882 1.134 thorpej while (m && va < eva) { 6883 1.134 thorpej paddr_t pa = VM_PAGE_TO_PHYS(m); 6884 1.48 chris 6885 1.182 matt pmap_kenter_pa(va, pa, 6886 1.265 matt VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE); 6887 1.48 chris 6888 1.134 thorpej va += PAGE_SIZE; 6889 1.176 ad m = TAILQ_NEXT(m, pageq.queue); 6890 1.48 chris } 6891 1.48 chris 6892 1.134 thorpej #ifdef DIAGNOSTIC 6893 1.134 thorpej if (m) 6894 1.134 thorpej panic("pmap_alloc_l1pt: pglist not empty"); 6895 1.134 thorpej #endif /* DIAGNOSTIC */ 6896 1.48 chris 6897 1.271 matt pmap_init_l1(l1, pdep); 6898 1.48 chris } 6899 1.48 chris 6900 1.134 thorpej #ifdef DEBUG 6901 1.134 thorpej printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", 6902 1.134 thorpej needed); 6903 1.134 thorpej #endif 6904 1.271 matt #endif /* !ARM_MMU_EXTENDED */ 6905 1.48 chris } 6906 1.48 chris 6907 1.76 thorpej /* 6908 1.134 thorpej * Note that the following routines are used by board-specific initialisation 6909 1.134 thorpej * code to configure the initial kernel page tables. 6910 1.134 thorpej * 6911 1.76 thorpej */ 6912 1.40 thorpej 6913 1.40 thorpej /* 6914 1.46 thorpej * This list exists for the benefit of pmap_map_chunk(). It keeps track 6915 1.46 thorpej * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can 6916 1.46 thorpej * find them as necessary. 6917 1.46 thorpej * 6918 1.134 thorpej * Note that the data on this list MUST remain valid after initarm() returns, 6919 1.393 skrll * as pmap_bootstrap() uses it to construct L2 table metadata. 
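 * In practice this means the pv_addr_t structures handed to
 * pmap_link_l2pt() must not live on the initarm() stack or in any
 * other storage that is reclaimed once initarm() returns.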
6920 1.46 thorpej */ 6921 1.46 thorpej SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); 6922 1.46 thorpej 6923 1.46 thorpej static vaddr_t 6924 1.46 thorpej kernel_pt_lookup(paddr_t pa) 6925 1.46 thorpej { 6926 1.46 thorpej pv_addr_t *pv; 6927 1.46 thorpej 6928 1.46 thorpej SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { 6929 1.134 thorpej if (pv->pv_pa == (pa & ~PGOFSET)) 6930 1.387 skrll return pv->pv_va | (pa & PGOFSET); 6931 1.46 thorpej } 6932 1.387 skrll return 0; 6933 1.46 thorpej } 6934 1.46 thorpej 6935 1.46 thorpej /* 6936 1.40 thorpej * pmap_map_section: 6937 1.40 thorpej * 6938 1.40 thorpej * Create a single section mapping. 6939 1.40 thorpej */ 6940 1.40 thorpej void 6941 1.40 thorpej pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 6942 1.40 thorpej { 6943 1.271 matt pd_entry_t * const pdep = (pd_entry_t *) l1pt; 6944 1.271 matt const size_t l1slot = l1pte_index(va); 6945 1.134 thorpej pd_entry_t fl; 6946 1.40 thorpej 6947 1.81 thorpej KASSERT(((va | pa) & L1_S_OFFSET) == 0); 6948 1.40 thorpej 6949 1.134 thorpej switch (cache) { 6950 1.134 thorpej case PTE_NOCACHE: 6951 1.388 skrll fl = pte_l1_s_nocache_mode; 6952 1.134 thorpej break; 6953 1.134 thorpej 6954 1.134 thorpej case PTE_CACHE: 6955 1.134 thorpej fl = pte_l1_s_cache_mode; 6956 1.134 thorpej break; 6957 1.134 thorpej 6958 1.134 thorpej case PTE_PAGETABLE: 6959 1.134 thorpej fl = pte_l1_s_cache_mode_pt; 6960 1.134 thorpej break; 6961 1.388 skrll 6962 1.388 skrll case PTE_DEV: 6963 1.388 skrll default: 6964 1.388 skrll fl = 0; 6965 1.388 skrll break; 6966 1.134 thorpej } 6967 1.134 thorpej 6968 1.271 matt const pd_entry_t npde = L1_S_PROTO | pa | 6969 1.134 thorpej L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); 6970 1.271 matt l1pte_setone(pdep + l1slot, npde); 6971 1.271 matt PDE_SYNC(pdep + l1slot); 6972 1.41 thorpej } 6973 1.41 thorpej 6974 1.41 thorpej /* 6975 1.41 thorpej * pmap_map_entry: 6976 1.41 thorpej * 6977 1.41 thorpej * Create a single page mapping. 
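 * The L2 table covering "va" must already have been linked into the
 * L1 with pmap_link_l2pt(); otherwise this routine panics.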
6978 1.41 thorpej */ 6979 1.41 thorpej void 6980 1.47 thorpej pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 6981 1.41 thorpej { 6982 1.271 matt pd_entry_t * const pdep = (pd_entry_t *) l1pt; 6983 1.271 matt const size_t l1slot = l1pte_index(va); 6984 1.262 matt pt_entry_t npte; 6985 1.262 matt pt_entry_t *ptep; 6986 1.41 thorpej 6987 1.41 thorpej KASSERT(((va | pa) & PGOFSET) == 0); 6988 1.41 thorpej 6989 1.134 thorpej switch (cache) { 6990 1.134 thorpej case PTE_NOCACHE: 6991 1.388 skrll npte = pte_l2_s_nocache_mode; 6992 1.134 thorpej break; 6993 1.134 thorpej 6994 1.134 thorpej case PTE_CACHE: 6995 1.262 matt npte = pte_l2_s_cache_mode; 6996 1.134 thorpej break; 6997 1.134 thorpej 6998 1.134 thorpej case PTE_PAGETABLE: 6999 1.262 matt npte = pte_l2_s_cache_mode_pt; 7000 1.134 thorpej break; 7001 1.388 skrll 7002 1.388 skrll default: 7003 1.388 skrll npte = 0; 7004 1.388 skrll break; 7005 1.134 thorpej } 7006 1.134 thorpej 7007 1.271 matt if ((pdep[l1slot] & L1_TYPE_MASK) != L1_TYPE_C) 7008 1.47 thorpej panic("pmap_map_entry: no L2 table for VA 0x%08lx", va); 7009 1.47 thorpej 7010 1.275 matt ptep = (pt_entry_t *) kernel_pt_lookup(l1pte_pa(pdep[l1slot])); 7011 1.262 matt if (ptep == NULL) 7012 1.47 thorpej panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va); 7013 1.47 thorpej 7014 1.262 matt npte |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot); 7015 1.271 matt #ifdef ARM_MMU_EXTENDED 7016 1.271 matt if (prot & VM_PROT_EXECUTE) { 7017 1.271 matt npte &= ~L2_XS_XN; 7018 1.271 matt } 7019 1.271 matt #endif 7020 1.262 matt ptep += l2pte_index(va); 7021 1.262 matt l2pte_set(ptep, npte, 0); 7022 1.262 matt PTE_SYNC(ptep); 7023 1.42 thorpej } 7024 1.42 thorpej 7025 1.42 thorpej /* 7026 1.42 thorpej * pmap_link_l2pt: 7027 1.42 thorpej * 7028 1.134 thorpej * Link the L2 page table specified by "l2pv" into the L1 7029 1.42 thorpej * page table at the slot for "va". 7030 1.42 thorpej */ 7031 1.42 thorpej void 7032 1.46 thorpej pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv) 7033 1.42 thorpej { 7034 1.271 matt pd_entry_t * const pdep = (pd_entry_t *) l1pt + l1pte_index(va); 7035 1.42 thorpej 7036 1.271 matt KASSERT((va & ((L1_S_SIZE * (PAGE_SIZE / L2_T_SIZE)) - 1)) == 0); 7037 1.46 thorpej KASSERT((l2pv->pv_pa & PGOFSET) == 0); 7038 1.46 thorpej 7039 1.352 skrll const pd_entry_t npde = L1_C_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO 7040 1.271 matt | l2pv->pv_pa; 7041 1.134 thorpej 7042 1.271 matt l1pte_set(pdep, npde); 7043 1.271 matt PDE_SYNC_RANGE(pdep, PAGE_SIZE / L2_T_SIZE); 7044 1.42 thorpej 7045 1.46 thorpej SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); 7046 1.43 thorpej } 7047 1.43 thorpej 7048 1.43 thorpej /* 7049 1.43 thorpej * pmap_map_chunk: 7050 1.43 thorpej * 7051 1.43 thorpej * Map a chunk of memory using the most efficient mappings 7052 1.43 thorpej * possible (section, large page, small page) into the 7053 1.43 thorpej * provided L1 and L2 tables at the specified virtual address. 
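 *
 *	The routine walks the range picking, at each step, the largest
 *	mapping the current VA/PA alignment and remaining length allow:
 *	a 16 MB supersection (extended-MMU configurations only) or a 1 MB
 *	section directly in the L1 table, otherwise a 64 KB large page or
 *	a 4 KB small page in the L2 table previously registered for that
 *	address with pmap_link_l2pt().
 *
 *	A hypothetical bootstrap use (the argument values are examples
 *	only):
 *
 *		// Map 5 MB of cacheable kernel memory.  With both
 *		// addresses section-aligned this becomes five 1 MB
 *		// section entries.
 *		pmap_map_chunk(l1pt_va, KERNEL_BASE, physical_start,
 *		    5 * 1024 * 1024, VM_PROT_READ | VM_PROT_WRITE,
 *		    PTE_CACHE);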
7054 1.43 thorpej */ 7055 1.43 thorpej vsize_t 7056 1.46 thorpej pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size, 7057 1.46 thorpej int prot, int cache) 7058 1.43 thorpej { 7059 1.271 matt pd_entry_t * const pdep = (pd_entry_t *) l1pt; 7060 1.271 matt pt_entry_t f1, f2s, f2l; 7061 1.286 skrll vsize_t resid; 7062 1.43 thorpej 7063 1.130 thorpej resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 7064 1.43 thorpej 7065 1.44 thorpej if (l1pt == 0) 7066 1.44 thorpej panic("pmap_map_chunk: no L1 table provided"); 7067 1.44 thorpej 7068 1.370 skrll // VPRINTF("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx " 7069 1.370 skrll // "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); 7070 1.43 thorpej 7071 1.134 thorpej switch (cache) { 7072 1.134 thorpej case PTE_NOCACHE: 7073 1.388 skrll f1 = pte_l1_s_nocache_mode; 7074 1.388 skrll f2l = pte_l2_l_nocache_mode; 7075 1.388 skrll f2s = pte_l2_s_nocache_mode; 7076 1.134 thorpej break; 7077 1.134 thorpej 7078 1.134 thorpej case PTE_CACHE: 7079 1.134 thorpej f1 = pte_l1_s_cache_mode; 7080 1.134 thorpej f2l = pte_l2_l_cache_mode; 7081 1.134 thorpej f2s = pte_l2_s_cache_mode; 7082 1.134 thorpej break; 7083 1.134 thorpej 7084 1.134 thorpej case PTE_PAGETABLE: 7085 1.134 thorpej f1 = pte_l1_s_cache_mode_pt; 7086 1.134 thorpej f2l = pte_l2_l_cache_mode_pt; 7087 1.134 thorpej f2s = pte_l2_s_cache_mode_pt; 7088 1.134 thorpej break; 7089 1.388 skrll 7090 1.388 skrll case PTE_DEV: 7091 1.388 skrll default: 7092 1.388 skrll f1 = 0; 7093 1.388 skrll f2l = 0; 7094 1.388 skrll f2s = 0; 7095 1.388 skrll break; 7096 1.134 thorpej } 7097 1.134 thorpej 7098 1.43 thorpej size = resid; 7099 1.43 thorpej 7100 1.43 thorpej while (resid > 0) { 7101 1.271 matt const size_t l1slot = l1pte_index(va); 7102 1.370 skrll #ifdef ARM_MMU_EXTENDED 7103 1.230 matt /* See if we can use a supersection mapping. */ 7104 1.230 matt if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) { 7105 1.230 matt /* Supersection are always domain 0 */ 7106 1.271 matt const pd_entry_t npde = L1_SS_PROTO | pa 7107 1.271 matt | ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN) 7108 1.284 matt | (va & 0x80000000 ? 0 : L1_S_V6_nG) 7109 1.271 matt | L1_S_PROT(PTE_KERNEL, prot) | f1; 7110 1.366 skrll VPRINTF("sS"); 7111 1.271 matt l1pte_set(&pdep[l1slot], npde); 7112 1.271 matt PDE_SYNC_RANGE(&pdep[l1slot], L1_SS_SIZE / L1_S_SIZE); 7113 1.370 skrll // VPRINTF("\npmap_map_chunk: pa=0x%lx va=0x%lx resid=0x%08lx " 7114 1.370 skrll // "npdep=%p pde=0x%x\n", pa, va, resid, &pdep[l1slot], npde); 7115 1.230 matt va += L1_SS_SIZE; 7116 1.230 matt pa += L1_SS_SIZE; 7117 1.230 matt resid -= L1_SS_SIZE; 7118 1.230 matt continue; 7119 1.230 matt } 7120 1.230 matt #endif 7121 1.43 thorpej /* See if we can use a section mapping. */ 7122 1.134 thorpej if (L1_S_MAPPABLE_P(va, pa, resid)) { 7123 1.271 matt const pd_entry_t npde = L1_S_PROTO | pa 7124 1.331 skrll #ifdef ARM_MMU_EXTENDED 7125 1.271 matt | ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN) 7126 1.284 matt | (va & 0x80000000 ? 
0 : L1_S_V6_nG) 7127 1.284 matt #endif 7128 1.271 matt | L1_S_PROT(PTE_KERNEL, prot) | f1 7129 1.271 matt | L1_S_DOM(PMAP_DOMAIN_KERNEL); 7130 1.366 skrll VPRINTF("S"); 7131 1.271 matt l1pte_set(&pdep[l1slot], npde); 7132 1.271 matt PDE_SYNC(&pdep[l1slot]); 7133 1.370 skrll // VPRINTF("\npmap_map_chunk: pa=0x%lx va=0x%lx resid=0x%08lx " 7134 1.370 skrll // "npdep=%p pde=0x%x\n", pa, va, resid, &pdep[l1slot], npde); 7135 1.81 thorpej va += L1_S_SIZE; 7136 1.81 thorpej pa += L1_S_SIZE; 7137 1.81 thorpej resid -= L1_S_SIZE; 7138 1.43 thorpej continue; 7139 1.43 thorpej } 7140 1.45 thorpej 7141 1.45 thorpej /* 7142 1.45 thorpej * Ok, we're going to use an L2 table. Make sure 7143 1.45 thorpej * one is actually in the corresponding L1 slot 7144 1.45 thorpej * for the current VA. 7145 1.45 thorpej */ 7146 1.271 matt if ((pdep[l1slot] & L1_TYPE_MASK) != L1_TYPE_C) 7147 1.271 matt panic("%s: no L2 table for VA %#lx", __func__, va); 7148 1.46 thorpej 7149 1.271 matt pt_entry_t *ptep = (pt_entry_t *) kernel_pt_lookup(l1pte_pa(pdep[l1slot])); 7150 1.271 matt if (ptep == NULL) 7151 1.271 matt panic("%s: can't find L2 table for VA %#lx", __func__, 7152 1.271 matt va); 7153 1.271 matt 7154 1.271 matt ptep += l2pte_index(va); 7155 1.43 thorpej 7156 1.43 thorpej /* See if we can use a L2 large page mapping. */ 7157 1.134 thorpej if (L2_L_MAPPABLE_P(va, pa, resid)) { 7158 1.271 matt const pt_entry_t npte = L2_L_PROTO | pa 7159 1.331 skrll #ifdef ARM_MMU_EXTENDED 7160 1.271 matt | ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_L_XN) 7161 1.284 matt | (va & 0x80000000 ? 0 : L2_XS_nG) 7162 1.284 matt #endif 7163 1.271 matt | L2_L_PROT(PTE_KERNEL, prot) | f2l; 7164 1.366 skrll VPRINTF("L"); 7165 1.271 matt l2pte_set(ptep, npte, 0); 7166 1.271 matt PTE_SYNC_RANGE(ptep, L2_L_SIZE / L2_S_SIZE); 7167 1.81 thorpej va += L2_L_SIZE; 7168 1.81 thorpej pa += L2_L_SIZE; 7169 1.81 thorpej resid -= L2_L_SIZE; 7170 1.43 thorpej continue; 7171 1.43 thorpej } 7172 1.43 thorpej 7173 1.366 skrll VPRINTF("P"); 7174 1.331 skrll /* Use a small page mapping. */ 7175 1.331 skrll pt_entry_t npte = L2_S_PROTO | pa 7176 1.331 skrll #ifdef ARM_MMU_EXTENDED 7177 1.271 matt | ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_XN) 7178 1.331 skrll | (va & 0x80000000 ? 0 : L2_XS_nG) 7179 1.134 thorpej #endif 7180 1.331 skrll | L2_S_PROT(PTE_KERNEL, prot) | f2s; 7181 1.284 matt #ifdef ARM_MMU_EXTENDED 7182 1.331 skrll npte &= ((prot & VM_PROT_EXECUTE) ? 
~L2_XS_XN : ~0); 7183 1.284 matt #endif 7184 1.262 matt l2pte_set(ptep, npte, 0); 7185 1.262 matt PTE_SYNC(ptep); 7186 1.130 thorpej va += PAGE_SIZE; 7187 1.130 thorpej pa += PAGE_SIZE; 7188 1.130 thorpej resid -= PAGE_SIZE; 7189 1.43 thorpej } 7190 1.366 skrll VPRINTF("\n"); 7191 1.387 skrll return size; 7192 1.135 thorpej } 7193 1.135 thorpej 7194 1.370 skrll /* 7195 1.370 skrll * pmap_unmap_chunk: 7196 1.370 skrll * 7197 1.370 skrll * Unmap a chunk of memory that was previously pmap_map_chunk 7198 1.370 skrll */ 7199 1.370 skrll void 7200 1.370 skrll pmap_unmap_chunk(vaddr_t l1pt, vaddr_t va, vsize_t size) 7201 1.370 skrll { 7202 1.370 skrll pd_entry_t * const pdep = (pd_entry_t *) l1pt; 7203 1.370 skrll const size_t l1slot = l1pte_index(va); 7204 1.370 skrll 7205 1.370 skrll KASSERT(size == L1_SS_SIZE || size == L1_S_SIZE); 7206 1.370 skrll 7207 1.370 skrll l1pte_set(&pdep[l1slot], 0); 7208 1.370 skrll PDE_SYNC_RANGE(&pdep[l1slot], size / L1_S_SIZE); 7209 1.370 skrll 7210 1.370 skrll pmap_tlb_flush_SE(pmap_kernel(), va, PVF_REF); 7211 1.370 skrll } 7212 1.370 skrll 7213 1.370 skrll 7214 1.439 skrll vsize_t 7215 1.439 skrll pmap_kenter_range(vaddr_t va, paddr_t pa, vsize_t size, vm_prot_t prot, 7216 1.439 skrll u_int flags) 7217 1.136 thorpej { 7218 1.439 skrll const vaddr_t root = pmap_devmap_root(); 7219 1.136 thorpej 7220 1.439 skrll int cache; 7221 1.439 skrll switch (flags) { 7222 1.439 skrll case PMAP_DEV: 7223 1.439 skrll cache = PTE_DEV; 7224 1.439 skrll break; 7225 1.439 skrll case PMAP_NOCACHE: 7226 1.439 skrll cache = PTE_NOCACHE; 7227 1.439 skrll break; 7228 1.439 skrll default: 7229 1.439 skrll cache = PTE_CACHE; 7230 1.439 skrll break; 7231 1.135 thorpej } 7232 1.135 thorpej 7233 1.439 skrll return pmap_map_chunk(root, va, pa, size, prot, cache); 7234 1.135 thorpej } 7235 1.135 thorpej 7236 1.85 thorpej 7237 1.85 thorpej /********************** PTE initialization routines **************************/ 7238 1.85 thorpej 7239 1.85 thorpej /* 7240 1.85 thorpej * These routines are called when the CPU type is identified to set up 7241 1.85 thorpej * the PTE prototypes, cache modes, etc. 7242 1.85 thorpej * 7243 1.190 ad * The variables are always here, just in case modules need to reference 7244 1.85 thorpej * them (though, they shouldn't). 
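 *
 * A rough, illustrative sketch of the flow (the call is normally made
 * from the CPU-identification code, and which init function is chosen
 * depends on the CPU that was detected):
 *
 *	// once the CPU type is known, e.g. on an ARMv7 core:
 *	pmap_pte_init_armv7();
 *
 *	// afterwards mapping code composes PTEs from these variables,
 *	// much as pmap_map_chunk() above does, roughly:
 *	//	npte = L2_S_PROTO | pa
 *	//	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE)
 *	//	    | pte_l2_s_cache_mode;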
7245 1.85 thorpej */ 7246 1.85 thorpej 7247 1.388 skrll pt_entry_t pte_l1_s_nocache_mode; 7248 1.86 thorpej pt_entry_t pte_l1_s_cache_mode; 7249 1.220 macallan pt_entry_t pte_l1_s_wc_mode; 7250 1.134 thorpej pt_entry_t pte_l1_s_cache_mode_pt; 7251 1.86 thorpej pt_entry_t pte_l1_s_cache_mask; 7252 1.86 thorpej 7253 1.388 skrll pt_entry_t pte_l2_l_nocache_mode; 7254 1.86 thorpej pt_entry_t pte_l2_l_cache_mode; 7255 1.220 macallan pt_entry_t pte_l2_l_wc_mode; 7256 1.134 thorpej pt_entry_t pte_l2_l_cache_mode_pt; 7257 1.86 thorpej pt_entry_t pte_l2_l_cache_mask; 7258 1.86 thorpej 7259 1.388 skrll pt_entry_t pte_l2_s_nocache_mode; 7260 1.86 thorpej pt_entry_t pte_l2_s_cache_mode; 7261 1.220 macallan pt_entry_t pte_l2_s_wc_mode; 7262 1.134 thorpej pt_entry_t pte_l2_s_cache_mode_pt; 7263 1.86 thorpej pt_entry_t pte_l2_s_cache_mask; 7264 1.85 thorpej 7265 1.214 jmcneill pt_entry_t pte_l1_s_prot_u; 7266 1.214 jmcneill pt_entry_t pte_l1_s_prot_w; 7267 1.214 jmcneill pt_entry_t pte_l1_s_prot_ro; 7268 1.214 jmcneill pt_entry_t pte_l1_s_prot_mask; 7269 1.214 jmcneill 7270 1.85 thorpej pt_entry_t pte_l2_s_prot_u; 7271 1.85 thorpej pt_entry_t pte_l2_s_prot_w; 7272 1.214 jmcneill pt_entry_t pte_l2_s_prot_ro; 7273 1.85 thorpej pt_entry_t pte_l2_s_prot_mask; 7274 1.85 thorpej 7275 1.214 jmcneill pt_entry_t pte_l2_l_prot_u; 7276 1.214 jmcneill pt_entry_t pte_l2_l_prot_w; 7277 1.214 jmcneill pt_entry_t pte_l2_l_prot_ro; 7278 1.214 jmcneill pt_entry_t pte_l2_l_prot_mask; 7279 1.214 jmcneill 7280 1.230 matt pt_entry_t pte_l1_ss_proto; 7281 1.85 thorpej pt_entry_t pte_l1_s_proto; 7282 1.85 thorpej pt_entry_t pte_l1_c_proto; 7283 1.85 thorpej pt_entry_t pte_l2_s_proto; 7284 1.85 thorpej 7285 1.88 thorpej void (*pmap_copy_page_func)(paddr_t, paddr_t); 7286 1.88 thorpej void (*pmap_zero_page_func)(paddr_t); 7287 1.88 thorpej 7288 1.214 jmcneill #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 7289 1.85 thorpej void 7290 1.85 thorpej pmap_pte_init_generic(void) 7291 1.85 thorpej { 7292 1.85 thorpej 7293 1.388 skrll pte_l1_s_nocache_mode = 0; 7294 1.86 thorpej pte_l1_s_cache_mode = L1_S_B|L1_S_C; 7295 1.220 macallan pte_l1_s_wc_mode = L1_S_B; 7296 1.86 thorpej pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; 7297 1.86 thorpej 7298 1.388 skrll pte_l2_l_nocache_mode = 0; 7299 1.86 thorpej pte_l2_l_cache_mode = L2_B|L2_C; 7300 1.220 macallan pte_l2_l_wc_mode = L2_B; 7301 1.86 thorpej pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; 7302 1.86 thorpej 7303 1.388 skrll pte_l2_s_nocache_mode = 0; 7304 1.86 thorpej pte_l2_s_cache_mode = L2_B|L2_C; 7305 1.220 macallan pte_l2_s_wc_mode = L2_B; 7306 1.86 thorpej pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; 7307 1.85 thorpej 7308 1.134 thorpej /* 7309 1.134 thorpej * If we have a write-through cache, set B and C. If 7310 1.134 thorpej * we have a write-back cache, then we assume setting 7311 1.230 matt * only C will make those pages write-through (except for those 7312 1.230 matt * Cortex CPUs which can read the L1 caches). 
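 *
 * As a reminder, for these classic (pre-TEX) descriptor formats the
 * C ("cacheable") and B ("bufferable") bits combine roughly as:
 *
 *	C=0 B=0		non-cacheable, non-bufferable
 *	C=0 B=1		non-cacheable, bufferable (the "wc" modes above)
 *	C=1 B=0		write-through cacheable
 *	C=1 B=1		write-back cacheable
 *
 * which is why setting only C is expected to yield write-through.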
7313 1.134 thorpej */ 7314 1.230 matt if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop 7315 1.234 matt #if ARM_MMU_V7 > 0 7316 1.234 matt || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid) 7317 1.234 matt #endif 7318 1.234 matt #if ARM_MMU_V6 > 0 7319 1.234 matt || CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid) /* arm116 errata 399234 */ 7320 1.230 matt #endif 7321 1.230 matt || false) { 7322 1.134 thorpej pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 7323 1.134 thorpej pte_l2_l_cache_mode_pt = L2_B|L2_C; 7324 1.134 thorpej pte_l2_s_cache_mode_pt = L2_B|L2_C; 7325 1.230 matt } else { 7326 1.230 matt pte_l1_s_cache_mode_pt = L1_S_C; /* write through */ 7327 1.230 matt pte_l2_l_cache_mode_pt = L2_C; /* write through */ 7328 1.230 matt pte_l2_s_cache_mode_pt = L2_C; /* write through */ 7329 1.134 thorpej } 7330 1.134 thorpej 7331 1.214 jmcneill pte_l1_s_prot_u = L1_S_PROT_U_generic; 7332 1.214 jmcneill pte_l1_s_prot_w = L1_S_PROT_W_generic; 7333 1.214 jmcneill pte_l1_s_prot_ro = L1_S_PROT_RO_generic; 7334 1.214 jmcneill pte_l1_s_prot_mask = L1_S_PROT_MASK_generic; 7335 1.214 jmcneill 7336 1.85 thorpej pte_l2_s_prot_u = L2_S_PROT_U_generic; 7337 1.85 thorpej pte_l2_s_prot_w = L2_S_PROT_W_generic; 7338 1.214 jmcneill pte_l2_s_prot_ro = L2_S_PROT_RO_generic; 7339 1.85 thorpej pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 7340 1.85 thorpej 7341 1.214 jmcneill pte_l2_l_prot_u = L2_L_PROT_U_generic; 7342 1.214 jmcneill pte_l2_l_prot_w = L2_L_PROT_W_generic; 7343 1.214 jmcneill pte_l2_l_prot_ro = L2_L_PROT_RO_generic; 7344 1.214 jmcneill pte_l2_l_prot_mask = L2_L_PROT_MASK_generic; 7345 1.214 jmcneill 7346 1.230 matt pte_l1_ss_proto = L1_SS_PROTO_generic; 7347 1.85 thorpej pte_l1_s_proto = L1_S_PROTO_generic; 7348 1.85 thorpej pte_l1_c_proto = L1_C_PROTO_generic; 7349 1.85 thorpej pte_l2_s_proto = L2_S_PROTO_generic; 7350 1.88 thorpej 7351 1.88 thorpej pmap_copy_page_func = pmap_copy_page_generic; 7352 1.88 thorpej pmap_zero_page_func = pmap_zero_page_generic; 7353 1.85 thorpej } 7354 1.85 thorpej 7355 1.131 thorpej #if defined(CPU_ARM8) 7356 1.131 thorpej void 7357 1.131 thorpej pmap_pte_init_arm8(void) 7358 1.131 thorpej { 7359 1.131 thorpej 7360 1.134 thorpej /* 7361 1.134 thorpej * ARM8 is compatible with generic, but we need to use 7362 1.134 thorpej * the page tables uncached. 7363 1.134 thorpej */ 7364 1.131 thorpej pmap_pte_init_generic(); 7365 1.134 thorpej 7366 1.134 thorpej pte_l1_s_cache_mode_pt = 0; 7367 1.134 thorpej pte_l2_l_cache_mode_pt = 0; 7368 1.134 thorpej pte_l2_s_cache_mode_pt = 0; 7369 1.131 thorpej } 7370 1.131 thorpej #endif /* CPU_ARM8 */ 7371 1.131 thorpej 7372 1.148 bsh #if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH) 7373 1.85 thorpej void 7374 1.85 thorpej pmap_pte_init_arm9(void) 7375 1.85 thorpej { 7376 1.85 thorpej 7377 1.85 thorpej /* 7378 1.85 thorpej * ARM9 is compatible with generic, but we want to use 7379 1.85 thorpej * write-through caching for now. 
7380 1.85 thorpej */ 7381 1.85 thorpej pmap_pte_init_generic(); 7382 1.86 thorpej 7383 1.86 thorpej pte_l1_s_cache_mode = L1_S_C; 7384 1.86 thorpej pte_l2_l_cache_mode = L2_C; 7385 1.86 thorpej pte_l2_s_cache_mode = L2_C; 7386 1.134 thorpej 7387 1.220 macallan pte_l1_s_wc_mode = L1_S_B; 7388 1.220 macallan pte_l2_l_wc_mode = L2_B; 7389 1.220 macallan pte_l2_s_wc_mode = L2_B; 7390 1.220 macallan 7391 1.134 thorpej pte_l1_s_cache_mode_pt = L1_S_C; 7392 1.134 thorpej pte_l2_l_cache_mode_pt = L2_C; 7393 1.134 thorpej pte_l2_s_cache_mode_pt = L2_C; 7394 1.85 thorpej } 7395 1.204 uebayasi #endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */ 7396 1.174 matt #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ 7397 1.138 rearnsha 7398 1.138 rearnsha #if defined(CPU_ARM10) 7399 1.138 rearnsha void 7400 1.138 rearnsha pmap_pte_init_arm10(void) 7401 1.138 rearnsha { 7402 1.138 rearnsha 7403 1.138 rearnsha /* 7404 1.138 rearnsha * ARM10 is compatible with generic, but we want to use 7405 1.138 rearnsha * write-through caching for now. 7406 1.138 rearnsha */ 7407 1.138 rearnsha pmap_pte_init_generic(); 7408 1.138 rearnsha 7409 1.138 rearnsha pte_l1_s_cache_mode = L1_S_B | L1_S_C; 7410 1.138 rearnsha pte_l2_l_cache_mode = L2_B | L2_C; 7411 1.138 rearnsha pte_l2_s_cache_mode = L2_B | L2_C; 7412 1.138 rearnsha 7413 1.220 macallan pte_l1_s_wc_mode = L1_S_B; 7414 1.220 macallan pte_l2_l_wc_mode = L2_B; 7415 1.220 macallan pte_l2_s_wc_mode = L2_B; 7416 1.220 macallan 7417 1.138 rearnsha pte_l1_s_cache_mode_pt = L1_S_C; 7418 1.138 rearnsha pte_l2_l_cache_mode_pt = L2_C; 7419 1.138 rearnsha pte_l2_s_cache_mode_pt = L2_C; 7420 1.138 rearnsha 7421 1.138 rearnsha } 7422 1.138 rearnsha #endif /* CPU_ARM10 */ 7423 1.131 thorpej 7424 1.204 uebayasi #if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH) 7425 1.204 uebayasi void 7426 1.204 uebayasi pmap_pte_init_arm11(void) 7427 1.204 uebayasi { 7428 1.204 uebayasi 7429 1.204 uebayasi /* 7430 1.204 uebayasi * ARM11 is compatible with generic, but we want to use 7431 1.204 uebayasi * write-through caching for now. 7432 1.204 uebayasi */ 7433 1.204 uebayasi pmap_pte_init_generic(); 7434 1.204 uebayasi 7435 1.204 uebayasi pte_l1_s_cache_mode = L1_S_C; 7436 1.204 uebayasi pte_l2_l_cache_mode = L2_C; 7437 1.204 uebayasi pte_l2_s_cache_mode = L2_C; 7438 1.204 uebayasi 7439 1.220 macallan pte_l1_s_wc_mode = L1_S_B; 7440 1.220 macallan pte_l2_l_wc_mode = L2_B; 7441 1.220 macallan pte_l2_s_wc_mode = L2_B; 7442 1.220 macallan 7443 1.204 uebayasi pte_l1_s_cache_mode_pt = L1_S_C; 7444 1.204 uebayasi pte_l2_l_cache_mode_pt = L2_C; 7445 1.204 uebayasi pte_l2_s_cache_mode_pt = L2_C; 7446 1.204 uebayasi } 7447 1.204 uebayasi #endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */ 7448 1.204 uebayasi 7449 1.131 thorpej #if ARM_MMU_SA1 == 1 7450 1.131 thorpej void 7451 1.131 thorpej pmap_pte_init_sa1(void) 7452 1.131 thorpej { 7453 1.131 thorpej 7454 1.134 thorpej /* 7455 1.134 thorpej * The StrongARM SA-1 cache does not have a write-through 7456 1.134 thorpej * mode. So, do the generic initialization, then reset 7457 1.134 thorpej * the page table cache mode to B=1,C=1, and note that 7458 1.134 thorpej * the PTEs need to be sync'd.
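 *
 * Concretely, "sync'd" means that each PTE/PDE store must be followed by
 * a clean of the cache line holding it before the table walker can be
 * relied upon to see the new entry.  That is what the PTE_SYNC() and
 * PDE_SYNC() calls used throughout this file do once pmap_needs_pte_sync
 * is set, schematically:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);		// write back the line holding the PTE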
7459 1.134 thorpej */ 7460 1.131 thorpej pmap_pte_init_generic(); 7461 1.134 thorpej 7462 1.134 thorpej pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 7463 1.134 thorpej pte_l2_l_cache_mode_pt = L2_B|L2_C; 7464 1.134 thorpej pte_l2_s_cache_mode_pt = L2_B|L2_C; 7465 1.134 thorpej 7466 1.134 thorpej pmap_needs_pte_sync = 1; 7467 1.131 thorpej } 7468 1.134 thorpej #endif /* ARM_MMU_SA1 == 1*/ 7469 1.85 thorpej 7470 1.85 thorpej #if ARM_MMU_XSCALE == 1 7471 1.141 scw #if (ARM_NMMUS > 1) 7472 1.141 scw static u_int xscale_use_minidata; 7473 1.141 scw #endif 7474 1.141 scw 7475 1.85 thorpej void 7476 1.85 thorpej pmap_pte_init_xscale(void) 7477 1.85 thorpej { 7478 1.96 thorpej uint32_t auxctl; 7479 1.134 thorpej int write_through = 0; 7480 1.85 thorpej 7481 1.96 thorpej pte_l1_s_cache_mode = L1_S_B|L1_S_C; 7482 1.220 macallan pte_l1_s_wc_mode = L1_S_B; 7483 1.86 thorpej pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; 7484 1.86 thorpej 7485 1.96 thorpej pte_l2_l_cache_mode = L2_B|L2_C; 7486 1.220 macallan pte_l2_l_wc_mode = L2_B; 7487 1.86 thorpej pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; 7488 1.86 thorpej 7489 1.96 thorpej pte_l2_s_cache_mode = L2_B|L2_C; 7490 1.220 macallan pte_l2_s_wc_mode = L2_B; 7491 1.86 thorpej pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; 7492 1.106 thorpej 7493 1.134 thorpej pte_l1_s_cache_mode_pt = L1_S_C; 7494 1.134 thorpej pte_l2_l_cache_mode_pt = L2_C; 7495 1.134 thorpej pte_l2_s_cache_mode_pt = L2_C; 7496 1.134 thorpej 7497 1.106 thorpej #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE 7498 1.106 thorpej /* 7499 1.106 thorpej * The XScale core has an enhanced mode where writes that 7500 1.106 thorpej * miss the cache cause a cache line to be allocated. This 7501 1.106 thorpej * is significantly faster than the traditional, write-through 7502 1.106 thorpej * behavior of this case. 7503 1.106 thorpej */ 7504 1.174 matt pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X); 7505 1.174 matt pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X); 7506 1.174 matt pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X); 7507 1.106 thorpej #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ 7508 1.85 thorpej 7509 1.95 thorpej #ifdef XSCALE_CACHE_WRITE_THROUGH 7510 1.95 thorpej /* 7511 1.95 thorpej * Some versions of the XScale core have various bugs in 7512 1.95 thorpej * their cache units, the work-around for which is to run 7513 1.95 thorpej * the cache in write-through mode. Unfortunately, this 7514 1.95 thorpej * has a major (negative) impact on performance. So, we 7515 1.95 thorpej * go ahead and run fast-and-loose, in the hopes that we 7516 1.95 thorpej * don't line up the planets in a way that will trip the 7517 1.95 thorpej * bugs. 7518 1.95 thorpej * 7519 1.95 thorpej * However, we give you the option to be slow-but-correct. 7520 1.95 thorpej */ 7521 1.129 bsh write_through = 1; 7522 1.129 bsh #elif defined(XSCALE_CACHE_WRITE_BACK) 7523 1.134 thorpej /* force write back cache mode */ 7524 1.129 bsh write_through = 0; 7525 1.154 bsh #elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270) 7526 1.129 bsh /* 7527 1.129 bsh * Intel PXA2[15]0 processors are known to have a bug in 7528 1.129 bsh * write-back cache on revision 4 and earlier (stepping 7529 1.129 bsh * A[01] and B[012]). Fixed for C0 and later. 
7530 1.129 bsh */ 7531 1.129 bsh { 7532 1.134 thorpej uint32_t id, type; 7533 1.129 bsh 7534 1.129 bsh id = cpufunc_id(); 7535 1.129 bsh type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); 7536 1.129 bsh 7537 1.129 bsh if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { 7538 1.129 bsh if ((id & CPU_ID_REVISION_MASK) < 5) { 7539 1.129 bsh /* write through for stepping A0-1 and B0-2 */ 7540 1.129 bsh write_through = 1; 7541 1.129 bsh } 7542 1.129 bsh } 7543 1.129 bsh } 7544 1.95 thorpej #endif /* XSCALE_CACHE_WRITE_THROUGH */ 7545 1.129 bsh 7546 1.129 bsh if (write_through) { 7547 1.129 bsh pte_l1_s_cache_mode = L1_S_C; 7548 1.129 bsh pte_l2_l_cache_mode = L2_C; 7549 1.129 bsh pte_l2_s_cache_mode = L2_C; 7550 1.129 bsh } 7551 1.95 thorpej 7552 1.141 scw #if (ARM_NMMUS > 1) 7553 1.141 scw xscale_use_minidata = 1; 7554 1.141 scw #endif 7555 1.141 scw 7556 1.214 jmcneill pte_l1_s_prot_u = L1_S_PROT_U_xscale; 7557 1.214 jmcneill pte_l1_s_prot_w = L1_S_PROT_W_xscale; 7558 1.214 jmcneill pte_l1_s_prot_ro = L1_S_PROT_RO_xscale; 7559 1.214 jmcneill pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale; 7560 1.214 jmcneill 7561 1.85 thorpej pte_l2_s_prot_u = L2_S_PROT_U_xscale; 7562 1.85 thorpej pte_l2_s_prot_w = L2_S_PROT_W_xscale; 7563 1.214 jmcneill pte_l2_s_prot_ro = L2_S_PROT_RO_xscale; 7564 1.85 thorpej pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; 7565 1.85 thorpej 7566 1.214 jmcneill pte_l2_l_prot_u = L2_L_PROT_U_xscale; 7567 1.214 jmcneill pte_l2_l_prot_w = L2_L_PROT_W_xscale; 7568 1.214 jmcneill pte_l2_l_prot_ro = L2_L_PROT_RO_xscale; 7569 1.214 jmcneill pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale; 7570 1.214 jmcneill 7571 1.230 matt pte_l1_ss_proto = L1_SS_PROTO_xscale; 7572 1.85 thorpej pte_l1_s_proto = L1_S_PROTO_xscale; 7573 1.85 thorpej pte_l1_c_proto = L1_C_PROTO_xscale; 7574 1.85 thorpej pte_l2_s_proto = L2_S_PROTO_xscale; 7575 1.88 thorpej 7576 1.88 thorpej pmap_copy_page_func = pmap_copy_page_xscale; 7577 1.88 thorpej pmap_zero_page_func = pmap_zero_page_xscale; 7578 1.96 thorpej 7579 1.96 thorpej /* 7580 1.96 thorpej * Disable ECC protection of page table access, for now. 7581 1.96 thorpej */ 7582 1.325 skrll auxctl = armreg_auxctl_read(); 7583 1.96 thorpej auxctl &= ~XSCALE_AUXCTL_P; 7584 1.325 skrll armreg_auxctl_write(auxctl); 7585 1.85 thorpej } 7586 1.87 thorpej 7587 1.87 thorpej /* 7588 1.87 thorpej * xscale_setup_minidata: 7589 1.87 thorpej * 7590 1.87 thorpej * Set up the mini-data cache clean area. We require the 7591 1.87 thorpej * caller to allocate the right amount of physically and 7592 1.87 thorpej * virtually contiguous space. 7593 1.87 thorpej */ 7594 1.87 thorpej void 7595 1.87 thorpej xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa) 7596 1.87 thorpej { 7597 1.87 thorpej pd_entry_t *pde = (pd_entry_t *) l1pt; 7598 1.87 thorpej vsize_t size; 7599 1.96 thorpej uint32_t auxctl; 7600 1.87 thorpej 7601 1.87 thorpej xscale_minidata_clean_addr = va; 7602 1.87 thorpej 7603 1.87 thorpej /* Round it to page size. 
*/ 7604 1.87 thorpej size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; 7605 1.87 thorpej 7606 1.87 thorpej for (; size != 0; 7607 1.87 thorpej va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { 7608 1.271 matt const size_t l1slot = l1pte_index(va); 7609 1.271 matt pt_entry_t *ptep = (pt_entry_t *) kernel_pt_lookup(l1pte_pa(pde[l1slot])); 7610 1.262 matt if (ptep == NULL) 7611 1.87 thorpej panic("xscale_setup_minidata: can't find L2 table for " 7612 1.87 thorpej "VA 0x%08lx", va); 7613 1.286 skrll 7614 1.262 matt ptep += l2pte_index(va); 7615 1.262 matt pt_entry_t opte = *ptep; 7616 1.286 skrll l2pte_set(ptep, 7617 1.262 matt L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) 7618 1.262 matt | L2_C | L2_XS_T_TEX(TEX_XSCALE_X), opte); 7619 1.87 thorpej } 7620 1.96 thorpej 7621 1.96 thorpej /* 7622 1.96 thorpej * Configure the mini-data cache for write-back with 7623 1.96 thorpej * read/write-allocate. 7624 1.96 thorpej * 7625 1.96 thorpej * NOTE: In order to reconfigure the mini-data cache, we must 7626 1.96 thorpej * make sure it contains no valid data! In order to do that, 7627 1.96 thorpej * we must issue a global data cache invalidate command! 7628 1.96 thorpej * 7629 1.96 thorpej * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! 7630 1.96 thorpej * THIS IS VERY IMPORTANT! 7631 1.96 thorpej */ 7632 1.134 thorpej 7633 1.96 thorpej /* Invalidate data and mini-data. */ 7634 1.157 perry __asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); 7635 1.325 skrll auxctl = armreg_auxctl_read(); 7636 1.96 thorpej auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; 7637 1.325 skrll armreg_auxctl_write(auxctl); 7638 1.87 thorpej } 7639 1.141 scw 7640 1.141 scw /* 7641 1.141 scw * Change the PTEs for the specified kernel mappings such that they 7642 1.141 scw * will use the mini data cache instead of the main data cache. 
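 *
 * The loop below achieves this by clearing L2_B in each PTE; when the
 * read/write-allocate (TEX "X" bit) cache mode is in use that leaves the
 * X=1,C=1,B=0 encoding that xscale_setup_minidata() also uses for the
 * clean area, i.e. the attribute the auxiliary-control MD field directs
 * to the mini-data cache.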
7643 1.141 scw */ 7644 1.141 scw void 7645 1.141 scw pmap_uarea(vaddr_t va) 7646 1.141 scw { 7647 1.141 scw vaddr_t next_bucket, eva; 7648 1.141 scw 7649 1.141 scw #if (ARM_NMMUS > 1) 7650 1.141 scw if (xscale_use_minidata == 0) 7651 1.141 scw return; 7652 1.141 scw #endif 7653 1.141 scw 7654 1.141 scw eva = va + USPACE; 7655 1.141 scw 7656 1.141 scw while (va < eva) { 7657 1.271 matt next_bucket = L2_NEXT_BUCKET_VA(va); 7658 1.141 scw if (next_bucket > eva) 7659 1.141 scw next_bucket = eva; 7660 1.141 scw 7661 1.262 matt struct l2_bucket *l2b = pmap_get_l2_bucket(pmap_kernel(), va); 7662 1.141 scw KDASSERT(l2b != NULL); 7663 1.141 scw 7664 1.262 matt pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; 7665 1.262 matt pt_entry_t *ptep = sptep; 7666 1.141 scw 7667 1.141 scw while (va < next_bucket) { 7668 1.262 matt const pt_entry_t opte = *ptep; 7669 1.268 matt if (!l2pte_minidata_p(opte)) { 7670 1.141 scw cpu_dcache_wbinv_range(va, PAGE_SIZE); 7671 1.141 scw cpu_tlb_flushD_SE(va); 7672 1.262 matt l2pte_set(ptep, opte & ~L2_B, opte); 7673 1.141 scw } 7674 1.262 matt ptep += PAGE_SIZE / L2_S_SIZE; 7675 1.141 scw va += PAGE_SIZE; 7676 1.141 scw } 7677 1.141 scw PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 7678 1.141 scw } 7679 1.141 scw cpu_cpwait(); 7680 1.141 scw } 7681 1.85 thorpej #endif /* ARM_MMU_XSCALE == 1 */ 7682 1.134 thorpej 7683 1.221 bsh 7684 1.221 bsh #if defined(CPU_ARM11MPCORE) 7685 1.221 bsh void 7686 1.221 bsh pmap_pte_init_arm11mpcore(void) 7687 1.221 bsh { 7688 1.221 bsh 7689 1.388 skrll /* cache mode is controlled by 5 bits (B, C, TEX[2:0]) */ 7690 1.221 bsh pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6; 7691 1.221 bsh pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6; 7692 1.221 bsh #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE) 7693 1.221 bsh /* use extended small page (without APn, with TEX) */ 7694 1.221 bsh pte_l2_s_cache_mask = L2_XS_CACHE_MASK_armv6; 7695 1.221 bsh #else 7696 1.221 bsh pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6c; 7697 1.221 bsh #endif 7698 1.221 bsh 7699 1.221 bsh /* write-back, write-allocate */ 7700 1.221 bsh pte_l1_s_cache_mode = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01); 7701 1.221 bsh pte_l2_l_cache_mode = L2_C | L2_B | L2_V6_L_TEX(0x01); 7702 1.221 bsh #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE) 7703 1.221 bsh pte_l2_s_cache_mode = L2_C | L2_B | L2_V6_XS_TEX(0x01); 7704 1.221 bsh #else 7705 1.221 bsh /* no TEX. read-allocate */ 7706 1.221 bsh pte_l2_s_cache_mode = L2_C | L2_B; 7707 1.221 bsh #endif 7708 1.221 bsh /* 7709 1.221 bsh * write-back, write-allocate for page tables. 
7710 1.221 bsh */ 7711 1.221 bsh pte_l1_s_cache_mode_pt = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01); 7712 1.221 bsh pte_l2_l_cache_mode_pt = L2_C | L2_B | L2_V6_L_TEX(0x01); 7713 1.221 bsh #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE) 7714 1.221 bsh pte_l2_s_cache_mode_pt = L2_C | L2_B | L2_V6_XS_TEX(0x01); 7715 1.221 bsh #else 7716 1.221 bsh pte_l2_s_cache_mode_pt = L2_C | L2_B; 7717 1.221 bsh #endif 7718 1.221 bsh 7719 1.221 bsh pte_l1_s_prot_u = L1_S_PROT_U_armv6; 7720 1.221 bsh pte_l1_s_prot_w = L1_S_PROT_W_armv6; 7721 1.221 bsh pte_l1_s_prot_ro = L1_S_PROT_RO_armv6; 7722 1.221 bsh pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6; 7723 1.221 bsh 7724 1.221 bsh #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE) 7725 1.221 bsh pte_l2_s_prot_u = L2_S_PROT_U_armv6n; 7726 1.221 bsh pte_l2_s_prot_w = L2_S_PROT_W_armv6n; 7727 1.221 bsh pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n; 7728 1.221 bsh pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n; 7729 1.221 bsh 7730 1.221 bsh #else 7731 1.221 bsh /* with AP[0..3] */ 7732 1.221 bsh pte_l2_s_prot_u = L2_S_PROT_U_generic; 7733 1.221 bsh pte_l2_s_prot_w = L2_S_PROT_W_generic; 7734 1.221 bsh pte_l2_s_prot_ro = L2_S_PROT_RO_generic; 7735 1.221 bsh pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 7736 1.221 bsh #endif 7737 1.221 bsh 7738 1.221 bsh #ifdef ARM11MPCORE_COMPAT_MMU 7739 1.221 bsh /* with AP[0..3] */ 7740 1.221 bsh pte_l2_l_prot_u = L2_L_PROT_U_generic; 7741 1.221 bsh pte_l2_l_prot_w = L2_L_PROT_W_generic; 7742 1.221 bsh pte_l2_l_prot_ro = L2_L_PROT_RO_generic; 7743 1.221 bsh pte_l2_l_prot_mask = L2_L_PROT_MASK_generic; 7744 1.221 bsh 7745 1.230 matt pte_l1_ss_proto = L1_SS_PROTO_armv6; 7746 1.221 bsh pte_l1_s_proto = L1_S_PROTO_armv6; 7747 1.221 bsh pte_l1_c_proto = L1_C_PROTO_armv6; 7748 1.221 bsh pte_l2_s_proto = L2_S_PROTO_armv6c; 7749 1.221 bsh #else 7750 1.221 bsh pte_l2_l_prot_u = L2_L_PROT_U_armv6n; 7751 1.221 bsh pte_l2_l_prot_w = L2_L_PROT_W_armv6n; 7752 1.221 bsh pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n; 7753 1.221 bsh pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n; 7754 1.221 bsh 7755 1.230 matt pte_l1_ss_proto = L1_SS_PROTO_armv6; 7756 1.221 bsh pte_l1_s_proto = L1_S_PROTO_armv6; 7757 1.221 bsh pte_l1_c_proto = L1_C_PROTO_armv6; 7758 1.221 bsh pte_l2_s_proto = L2_S_PROTO_armv6n; 7759 1.221 bsh #endif 7760 1.221 bsh 7761 1.221 bsh pmap_copy_page_func = pmap_copy_page_generic; 7762 1.221 bsh pmap_zero_page_func = pmap_zero_page_generic; 7763 1.221 bsh pmap_needs_pte_sync = 1; 7764 1.221 bsh } 7765 1.221 bsh #endif /* CPU_ARM11MPCORE */ 7766 1.221 bsh 7767 1.221 bsh 7768 1.388 skrll #if ARM_MMU_V6 == 1 7769 1.388 skrll void 7770 1.388 skrll pmap_pte_init_armv6(void) 7771 1.388 skrll { 7772 1.388 skrll /* 7773 1.388 skrll * The ARMv6-A MMU is mostly compatible with generic. If the 7774 1.388 skrll * AP field is zero, that now means "no access" rather than 7775 1.388 skrll * read-only. The prototypes are a little different because of 7776 1.388 skrll * the XN bit. 
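 *
 * The XN handling itself can be seen in pmap_map_chunk() above: on the
 * extended-MMU configurations a mapping keeps the execute-never bit
 * unless VM_PROT_EXECUTE was requested, roughly
 *
 *	npte |= (prot & VM_PROT_EXECUTE) ? 0 : L2_XS_XN;
 *
 * so data-only mappings end up non-executable by default.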
7777 1.388 skrll */ 7778 1.388 skrll pmap_pte_init_generic(); 7779 1.388 skrll 7780 1.388 skrll pte_l1_s_nocache_mode = L1_S_XS_TEX(1); 7781 1.388 skrll pte_l2_l_nocache_mode = L2_XS_L_TEX(1); 7782 1.388 skrll pte_l2_s_nocache_mode = L2_XS_T_TEX(1); 7783 1.388 skrll 7784 1.388 skrll #ifdef ARM11_COMPAT_MMU 7785 1.388 skrll /* with AP[0..3] */ 7786 1.388 skrll pte_l1_ss_proto = L1_SS_PROTO_armv6; 7787 1.388 skrll #else 7788 1.388 skrll pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6n; 7789 1.388 skrll pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6n; 7790 1.388 skrll pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6n; 7791 1.388 skrll 7792 1.388 skrll pte_l1_ss_proto = L1_SS_PROTO_armv6; 7793 1.388 skrll pte_l1_s_proto = L1_S_PROTO_armv6; 7794 1.388 skrll pte_l1_c_proto = L1_C_PROTO_armv6; 7795 1.388 skrll pte_l2_s_proto = L2_S_PROTO_armv6n; 7796 1.388 skrll 7797 1.388 skrll pte_l1_s_prot_u = L1_S_PROT_U_armv6; 7798 1.388 skrll pte_l1_s_prot_w = L1_S_PROT_W_armv6; 7799 1.388 skrll pte_l1_s_prot_ro = L1_S_PROT_RO_armv6; 7800 1.388 skrll pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6; 7801 1.388 skrll 7802 1.388 skrll pte_l2_l_prot_u = L2_L_PROT_U_armv6n; 7803 1.388 skrll pte_l2_l_prot_w = L2_L_PROT_W_armv6n; 7804 1.388 skrll pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n; 7805 1.388 skrll pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n; 7806 1.388 skrll 7807 1.388 skrll pte_l2_s_prot_u = L2_S_PROT_U_armv6n; 7808 1.388 skrll pte_l2_s_prot_w = L2_S_PROT_W_armv6n; 7809 1.388 skrll pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n; 7810 1.388 skrll pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n; 7811 1.388 skrll 7812 1.388 skrll #endif 7813 1.388 skrll } 7814 1.388 skrll #endif /* ARM_MMU_V6 */ 7815 1.388 skrll 7816 1.214 jmcneill #if ARM_MMU_V7 == 1 7817 1.214 jmcneill void 7818 1.214 jmcneill pmap_pte_init_armv7(void) 7819 1.214 jmcneill { 7820 1.214 jmcneill /* 7821 1.214 jmcneill * The ARMv7-A MMU is mostly compatible with generic. If the 7822 1.214 jmcneill * AP field is zero, that now means "no access" rather than 7823 1.214 jmcneill * read-only. The prototypes are a little different because of 7824 1.214 jmcneill * the XN bit. 7825 1.214 jmcneill */ 7826 1.214 jmcneill pmap_pte_init_generic(); 7827 1.214 jmcneill 7828 1.271 matt pmap_needs_pte_sync = 1; 7829 1.271 matt 7830 1.388 skrll pte_l1_s_nocache_mode = L1_S_XS_TEX(1); 7831 1.388 skrll pte_l2_l_nocache_mode = L2_XS_L_TEX(1); 7832 1.388 skrll pte_l2_s_nocache_mode = L2_XS_T_TEX(1); 7833 1.388 skrll 7834 1.214 jmcneill pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv7; 7835 1.214 jmcneill pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv7; 7836 1.214 jmcneill pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv7; 7837 1.214 jmcneill 7838 1.271 matt /* 7839 1.271 matt * If the core support coherent walk then updates to translation tables 7840 1.271 matt * do not require a clean to the point of unification to ensure 7841 1.271 matt * visibility by subsequent translation table walks. That means we can 7842 1.271 matt * map everything shareable and cached and the right thing will happen. 7843 1.271 matt */ 7844 1.271 matt if (__SHIFTOUT(armreg_mmfr3_read(), __BITS(23,20))) { 7845 1.271 matt pmap_needs_pte_sync = 0; 7846 1.271 matt 7847 1.237 matt /* 7848 1.237 matt * write-back, no write-allocate, shareable for normal pages. 7849 1.237 matt */ 7850 1.271 matt pte_l1_s_cache_mode |= L1_S_V6_S; 7851 1.271 matt pte_l2_l_cache_mode |= L2_XS_S; 7852 1.271 matt pte_l2_s_cache_mode |= L2_XS_S; 7853 1.284 matt } 7854 1.237 matt 7855 1.284 matt /* 7856 1.284 matt * Page tables are just all other memory. 
We can use write-back since 7857 1.284 matt * pmap_needs_pte_sync is 1 (or the MMU can read out of cache). 7858 1.284 matt */ 7859 1.284 matt pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode; 7860 1.284 matt pte_l2_l_cache_mode_pt = pte_l2_l_cache_mode; 7861 1.284 matt pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode; 7862 1.271 matt 7863 1.271 matt /* 7864 1.271 matt * Check the Memory Model Features to see if this CPU supports 7865 1.271 matt * the TLBIASID coproc op. 7866 1.271 matt */ 7867 1.271 matt if (__SHIFTOUT(armreg_mmfr2_read(), __BITS(16,19)) >= 2) { 7868 1.271 matt arm_has_tlbiasid_p = true; 7869 1.349 skrll } else if (__SHIFTOUT(armreg_mmfr2_read(), __BITS(12,15)) >= 2) { 7870 1.349 skrll arm_has_tlbiasid_p = true; 7871 1.237 matt } 7872 1.237 matt 7873 1.353 jmcneill /* 7874 1.353 jmcneill * Check the MPIDR to see if this CPU supports MP extensions. 7875 1.353 jmcneill */ 7876 1.353 jmcneill #ifdef MULTIPROCESSOR 7877 1.353 jmcneill arm_has_mpext_p = (armreg_mpidr_read() & (MPIDR_MP|MPIDR_U)) == MPIDR_MP; 7878 1.353 jmcneill #else 7879 1.353 jmcneill arm_has_mpext_p = false; 7880 1.353 jmcneill #endif 7881 1.353 jmcneill 7882 1.214 jmcneill pte_l1_s_prot_u = L1_S_PROT_U_armv7; 7883 1.214 jmcneill pte_l1_s_prot_w = L1_S_PROT_W_armv7; 7884 1.214 jmcneill pte_l1_s_prot_ro = L1_S_PROT_RO_armv7; 7885 1.214 jmcneill pte_l1_s_prot_mask = L1_S_PROT_MASK_armv7; 7886 1.214 jmcneill 7887 1.214 jmcneill pte_l2_s_prot_u = L2_S_PROT_U_armv7; 7888 1.214 jmcneill pte_l2_s_prot_w = L2_S_PROT_W_armv7; 7889 1.214 jmcneill pte_l2_s_prot_ro = L2_S_PROT_RO_armv7; 7890 1.214 jmcneill pte_l2_s_prot_mask = L2_S_PROT_MASK_armv7; 7891 1.214 jmcneill 7892 1.214 jmcneill pte_l2_l_prot_u = L2_L_PROT_U_armv7; 7893 1.214 jmcneill pte_l2_l_prot_w = L2_L_PROT_W_armv7; 7894 1.214 jmcneill pte_l2_l_prot_ro = L2_L_PROT_RO_armv7; 7895 1.214 jmcneill pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7; 7896 1.214 jmcneill 7897 1.230 matt pte_l1_ss_proto = L1_SS_PROTO_armv7; 7898 1.214 jmcneill pte_l1_s_proto = L1_S_PROTO_armv7; 7899 1.214 jmcneill pte_l1_c_proto = L1_C_PROTO_armv7; 7900 1.214 jmcneill pte_l2_s_proto = L2_S_PROTO_armv7; 7901 1.237 matt 7902 1.214 jmcneill } 7903 1.214 jmcneill #endif /* ARM_MMU_V7 */ 7904 1.214 jmcneill 7905 1.170 chris /* 7906 1.170 chris * return the PA of the current L1 table, for use when handling a crash dump 7907 1.170 chris */ 7908 1.271 matt uint32_t 7909 1.271 matt pmap_kernel_L1_addr(void) 7910 1.170 chris { 7911 1.271 matt #ifdef ARM_MMU_EXTENDED 7912 1.271 matt return pmap_kernel()->pm_l1_pa; 7913 1.271 matt #else 7914 1.170 chris return pmap_kernel()->pm_l1->l1_physaddr; 7915 1.271 matt #endif 7916 1.170 chris } 7917 1.170 chris 7918 1.134 thorpej #if defined(DDB) 7919 1.134 thorpej /* 7920 1.134 thorpej * A couple of ddb-callable functions for dumping pmaps 7921 1.134 thorpej */ 7922 1.134 thorpej void pmap_dump(pmap_t); 7923 1.134 thorpej 7924 1.134 thorpej static pt_entry_t ncptes[64]; 7925 1.134 thorpej static void pmap_dump_ncpg(pmap_t); 7926 1.134 thorpej 7927 1.134 thorpej void 7928 1.134 thorpej pmap_dump(pmap_t pm) 7929 1.134 thorpej { 7930 1.134 thorpej struct l2_dtable *l2; 7931 1.134 thorpej struct l2_bucket *l2b; 7932 1.134 thorpej pt_entry_t *ptep, pte; 7933 1.134 thorpej vaddr_t l2_va, l2b_va, va; 7934 1.134 thorpej int i, j, k, occ, rows = 0; 7935 1.134 thorpej 7936 1.134 thorpej if (pm == pmap_kernel()) 7937 1.134 thorpej printf("pmap_kernel (%p): ", pm); 7938 1.134 thorpej else 7939 1.134 thorpej printf("user pmap (%p): ", pm); 7940 1.134 thorpej 7941 1.271 matt 
#ifdef ARM_MMU_EXTENDED 7942 1.271 matt printf("l1 at %p\n", pmap_l1_kva(pm)); 7943 1.271 matt #else 7944 1.258 matt printf("domain %d, l1 at %p\n", pmap_domain(pm), pmap_l1_kva(pm)); 7945 1.271 matt #endif 7946 1.134 thorpej 7947 1.134 thorpej l2_va = 0; 7948 1.134 thorpej for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) { 7949 1.134 thorpej l2 = pm->pm_l2[i]; 7950 1.134 thorpej 7951 1.134 thorpej if (l2 == NULL || l2->l2_occupancy == 0) 7952 1.134 thorpej continue; 7953 1.134 thorpej 7954 1.134 thorpej l2b_va = l2_va; 7955 1.134 thorpej for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) { 7956 1.134 thorpej l2b = &l2->l2_bucket[j]; 7957 1.134 thorpej 7958 1.134 thorpej if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL) 7959 1.134 thorpej continue; 7960 1.134 thorpej 7961 1.134 thorpej ptep = l2b->l2b_kva; 7962 1.286 skrll 7963 1.134 thorpej for (k = 0; k < 256 && ptep[k] == 0; k++) 7964 1.134 thorpej ; 7965 1.134 thorpej 7966 1.134 thorpej k &= ~63; 7967 1.134 thorpej occ = l2b->l2b_occupancy; 7968 1.134 thorpej va = l2b_va + (k * 4096); 7969 1.134 thorpej for (; k < 256; k++, va += 0x1000) { 7970 1.142 chris char ch = ' '; 7971 1.134 thorpej if ((k % 64) == 0) { 7972 1.134 thorpej if ((rows % 8) == 0) { 7973 1.134 thorpej printf( 7974 1.134 thorpej " |0000 |8000 |10000 |18000 |20000 |28000 |30000 |38000\n"); 7975 1.134 thorpej } 7976 1.134 thorpej printf("%08lx: ", va); 7977 1.134 thorpej } 7978 1.134 thorpej 7979 1.134 thorpej ncptes[k & 63] = 0; 7980 1.134 thorpej pte = ptep[k]; 7981 1.134 thorpej if (pte == 0) { 7982 1.134 thorpej ch = '.'; 7983 1.134 thorpej } else { 7984 1.134 thorpej occ--; 7985 1.388 skrll switch (pte & 0x4c) { 7986 1.134 thorpej case 0x00: 7987 1.388 skrll ch = 'N'; /* No cache No buff */ 7988 1.134 thorpej break; 7989 1.134 thorpej case 0x04: 7990 1.134 thorpej ch = 'B'; /* No cache buff */ 7991 1.134 thorpej break; 7992 1.134 thorpej case 0x08: 7993 1.388 skrll ch = 'C'; /* Cache No buff */ 7994 1.134 thorpej break; 7995 1.134 thorpej case 0x0c: 7996 1.134 thorpej ch = 'F'; /* Cache Buff */ 7997 1.134 thorpej break; 7998 1.388 skrll case 0x40: 7999 1.388 skrll ch = 'D'; 8000 1.388 skrll break; 8001 1.388 skrll case 0x48: 8002 1.388 skrll ch = 'm'; /* Xscale mini-data */ 8003 1.388 skrll break; 8004 1.388 skrll default: 8005 1.388 skrll ch = '?'; 8006 1.388 skrll break; 8007 1.134 thorpej } 8008 1.134 thorpej 8009 1.134 thorpej if ((pte & L2_S_PROT_U) == L2_S_PROT_U) 8010 1.134 thorpej ch += 0x20; 8011 1.134 thorpej 8012 1.134 thorpej if ((pte & 0xc) == 0) 8013 1.134 thorpej ncptes[k & 63] = pte; 8014 1.134 thorpej } 8015 1.134 thorpej 8016 1.134 thorpej if ((k % 64) == 63) { 8017 1.134 thorpej rows++; 8018 1.134 thorpej printf("%c\n", ch); 8019 1.134 thorpej pmap_dump_ncpg(pm); 8020 1.134 thorpej if (occ == 0) 8021 1.134 thorpej break; 8022 1.134 thorpej } else 8023 1.134 thorpej printf("%c", ch); 8024 1.134 thorpej } 8025 1.134 thorpej } 8026 1.134 thorpej } 8027 1.134 thorpej } 8028 1.134 thorpej 8029 1.134 thorpej static void 8030 1.134 thorpej pmap_dump_ncpg(pmap_t pm) 8031 1.134 thorpej { 8032 1.134 thorpej struct vm_page *pg; 8033 1.215 uebayasi struct vm_page_md *md; 8034 1.134 thorpej struct pv_entry *pv; 8035 1.134 thorpej int i; 8036 1.134 thorpej 8037 1.134 thorpej for (i = 0; i < 63; i++) { 8038 1.134 thorpej if (ncptes[i] == 0) 8039 1.134 thorpej continue; 8040 1.134 thorpej 8041 1.134 thorpej pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i])); 8042 1.134 thorpej if (pg == NULL) 8043 1.134 thorpej continue; 8044 1.215 uebayasi md = 
VM_PAGE_TO_MD(pg); 8045 1.134 thorpej 8046 1.134 thorpej printf(" pa 0x%08lx: krw %d kro %d urw %d uro %d\n", 8047 1.155 yamt VM_PAGE_TO_PHYS(pg), 8048 1.215 uebayasi md->krw_mappings, md->kro_mappings, 8049 1.215 uebayasi md->urw_mappings, md->uro_mappings); 8050 1.134 thorpej 8051 1.215 uebayasi SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 8052 1.134 thorpej printf(" %c va 0x%08lx, flags 0x%x\n", 8053 1.134 thorpej (pm == pv->pv_pmap) ? '*' : ' ', 8054 1.134 thorpej pv->pv_va, pv->pv_flags); 8055 1.134 thorpej } 8056 1.134 thorpej } 8057 1.134 thorpej } 8058 1.134 thorpej #endif 8059 1.174 matt 8060 1.174 matt #ifdef PMAP_STEAL_MEMORY 8061 1.174 matt void 8062 1.174 matt pmap_boot_pageadd(pv_addr_t *newpv) 8063 1.174 matt { 8064 1.174 matt pv_addr_t *pv, *npv; 8065 1.174 matt 8066 1.174 matt if ((pv = SLIST_FIRST(&pmap_boot_freeq)) != NULL) { 8067 1.174 matt if (newpv->pv_pa < pv->pv_va) { 8068 1.174 matt KASSERT(newpv->pv_pa + newpv->pv_size <= pv->pv_pa); 8069 1.174 matt if (newpv->pv_pa + newpv->pv_size == pv->pv_pa) { 8070 1.174 matt newpv->pv_size += pv->pv_size; 8071 1.174 matt SLIST_REMOVE_HEAD(&pmap_boot_freeq, pv_list); 8072 1.174 matt } 8073 1.174 matt pv = NULL; 8074 1.174 matt } else { 8075 1.174 matt for (; (npv = SLIST_NEXT(pv, pv_list)) != NULL; 8076 1.174 matt pv = npv) { 8077 1.174 matt KASSERT(pv->pv_pa + pv->pv_size < npv->pv_pa); 8078 1.174 matt KASSERT(pv->pv_pa < newpv->pv_pa); 8079 1.174 matt if (newpv->pv_pa > npv->pv_pa) 8080 1.174 matt continue; 8081 1.174 matt if (pv->pv_pa + pv->pv_size == newpv->pv_pa) { 8082 1.174 matt pv->pv_size += newpv->pv_size; 8083 1.174 matt return; 8084 1.174 matt } 8085 1.174 matt if (newpv->pv_pa + newpv->pv_size < npv->pv_pa) 8086 1.174 matt break; 8087 1.174 matt newpv->pv_size += npv->pv_size; 8088 1.174 matt SLIST_INSERT_AFTER(pv, newpv, pv_list); 8089 1.174 matt SLIST_REMOVE_AFTER(newpv, pv_list); 8090 1.174 matt return; 8091 1.174 matt } 8092 1.174 matt } 8093 1.174 matt } 8094 1.174 matt 8095 1.174 matt if (pv) { 8096 1.174 matt SLIST_INSERT_AFTER(pv, newpv, pv_list); 8097 1.174 matt } else { 8098 1.174 matt SLIST_INSERT_HEAD(&pmap_boot_freeq, newpv, pv_list); 8099 1.174 matt } 8100 1.174 matt } 8101 1.174 matt 8102 1.174 matt void 8103 1.174 matt pmap_boot_pagealloc(psize_t amount, psize_t mask, psize_t match, 8104 1.174 matt pv_addr_t *rpv) 8105 1.174 matt { 8106 1.174 matt pv_addr_t *pv, **pvp; 8107 1.174 matt 8108 1.174 matt KASSERT((amount & PGOFSET) == 0); 8109 1.174 matt KASSERT((mask & PGOFSET) == 0); 8110 1.174 matt KASSERT((match & PGOFSET) == 0); 8111 1.174 matt KASSERT(amount != 0); 8112 1.174 matt 8113 1.174 matt for (pvp = &SLIST_FIRST(&pmap_boot_freeq); 8114 1.174 matt (pv = *pvp) != NULL; 8115 1.174 matt pvp = &SLIST_NEXT(pv, pv_list)) { 8116 1.174 matt pv_addr_t *newpv; 8117 1.174 matt psize_t off; 8118 1.174 matt /* 8119 1.377 skrll * If this entry is too small to satisfy the request... 
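 *
 * (More generally, the scan below looks for an offset "off" within the
 * free segment such that ((pv->pv_pa + off) & mask) == match; a caller
 * wanting, say, a 16 KB-aligned L1 translation table could pass
 * amount = L1_TABLE_SIZE, mask = L1_TABLE_SIZE - 1 and match = 0;
 * illustrative values only.)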
8120 1.174 matt */ 8121 1.174 matt KASSERT(pv->pv_size > 0); 8122 1.174 matt if (pv->pv_size < amount) 8123 1.174 matt continue; 8124 1.174 matt 8125 1.174 matt for (off = 0; off <= mask; off += PAGE_SIZE) { 8126 1.174 matt if (((pv->pv_pa + off) & mask) == match 8127 1.174 matt && off + amount <= pv->pv_size) 8128 1.174 matt break; 8129 1.174 matt } 8130 1.174 matt if (off > mask) 8131 1.174 matt continue; 8132 1.174 matt 8133 1.174 matt rpv->pv_va = pv->pv_va + off; 8134 1.174 matt rpv->pv_pa = pv->pv_pa + off; 8135 1.174 matt rpv->pv_size = amount; 8136 1.174 matt pv->pv_size -= amount; 8137 1.174 matt if (pv->pv_size == 0) { 8138 1.174 matt KASSERT(off == 0); 8139 1.174 matt KASSERT((vaddr_t) pv == rpv->pv_va); 8140 1.174 matt *pvp = SLIST_NEXT(pv, pv_list); 8141 1.174 matt } else if (off == 0) { 8142 1.174 matt KASSERT((vaddr_t) pv == rpv->pv_va); 8143 1.174 matt newpv = (pv_addr_t *) (rpv->pv_va + amount); 8144 1.174 matt *newpv = *pv; 8145 1.174 matt newpv->pv_pa += amount; 8146 1.174 matt newpv->pv_va += amount; 8147 1.174 matt *pvp = newpv; 8148 1.174 matt } else if (off < pv->pv_size) { 8149 1.174 matt newpv = (pv_addr_t *) (rpv->pv_va + amount); 8150 1.174 matt *newpv = *pv; 8151 1.174 matt newpv->pv_size -= off; 8152 1.174 matt newpv->pv_pa += off + amount; 8153 1.174 matt newpv->pv_va += off + amount; 8154 1.174 matt 8155 1.174 matt SLIST_NEXT(pv, pv_list) = newpv; 8156 1.174 matt pv->pv_size = off; 8157 1.174 matt } else { 8158 1.174 matt KASSERT((vaddr_t) pv != rpv->pv_va); 8159 1.174 matt } 8160 1.174 matt memset((void *)rpv->pv_va, 0, amount); 8161 1.174 matt return; 8162 1.174 matt } 8163 1.174 matt 8164 1.376 skrll if (!uvm_physseg_valid_p(uvm_physseg_get_first())) 8165 1.174 matt panic("pmap_boot_pagealloc: couldn't allocate memory"); 8166 1.174 matt 8167 1.174 matt for (pvp = &SLIST_FIRST(&pmap_boot_freeq); 8168 1.174 matt (pv = *pvp) != NULL; 8169 1.174 matt pvp = &SLIST_NEXT(pv, pv_list)) { 8170 1.174 matt if (SLIST_NEXT(pv, pv_list) == NULL) 8171 1.174 matt break; 8172 1.174 matt } 8173 1.174 matt KASSERT(mask == 0); 8174 1.376 skrll 8175 1.376 skrll for (uvm_physseg_t ups = uvm_physseg_get_first(); 8176 1.376 skrll uvm_physseg_valid_p(ups); 8177 1.376 skrll ups = uvm_physseg_get_next(ups)) { 8178 1.376 skrll 8179 1.376 skrll paddr_t spn = uvm_physseg_get_start(ups); 8180 1.376 skrll paddr_t epn = uvm_physseg_get_end(ups); 8181 1.376 skrll if (spn == atop(pv->pv_pa + pv->pv_size) 8182 1.376 skrll && pv->pv_va + pv->pv_size <= ptoa(epn)) { 8183 1.174 matt rpv->pv_va = pv->pv_va; 8184 1.174 matt rpv->pv_pa = pv->pv_pa; 8185 1.174 matt rpv->pv_size = amount; 8186 1.174 matt *pvp = NULL; 8187 1.174 matt pmap_map_chunk(kernel_l1pt.pv_va, 8188 1.376 skrll ptoa(spn) + (pv->pv_va - pv->pv_pa), 8189 1.376 skrll ptoa(spn), 8190 1.174 matt amount - pv->pv_size, 8191 1.174 matt VM_PROT_READ|VM_PROT_WRITE, 8192 1.174 matt PTE_CACHE); 8193 1.376 skrll 8194 1.376 skrll uvm_physseg_unplug(spn, atop(amount - pv->pv_size)); 8195 1.174 matt memset((void *)rpv->pv_va, 0, rpv->pv_size); 8196 1.174 matt return; 8197 1.174 matt } 8198 1.286 skrll } 8199 1.174 matt 8200 1.174 matt panic("pmap_boot_pagealloc: couldn't allocate memory"); 8201 1.174 matt } 8202 1.174 matt 8203 1.174 matt vaddr_t 8204 1.174 matt pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp) 8205 1.174 matt { 8206 1.174 matt pv_addr_t pv; 8207 1.174 matt 8208 1.174 matt pmap_boot_pagealloc(size, 0, 0, &pv); 8209 1.174 matt 8210 1.174 matt return pv.pv_va; 8211 1.174 matt } 8212 1.174 matt #endif /* 
PMAP_STEAL_MEMORY */ 8213 1.186 matt 8214 1.186 matt SYSCTL_SETUP(sysctl_machdep_pmap_setup, "sysctl machdep.kmpages setup") 8215 1.186 matt { 8216 1.186 matt sysctl_createv(clog, 0, NULL, NULL, 8217 1.186 matt CTLFLAG_PERMANENT, 8218 1.186 matt CTLTYPE_NODE, "machdep", NULL, 8219 1.186 matt NULL, 0, NULL, 0, 8220 1.186 matt CTL_MACHDEP, CTL_EOL); 8221 1.186 matt 8222 1.186 matt sysctl_createv(clog, 0, NULL, NULL, 8223 1.186 matt CTLFLAG_PERMANENT, 8224 1.186 matt CTLTYPE_INT, "kmpages", 8225 1.186 matt SYSCTL_DESCR("count of pages allocated to kernel memory allocators"), 8226 1.186 matt NULL, 0, &pmap_kmpages, 0, 8227 1.186 matt CTL_MACHDEP, CTL_CREATE, CTL_EOL); 8228 1.186 matt } 8229 1.241 matt 8230 1.241 matt #ifdef PMAP_NEED_ALLOC_POOLPAGE 8231 1.241 matt struct vm_page * 8232 1.241 matt arm_pmap_alloc_poolpage(int flags) 8233 1.241 matt { 8234 1.241 matt /* 8235 1.241 matt * On some systems, only some pages may be "coherent" for dma and we 8236 1.248 matt * want to prefer those for pool pages (think mbufs) but fallback to 8237 1.360 skrll * any page if none is available. 8238 1.241 matt */ 8239 1.248 matt if (arm_poolpage_vmfreelist != VM_FREELIST_DEFAULT) { 8240 1.241 matt return uvm_pagealloc_strat(NULL, 0, NULL, flags, 8241 1.361 skrll UVM_PGA_STRAT_FALLBACK, arm_poolpage_vmfreelist); 8242 1.248 matt } 8243 1.241 matt 8244 1.241 matt return uvm_pagealloc(NULL, 0, NULL, flags); 8245 1.241 matt } 8246 1.241 matt #endif 8247 1.271 matt 8248 1.271 matt #if defined(ARM_MMU_EXTENDED) && defined(MULTIPROCESSOR) 8249 1.271 matt void 8250 1.271 matt pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci) 8251 1.271 matt { 8252 1.271 matt /* nothing */ 8253 1.271 matt } 8254 1.271 matt 8255 1.271 matt int 8256 1.271 matt pic_ipi_shootdown(void *arg) 8257 1.271 matt { 8258 1.334 skrll #if PMAP_TLB_NEED_SHOOTDOWN 8259 1.294 ozaki pmap_tlb_shootdown_process(); 8260 1.271 matt #endif 8261 1.271 matt return 1; 8262 1.271 matt } 8263 1.271 matt #endif /* ARM_MMU_EXTENDED && MULTIPROCESSOR */ 8264 1.284 matt 8265 1.284 matt 8266 1.284 matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 8267 1.284 matt vaddr_t 8268 1.284 matt pmap_direct_mapped_phys(paddr_t pa, bool *ok_p, vaddr_t va) 8269 1.284 matt { 8270 1.284 matt bool ok = false; 8271 1.284 matt if (physical_start <= pa && pa < physical_end) { 8272 1.324 matt #ifdef KERNEL_BASE_VOFFSET 8273 1.324 matt const vaddr_t newva = pa + KERNEL_BASE_VOFFSET; 8274 1.324 matt #else 8275 1.324 matt const vaddr_t newva = KERNEL_BASE + pa - physical_start; 8276 1.324 matt #endif 8277 1.284 matt #ifdef ARM_MMU_EXTENDED 8278 1.323 matt if (newva >= KERNEL_BASE && newva < pmap_directlimit) { 8279 1.324 matt #endif 8280 1.284 matt va = newva; 8281 1.284 matt ok = true; 8282 1.324 matt #ifdef ARM_MMU_EXTENDED 8283 1.284 matt } 8284 1.284 matt #endif 8285 1.284 matt } 8286 1.284 matt KASSERT(ok_p); 8287 1.284 matt *ok_p = ok; 8288 1.284 matt return va; 8289 1.284 matt } 8290 1.284 matt 8291 1.284 matt vaddr_t 8292 1.284 matt pmap_map_poolpage(paddr_t pa) 8293 1.284 matt { 8294 1.284 matt bool ok __diagused; 8295 1.284 matt vaddr_t va = pmap_direct_mapped_phys(pa, &ok, 0); 8296 1.326 matt KASSERTMSG(ok, "pa %#lx not direct mappable", pa); 8297 1.284 matt #if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 8298 1.284 matt if (arm_cache_prefer_mask != 0) { 8299 1.284 matt struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); 8300 1.285 skrll struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 8301 1.284 matt pmap_acquire_page_lock(md); 8302 1.284 matt 
pmap_vac_me_harder(md, pa, pmap_kernel(), va); 8303 1.284 matt pmap_release_page_lock(md); 8304 1.284 matt } 8305 1.284 matt #endif 8306 1.284 matt return va; 8307 1.284 matt } 8308 1.284 matt 8309 1.284 matt paddr_t 8310 1.284 matt pmap_unmap_poolpage(vaddr_t va) 8311 1.284 matt { 8312 1.284 matt KASSERT(va >= KERNEL_BASE); 8313 1.284 matt #ifdef PMAP_CACHE_VIVT 8314 1.284 matt cpu_idcache_wbinv_range(va, PAGE_SIZE); 8315 1.284 matt #endif 8316 1.324 matt #if defined(KERNEL_BASE_VOFFSET) 8317 1.324 matt return va - KERNEL_BASE_VOFFSET; 8318 1.324 matt #else 8319 1.284 matt return va - KERNEL_BASE + physical_start; 8320 1.284 matt #endif 8321 1.284 matt } 8322 1.284 matt #endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */ 8323
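
/*
 * A minimal sketch of the direct-map arithmetic that pmap_map_poolpage()
 * and pmap_unmap_poolpage() above rely on, assuming the common case
 * without KERNEL_BASE_VOFFSET:
 *
 *	va = KERNEL_BASE + (pa - physical_start);
 *	pa = physical_start + (va - KERNEL_BASE);
 *
 * The two routines are inverses of one another provided the page lies in
 * [physical_start, physical_end) and, on ARM_MMU_EXTENDED kernels, the
 * resulting VA stays below pmap_directlimit.
 */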